body_hash (string, len 64) | body (string, len 23-109k) | docstring (string, len 1-57k) | path (string, len 4-198) | name (string, len 1-115) | repository_name (string, len 7-111) | repository_stars (float64, 0-191k) | lang (string, 1 class) | body_without_docstring (string, len 14-108k) | unified (string, len 45-133k)
---|---|---|---|---|---|---|---|---|---|
1d79c3d9585a357bc448ecf3332ce0817cab0d3ddbdaacea654a57c30ae6a530 | @service_now.setter
def service_now(self, service_now):
'Sets the service_now of this ApiAlertProfile.\n\n\n :param service_now: The service_now of this ApiAlertProfile. # noqa: E501\n :type service_now: ApiAlertProfileServiceNowSettings\n '
self._service_now = service_now | Sets the service_now of this ApiAlertProfile.
:param service_now: The service_now of this ApiAlertProfile. # noqa: E501
:type service_now: ApiAlertProfileServiceNowSettings | openapi_client/models/api_alert_profile.py | service_now | hi-artem/twistlock-py | 0 | python | @service_now.setter
def service_now(self, service_now):
'Sets the service_now of this ApiAlertProfile.\n\n\n :param service_now: The service_now of this ApiAlertProfile. # noqa: E501\n :type service_now: ApiAlertProfileServiceNowSettings\n '
self._service_now = service_now | @service_now.setter
def service_now(self, service_now):
'Sets the service_now of this ApiAlertProfile.\n\n\n :param service_now: The service_now of this ApiAlertProfile. # noqa: E501\n :type service_now: ApiAlertProfileServiceNowSettings\n '
self._service_now = service_now<|docstring|>Sets the service_now of this ApiAlertProfile.
:param service_now: The service_now of this ApiAlertProfile. # noqa: E501
:type service_now: ApiAlertProfileServiceNowSettings<|endoftext|> |
ec19509e6579bb3896021ff5b14d8cdf8b313f056716ff98fa3b292dd056df07 | @property
def slack(self):
'Gets the slack of this ApiAlertProfile. # noqa: E501\n\n\n :return: The slack of this ApiAlertProfile. # noqa: E501\n :rtype: ApiAlertProfileSlackSettings\n '
return self._slack | Gets the slack of this ApiAlertProfile. # noqa: E501
:return: The slack of this ApiAlertProfile. # noqa: E501
:rtype: ApiAlertProfileSlackSettings | openapi_client/models/api_alert_profile.py | slack | hi-artem/twistlock-py | 0 | python | @property
def slack(self):
'Gets the slack of this ApiAlertProfile. # noqa: E501\n\n\n :return: The slack of this ApiAlertProfile. # noqa: E501\n :rtype: ApiAlertProfileSlackSettings\n '
return self._slack | @property
def slack(self):
'Gets the slack of this ApiAlertProfile. # noqa: E501\n\n\n :return: The slack of this ApiAlertProfile. # noqa: E501\n :rtype: ApiAlertProfileSlackSettings\n '
return self._slack<|docstring|>Gets the slack of this ApiAlertProfile. # noqa: E501
:return: The slack of this ApiAlertProfile. # noqa: E501
:rtype: ApiAlertProfileSlackSettings<|endoftext|> |
cb9dcec8642c3d151bfe629db06246996e96bfb8bb1621b1c0f747846a533a5b | @slack.setter
def slack(self, slack):
'Sets the slack of this ApiAlertProfile.\n\n\n :param slack: The slack of this ApiAlertProfile. # noqa: E501\n :type slack: ApiAlertProfileSlackSettings\n '
self._slack = slack | Sets the slack of this ApiAlertProfile.
:param slack: The slack of this ApiAlertProfile. # noqa: E501
:type slack: ApiAlertProfileSlackSettings | openapi_client/models/api_alert_profile.py | slack | hi-artem/twistlock-py | 0 | python | @slack.setter
def slack(self, slack):
'Sets the slack of this ApiAlertProfile.\n\n\n :param slack: The slack of this ApiAlertProfile. # noqa: E501\n :type slack: ApiAlertProfileSlackSettings\n '
self._slack = slack | @slack.setter
def slack(self, slack):
'Sets the slack of this ApiAlertProfile.\n\n\n :param slack: The slack of this ApiAlertProfile. # noqa: E501\n :type slack: ApiAlertProfileSlackSettings\n '
self._slack = slack<|docstring|>Sets the slack of this ApiAlertProfile.
:param slack: The slack of this ApiAlertProfile. # noqa: E501
:type slack: ApiAlertProfileSlackSettings<|endoftext|> |
80d71c6b7decefdbb68d84a8b24b9dc77779d154cb015563f48f1045903f1e55 | @property
def webhook(self):
'Gets the webhook of this ApiAlertProfile. # noqa: E501\n\n\n :return: The webhook of this ApiAlertProfile. # noqa: E501\n :rtype: ApiAlertProfileWebhookSettings\n '
return self._webhook | Gets the webhook of this ApiAlertProfile. # noqa: E501
:return: The webhook of this ApiAlertProfile. # noqa: E501
:rtype: ApiAlertProfileWebhookSettings | openapi_client/models/api_alert_profile.py | webhook | hi-artem/twistlock-py | 0 | python | @property
def webhook(self):
'Gets the webhook of this ApiAlertProfile. # noqa: E501\n\n\n :return: The webhook of this ApiAlertProfile. # noqa: E501\n :rtype: ApiAlertProfileWebhookSettings\n '
return self._webhook | @property
def webhook(self):
'Gets the webhook of this ApiAlertProfile. # noqa: E501\n\n\n :return: The webhook of this ApiAlertProfile. # noqa: E501\n :rtype: ApiAlertProfileWebhookSettings\n '
return self._webhook<|docstring|>Gets the webhook of this ApiAlertProfile. # noqa: E501
:return: The webhook of this ApiAlertProfile. # noqa: E501
:rtype: ApiAlertProfileWebhookSettings<|endoftext|> |
67aaa21b8af4f876c85ff973002fbc6331c64917c61eec8e80dd235099cd0da3 | @webhook.setter
def webhook(self, webhook):
'Sets the webhook of this ApiAlertProfile.\n\n\n :param webhook: The webhook of this ApiAlertProfile. # noqa: E501\n :type webhook: ApiAlertProfileWebhookSettings\n '
self._webhook = webhook | Sets the webhook of this ApiAlertProfile.
:param webhook: The webhook of this ApiAlertProfile. # noqa: E501
:type webhook: ApiAlertProfileWebhookSettings | openapi_client/models/api_alert_profile.py | webhook | hi-artem/twistlock-py | 0 | python | @webhook.setter
def webhook(self, webhook):
'Sets the webhook of this ApiAlertProfile.\n\n\n :param webhook: The webhook of this ApiAlertProfile. # noqa: E501\n :type webhook: ApiAlertProfileWebhookSettings\n '
self._webhook = webhook | @webhook.setter
def webhook(self, webhook):
'Sets the webhook of this ApiAlertProfile.\n\n\n :param webhook: The webhook of this ApiAlertProfile. # noqa: E501\n :type webhook: ApiAlertProfileWebhookSettings\n '
self._webhook = webhook<|docstring|>Sets the webhook of this ApiAlertProfile.
:param webhook: The webhook of this ApiAlertProfile. # noqa: E501
:type webhook: ApiAlertProfileWebhookSettings<|endoftext|> |
e4728736e14d22851bff0d6eb0260eeb4fce3796046374609f4bb76e6915c33e | @property
def xsoar(self):
'Gets the xsoar of this ApiAlertProfile. # noqa: E501\n\n\n :return: The xsoar of this ApiAlertProfile. # noqa: E501\n :rtype: ApiAlertProfileXSOARSettings\n '
return self._xsoar | Gets the xsoar of this ApiAlertProfile. # noqa: E501
:return: The xsoar of this ApiAlertProfile. # noqa: E501
:rtype: ApiAlertProfileXSOARSettings | openapi_client/models/api_alert_profile.py | xsoar | hi-artem/twistlock-py | 0 | python | @property
def xsoar(self):
'Gets the xsoar of this ApiAlertProfile. # noqa: E501\n\n\n :return: The xsoar of this ApiAlertProfile. # noqa: E501\n :rtype: ApiAlertProfileXSOARSettings\n '
return self._xsoar | @property
def xsoar(self):
'Gets the xsoar of this ApiAlertProfile. # noqa: E501\n\n\n :return: The xsoar of this ApiAlertProfile. # noqa: E501\n :rtype: ApiAlertProfileXSOARSettings\n '
return self._xsoar<|docstring|>Gets the xsoar of this ApiAlertProfile. # noqa: E501
:return: The xsoar of this ApiAlertProfile. # noqa: E501
:rtype: ApiAlertProfileXSOARSettings<|endoftext|> |
42a004c60853a7254f9159086e3808760045ff2579677e1cdaef0c621d9cb0af | @xsoar.setter
def xsoar(self, xsoar):
'Sets the xsoar of this ApiAlertProfile.\n\n\n :param xsoar: The xsoar of this ApiAlertProfile. # noqa: E501\n :type xsoar: ApiAlertProfileXSOARSettings\n '
self._xsoar = xsoar | Sets the xsoar of this ApiAlertProfile.
:param xsoar: The xsoar of this ApiAlertProfile. # noqa: E501
:type xsoar: ApiAlertProfileXSOARSettings | openapi_client/models/api_alert_profile.py | xsoar | hi-artem/twistlock-py | 0 | python | @xsoar.setter
def xsoar(self, xsoar):
'Sets the xsoar of this ApiAlertProfile.\n\n\n :param xsoar: The xsoar of this ApiAlertProfile. # noqa: E501\n :type xsoar: ApiAlertProfileXSOARSettings\n '
self._xsoar = xsoar | @xsoar.setter
def xsoar(self, xsoar):
'Sets the xsoar of this ApiAlertProfile.\n\n\n :param xsoar: The xsoar of this ApiAlertProfile. # noqa: E501\n :type xsoar: ApiAlertProfileXSOARSettings\n '
self._xsoar = xsoar<|docstring|>Sets the xsoar of this ApiAlertProfile.
:param xsoar: The xsoar of this ApiAlertProfile. # noqa: E501
:type xsoar: ApiAlertProfileXSOARSettings<|endoftext|> |
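The seven rows above instantiate one generator template: a private backing attribute exposed through a `@property` getter and a matching setter. A minimal self-contained sketch of that pattern (the class and field names here are illustrative, not taken from the dataset):

```python
class AlertProfileSketch:
    """Stand-in for an OpenAPI-generated model with one settings field."""

    def __init__(self, slack=None):
        self._slack = None   # private backing attribute
        self.slack = slack   # route initialization through the setter

    @property
    def slack(self):
        """Gets the slack settings of this profile."""
        return self._slack

    @slack.setter
    def slack(self, slack):
        """Sets the slack settings of this profile."""
        self._slack = slack


profile = AlertProfileSketch(slack={"webhook_url": "https://example.test/hook"})
print(profile.slack)  # {'webhook_url': 'https://example.test/hook'}
```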
eac645eb3fed416aa3cbf5116a0cf9e44cd06b29a875d0af1efcfac28b9a6f29 | def to_dict(self, serialize=False):
'Returns the model properties as a dict'
result = {}
def convert(x):
if hasattr(x, 'to_dict'):
args = getfullargspec(x.to_dict).args
if (len(args) == 1):
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = (self.attribute_map.get(attr, attr) if serialize else attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: convert(x)), value))
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: (item[0], convert(item[1]))), value.items()))
else:
result[attr] = convert(value)
return result | Returns the model properties as a dict | openapi_client/models/api_alert_profile.py | to_dict | hi-artem/twistlock-py | 0 | python | def to_dict(self, serialize=False):
result = {}
def convert(x):
if hasattr(x, 'to_dict'):
args = getfullargspec(x.to_dict).args
if (len(args) == 1):
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = (self.attribute_map.get(attr, attr) if serialize else attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: convert(x)), value))
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: (item[0], convert(item[1]))), value.items()))
else:
result[attr] = convert(value)
return result | def to_dict(self, serialize=False):
result = {}
def convert(x):
if hasattr(x, 'to_dict'):
args = getfullargspec(x.to_dict).args
if (len(args) == 1):
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = (self.attribute_map.get(attr, attr) if serialize else attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: convert(x)), value))
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: (item[0], convert(item[1]))), value.items()))
else:
result[attr] = convert(value)
return result<|docstring|>Returns the model properties as a dict<|endoftext|> |
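The notable detail in `to_dict` is the arity check on nested models: `getfullargspec(x.to_dict).args` includes `self`, so a length of one means an old-style `to_dict()` while two means the newer signature that accepts `serialize`. A stripped-down, runnable sketch of just that dispatch:

```python
from inspect import getfullargspec

class OldStyle:
    def to_dict(self):                    # args == ['self'], length 1
        return {"kind": "old"}

class NewStyle:
    def to_dict(self, serialize=False):   # args length 2: pass serialize on
        return {"kind": "new", "serialize": serialize}

def convert(x, serialize=True):
    """Mirror of the inner helper inside the generated to_dict."""
    if hasattr(x, "to_dict"):
        args = getfullargspec(x.to_dict).args
        return x.to_dict() if len(args) == 1 else x.to_dict(serialize)
    return x

print(convert(OldStyle()))  # {'kind': 'old'}
print(convert(NewStyle()))  # {'kind': 'new', 'serialize': True}
print(convert(42))          # 42: non-models pass through unchanged
```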
cbb19eaa2fc8a113d9e32f924ef280a7e97563f8915f94f65dab438997af2e99 | def to_str(self):
'Returns the string representation of the model'
return pprint.pformat(self.to_dict()) | Returns the string representation of the model | openapi_client/models/api_alert_profile.py | to_str | hi-artem/twistlock-py | 0 | python | def to_str(self):
return pprint.pformat(self.to_dict()) | def to_str(self):
return pprint.pformat(self.to_dict())<|docstring|>Returns the string representation of the model<|endoftext|> |
772243a2c2b3261a9b954d07aaf295e3c1242a579a495e2d6a5679c677861703 | def __repr__(self):
'For `print` and `pprint`'
return self.to_str() | For `print` and `pprint` | openapi_client/models/api_alert_profile.py | __repr__ | hi-artem/twistlock-py | 0 | python | def __repr__(self):
return self.to_str() | def __repr__(self):
return self.to_str()<|docstring|>For `print` and `pprint`<|endoftext|> |
cf8148ca74fd7e54c73b85169d9fca9a5d366a71f72e232c8aaaf427255cf095 | def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, ApiAlertProfile)):
return False
return (self.to_dict() == other.to_dict()) | Returns true if both objects are equal | openapi_client/models/api_alert_profile.py | __eq__ | hi-artem/twistlock-py | 0 | python | def __eq__(self, other):
if (not isinstance(other, ApiAlertProfile)):
return False
return (self.to_dict() == other.to_dict()) | def __eq__(self, other):
if (not isinstance(other, ApiAlertProfile)):
return False
return (self.to_dict() == other.to_dict())<|docstring|>Returns true if both objects are equal<|endoftext|> |
625f9ecd20ad7ef84b59e24e1b2c5dcfc06982882b22f69b78a14ea3984eb4dc | def __ne__(self, other):
'Returns true if both objects are not equal'
if (not isinstance(other, ApiAlertProfile)):
return True
return (self.to_dict() != other.to_dict()) | Returns true if both objects are not equal | openapi_client/models/api_alert_profile.py | __ne__ | hi-artem/twistlock-py | 0 | python | def __ne__(self, other):
if (not isinstance(other, ApiAlertProfile)):
return True
return (self.to_dict() != other.to_dict()) | def __ne__(self, other):
if (not isinstance(other, ApiAlertProfile)):
return True
return (self.to_dict() != other.to_dict())<|docstring|>Returns true if both objects are not equal<|endoftext|> |
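`__eq__` and `__ne__` both delegate to `to_dict()`, so two model instances compare by serialized value rather than identity, and comparison against a foreign type short-circuits. The same contract in miniature:

```python
class Pair:
    """Toy model with the generated equality contract."""

    def __init__(self, a, b):
        self.a, self.b = a, b

    def to_dict(self):
        return {"a": self.a, "b": self.b}

    def __eq__(self, other):
        if not isinstance(other, Pair):
            return False
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        if not isinstance(other, Pair):
            return True
        return self.to_dict() != other.to_dict()

print(Pair(1, 2) == Pair(1, 2))    # True: value equality via to_dict
print(Pair(1, 2) != Pair(1, 3))    # True
print(Pair(1, 2) == "not a Pair")  # False: the type check short-circuits
```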
ddf004243f7720cce5cd83f1472bdb62e7df998ff93e839d64be8406795add2b | def getLabeledData(self, data, label):
'For example: 1 2 3 12345 normal; 1 3 5 12346 tip; 1 4 8 12347 scallop'
print('Adding labels to data:', label)
arr = copy.deepcopy(data)
for ele in arr:
ele.append(label)
return arr | For example: 1 2 3 12345 normal; 1 3 5 12346 tip; 1 4 8 12347 scallop | script/feature/SimpleFeatureExtractor.py | getLabeledData | inverthermit/sensor_data_ML | 2 | python | def getLabeledData(self, data, label):
print('Adding labels to data:', label)
arr = copy.deepcopy(data)
for ele in arr:
ele.append(label)
return arr | def getLabeledData(self, data, label):
print('Adding labels to data:', label)
arr = copy.deepcopy(data)
for ele in arr:
ele.append(label)
return arr<|docstring|>For example: 1 2 3 12345 normal; 1 3 5 12346 tip; 1 4 8 12347 scallop<|endoftext|> |
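Because `getLabeledData` runs `copy.deepcopy` before appending, the label lands on fresh inner lists and the caller's rows stay untouched. A quick standalone check of that behavior (the snake_case name is just for the sketch):

```python
import copy

def get_labeled_data(data, label):
    """Append `label` to a deep copy of every row, as the extractor does."""
    arr = copy.deepcopy(data)
    for ele in arr:
        ele.append(label)
    return arr

rows = [[1, 2, 3, 12345], [1, 3, 5, 12346]]
labeled = get_labeled_data(rows, "normal")
print(labeled)  # [[1, 2, 3, 12345, 'normal'], [1, 3, 5, 12346, 'normal']]
print(rows)     # unchanged: [[1, 2, 3, 12345], [1, 3, 5, 12346]]
```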
7efe44f3058c848097b5bbecb5cb8dd8dcba8e088cc0533d72393cfaefc70dbb | def add_vec(self, vectors, ids=None, replace=False):
'Loading multiple times seems to have issues'
if (ids is None):
ids = range(len(vectors))
self.model.add_vectors(ids, vectors, replace=replace)
return self | Loading multiple times seems to have issues | meutils/annzoo/ann_gensim.py | add_vec | Jie-Yuan/MeUtils | 3 | python | def add_vec(self, vectors, ids=None, replace=False):
if (ids is None):
ids = range(len(vectors))
self.model.add_vectors(ids, vectors, replace=replace)
return self | def add_vec(self, vectors, ids=None, replace=False):
if (ids is None):
ids = range(len(vectors))
self.model.add_vectors(ids, vectors, replace=replace)
return self<|docstring|>Loading multiple times seems to have issues<|endoftext|> |
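`add_vec` is a thin wrapper over gensim's `KeyedVectors.add_vectors(keys, weights, replace=...)`, defaulting the ids to `range(len(vectors))`; the docstring (translated from Chinese) cautions that loading vectors more than once may misbehave. A hedged usage sketch, assuming gensim 4.x and made-up vector values:

```python
import numpy as np
from gensim.models import KeyedVectors

kv = KeyedVectors(vector_size=3)
vectors = np.array([[0.1, 0.2, 0.3],
                    [0.4, 0.5, 0.6]], dtype=np.float32)

# The same call the wrapper issues; ids default to range(len(vectors)).
kv.add_vectors(list(range(len(vectors))), vectors, replace=False)

print(kv[0])                       # array([0.1, 0.2, 0.3], dtype=float32)
print(kv.most_similar(0, topn=1))  # nearest neighbour among the added keys
```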
5027c38d11f29a5647657c5d009b18913c6a2aabc9a3bc59e20552856e882763 | def conv_interface(f_n, f_c, f_y, f_x, f_k, f_r, f_s, l_n, l_c, l_y, l_x, l_k, l_r, l_s, c_n, c_c, c_y, c_x, c_k, c_r, c_s, d_n, d_c, d_y, d_x, d_k, d_r, d_s, dtype):
'\n l_n, l_c, l_y, l_x, l_k, l_r, l_s: last iteration size\n c_n, c_c, c_y, c_x, c_k, c_r, c_s: last iteration condition\n '
(_, tensors) = conv_intrinsic(f_n, f_c, f_y, f_x, f_k, f_r, f_s, dtype)
(tA, tB, tC) = tensors
strideA1 = tvm.var('strideA1')
strideA2 = tvm.var('strideA2')
strideA3 = tvm.var('strideA3')
sA = tvm.decl_buffer(tA.shape, tA.dtype, name='sA', offset_factor=1, strides=[strideA1, strideA2, strideA3, 1])
strideB1 = tvm.var('strideB1')
strideB2 = tvm.var('strideB2')
strideB3 = tvm.var('strideB3')
sB = tvm.decl_buffer(tB.shape, tB.dtype, name='sB', offset_factor=1, strides=[strideB1, strideB2, strideB3, 1])
strideC1 = tvm.var('strideC1')
strideC2 = tvm.var('strideC2')
strideC3 = tvm.var('strideC3')
sC = tvm.decl_buffer(tC.shape, tC.dtype, name='sC', offset_factor=1, strides=[strideC1, strideC2, strideC3, 1])
iter_n = ((f_n // d_n) + (0 if ((f_n % d_n) == 0) else 1))
iter_c = ((f_c // d_c) + (0 if ((f_c % d_c) == 0) else 1))
iter_y = ((f_y // d_y) + (0 if ((f_y % d_y) == 0) else 1))
iter_x = ((f_x // d_x) + (0 if ((f_x % d_x) == 0) else 1))
iter_k = ((f_k // d_k) + (0 if ((f_k % d_k) == 0) else 1))
iter_r = ((f_r // d_r) + (0 if ((f_r % d_r) == 0) else 1))
iter_s = ((f_s // d_s) + (0 if ((f_s % d_s) == 0) else 1))
pad_n = (0 if ((f_n % d_n) == 0) else (d_n - (f_n % d_n)))
pad_c = (0 if ((f_c % d_c) == 0) else (d_c - (f_c % d_c)))
pad_y = (0 if ((f_y % d_y) == 0) else (d_y - (f_y % d_y)))
pad_x = (0 if ((f_x % d_x) == 0) else (d_x - (f_x % d_x)))
pad_k = (0 if ((f_k % d_k) == 0) else (d_k - (f_k % d_k)))
pad_r = (0 if ((f_r % d_r) == 0) else (d_r - (f_r % d_r)))
pad_s = (0 if ((f_s % d_s) == 0) else (d_s - (f_s % d_s)))
last_iter_n = ((l_n // d_n) + (0 if ((l_n % d_n) == 0) else 1))
last_iter_c = ((l_c // d_c) + (0 if ((l_c % d_c) == 0) else 1))
last_iter_y = ((l_y // d_y) + (0 if ((l_y % d_y) == 0) else 1))
last_iter_x = ((l_x // d_x) + (0 if ((l_x % d_x) == 0) else 1))
last_iter_k = ((l_k // d_k) + (0 if ((l_k % d_k) == 0) else 1))
last_iter_r = ((l_r // d_r) + (0 if ((l_r % d_r) == 0) else 1))
last_iter_s = ((l_s // d_s) + (0 if ((l_s % d_s) == 0) else 1))
last_pad_n = (0 if ((l_n % d_n) == 0) else (d_n - (l_n % d_n)))
last_pad_c = (0 if ((l_c % d_c) == 0) else (d_c - (l_c % d_c)))
last_pad_y = (0 if ((l_y % d_y) == 0) else (d_y - (l_y % d_y)))
last_pad_x = (0 if ((l_x % d_x) == 0) else (d_x - (l_x % d_x)))
last_pad_k = (0 if ((l_k % d_k) == 0) else (d_k - (l_k % d_k)))
last_pad_r = (0 if ((l_r % d_r) == 0) else (d_r - (l_r % d_r)))
last_pad_s = (0 if ((l_s % d_s) == 0) else (d_s - (l_s % d_s)))
iter_n = tvm.if_then_else(c_n, last_iter_n, iter_n)
iter_c = tvm.if_then_else(c_c, last_iter_c, iter_c)
iter_y = tvm.if_then_else(c_y, last_iter_y, iter_y)
iter_x = tvm.if_then_else(c_x, last_iter_x, iter_x)
iter_k = tvm.if_then_else(c_k, last_iter_k, iter_k)
iter_r = tvm.if_then_else(c_r, last_iter_r, iter_r)
iter_s = tvm.if_then_else(c_s, last_iter_s, iter_s)
pad_n = tvm.if_then_else(c_n, last_pad_n, pad_n)
pad_c = tvm.if_then_else(c_c, last_pad_c, pad_c)
pad_y = tvm.if_then_else(c_y, last_pad_y, pad_y)
pad_x = tvm.if_then_else(c_x, last_pad_x, pad_x)
pad_k = tvm.if_then_else(c_k, last_pad_k, pad_k)
pad_r = tvm.if_then_else(c_r, last_pad_r, pad_r)
pad_s = tvm.if_then_else(c_s, last_pad_s, pad_s)
def interface_func(ins, outs):
(sa, sb) = ins
(sc,) = outs
def _body():
ib = tvm.ir_builder.create()
ib.emit(tvm.call_extern(dtype, 'tensorized_CONV', sa.access_ptr('r'), sb.access_ptr('r'), sc.access_ptr('rw'), 1, iter_n, iter_c, iter_y, iter_x, iter_k, iter_r, iter_s, pad_n, pad_c, pad_y, pad_x, pad_k, pad_r, pad_s, True, False))
return ib.get()
def _reset():
ib = tvm.ir_builder.create()
ib.emit(tvm.call_extern(dtype, 'init_output', sc.access_ptr('w'), iter_n, iter_y, iter_x, iter_k, pad_n, pad_y, pad_x, pad_k))
return ib.get()
def _finalize():
ib = tvm.ir_builder.create()
ib.emit(tvm.call_extern(dtype, 'store_output', sc.access_ptr('rw'), iter_n, iter_y, iter_x, iter_k, pad_n, pad_y, pad_x, pad_k))
return ib.get()
return (None, _reset(), _body(), _finalize())
with tvm.build_config(offset_factor=1):
return tvm.decl_tensor_intrin(tC.op, interface_func, binds={tA: sA, tB: sB, tC: sC}, name='conv_interface') | l_n, l_c, l_y, l_x, l_k, l_r, l_s: last iteration size
c_n, c_c, c_y, c_x, c_k, c_r, c_s: last iteration condition | src/hw_generator/generator_conv.py | conv_interface | hanlinxuy/HASCO | 20 | python | def conv_interface(f_n, f_c, f_y, f_x, f_k, f_r, f_s, l_n, l_c, l_y, l_x, l_k, l_r, l_s, c_n, c_c, c_y, c_x, c_k, c_r, c_s, d_n, d_c, d_y, d_x, d_k, d_r, d_s, dtype):
'\n l_n, l_c, l_y, l_x, l_k, l_r, l_s: last iteration size\n c_n, c_c, c_y, c_x, c_k, c_r, c_s: last iteration condition\n '
(_, tensors) = conv_intrinsic(f_n, f_c, f_y, f_x, f_k, f_r, f_s, dtype)
(tA, tB, tC) = tensors
strideA1 = tvm.var('strideA1')
strideA2 = tvm.var('strideA2')
strideA3 = tvm.var('strideA3')
sA = tvm.decl_buffer(tA.shape, tA.dtype, name='sA', offset_factor=1, strides=[strideA1, strideA2, strideA3, 1])
strideB1 = tvm.var('strideB1')
strideB2 = tvm.var('strideB2')
strideB3 = tvm.var('strideB3')
sB = tvm.decl_buffer(tB.shape, tB.dtype, name='sB', offset_factor=1, strides=[strideB1, strideB2, strideB3, 1])
strideC1 = tvm.var('strideC1')
strideC2 = tvm.var('strideC2')
strideC3 = tvm.var('strideC3')
sC = tvm.decl_buffer(tC.shape, tC.dtype, name='sC', offset_factor=1, strides=[strideC1, strideC2, strideC3, 1])
iter_n = ((f_n // d_n) + (0 if ((f_n % d_n) == 0) else 1))
iter_c = ((f_c // d_c) + (0 if ((f_c % d_c) == 0) else 1))
iter_y = ((f_y // d_y) + (0 if ((f_y % d_y) == 0) else 1))
iter_x = ((f_x // d_x) + (0 if ((f_x % d_x) == 0) else 1))
iter_k = ((f_k // d_k) + (0 if ((f_k % d_k) == 0) else 1))
iter_r = ((f_r // d_r) + (0 if ((f_r % d_r) == 0) else 1))
iter_s = ((f_s // d_s) + (0 if ((f_s % d_s) == 0) else 1))
pad_n = (0 if ((f_n % d_n) == 0) else (d_n - (f_n % d_n)))
pad_c = (0 if ((f_c % d_c) == 0) else (d_c - (f_c % d_c)))
pad_y = (0 if ((f_y % d_y) == 0) else (d_y - (f_y % d_y)))
pad_x = (0 if ((f_x % d_x) == 0) else (d_x - (f_x % d_x)))
pad_k = (0 if ((f_k % d_k) == 0) else (d_k - (f_k % d_k)))
pad_r = (0 if ((f_r % d_r) == 0) else (d_r - (f_r % d_r)))
pad_s = (0 if ((f_s % d_s) == 0) else (d_s - (f_s % d_s)))
last_iter_n = ((l_n // d_n) + (0 if ((l_n % d_n) == 0) else 1))
last_iter_c = ((l_c // d_c) + (0 if ((l_c % d_c) == 0) else 1))
last_iter_y = ((l_y // d_y) + (0 if ((l_y % d_y) == 0) else 1))
last_iter_x = ((l_x // d_x) + (0 if ((l_x % d_x) == 0) else 1))
last_iter_k = ((l_k // d_k) + (0 if ((l_k % d_k) == 0) else 1))
last_iter_r = ((l_r // d_r) + (0 if ((l_r % d_r) == 0) else 1))
last_iter_s = ((l_s // d_s) + (0 if ((l_s % d_s) == 0) else 1))
last_pad_n = (0 if ((l_n % d_n) == 0) else (d_n - (l_n % d_n)))
last_pad_c = (0 if ((l_c % d_c) == 0) else (d_c - (l_c % d_c)))
last_pad_y = (0 if ((l_y % d_y) == 0) else (d_y - (l_y % d_y)))
last_pad_x = (0 if ((l_x % d_x) == 0) else (d_x - (l_x % d_x)))
last_pad_k = (0 if ((l_k % d_k) == 0) else (d_k - (l_k % d_k)))
last_pad_r = (0 if ((l_r % d_r) == 0) else (d_r - (l_r % d_r)))
last_pad_s = (0 if ((l_s % d_s) == 0) else (d_s - (l_s % d_s)))
iter_n = tvm.if_then_else(c_n, last_iter_n, iter_n)
iter_c = tvm.if_then_else(c_c, last_iter_c, iter_c)
iter_y = tvm.if_then_else(c_y, last_iter_y, iter_y)
iter_x = tvm.if_then_else(c_x, last_iter_x, iter_x)
iter_k = tvm.if_then_else(c_k, last_iter_k, iter_k)
iter_r = tvm.if_then_else(c_r, last_iter_r, iter_r)
iter_s = tvm.if_then_else(c_s, last_iter_s, iter_s)
pad_n = tvm.if_then_else(c_n, last_pad_n, pad_n)
pad_c = tvm.if_then_else(c_c, last_pad_c, pad_c)
pad_y = tvm.if_then_else(c_y, last_pad_y, pad_y)
pad_x = tvm.if_then_else(c_x, last_pad_x, pad_x)
pad_k = tvm.if_then_else(c_k, last_pad_k, pad_k)
pad_r = tvm.if_then_else(c_r, last_pad_r, pad_r)
pad_s = tvm.if_then_else(c_s, last_pad_s, pad_s)
def interface_func(ins, outs):
(sa, sb) = ins
(sc,) = outs
def _body():
ib = tvm.ir_builder.create()
ib.emit(tvm.call_extern(dtype, 'tensorized_CONV', sa.access_ptr('r'), sb.access_ptr('r'), sc.access_ptr('rw'), 1, iter_n, iter_c, iter_y, iter_x, iter_k, iter_r, iter_s, pad_n, pad_c, pad_y, pad_x, pad_k, pad_r, pad_s, True, False))
return ib.get()
def _reset():
ib = tvm.ir_builder.create()
ib.emit(tvm.call_extern(dtype, 'init_output', sc.access_ptr('w'), iter_n, iter_y, iter_x, iter_k, pad_n, pad_y, pad_x, pad_k))
return ib.get()
def _finalize():
ib = tvm.ir_builder.create()
ib.emit(tvm.call_extern(dtype, 'store_output', sc.access_ptr('rw'), iter_n, iter_y, iter_x, iter_k, pad_n, pad_y, pad_x, pad_k))
return ib.get()
return (None, _reset(), _body(), _finalize())
with tvm.build_config(offset_factor=1):
return tvm.decl_tensor_intrin(tC.op, interface_func, binds={tA: sA, tB: sB, tC: sC}, name='conv_interface') | def conv_interface(f_n, f_c, f_y, f_x, f_k, f_r, f_s, l_n, l_c, l_y, l_x, l_k, l_r, l_s, c_n, c_c, c_y, c_x, c_k, c_r, c_s, d_n, d_c, d_y, d_x, d_k, d_r, d_s, dtype):
'\n l_n, l_c, l_y, l_x, l_k, l_r, l_s: last iteration size\n c_n, c_c, c_y, c_x, c_k, c_r, c_s: last iteration conditif_son\n '
(_, tensors) = conv_intrinsic(f_n, f_c, f_y, f_x, f_k, f_r, f_s, dtype)
(tA, tB, tC) = tensors
strideA1 = tvm.var('strideA1')
strideA2 = tvm.var('strideA2')
strideA3 = tvm.var('strideA3')
sA = tvm.decl_buffer(tA.shape, tA.dtype, name='sA', offset_factor=1, strides=[strideA1, strideA2, strideA3, 1])
strideB1 = tvm.var('strideB1')
strideB2 = tvm.var('strideB2')
strideB3 = tvm.var('strideB3')
sB = tvm.decl_buffer(tB.shape, tB.dtype, name='sB', offset_factor=1, strides=[strideB1, strideB2, strideB3, 1])
strideC1 = tvm.var('strideC1')
strideC2 = tvm.var('strideC2')
strideC3 = tvm.var('strideC3')
sC = tvm.decl_buffer(tC.shape, tC.dtype, name='sC', offset_factor=1, strides=[strideC1, strideC2, strideC3, 1])
iter_n = ((f_n // d_n) + (0 if ((f_n % d_n) == 0) else 1))
iter_c = ((f_c // d_c) + (0 if ((f_c % d_c) == 0) else 1))
iter_y = ((f_y // d_y) + (0 if ((f_y % d_y) == 0) else 1))
iter_x = ((f_x // d_x) + (0 if ((f_x % d_x) == 0) else 1))
iter_k = ((f_k // d_k) + (0 if ((f_k % d_k) == 0) else 1))
iter_r = ((f_r // d_r) + (0 if ((f_r % d_r) == 0) else 1))
iter_s = ((f_s // d_s) + (0 if ((f_s % d_s) == 0) else 1))
pad_n = (0 if ((f_n % d_n) == 0) else (d_n - (f_n % d_n)))
pad_c = (0 if ((f_c % d_c) == 0) else (d_c - (f_c % d_c)))
pad_y = (0 if ((f_y % d_y) == 0) else (d_y - (f_y % d_y)))
pad_x = (0 if ((f_x % d_x) == 0) else (d_x - (f_x % d_x)))
pad_k = (0 if ((f_k % d_k) == 0) else (d_k - (f_k % d_k)))
pad_r = (0 if ((f_r % d_r) == 0) else (d_r - (f_r % d_r)))
pad_s = (0 if ((f_s % d_s) == 0) else (d_s - (f_s % d_s)))
last_iter_n = ((l_n // d_n) + (0 if ((l_n % d_n) == 0) else 1))
last_iter_c = ((l_c // d_c) + (0 if ((l_c % d_c) == 0) else 1))
last_iter_y = ((l_y // d_y) + (0 if ((l_y % d_y) == 0) else 1))
last_iter_x = ((l_x // d_x) + (0 if ((l_x % d_x) == 0) else 1))
last_iter_k = ((l_k // d_k) + (0 if ((l_k % d_k) == 0) else 1))
last_iter_r = ((l_r // d_r) + (0 if ((l_r % d_r) == 0) else 1))
last_iter_s = ((l_s // d_s) + (0 if ((l_s % d_s) == 0) else 1))
last_pad_n = (0 if ((l_n % d_n) == 0) else (d_n - (l_n % d_n)))
last_pad_c = (0 if ((l_c % d_c) == 0) else (d_c - (l_c % d_c)))
last_pad_y = (0 if ((l_y % d_y) == 0) else (d_y - (l_y % d_y)))
last_pad_x = (0 if ((l_x % d_x) == 0) else (d_x - (l_x % d_x)))
last_pad_k = (0 if ((l_k % d_k) == 0) else (d_k - (l_k % d_k)))
last_pad_r = (0 if ((l_r % d_r) == 0) else (d_r - (l_r % d_r)))
last_pad_s = (0 if ((l_s % d_s) == 0) else (d_s - (l_s % d_s)))
iter_n = tvm.if_then_else(c_n, last_iter_n, iter_n)
iter_c = tvm.if_then_else(c_c, last_iter_c, iter_c)
iter_y = tvm.if_then_else(c_y, last_iter_y, iter_y)
iter_x = tvm.if_then_else(c_x, last_iter_x, iter_x)
iter_k = tvm.if_then_else(c_k, last_iter_k, iter_k)
iter_r = tvm.if_then_else(c_r, last_iter_r, iter_r)
iter_s = tvm.if_then_else(c_s, last_iter_s, iter_s)
pad_n = tvm.if_then_else(c_n, last_pad_n, pad_n)
pad_c = tvm.if_then_else(c_c, last_pad_c, pad_c)
pad_y = tvm.if_then_else(c_y, last_pad_y, pad_y)
pad_x = tvm.if_then_else(c_x, last_pad_x, pad_x)
pad_k = tvm.if_then_else(c_k, last_pad_k, pad_k)
pad_r = tvm.if_then_else(c_r, last_pad_r, pad_r)
pad_s = tvm.if_then_else(c_s, last_pad_s, pad_s)
def interface_func(ins, outs):
(sa, sb) = ins
(sc,) = outs
def _body():
ib = tvm.ir_builder.create()
ib.emit(tvm.call_extern(dtype, 'tensorized_CONV', sa.access_ptr('r'), sb.access_ptr('r'), sc.access_ptr('rw'), 1, iter_n, iter_c, iter_y, iter_x, iter_k, iter_r, iter_s, pad_n, pad_c, pad_y, pad_x, pad_k, pad_r, pad_s, True, False))
return ib.get()
def _reset():
ib = tvm.ir_builder.create()
ib.emit(tvm.call_extern(dtype, 'init_output', sc.access_ptr('w'), iter_n, iter_y, iter_x, iter_k, pad_n, pad_y, pad_x, pad_k))
return ib.get()
def _finalize():
ib = tvm.ir_builder.create()
ib.emit(tvm.call_extern(dtype, 'store_output', sc.access_ptr('rw'), iter_n, iter_y, iter_x, iter_k, pad_n, pad_y, pad_x, pad_k))
return ib.get()
return (None, _reset(), _body(), _finalize())
with tvm.build_config(offset_factor=1):
return tvm.decl_tensor_intrin(tC.op, interface_func, binds={tA: sA, tB: sB, tC: sC}, name='conv_interface')<|docstring|>l_n, l_c, l_y, l_x, l_k, l_r, l_s: last iteration size
c_n, c_c, c_y, c_x, c_k, c_r, c_s: last iteration condition<|endoftext|> |
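Every axis in `conv_interface` repeats the same two formulas: a ceiling division to count intrinsic-sized steps, and the distance to the next multiple as padding; the `tvm.if_then_else` calls then swap in the tail-tile variants when the corresponding guard condition fires. The per-axis arithmetic in plain Python:

```python
def iters_and_pad(full, intrinsic):
    """How many intrinsic-sized steps cover `full`, and how much padding."""
    iters = full // intrinsic + (0 if full % intrinsic == 0 else 1)
    pad = 0 if full % intrinsic == 0 else intrinsic - full % intrinsic
    return iters, pad

# e.g. a channel extent of 70 covered by an intrinsic size of 16:
print(iters_and_pad(70, 16))  # (5, 10): five steps, the last padded by 10
print(iters_and_pad(64, 16))  # (4, 0): exact fit, no padding
```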
9d1324adcc16762eb25cc4f4f869cbef7cd7a8b625a7cc056380a82e64ef99d3 | def generate_conv_interface(N, C, Y, X, K, R, S, fN, fC, fY, fX, fK, fR, fS, axisN, axisC, axisY, axisX, axisK, axisR, axisS, dN, dC, dY, dX, dK, dR, dS, sp_kb, local_kb, dtype):
'\n N, C, Y, X, K, R, S: the dimensions mapped to n, c, y, x, k, r, s\n fN, fC, fY, fX, fK, fR, fS: interface size (fN, fC, fY + fR, fX + fS) * (fR, fS, fC, fK)\n axisN, axisC, axisY, axisX, axisK, axisR, axisS: AST nodes \n dN, dC, dY, dX, dK, dR, dS: intrinsic size\n '
if verbose:
assert (((((fN * fX) * fY) * fC) + (((fK * fC) * fR) * fS)) <= ((sp_kb * 8192) / bits_map[dtype])), 'data too large for scratchpad'
assert (((((dN * dX) * dY) * dC) + (((dK * dC) * dR) * dS)) <= ((local_kb * 8192) / bits_map[dtype])), 'data too large for local memory'
else:
assert (((((fN * fX) * fY) * fC) + (((fK * fC) * fR) * fS)) <= ((sp_kb * 8192) / bits_map[dtype]))
assert (((((dN * dX) * dY) * dC) + (((dK * dC) * dR) * dS)) <= ((local_kb * 8192) / bits_map[dtype]))
last_n = (N % fN)
cond_n = (tvm.expr.EQ(axisN, (N // fN)) if (last_n != 0) else False)
last_n = (last_n if (last_n != 0) else fN)
last_c = (C % fC)
cond_c = (tvm.expr.EQ(axisC, (C // fC)) if (last_c != 0) else False)
last_c = (last_c if (last_c != 0) else fC)
last_y = (Y % fY)
cond_y = (tvm.expr.EQ(axisY, (Y // fY)) if (last_y != 0) else False)
last_y = (last_y if (last_y != 0) else fY)
last_x = (X % fX)
cond_x = (tvm.expr.EQ(axisX, (X // fX)) if (last_x != 0) else False)
last_x = (last_x if (last_x != 0) else fX)
last_k = (K % fK)
cond_k = (tvm.expr.EQ(axisK, (K // fK)) if (last_k != 0) else False)
last_k = (last_k if (last_k != 0) else fK)
last_r = (R % fR)
cond_r = (tvm.expr.EQ(axisR, (R // fR)) if (last_r != 0) else False)
last_r = (last_r if (last_r != 0) else fR)
last_s = (S % fS)
cond_s = (tvm.expr.EQ(axisS, (S // fS)) if (last_s != 0) else False)
last_s = (last_s if (last_s != 0) else fS)
return conv_interface(fN, fC, fY, fX, fK, fR, fS, last_n, last_c, last_y, last_x, last_k, last_r, last_s, cond_n, cond_c, cond_y, cond_x, cond_k, cond_r, cond_s, dN, dC, dY, dX, dK, dR, dS, dtype) | N, C, Y, X, K, R, S: the dimensions mapped to n, c, y, x, k, r, s
fN, fC, fY, fX, fK, fR, fS: interface size (fN, fC, fY + fR, fX + fS) * (fR, fS, fC, fK)
axisN, axisC, axisY, axisX, axisK, axisR, axisS: AST nodes
dN, dC, dY, dX, dK, dR, dS: intrinsic size | src/hw_generator/generator_conv.py | generate_conv_interface | hanlinxuy/HASCO | 20 | python | def generate_conv_interface(N, C, Y, X, K, R, S, fN, fC, fY, fX, fK, fR, fS, axisN, axisC, axisY, axisX, axisK, axisR, axisS, dN, dC, dY, dX, dK, dR, dS, sp_kb, local_kb, dtype):
'\n N, C, Y, X, K, R, S: the dimensions mapped to n, c, y, x, k, r, s\n fN, fC, fY, fX, fK, fR, fS: interface size (fN, fC, fY + fR, fX + fS) * (fR, fS, fC, fK)\n axisN, axisC, axisY, axisX, axisK, axisR, axisS: AST nodes \n dN, dC, dY, dX, dK, dR, dS: intrinsic size\n '
if verbose:
assert (((((fN * fX) * fY) * fC) + (((fK * fC) * fR) * fS)) <= ((sp_kb * 8192) / bits_map[dtype])), 'data too large for scratchpad'
assert (((((dN * dX) * dY) * dC) + (((dK * dC) * dR) * dS)) <= ((local_kb * 8192) / bits_map[dtype])), 'data too large for local memory'
else:
assert (((((fN * fX) * fY) * fC) + (((fK * fC) * fR) * fS)) <= ((sp_kb * 8192) / bits_map[dtype]))
assert (((((dN * dX) * dY) * dC) + (((dK * dC) * dR) * dS)) <= ((local_kb * 8192) / bits_map[dtype]))
last_n = (N % fN)
cond_n = (tvm.expr.EQ(axisN, (N // fN)) if (last_n != 0) else False)
last_n = (last_n if (last_n != 0) else fN)
last_c = (C % fC)
cond_c = (tvm.expr.EQ(axisC, (C // fC)) if (last_c != 0) else False)
last_c = (last_c if (last_c != 0) else fC)
last_y = (Y % fY)
cond_y = (tvm.expr.EQ(axisY, (Y // fY)) if (last_y != 0) else False)
last_y = (last_y if (last_y != 0) else fY)
last_x = (X % fX)
cond_x = (tvm.expr.EQ(axisX, (X // fX)) if (last_x != 0) else False)
last_x = (last_x if (last_x != 0) else fX)
last_k = (K % fK)
cond_k = (tvm.expr.EQ(axisK, (K // fK)) if (last_k != 0) else False)
last_k = (last_k if (last_k != 0) else fK)
last_r = (R % fR)
cond_r = (tvm.expr.EQ(axisR, (R // fR)) if (last_r != 0) else False)
last_r = (last_r if (last_r != 0) else fR)
last_s = (S % fS)
cond_s = (tvm.expr.EQ(axisS, (S // fS)) if (last_s != 0) else False)
last_s = (last_s if (last_s != 0) else fS)
return conv_interface(fN, fC, fY, fX, fK, fR, fS, last_n, last_c, last_y, last_x, last_k, last_r, last_s, cond_n, cond_c, cond_y, cond_x, cond_k, cond_r, cond_s, dN, dC, dY, dX, dK, dR, dS, dtype) | def generate_conv_interface(N, C, Y, X, K, R, S, fN, fC, fY, fX, fK, fR, fS, axisN, axisC, axisY, axisX, axisK, axisR, axisS, dN, dC, dY, dX, dK, dR, dS, sp_kb, local_kb, dtype):
'\n N, C, Y, X, K, R, S: the dimensions mapped to n, c, y, x, k, r, s\n fN, fC, fY, fX, fK, fR, fS: interface size (fN, fC, fY + fR, fX + fS) * (fR, fS, fC, fK)\n axisN, axisC, axisY, axisX, axisK, axisR, axisS: AST nodes \n dN, dC, dY, dX, dK, dR, dS: intrinsic size\n '
if verbose:
assert (((((fN * fX) * fY) * fC) + (((fK * fC) * fR) * fS)) <= ((sp_kb * 8192) / bits_map[dtype])), 'data too large for scratchpad'
assert (((((dN * dX) * dY) * dC) + (((dK * dC) * dR) * dS)) <= ((local_kb * 8192) / bits_map[dtype])), 'data too large for local memory'
else:
assert (((((fN * fX) * fY) * fC) + (((fK * fC) * fR) * fS)) <= ((sp_kb * 8192) / bits_map[dtype]))
assert (((((dN * dX) * dY) * dC) + (((dK * dC) * dR) * dS)) <= ((local_kb * 8192) / bits_map[dtype]))
last_n = (N % fN)
cond_n = (tvm.expr.EQ(axisN, (N // fN)) if (last_n != 0) else False)
last_n = (last_n if (last_n != 0) else fN)
last_c = (C % fC)
cond_c = (tvm.expr.EQ(axisC, (C // fC)) if (last_c != 0) else False)
last_c = (last_c if (last_c != 0) else fC)
last_y = (Y % fY)
cond_y = (tvm.expr.EQ(axisY, (Y // fY)) if (last_y != 0) else False)
last_y = (last_y if (last_y != 0) else fY)
last_x = (X % fX)
cond_x = (tvm.expr.EQ(axisX, (X // fX)) if (last_x != 0) else False)
last_x = (last_x if (last_x != 0) else fX)
last_k = (K % fK)
cond_k = (tvm.expr.EQ(axisK, (K // fK)) if (last_k != 0) else False)
last_k = (last_k if (last_k != 0) else fK)
last_r = (R % fR)
cond_r = (tvm.expr.EQ(axisR, (R // fR)) if (last_r != 0) else False)
last_r = (last_r if (last_r != 0) else fR)
last_s = (S % fS)
cond_s = (tvm.expr.EQ(axisS, (S // fS)) if (last_s != 0) else False)
last_s = (last_s if (last_s != 0) else fS)
return conv_interface(fN, fC, fY, fX, fK, fR, fS, last_n, last_c, last_y, last_x, last_k, last_r, last_s, cond_n, cond_c, cond_y, cond_x, cond_k, cond_r, cond_s, dN, dC, dY, dX, dK, dR, dS, dtype)<|docstring|>N, C, Y, X, K, R, S: the dimensions mapped to n, c, y, x, k, r, s
fN, fC, fY, fX, fK, fR, fS: interface size (fN, fC, fY + fR, fX + fS) * (fR, fS, fC, fK)
axisN, axisC, axisY, axisX, axisK, axisR, axisS: AST nodes
dN, dC, dY, dX, dK, dR, dS: intrinsic size<|endoftext|> |
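`generate_conv_interface` first asserts that the interface tile and the intrinsic tile fit the scratchpad and local memory (sized via the module's `bits_map`), then derives per axis a tail size `extent % tile` together with a guard predicate that is a real comparison only when the extent does not divide evenly. The tail logic in isolation:

```python
def tail(extent, tile):
    """Tail-tile size plus whether a guarded last iteration is needed."""
    last = extent % tile
    needs_guard = last != 0  # cond_* becomes a real predicate only here
    return (last if last != 0 else tile), needs_guard

print(tail(70, 16))  # (6, True): partial last tile, guard at axis == 70 // 16
print(tail(64, 16))  # (16, False): even split, cond_* stays False
```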
da7c31349ea4fca5b920c9e9ecdea6f075d0544eeb3f28f5412496884ecc30e5 | def _get_batch_dim_helper(v: TensorStructType) -> int:
'Tries to find the batch dimension size of v, or None.'
if isinstance(v, dict):
for u in v.values():
return _get_batch_dim_helper(u)
elif isinstance(v, tuple):
return _get_batch_dim_helper(v[0])
elif isinstance(v, RepeatedValues):
return _get_batch_dim_helper(v.values)
else:
B = v.shape[0]
if hasattr(B, 'value'):
B = B.value
return B | Tries to find the batch dimension size of v, or None. | rllib/models/repeated_values.py | _get_batch_dim_helper | mopga/ray | 21,382 | python | def _get_batch_dim_helper(v: TensorStructType) -> int:
if isinstance(v, dict):
for u in v.values():
return _get_batch_dim_helper(u)
elif isinstance(v, tuple):
return _get_batch_dim_helper(v[0])
elif isinstance(v, RepeatedValues):
return _get_batch_dim_helper(v.values)
else:
B = v.shape[0]
if hasattr(B, 'value'):
B = B.value
return B | def _get_batch_dim_helper(v: TensorStructType) -> int:
if isinstance(v, dict):
for u in v.values():
return _get_batch_dim_helper(u)
elif isinstance(v, tuple):
return _get_batch_dim_helper(v[0])
elif isinstance(v, RepeatedValues):
return _get_batch_dim_helper(v.values)
else:
B = v.shape[0]
if hasattr(B, 'value'):
B = B.value
return B<|docstring|>Tries to find the batch dimension size of v, or None.<|endoftext|> |
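A framework-free sketch of the recursive batch-dimension lookup, with numpy arrays standing in for tensors and the `RepeatedValues` branch omitted; note that for a dict only the first value is inspected, exactly as in the original:

```python
import numpy as np

def get_batch_dim(v):
    """Recursively find the batch dimension of a (possibly nested) struct."""
    if isinstance(v, dict):
        for u in v.values():
            return get_batch_dim(u)  # only the first entry is inspected
    elif isinstance(v, tuple):
        return get_batch_dim(v[0])
    else:
        B = v.shape[0]
        return B.value if hasattr(B, "value") else B  # unwrap TF Dimension

obs = {"cam": np.zeros((8, 64, 64)), "state": (np.zeros((8, 5)),)}
print(get_batch_dim(obs))  # 8
```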
369dccf48995128f3c2c1d5b70c9e732651fd29250b2e80a80f1d1f1a377c0ef | def _unbatch_helper(v: TensorStructType, max_len: int) -> TensorStructType:
'Recursively unpacks the repeat dimension (max_len).'
if isinstance(v, dict):
return {k: _unbatch_helper(u, max_len) for (k, u) in v.items()}
elif isinstance(v, tuple):
return tuple((_unbatch_helper(u, max_len) for u in v))
elif isinstance(v, RepeatedValues):
unbatched = _unbatch_helper(v.values, max_len)
return [RepeatedValues(u, v.lengths[(:, i, ...)], v.max_len) for (i, u) in enumerate(unbatched)]
else:
return [v[(:, i, ...)] for i in range(max_len)] | Recursively unpacks the repeat dimension (max_len). | rllib/models/repeated_values.py | _unbatch_helper | mopga/ray | 21,382 | python | def _unbatch_helper(v: TensorStructType, max_len: int) -> TensorStructType:
if isinstance(v, dict):
return {k: _unbatch_helper(u, max_len) for (k, u) in v.items()}
elif isinstance(v, tuple):
return tuple((_unbatch_helper(u, max_len) for u in v))
elif isinstance(v, RepeatedValues):
unbatched = _unbatch_helper(v.values, max_len)
return [RepeatedValues(u, v.lengths[(:, i, ...)], v.max_len) for (i, u) in enumerate(unbatched)]
else:
return [v[(:, i, ...)] for i in range(max_len)] | def _unbatch_helper(v: TensorStructType, max_len: int) -> TensorStructType:
if isinstance(v, dict):
return {k: _unbatch_helper(u, max_len) for (k, u) in v.items()}
elif isinstance(v, tuple):
return tuple((_unbatch_helper(u, max_len) for u in v))
elif isinstance(v, RepeatedValues):
unbatched = _unbatch_helper(v.values, max_len)
return [RepeatedValues(u, v.lengths[(:, i, ...)], v.max_len) for (i, u) in enumerate(unbatched)]
else:
return [v[(:, i, ...)] for i in range(max_len)]<|docstring|>Recursively unpacks the repeat dimension (max_len).<|endoftext|> |
b2bdc466cc407c77ce997457eb971b74bb896d28703d357e77425dc82f89d6b5 | def _batch_index_helper(v: TensorStructType, i: int, j: int) -> TensorStructType:
'Selects the item at the ith batch index and jth repetition.'
if isinstance(v, dict):
return {k: _batch_index_helper(u, i, j) for (k, u) in v.items()}
elif isinstance(v, tuple):
return tuple((_batch_index_helper(u, i, j) for u in v))
elif isinstance(v, list):
return _batch_index_helper(v[j], i, j)
elif isinstance(v, RepeatedValues):
unbatched = v.unbatch_all()
return unbatched[i]
else:
return v[(i, ...)] | Selects the item at the ith batch index and jth repetition. | rllib/models/repeated_values.py | _batch_index_helper | mopga/ray | 21,382 | python | def _batch_index_helper(v: TensorStructType, i: int, j: int) -> TensorStructType:
if isinstance(v, dict):
return {k: _batch_index_helper(u, i, j) for (k, u) in v.items()}
elif isinstance(v, tuple):
return tuple((_batch_index_helper(u, i, j) for u in v))
elif isinstance(v, list):
return _batch_index_helper(v[j], i, j)
elif isinstance(v, RepeatedValues):
unbatched = v.unbatch_all()
return unbatched[i]
else:
return v[(i, ...)] | def _batch_index_helper(v: TensorStructType, i: int, j: int) -> TensorStructType:
if isinstance(v, dict):
return {k: _batch_index_helper(u, i, j) for (k, u) in v.items()}
elif isinstance(v, tuple):
return tuple((_batch_index_helper(u, i, j) for u in v))
elif isinstance(v, list):
return _batch_index_helper(v[j], i, j)
elif isinstance(v, RepeatedValues):
unbatched = v.unbatch_all()
return unbatched[i]
else:
return v[(i, ...)]<|docstring|>Selects the item at the ith batch index and jth repetition.<|endoftext|> |
19c3bb9a9ada70eedf121239eb883e40633c13646a912b6d162578f57595c937 | def unbatch_all(self) -> List[List[TensorType]]:
'Unbatch both the repeat and batch dimensions into Python lists.\n\n This is only supported in PyTorch / TF eager mode.\n\n This lets you view the data unbatched in its original form, but is\n not efficient for processing.\n\n Examples:\n >>> batch = RepeatedValues(<Tensor shape=(B, N, K)>)\n >>> items = batch.unbatch_all()\n >>> print(len(items) == B)\n True\n >>> print(max(len(x) for x in items) <= N)\n True\n >>> print(items)\n ... [[<Tensor_1 shape=(K)>, ..., <Tensor_N, shape=(K)>],\n ... ...\n ... [<Tensor_1 shape=(K)>, <Tensor_2 shape=(K)>],\n ... ...\n ... [<Tensor_1 shape=(K)>],\n ... ...\n ... [<Tensor_1 shape=(K)>, ..., <Tensor_N shape=(K)>]]\n '
if (self._unbatched_repr is None):
B = _get_batch_dim_helper(self.values)
if (B is None):
raise ValueError('Cannot call unbatch_all() when batch_dim is unknown. This is probably because you are using TF graph mode.')
else:
B = int(B)
slices = self.unbatch_repeat_dim()
result = []
for i in range(B):
if hasattr(self.lengths[i], 'item'):
dynamic_len = int(self.lengths[i].item())
else:
dynamic_len = int(self.lengths[i].numpy())
dynamic_slice = []
for j in range(dynamic_len):
dynamic_slice.append(_batch_index_helper(slices, i, j))
result.append(dynamic_slice)
self._unbatched_repr = result
return self._unbatched_repr | Unbatch both the repeat and batch dimensions into Python lists.
This is only supported in PyTorch / TF eager mode.
This lets you view the data unbatched in its original form, but is
not efficient for processing.
Examples:
>>> batch = RepeatedValues(<Tensor shape=(B, N, K)>)
>>> items = batch.unbatch_all()
>>> print(len(items) == B)
True
>>> print(max(len(x) for x in items) <= N)
True
>>> print(items)
... [[<Tensor_1 shape=(K)>, ..., <Tensor_N, shape=(K)>],
... ...
... [<Tensor_1 shape=(K)>, <Tensor_2 shape=(K)>],
... ...
... [<Tensor_1 shape=(K)>],
... ...
... [<Tensor_1 shape=(K)>, ..., <Tensor_N shape=(K)>]] | rllib/models/repeated_values.py | unbatch_all | mopga/ray | 21,382 | python | def unbatch_all(self) -> List[List[TensorType]]:
'Unbatch both the repeat and batch dimensions into Python lists.\n\n This is only supported in PyTorch / TF eager mode.\n\n This lets you view the data unbatched in its original form, but is\n not efficient for processing.\n\n Examples:\n >>> batch = RepeatedValues(<Tensor shape=(B, N, K)>)\n >>> items = batch.unbatch_all()\n >>> print(len(items) == B)\n True\n >>> print(max(len(x) for x in items) <= N)\n True\n >>> print(items)\n ... [[<Tensor_1 shape=(K)>, ..., <Tensor_N, shape=(K)>],\n ... ...\n ... [<Tensor_1 shape=(K)>, <Tensor_2 shape=(K)>],\n ... ...\n ... [<Tensor_1 shape=(K)>],\n ... ...\n ... [<Tensor_1 shape=(K)>, ..., <Tensor_N shape=(K)>]]\n '
if (self._unbatched_repr is None):
B = _get_batch_dim_helper(self.values)
if (B is None):
raise ValueError('Cannot call unbatch_all() when batch_dim is unknown. This is probably because you are using TF graph mode.')
else:
B = int(B)
slices = self.unbatch_repeat_dim()
result = []
for i in range(B):
if hasattr(self.lengths[i], 'item'):
dynamic_len = int(self.lengths[i].item())
else:
dynamic_len = int(self.lengths[i].numpy())
dynamic_slice = []
for j in range(dynamic_len):
dynamic_slice.append(_batch_index_helper(slices, i, j))
result.append(dynamic_slice)
self._unbatched_repr = result
return self._unbatched_repr | def unbatch_all(self) -> List[List[TensorType]]:
'Unbatch both the repeat and batch dimensions into Python lists.\n\n This is only supported in PyTorch / TF eager mode.\n\n This lets you view the data unbatched in its original form, but is\n not efficient for processing.\n\n Examples:\n >>> batch = RepeatedValues(<Tensor shape=(B, N, K)>)\n >>> items = batch.unbatch_all()\n >>> print(len(items) == B)\n True\n >>> print(max(len(x) for x in items) <= N)\n True\n >>> print(items)\n ... [[<Tensor_1 shape=(K)>, ..., <Tensor_N, shape=(K)>],\n ... ...\n ... [<Tensor_1 shape=(K)>, <Tensor_2 shape=(K)>],\n ... ...\n ... [<Tensor_1 shape=(K)>],\n ... ...\n ... [<Tensor_1 shape=(K)>, ..., <Tensor_N shape=(K)>]]\n '
if (self._unbatched_repr is None):
B = _get_batch_dim_helper(self.values)
if (B is None):
raise ValueError('Cannot call unbatch_all() when batch_dim is unknown. This is probably because you are using TF graph mode.')
else:
B = int(B)
slices = self.unbatch_repeat_dim()
result = []
for i in range(B):
if hasattr(self.lengths[i], 'item'):
dynamic_len = int(self.lengths[i].item())
else:
dynamic_len = int(self.lengths[i].numpy())
dynamic_slice = []
for j in range(dynamic_len):
dynamic_slice.append(_batch_index_helper(slices, i, j))
result.append(dynamic_slice)
self._unbatched_repr = result
return self._unbatched_repr<|docstring|>Unbatch both the repeat and batch dimensions into Python lists.
This is only supported in PyTorch / TF eager mode.
This lets you view the data unbatched in its original form, but is
not efficient for processing.
Examples:
>>> batch = RepeatedValues(<Tensor shape=(B, N, K)>)
>>> items = batch.unbatch_all()
>>> print(len(items) == B)
True
>>> print(max(len(x) for x in items) <= N)
True
>>> print(items)
... [[<Tensor_1 shape=(K)>, ..., <Tensor_N, shape=(K)>],
... ...
... [<Tensor_1 shape=(K)>, <Tensor_2 shape=(K)>],
... ...
... [<Tensor_1 shape=(K)>],
... ...
... [<Tensor_1 shape=(K)>, ..., <Tensor_N shape=(K)>]]<|endoftext|> |
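`unbatch_all` needs a concrete batch size (hence the PyTorch / TF-eager restriction) and then trims each padded row down to its true `lengths[i]`. The core trim-the-padding idea with plain numpy, ignoring nested structures:

```python
import numpy as np

values = np.arange(2 * 3 * 2).reshape(2, 3, 2)  # (B=2, max_len=3, K=2)
lengths = np.array([2, 1])                      # true per-row item counts

unbatched = [[values[i, j] for j in range(int(lengths[i]))]
             for i in range(values.shape[0])]
print([len(row) for row in unbatched])  # [2, 1]: padding entries dropped
```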
335f34bc926c8fd51b796c188f914d0100bba324bcb524acb58e1934b1d85a92 | def unbatch_repeat_dim(self) -> List[TensorType]:
'Unbatches the repeat dimension (the one `max_len` in size).\n\n This removes the repeat dimension. The result will be a Python list of\n with length `self.max_len`. Note that the data is still padded.\n\n Examples:\n >>> batch = RepeatedValues(<Tensor shape=(B, N, K)>)\n >>> items = batch.unbatch()\n >>> len(items) == batch.max_len\n True\n >>> print(items)\n ... [<Tensor_1 shape=(B, K)>, ..., <Tensor_N shape=(B, K)>]\n '
return _unbatch_helper(self.values, self.max_len) | Unbatches the repeat dimension (the one `max_len` in size).
This removes the repeat dimension. The result will be a Python list of
with length `self.max_len`. Note that the data is still padded.
Examples:
>>> batch = RepeatedValues(<Tensor shape=(B, N, K)>)
>>> items = batch.unbatch()
>>> len(items) == batch.max_len
True
>>> print(items)
... [<Tensor_1 shape=(B, K)>, ..., <Tensor_N shape=(B, K)>] | rllib/models/repeated_values.py | unbatch_repeat_dim | mopga/ray | 21,382 | python | def unbatch_repeat_dim(self) -> List[TensorType]:
'Unbatches the repeat dimension (the one `max_len` in size).\n\n This removes the repeat dimension. The result will be a Python list of\n with length `self.max_len`. Note that the data is still padded.\n\n Examples:\n >>> batch = RepeatedValues(<Tensor shape=(B, N, K)>)\n >>> items = batch.unbatch()\n >>> len(items) == batch.max_len\n True\n >>> print(items)\n ... [<Tensor_1 shape=(B, K)>, ..., <Tensor_N shape=(B, K)>]\n '
return _unbatch_helper(self.values, self.max_len) | def unbatch_repeat_dim(self) -> List[TensorType]:
'Unbatches the repeat dimension (the one `max_len` in size).\n\n This removes the repeat dimension. The result will be a Python list of\n with length `self.max_len`. Note that the data is still padded.\n\n Examples:\n >>> batch = RepeatedValues(<Tensor shape=(B, N, K)>)\n >>> items = batch.unbatch()\n >>> len(items) == batch.max_len\n True\n >>> print(items)\n ... [<Tensor_1 shape=(B, K)>, ..., <Tensor_N shape=(B, K)>]\n '
return _unbatch_helper(self.values, self.max_len)<|docstring|>Unbatches the repeat dimension (the one `max_len` in size).
This removes the repeat dimension. The result will be a Python list of
with length `self.max_len`. Note that the data is still padded.
Examples:
>>> batch = RepeatedValues(<Tensor shape=(B, N, K)>)
>>> items = batch.unbatch()
>>> len(items) == batch.max_len
True
>>> print(items)
... [<Tensor_1 shape=(B, K)>, ..., <Tensor_N shape=(B, K)>]<|endoftext|> |
0975d23ddec91a9141130dfc7f36d2b3c481b196860990c26116ac7db2fdf2ae | def build_controller(self, model) -> CompressionAlgorithmController:
'\n Should be called once the compressed model target_model is fully constructed\n '
return MagnitudeSparsityController(model, self.config.params) | Should be called once the compressed model target_model is fully constructed | nncf/sparsity/magnitude/algorithm.py | build_controller | dupeljan/nncf_for_tf | 0 | python | def build_controller(self, model) -> CompressionAlgorithmController:
'\n \n '
return MagnitudeSparsityController(model, self.config.params) | def build_controller(self, model) -> CompressionAlgorithmController:
'\n \n '
return MagnitudeSparsityController(model, self.config.params)<|docstring|>Should be called once the compressed model target_model is fully constructed<|endoftext|> |
c72301e22784835813510cf22ce42b0846f76b5dc32b77fd83f241313b11f997 | def parse_type_line(type_line):
'Parses a type annotation specified as a comment.\n\n Example inputs:\n # type: (Tensor, torch.Tensor) -> Tuple[Tensor]\n # type: (Tensor, Tuple[Tensor, Tensor]) -> Tensor\n '
(arg_ann_str, ret_ann_str) = split_type_line(type_line)
try:
arg_ann = eval(arg_ann_str, _eval_env)
except SyntaxError:
raise RuntimeError('Failed to parse the argument list of a type annotation')
if (not isinstance(arg_ann, tuple)):
arg_ann = (arg_ann,)
try:
ret_ann = eval(ret_ann_str, _eval_env)
except SyntaxError:
raise RuntimeError('Failed to parse the return type of a type annotation')
arg_types = [ann_to_type(ann) for ann in arg_ann]
ret_types = flatten_return_type(ann_to_type(ret_ann))
return (arg_types, ret_types) | Parses a type annotation specified as a comment.
Example inputs:
# type: (Tensor, torch.Tensor) -> Tuple[Tensor]
# type: (Tensor, Tuple[Tensor, Tensor]) -> Tensor | torch/jit/annotations.py | parse_type_line | DavidKo3/mctorch | 1 | python | def parse_type_line(type_line):
'Parses a type annotation specified as a comment.\n\n Example inputs:\n # type: (Tensor, torch.Tensor) -> Tuple[Tensor]\n # type: (Tensor, Tuple[Tensor, Tensor]) -> Tensor\n '
(arg_ann_str, ret_ann_str) = split_type_line(type_line)
try:
arg_ann = eval(arg_ann_str, _eval_env)
except SyntaxError:
raise RuntimeError('Failed to parse the argument list of a type annotation')
if (not isinstance(arg_ann, tuple)):
arg_ann = (arg_ann,)
try:
ret_ann = eval(ret_ann_str, _eval_env)
except SyntaxError:
raise RuntimeError('Failed to parse the return type of a type annotation')
arg_types = [ann_to_type(ann) for ann in arg_ann]
ret_types = flatten_return_type(ann_to_type(ret_ann))
return (arg_types, ret_types) | def parse_type_line(type_line):
'Parses a type annotation specified as a comment.\n\n Example inputs:\n # type: (Tensor, torch.Tensor) -> Tuple[Tensor]\n # type: (Tensor, Tuple[Tensor, Tensor]) -> Tensor\n '
(arg_ann_str, ret_ann_str) = split_type_line(type_line)
try:
arg_ann = eval(arg_ann_str, _eval_env)
except SyntaxError:
raise RuntimeError('Failed to parse the argument list of a type annotation')
if (not isinstance(arg_ann, tuple)):
arg_ann = (arg_ann,)
try:
ret_ann = eval(ret_ann_str, _eval_env)
except SyntaxError:
raise RuntimeError('Failed to parse the return type of a type annotation')
arg_types = [ann_to_type(ann) for ann in arg_ann]
ret_types = flatten_return_type(ann_to_type(ret_ann))
return (arg_types, ret_types)<|docstring|>Parses a type annotation specified as a comment.
Example inputs:
# type: (Tensor, torch.Tensor) -> Tuple[Tensor]
# type: (Tensor, Tuple[Tensor, Tensor]) -> Tensor<|endoftext|> |
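A dependency-free sketch of the same parse: take everything after the `# type:` prefix, split at `->`, `eval` each half against an environment that maps annotation names to types, and normalize a lone argument annotation into a one-element tuple. The `_eval_env` contents below are an assumption for illustration; the real module resolves names like `Tensor` into JIT types via `ann_to_type`:

```python
from typing import List, Tuple

_eval_env = {"Tensor": "Tensor", "Tuple": Tuple, "List": List, "int": int}

def parse_type_line(type_line):
    """Parse '# type: (args) -> ret' into (arg_annotations, ret_annotation)."""
    body = type_line[len("# type:"):]
    arg_str, ret_str = (part.strip() for part in body.split("->", 1))
    arg_ann = eval(arg_str, dict(_eval_env))
    if not isinstance(arg_ann, tuple):
        arg_ann = (arg_ann,)  # a single annotation becomes a 1-tuple
    ret_ann = eval(ret_str, dict(_eval_env))
    return arg_ann, ret_ann

args, ret = parse_type_line("# type: (Tensor, int) -> Tuple[Tensor, Tensor]")
print(args)  # ('Tensor', <class 'int'>)
print(ret)   # typing.Tuple[ForwardRef('Tensor'), ForwardRef('Tensor')]
```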
6bc2dacfde7ea7cfb952b722a1a64a2acad172486517eab0fce74f375bee6c91 | def get_type_line(source):
'Tries to find the line containing a comment with the type annotation.'
lines = source.split('\n')
def strip_comment(line):
return line[:(line.index('#') if ('#' in line) else None)]
i = 0
while (not _def_end_regex.match(strip_comment(lines[i]))):
i += 1
i += 1
type_line = lines[i].strip()
if (not type_line.startswith('# type:')):
return None
return type_line | Tries to find the line containing a comment with the type annotation. | torch/jit/annotations.py | get_type_line | DavidKo3/mctorch | 1 | python | def get_type_line(source):
lines = source.split('\n')
def strip_comment(line):
return line[:(line.index('#') if ('#' in line) else None)]
i = 0
while (not _def_end_regex.match(strip_comment(lines[i]))):
i += 1
i += 1
type_line = lines[i].strip()
if (not type_line.startswith('# type:')):
return None
return type_line | def get_type_line(source):
lines = source.split('\n')
def strip_comment(line):
return line[:(line.index('#') if ('#' in line) else None)]
i = 0
while (not _def_end_regex.match(strip_comment(lines[i]))):
i += 1
i += 1
type_line = lines[i].strip()
if (not type_line.startswith('# type:')):
return None
return type_line<|docstring|>Tries to find the line containing a comment with the type annotation.<|endoftext|> |
357fe09b2811e6d27b71c890f391501436bbae7f682f3ff14955a9c19c31dd7c | def split_type_line(type_line):
'Splits the comment with the type annotation into parts for argument and return types.\n\n For example, for an input of:\n # type: (Tensor, torch.Tensor) -> Tuple[Tensor, Tensor]\n\n This function will return:\n ("(Tensor, torch.Tensor)", "Tuple[Tensor, Tensor]")\n\n '
start_offset = len('# type:')
try:
arrow_pos = type_line.index('->')
except ValueError:
raise RuntimeError("Syntax error in type annotation (cound't find `->`)")
return (type_line[start_offset:arrow_pos].strip(), type_line[(arrow_pos + 2):].strip()) | Splits the comment with the type annotation into parts for argument and return types.
For example, for an input of:
# type: (Tensor, torch.Tensor) -> Tuple[Tensor, Tensor]
This function will return:
("(Tensor, torch.Tensor)", "Tuple[Tensor, Tensor]") | torch/jit/annotations.py | split_type_line | DavidKo3/mctorch | 1 | python | def split_type_line(type_line):
'Splits the comment with the type annotation into parts for argument and return types.\n\n For example, for an input of:\n # type: (Tensor, torch.Tensor) -> Tuple[Tensor, Tensor]\n\n This function will return:\n ("(Tensor, torch.Tensor)", "Tuple[Tensor, Tensor]")\n\n '
start_offset = len('# type:')
try:
arrow_pos = type_line.index('->')
except ValueError:
raise RuntimeError("Syntax error in type annotation (cound't find `->`)")
return (type_line[start_offset:arrow_pos].strip(), type_line[(arrow_pos + 2):].strip()) | def split_type_line(type_line):
'Splits the comment with the type annotation into parts for argument and return types.\n\n For example, for an input of:\n # type: (Tensor, torch.Tensor) -> Tuple[Tensor, Tensor]\n\n This function will return:\n ("(Tensor, torch.Tensor)", "Tuple[Tensor, Tensor]")\n\n '
start_offset = len('# type:')
try:
arrow_pos = type_line.index('->')
except ValueError:
raise RuntimeError("Syntax error in type annotation (cound't find `->`)")
return (type_line[start_offset:arrow_pos].strip(), type_line[(arrow_pos + 2):].strip())<|docstring|>Splits the comment with the type annotation into parts for argument and return types.
For example, for an input of:
# type: (Tensor, torch.Tensor) -> Tuple[Tensor, Tensor]
This function will return:
("(Tensor, torch.Tensor)", "Tuple[Tensor, Tensor]")<|endoftext|> |
0b963a7cb15aba2f9689385c8ee418b93ec078756c2570f725185f796c41d93f | def try_real_annotations(fn):
'Tries to use the Py3.5+ annotation syntax to get the type.'
try:
sig = inspect.signature(fn)
except ValueError:
return None
all_annots = ([sig.return_annotation] + [p.annotation for p in sig.parameters.values()])
if all(((ann is sig.empty) for ann in all_annots)):
return None
def as_ann(ann):
return (ann if (ann is not sig.empty) else None)
arg_types = [ann_to_type(as_ann(p.annotation)) for p in sig.parameters.values()]
return_types = flatten_return_type(ann_to_type(as_ann(sig.return_annotation)))
return (arg_types, return_types) | Tries to use the Py3.5+ annotation syntax to get the type. | torch/jit/annotations.py | try_real_annotations | DavidKo3/mctorch | 1 | python | def try_real_annotations(fn):
try:
sig = inspect.signature(fn)
except ValueError:
return None
all_annots = ([sig.return_annotation] + [p.annotation for p in sig.parameters.values()])
if all(((ann is sig.empty) for ann in all_annots)):
return None
def as_ann(ann):
return (ann if (ann is not sig.empty) else None)
arg_types = [ann_to_type(as_ann(p.annotation)) for p in sig.parameters.values()]
return_types = flatten_return_type(ann_to_type(as_ann(sig.return_annotation)))
return (arg_types, return_types) | def try_real_annotations(fn):
try:
sig = inspect.signature(fn)
except ValueError:
return None
all_annots = ([sig.return_annotation] + [p.annotation for p in sig.parameters.values()])
if all(((ann is sig.empty) for ann in all_annots)):
return None
def as_ann(ann):
return (ann if (ann is not sig.empty) else None)
arg_types = [ann_to_type(as_ann(p.annotation)) for p in sig.parameters.values()]
return_types = flatten_return_type(ann_to_type(as_ann(sig.return_annotation)))
return (arg_types, return_types)<|docstring|>Tries to use the Py3.5+ annotation syntax to get the type.<|endoftext|> |
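`try_real_annotations` prefers Python-3 annotation syntax: when `inspect.signature` succeeds and at least one annotation is present, the parameter and return annotations are mapped through `ann_to_type`. The detection step on its own:

```python
import inspect

def has_real_annotations(fn):
    """True when fn carries at least one Py3-style type annotation."""
    try:
        sig = inspect.signature(fn)
    except ValueError:
        return False
    all_annots = [sig.return_annotation] + [p.annotation
                                            for p in sig.parameters.values()]
    return not all(ann is sig.empty for ann in all_annots)

def annotated(x: int) -> int: return x
def bare(x): return x

print(has_real_annotations(annotated))  # True
print(has_real_annotations(bare))       # False
```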
bcb3e3c489ac6aa64d05369090a633bccc3abc1ebf8be68c2b87b027d9d475f0 | @blueprint.route('/call/<cid>')
@utils.login_required
def call(cid):
'List all grants for a call.'
call = anubis.call.get_call(cid)
if (call is None):
return utils.error('No such call.', flask.url_for('home'))
if (not anubis.call.allow_view(call)):
return utils.error('You may not view the call.', flask.url_for('home'))
if (not anubis.call.allow_view_grants(call)):
return utils.error('You may not view the grants of the call.', flask.url_for('call.display', cid=call['identifier']))
grants = utils.get_docs_view('grants', 'call', call['identifier'])
for grant in grants:
grant['user'] = anubis.user.get_user(grant['user'])
receiver_emails = [g['user']['email'] for g in grants]
receiver_emails = [e for e in receiver_emails if e]
access_emails = []
field_emails = []
for grant in grants:
access_emails.extend([anubis.user.get_user(a)['email'] for a in grant.get('access_view', [])])
for field in call['grant']:
if (field['type'] == constants.EMAIL):
if field.get('repeat'):
n_repeat = (grant['values'].get(field['repeat']) or 0)
for n in range(1, (n_repeat + 1)):
key = f"{field['identifier']}-{n}"
field_emails.append(grant['values'].get(key))
else:
field_emails.append(grant['values'].get(field['identifier']))
field_emails = sorted(set([e for e in field_emails if e]))
access_emails = sorted(set([e for e in access_emails if e]))
all_emails = sorted(set(receiver_emails).union(access_emails).union(field_emails))
email_lists = {'Grant receivers (= proposal submitters)': ', '.join(receiver_emails), 'Persons with access to a grant': ', '.join(access_emails), 'Emails provided in grant fields': ', '.join(field_emails), 'All emails': ', '.join(all_emails)}
return flask.render_template('grants/call.html', call=call, grants=grants, email_lists=email_lists) | List all grants for a call. | anubis/grants.py | call | pekrau/Anubis | 2 | python | @blueprint.route('/call/<cid>')
@utils.login_required
def call(cid):
call = anubis.call.get_call(cid)
if (call is None):
return utils.error('No such call.', flask.url_for('home'))
if (not anubis.call.allow_view(call)):
return utils.error('You may not view the call.', flask.url_for('home'))
if (not anubis.call.allow_view_grants(call)):
return utils.error('You may not view the grants of the call.', flask.url_for('call.display', cid=call['identifier']))
grants = utils.get_docs_view('grants', 'call', call['identifier'])
for grant in grants:
grant['user'] = anubis.user.get_user(grant['user'])
receiver_emails = [g['user']['email'] for g in grants]
receiver_emails = [e for e in receiver_emails if e]
access_emails = []
field_emails = []
for grant in grants:
access_emails.extend([anubis.user.get_user(a)['email'] for a in grant.get('access_view', [])])
for field in call['grant']:
if (field['type'] == constants.EMAIL):
if field.get('repeat'):
n_repeat = (grant['values'].get(field['repeat']) or 0)
for n in range(1, (n_repeat + 1)):
key = f"{field['identifier']}-{n}"
field_emails.append(grant['values'].get(key))
else:
field_emails.append(grant['values'].get(field['identifier']))
field_emails = sorted(set([e for e in field_emails if e]))
access_emails = sorted(set([e for e in access_emails if e]))
all_emails = sorted(set(receiver_emails).union(access_emails).union(field_emails))
email_lists = {'Grant receivers (= proposal submitters)': ', '.join(receiver_emails), 'Persons with access to a grant': ', '.join(access_emails), 'Emails provided in grant fields': ', '.join(field_emails), 'All emails': ', '.join(all_emails)}
return flask.render_template('grants/call.html', call=call, grants=grants, email_lists=email_lists) | @blueprint.route('/call/<cid>')
@utils.login_required
def call(cid):
call = anubis.call.get_call(cid)
if (call is None):
return utils.error('No such call.', flask.url_for('home'))
if (not anubis.call.allow_view(call)):
return utils.error('You may not view the call.', flask.url_for('home'))
if (not anubis.call.allow_view_grants(call)):
return utils.error('You may not view the grants of the call.', flask.url_for('call.display', cid=call['identifier']))
grants = utils.get_docs_view('grants', 'call', call['identifier'])
for grant in grants:
grant['user'] = anubis.user.get_user(grant['user'])
receiver_emails = [g['user']['email'] for g in grants]
receiver_emails = [e for e in receiver_emails if e]
access_emails = []
field_emails = []
for grant in grants:
access_emails.extend([anubis.user.get_user(a)['email'] for a in grant.get('access_view', [])])
for field in call['grant']:
if (field['type'] == constants.EMAIL):
if field.get('repeat'):
n_repeat = (grant['values'].get(field['repeat']) or 0)
for n in range(1, (n_repeat + 1)):
key = f"{field['identifier']}-{n}"
field_emails.append(grant['values'].get(key))
else:
field_emails.append(grant['values'].get(field['identifier']))
field_emails = sorted(set([e for e in field_emails if e]))
access_emails = sorted(set([e for e in access_emails if e]))
all_emails = sorted(set(receiver_emails).union(access_emails).union(field_emails))
email_lists = {'Grant receivers (= proposal submitters)': ', '.join(receiver_emails), 'Persons with access to a grant': ', '.join(access_emails), 'Emails provided in grant fields': ', '.join(field_emails), 'All emails': ', '.join(all_emails)}
return flask.render_template('grants/call.html', call=call, grants=grants, email_lists=email_lists)<|docstring|>List all grants for a call.<|endoftext|> |
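The de-duplication idiom used for the email lists above, shown in isolation (plain Python, runnable as-is):
emails = ['b@example.org', None, 'a@example.org', 'b@example.org']
unique_sorted = sorted(set(e for e in emails if e))
# ['a@example.org', 'b@example.org']: falsy entries dropped, duplicates removed, result sorted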
200e668279025180575ad6e8f8834b432c37c38c9e97fb37f9d6d0ba53bc6c23 | @blueprint.route('/call/<cid>.xlsx')
@utils.login_required
def call_xlsx(cid):
'Produce an XLSX file of all grants for a call.'
call = anubis.call.get_call(cid)
if (call is None):
return utils.error('No such call.', flask.url_for('home'))
if (not anubis.call.allow_view(call)):
return utils.error('You may not view the call.', flask.url_for('home'))
if (not anubis.call.allow_view_grants(call)):
return utils.error('You may not view the grants of the call.', flask.url_for('call.display', cid=call['identifier']))
grants = utils.get_docs_view('grants', 'call', call['identifier'])
grants.sort(key=(lambda g: g['identifier']))
content = get_call_grants_xlsx(call, grants)
response = flask.make_response(content)
response.headers.set('Content-Type', constants.XLSX_MIMETYPE)
response.headers.set('Content-Disposition', 'attachment', filename=f'{cid}_grants.xlsx')
return response | Produce an XLSX file of all grants for a call. | anubis/grants.py | call_xlsx | pekrau/Anubis | 2 | python | @blueprint.route('/call/<cid>.xlsx')
@utils.login_required
def call_xlsx(cid):
call = anubis.call.get_call(cid)
if (call is None):
return utils.error('No such call.', flask.url_for('home'))
if (not anubis.call.allow_view(call)):
return utils.error('You may not view the call.', flask.url_for('home'))
if (not anubis.call.allow_view_grants(call)):
return utils.error('You may not view the grants of the call.', flask.url_for('call.display', cid=call['identifier']))
grants = utils.get_docs_view('grants', 'call', call['identifier'])
grants.sort(key=(lambda g: g['identifier']))
content = get_call_grants_xlsx(call, grants)
response = flask.make_response(content)
response.headers.set('Content-Type', constants.XLSX_MIMETYPE)
response.headers.set('Content-Disposition', 'attachment', filename=f'{cid}_grants.xlsx')
return response | @blueprint.route('/call/<cid>.xlsx')
@utils.login_required
def call_xlsx(cid):
call = anubis.call.get_call(cid)
if (call is None):
return utils.error('No such call.', flask.url_for('home'))
if (not anubis.call.allow_view(call)):
return utils.error('You may not view the call.', flask.url_for('home'))
if (not anubis.call.allow_view_grants(call)):
return utils.error('You may not view the grants of the call.', flask.url_for('call.display', cid=call['identifier']))
grants = utils.get_docs_view('grants', 'call', call['identifier'])
grants.sort(key=(lambda g: g['identifier']))
content = get_call_grants_xlsx(call, grants)
response = flask.make_response(content)
response.headers.set('Content-Type', constants.XLSX_MIMETYPE)
response.headers.set('Content-Disposition', 'attachment', filename=f'{cid}_grants.xlsx')
return response<|docstring|>Produce an XLSX file of all grants for a call.<|endoftext|> |
58466bc344bebedb7b40dd8f9c31e1cd283193efcd01264789f231d3df1dcd23 | def get_call_grants_xlsx(call, grants):
'Return the content for the XLSX file for the list of grants.'
output = io.BytesIO()
wb = xlsxwriter.Workbook(output, {'in_memory': True})
head_text_format = wb.add_format({'bold': True, 'text_wrap': True, 'bg_color': '#9ECA7F', 'font_size': 15, 'align': 'center', 'border': 1})
normal_text_format = wb.add_format({'font_size': 14, 'align': 'left', 'valign': 'vcenter'})
ws = wb.add_worksheet(f"Grants in call {call['identifier']}"[:31])
ws.freeze_panes(2, 1)
ws.set_row(0, 60, head_text_format)
ws.set_row(1, 60, head_text_format)
ws.set_column(0, 2, 10, normal_text_format)
ws.set_column(3, 3, 40, normal_text_format)
ws.set_column(4, 6, 20, normal_text_format)
nrow = 0
row = ['Grant', 'Status', 'Proposal', 'Proposal title', 'Submitter', 'Email', 'Affiliation']
ws.write_row(nrow, 0, row)
pos = (len(row) - 1)
start_pos = pos
for field in call['grant']:
if field.get('repeat'):
continue
title = (field['title'] or field['identifier'].capitalize())
pos += 1
n_repeat = len([f for f in call['grant'] if (f.get('repeat') == field['identifier'])])
if n_repeat:
ws.merge_range(0, pos, 0, ((pos + n_repeat) - 1), title)
pos += (n_repeat - 1)
else:
ws.write_row(nrow, pos, [title])
nrow += 1
pos = start_pos
for field in call['grant']:
if field.get('repeat'):
continue
pos += 1
repeat = [(f['title'] or f['identifier'].capitalize()) for f in call['grant'] if (f.get('repeat') == field['identifier'])]
n_repeat = len(repeat)
if n_repeat:
ws.write_row(nrow, pos, repeat)
pos += (n_repeat - 1)
nrow += 1
for grant in grants:
n_merge = 1
for field in call['grant']:
if (field['type'] != constants.REPEAT):
continue
try:
n_merge = max(n_merge, (grant['values'][field['identifier']] or 0))
except KeyError:
pass
ncol = 0
if (n_merge > 1):
ws.merge_range(nrow, ncol, ((nrow + n_merge) - 1), ncol, '')
ws.write_url(nrow, ncol, flask.url_for('grant.display', gid=grant['identifier'], _external=True), string=grant['identifier'])
ncol += 1
if (n_merge > 1):
ws.merge_range(nrow, ncol, ((nrow + n_merge) - 1), ncol, '')
ws.write_string(nrow, ncol, ((grant['errors'] and 'Incomplete') or 'Complete'))
ncol += 1
if (n_merge > 1):
ws.merge_range(nrow, ncol, ((nrow + n_merge) - 1), ncol, '')
ws.write_url(nrow, ncol, flask.url_for('proposal.display', pid=grant['proposal'], _external=True), string=grant['proposal'])
ncol += 1
if (n_merge > 1):
ws.merge_range(nrow, ncol, ((nrow + n_merge) - 1), ncol, '')
proposal = anubis.proposal.get_proposal(grant['proposal'])
ws.write_string(nrow, ncol, proposal['title'])
ncol += 1
if (n_merge > 1):
ws.merge_range(nrow, ncol, ((nrow + n_merge) - 1), ncol, '')
user = anubis.user.get_user(username=proposal['user'])
if (n_merge > 1):
ws.merge_range(nrow, ncol, ((nrow + n_merge) - 1), ncol, '')
ws.write_string(nrow, ncol, utils.get_fullname(user))
ncol += 1
if (n_merge > 1):
ws.merge_range(nrow, ncol, ((nrow + n_merge) - 1), ncol, '')
ws.write_string(nrow, ncol, (user.get('email') or ''))
ncol += 1
if (n_merge > 1):
ws.merge_range(nrow, ncol, ((nrow + n_merge) - 1), ncol, '')
ws.write_string(nrow, ncol, (user.get('affiliation') or ''))
max_ncol = ncol
ncol += 1
for field in call['grant']:
if field.get('repeat'):
continue
if (field['type'] == constants.REPEAT):
n_repeat = grant['values'][field['identifier']]
if (not n_repeat):
continue
col_offset = 0
for repeated in call['grant']:
if (repeated.get('repeat') != field['identifier']):
continue
for row_offset in range(n_repeat):
fid = f"{repeated['identifier']}-{(row_offset + 1)}"
write_cell(ws, (nrow + row_offset), (ncol + col_offset), grant['values'].get(fid), repeated['type'], grant['identifier'], fid)
max_ncol = max(max_ncol, (ncol + col_offset))
col_offset += 1
else:
if (n_merge > 1):
ws.merge_range(nrow, ncol, ((nrow + n_merge) - 1), ncol, '')
write_cell(ws, nrow, ncol, grant['values'].get(field['identifier']), field['type'], grant['identifier'], field['identifier'])
max_ncol = max(max_ncol, ncol)
ncol += 1
nrow += n_merge
if (max_ncol > 6):
ws.set_column((6 + 1), max_ncol, 20, normal_text_format)
wb.close()
return output.getvalue() | Return the content for the XLSX file for the list of grants. | anubis/grants.py | get_call_grants_xlsx | pekrau/Anubis | 2 | python | def get_call_grants_xlsx(call, grants):
output = io.BytesIO()
wb = xlsxwriter.Workbook(output, {'in_memory': True})
head_text_format = wb.add_format({'bold': True, 'text_wrap': True, 'bg_color': '#9ECA7F', 'font_size': 15, 'align': 'center', 'border': 1})
normal_text_format = wb.add_format({'font_size': 14, 'align': 'left', 'valign': 'vcenter'})
ws = wb.add_worksheet(f"Grants in call {call['identifier']}"[:31])
ws.freeze_panes(2, 1)
ws.set_row(0, 60, head_text_format)
ws.set_row(1, 60, head_text_format)
ws.set_column(0, 2, 10, normal_text_format)
ws.set_column(3, 3, 40, normal_text_format)
ws.set_column(4, 6, 20, normal_text_format)
nrow = 0
row = ['Grant', 'Status', 'Proposal', 'Proposal title', 'Submitter', 'Email', 'Affiliation']
ws.write_row(nrow, 0, row)
pos = (len(row) - 1)
start_pos = pos
for field in call['grant']:
if field.get('repeat'):
continue
title = (field['title'] or field['identifier'].capitalize())
pos += 1
n_repeat = len([f for f in call['grant'] if (f.get('repeat') == field['identifier'])])
if n_repeat:
ws.merge_range(0, pos, 0, ((pos + n_repeat) - 1), title)
pos += (n_repeat - 1)
else:
ws.write_row(nrow, pos, [title])
nrow += 1
pos = start_pos
for field in call['grant']:
if field.get('repeat'):
continue
pos += 1
repeat = [(f['title'] or f['identifier'].capitalize()) for f in call['grant'] if (f.get('repeat') == field['identifier'])]
n_repeat = len(repeat)
if n_repeat:
ws.write_row(nrow, pos, repeat)
pos += (n_repeat - 1)
nrow += 1
for grant in grants:
n_merge = 1
for field in call['grant']:
if (field['type'] != constants.REPEAT):
continue
try:
n_merge = max(n_merge, (grant['values'][field['identifier']] or 0))
except KeyError:
pass
ncol = 0
if (n_merge > 1):
ws.merge_range(nrow, ncol, ((nrow + n_merge) - 1), ncol, '')
ws.write_url(nrow, ncol, flask.url_for('grant.display', gid=grant['identifier'], _external=True), string=grant['identifier'])
ncol += 1
if (n_merge > 1):
ws.merge_range(nrow, ncol, ((nrow + n_merge) - 1), ncol, '')
ws.write_string(nrow, ncol, ((grant['errors'] and 'Incomplete') or 'Complete'))
ncol += 1
if (n_merge > 1):
ws.merge_range(nrow, ncol, ((nrow + n_merge) - 1), ncol, '')
ws.write_url(nrow, ncol, flask.url_for('proposal.display', pid=grant['proposal'], _external=True), string=grant['proposal'])
ncol += 1
if (n_merge > 1):
ws.merge_range(nrow, ncol, ((nrow + n_merge) - 1), ncol, '')
proposal = anubis.proposal.get_proposal(grant['proposal'])
ws.write_string(nrow, ncol, proposal['title'])
ncol += 1
if (n_merge > 1):
ws.merge_range(nrow, ncol, ((nrow + n_merge) - 1), ncol, '')
user = anubis.user.get_user(username=proposal['user'])
if (n_merge > 1):
ws.merge_range(nrow, ncol, ((nrow + n_merge) - 1), ncol, '')
ws.write_string(nrow, ncol, utils.get_fullname(user))
ncol += 1
if (n_merge > 1):
ws.merge_range(nrow, ncol, ((nrow + n_merge) - 1), ncol, '')
ws.write_string(nrow, ncol, (user.get('email') or ''))
ncol += 1
if (n_merge > 1):
ws.merge_range(nrow, ncol, ((nrow + n_merge) - 1), ncol, '')
ws.write_string(nrow, ncol, (user.get('affiliation') or ''))
max_ncol = ncol
ncol += 1
for field in call['grant']:
if field.get('repeat'):
continue
if (field['type'] == constants.REPEAT):
n_repeat = grant['values'][field['identifier']]
if (not n_repeat):
continue
col_offset = 0
for repeated in call['grant']:
if (repeated.get('repeat') != field['identifier']):
continue
for row_offset in range(n_repeat):
fid = f"{repeated['identifier']}-{(row_offset + 1)}"
write_cell(ws, (nrow + row_offset), (ncol + col_offset), grant['values'].get(fid), repeated['type'], grant['identifier'], fid)
max_ncol = max(max_ncol, (ncol + col_offset))
col_offset += 1
else:
if (n_merge > 1):
ws.merge_range(nrow, ncol, ((nrow + n_merge) - 1), ncol, '')
write_cell(ws, nrow, ncol, grant['values'].get(field['identifier']), field['type'], grant['identifier'], field['identifier'])
max_ncol = max(max_ncol, ncol)
ncol += 1
nrow += n_merge
if (max_ncol > 6):
ws.set_column((6 + 1), max_ncol, 20, normal_text_format)
wb.close()
return output.getvalue() | def get_call_grants_xlsx(call, grants):
output = io.BytesIO()
wb = xlsxwriter.Workbook(output, {'in_memory': True})
head_text_format = wb.add_format({'bold': True, 'text_wrap': True, 'bg_color': '#9ECA7F', 'font_size': 15, 'align': 'center', 'border': 1})
normal_text_format = wb.add_format({'font_size': 14, 'align': 'left', 'valign': 'vcenter'})
ws = wb.add_worksheet(f"Grants in call {call['identifier']}"[:31])
ws.freeze_panes(2, 1)
ws.set_row(0, 60, head_text_format)
ws.set_row(1, 60, head_text_format)
ws.set_column(0, 2, 10, normal_text_format)
ws.set_column(3, 3, 40, normal_text_format)
ws.set_column(4, 6, 20, normal_text_format)
nrow = 0
row = ['Grant', 'Status', 'Proposal', 'Proposal title', 'Submitter', 'Email', 'Affiliation']
ws.write_row(nrow, 0, row)
pos = (len(row) - 1)
start_pos = pos
for field in call['grant']:
if field.get('repeat'):
continue
title = (field['title'] or field['identifier'].capitalize())
pos += 1
n_repeat = len([f for f in call['grant'] if (f.get('repeat') == field['identifier'])])
if n_repeat:
ws.merge_range(0, pos, 0, ((pos + n_repeat) - 1), title)
pos += (n_repeat - 1)
else:
ws.write_row(nrow, pos, [title])
nrow += 1
pos = start_pos
for field in call['grant']:
if field.get('repeat'):
continue
pos += 1
repeat = [(f['title'] or f['identifier'].capitalize()) for f in call['grant'] if (f.get('repeat') == field['identifier'])]
n_repeat = len(repeat)
if n_repeat:
ws.write_row(nrow, pos, repeat)
pos += (n_repeat - 1)
nrow += 1
for grant in grants:
n_merge = 1
for field in call['grant']:
if (field['type'] != constants.REPEAT):
continue
try:
n_merge = max(n_merge, (grant['values'][field['identifier']] or 0))
except KeyError:
pass
ncol = 0
if (n_merge > 1):
ws.merge_range(nrow, ncol, ((nrow + n_merge) - 1), ncol, '')
ws.write_url(nrow, ncol, flask.url_for('grant.display', gid=grant['identifier'], _external=True), string=grant['identifier'])
ncol += 1
if (n_merge > 1):
ws.merge_range(nrow, ncol, ((nrow + n_merge) - 1), ncol, '')
ws.write_string(nrow, ncol, ((grant['errors'] and 'Incomplete') or 'Complete'))
ncol += 1
if (n_merge > 1):
ws.merge_range(nrow, ncol, ((nrow + n_merge) - 1), ncol, '')
ws.write_url(nrow, ncol, flask.url_for('proposal.display', pid=grant['proposal'], _external=True), string=grant['proposal'])
ncol += 1
if (n_merge > 1):
ws.merge_range(nrow, ncol, ((nrow + n_merge) - 1), ncol, '')
proposal = anubis.proposal.get_proposal(grant['proposal'])
ws.write_string(nrow, ncol, proposal['title'])
ncol += 1
if (n_merge > 1):
ws.merge_range(nrow, ncol, ((nrow + n_merge) - 1), ncol, '')
user = anubis.user.get_user(username=proposal['user'])
if (n_merge > 1):
ws.merge_range(nrow, ncol, ((nrow + n_merge) - 1), ncol, '')
ws.write_string(nrow, ncol, utils.get_fullname(user))
ncol += 1
if (n_merge > 1):
ws.merge_range(nrow, ncol, ((nrow + n_merge) - 1), ncol, '')
ws.write_string(nrow, ncol, (user.get('email') or ''))
ncol += 1
if (n_merge > 1):
ws.merge_range(nrow, ncol, ((nrow + n_merge) - 1), ncol, '')
ws.write_string(nrow, ncol, (user.get('affiliation') or ''))
max_ncol = ncol
ncol += 1
for field in call['grant']:
if field.get('repeat'):
continue
if (field['type'] == constants.REPEAT):
n_repeat = grant['values'][field['identifier']]
if (not n_repeat):
continue
col_offset = 0
for repeated in call['grant']:
if (repeated.get('repeat') != field['identifier']):
continue
for row_offset in range(n_repeat):
fid = f"{repeated['identifier']}-{(row_offset + 1)}"
write_cell(ws, (nrow + row_offset), (ncol + col_offset), grant['values'].get(fid), repeated['type'], grant['identifier'], fid)
max_ncol = max(max_ncol, (ncol + col_offset))
col_offset += 1
else:
if (n_merge > 1):
ws.merge_range(nrow, ncol, ((nrow + n_merge) - 1), ncol, '')
write_cell(ws, nrow, ncol, grant['values'].get(field['identifier']), field['type'], grant['identifier'], field['identifier'])
max_ncol = max(max_ncol, ncol)
ncol += 1
nrow += n_merge
if (max_ncol > 6):
ws.set_column((6 + 1), max_ncol, 20, normal_text_format)
wb.close()
return output.getvalue()<|docstring|>Return the content for the XLSX file for the list of grants.<|endoftext|> |
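A minimal in-memory xlsxwriter pattern mirroring the workbook setup above (sheet and header names are placeholders):
import io
import xlsxwriter

out = io.BytesIO()
wb = xlsxwriter.Workbook(out, {'in_memory': True})
ws = wb.add_worksheet('Grants')
ws.write_row(0, 0, ['Grant', 'Status', 'Proposal'])
wb.close()
content = out.getvalue()  # bytes, ready for an HTTP response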
cac77e0610b2ba090610f1966d50ef2c6889aa617f2dcdeadf8f1b9b8ed0f7d8 | @blueprint.route('/call/<cid>.zip')
@utils.login_required
def call_zip(cid):
'Return a zip file containing the XLSX file of all grants for a call\n and all documents in all grant dossiers.\n '
call = anubis.call.get_call(cid)
if (call is None):
return utils.error('No such call.', flask.url_for('home'))
if (not anubis.call.allow_view(call)):
return utils.error('You may not view the call.', flask.url_for('home'))
if (not anubis.call.allow_view_grants(call)):
return utils.error('You may not view the grants of the call.', flask.url_for('call.display', cid=call['identifier']))
cid = cid.replace(':', '-')
grants = utils.get_docs_view('grants', 'call', call['identifier'])
output = io.BytesIO()
with zipfile.ZipFile(output, 'w') as outfile:
outfile.writestr(f'{cid}_grants.xlsx', get_call_grants_xlsx(call, grants))
for grant in grants:
for document in anubis.grant.get_grant_documents(grant):
outfile.writestr(document['filename'], document['content'])
response = flask.make_response(output.getvalue())
response.headers.set('Content-Type', constants.ZIP_MIMETYPE)
response.headers.set('Content-Disposition', 'attachment', filename=f'{cid}_grants.zip')
return response | Return a zip file containing the XLSX file of all grants for a call
and all documents in all grant dossiers. | anubis/grants.py | call_zip | pekrau/Anubis | 2 | python | @blueprint.route('/call/<cid>.zip')
@utils.login_required
def call_zip(cid):
'Return a zip file containing the XLSX file of all grants for a call\n and all documents in all grant dossiers.\n '
call = anubis.call.get_call(cid)
if (call is None):
return utils.error('No such call.', flask.url_for('home'))
if (not anubis.call.allow_view(call)):
return utils.error('You may not view the call.', flask.url_for('home'))
if (not anubis.call.allow_view_grants(call)):
return utils.error('You may not view the grants of the call.', flask.url_for('call.display', cid=call['identifier']))
cid = cid.replace(':', '-')
grants = utils.get_docs_view('grants', 'call', call['identifier'])
output = io.BytesIO()
with zipfile.ZipFile(output, 'w') as outfile:
outfile.writestr(f'{cid}_grants.xlsx', get_call_grants_xlsx(call, grants))
for grant in grants:
for document in anubis.grant.get_grant_documents(grant):
outfile.writestr(document['filename'], document['content'])
response = flask.make_response(output.getvalue())
response.headers.set('Content-Type', constants.ZIP_MIMETYPE)
response.headers.set('Content-Disposition', 'attachment', filename=f'{cid}_grants.zip')
return response | @blueprint.route('/call/<cid>.zip')
@utils.login_required
def call_zip(cid):
'Return a zip file containing the XLSX file of all grants for a call\n and all documents in all grant dossiers.\n '
call = anubis.call.get_call(cid)
if (call is None):
return utils.error('No such call.', flask.url_for('home'))
if (not anubis.call.allow_view(call)):
return utils.error('You may not view the call.', flask.url_for('home'))
if (not anubis.call.allow_view_grants(call)):
return utils.error('You may not view the grants of the call.', flask.url_for('call.display', cid=call['identifier']))
cid = cid.replace(':', '-')
grants = utils.get_docs_view('grants', 'call', call['identifier'])
output = io.BytesIO()
with zipfile.ZipFile(output, 'w') as outfile:
outfile.writestr(f'{cid}_grants.xlsx', get_call_grants_xlsx(call, grants))
for grant in grants:
for document in anubis.grant.get_grant_documents(grant):
outfile.writestr(document['filename'], document['content'])
response = flask.make_response(output.getvalue())
response.headers.set('Content-Type', constants.ZIP_MIMETYPE)
response.headers.set('Content-Disposition', 'attachment', filename=f'{cid}_grants.zip')
return response<|docstring|>Return a zip file containing the XLSX file of all grants for a call
and all documents in all grant dossiers.<|endoftext|> |
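The in-memory zip pattern used by call_zip, reduced to its core (standard library only; names are placeholders):
import io
import zipfile

buf = io.BytesIO()
with zipfile.ZipFile(buf, 'w') as zf:
    zf.writestr('example.txt', b'payload')  # filename plus bytes, as above
data = buf.getvalue()  # bytes suitable for flask.make_response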
03de1a344e4e08884f888687d3ca53560648546b0147152fbee63d62c76203bf | @blueprint.route('/user/<username>')
@utils.login_required
def user(username):
'List all grants for a user, including the grants the user has access to.'
user = anubis.user.get_user(username=username)
if (user is None):
return utils.error('No such user.', flask.url_for('home'))
if (not anubis.user.allow_view(user)):
return utils.error("You may not view the user's grants.", flask.url_for('home'))
grants = utils.get_docs_view('grants', 'user', user['username'])
grants.extend(utils.get_docs_view('grants', 'access', user['username']))
return flask.render_template('grants/user.html', user=user, grants=grants) | List all grants for a user, including the grants the user has access to. | anubis/grants.py | user | pekrau/Anubis | 2 | python | @blueprint.route('/user/<username>')
@utils.login_required
def user(username):
user = anubis.user.get_user(username=username)
if (user is None):
return utils.error('No such user.', flask.url_for('home'))
if (not anubis.user.allow_view(user)):
return utils.error("You may not view the user's grants.", flask.url_for('home'))
grants = utils.get_docs_view('grants', 'user', user['username'])
grants.extend(utils.get_docs_view('grants', 'access', user['username']))
return flask.render_template('grants/user.html', user=user, grants=grants) | @blueprint.route('/user/<username>')
@utils.login_required
def user(username):
user = anubis.user.get_user(username=username)
if (user is None):
return utils.error('No such user.', flask.url_for('home'))
if (not anubis.user.allow_view(user)):
return utils.error("You may not view the user's grants.", flask.url_for('home'))
grants = utils.get_docs_view('grants', 'user', user['username'])
grants.extend(utils.get_docs_view('grants', 'access', user['username']))
return flask.render_template('grants/user.html', user=user, grants=grants)<|docstring|>List all grants for a user, including the grants the user has access to.<|endoftext|> |
f41331c322dd81a67011f9511a85bf00e443569672bbcf50e5aaa9731572a8c8 | def try_cast_or_404(cast_type, _input):
' Used for GET variables i.e. when you expect to receive an int\n returns 404 if the cast is unsuccessful\n '
assert isinstance(cast_type, type)
try:
if (_input is None):
raise ValueError
return cast_type(_input)
except (ValueError, TypeError):
raise http.Http404 | Used for GET variables i.e. when you expect to receive an int
returns 404 if the cast is unsuccessful | front_edit/views.py | try_cast_or_404 | hwms/django-front-edit | 0 | python | def try_cast_or_404(cast_type, _input):
' Used for GET variables i.e. when you expect to receive an int\n returns 404 if the cast is unsuccessful\n '
assert isinstance(cast_type, type)
try:
if (_input is None):
raise ValueError
return cast_type(_input)
except (ValueError, TypeError):
raise http.Http404 | def try_cast_or_404(cast_type, _input):
' Used for GET variables i.e. when you expect to receive an int\n returns 404 if the cast is unsuccessful\n '
assert isinstance(cast_type, type)
try:
if (_input is None):
raise ValueError
return cast_type(_input)
except (ValueError, TypeError):
raise http.Http404<|docstring|>Used for GET variables i.e. when you expect to receive an int
returns 404 if the cast is unsuccessful<|endoftext|> |
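Hypothetical usage inside a Django view (request is a django.http.HttpRequest; the parameter name is illustrative):
# page = try_cast_or_404(int, request.GET.get('page'))  # '3' -> 3
# try_cast_or_404(int, 'abc')  # raises http.Http404 instead of ValueError
# try_cast_or_404(int, None)   # also raises http.Http404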
1c372a4ce0359e45cf6904156d9ef374829604113217f3ed5ef070c49b2c2d11 | def create_creatives(creatives):
'\n Creates creatives in DFP.\n\n Args:\n creatives (arr): an array of objects, each a creative configuration\n Returns:\n an array: an array of created creative IDs\n '
dfp_client = get_client()
creative_service = dfp_client.GetService('CreativeService', version='v202102')
creatives = creative_service.createCreatives(creatives)
created_creative_ids = []
for creative in creatives:
created_creative_ids.append(creative['id'])
logger.info(u'Created creative with name "{name}".'.format(name=creative['name']))
return created_creative_ids | Creates creatives in DFP.
Args:
creatives (arr): an array of objects, each a creative configuration
Returns:
an array: an array of created creative IDs | dfp/create_creatives.py | create_creatives | Pubmatic-Dhruv-Sonone/dfp-prebid-setup | 5 | python | def create_creatives(creatives):
'\n Creates creatives in DFP.\n\n Args:\n creatives (arr): an array of objects, each a creative configuration\n Returns:\n an array: an array of created creative IDs\n '
dfp_client = get_client()
creative_service = dfp_client.GetService('CreativeService', version='v202102')
creatives = creative_service.createCreatives(creatives)
created_creative_ids = []
for creative in creatives:
created_creative_ids.append(creative['id'])
logger.info(u'Created creative with name "{name}".'.format(name=creative['name']))
return created_creative_ids | def create_creatives(creatives):
'\n Creates creatives in DFP.\n\n Args:\n creatives (arr): an array of objects, each a creative configuration\n Returns:\n an array: an array of created creative IDs\n '
dfp_client = get_client()
creative_service = dfp_client.GetService('CreativeService', version='v202102')
creatives = creative_service.createCreatives(creatives)
created_creative_ids = []
for creative in creatives:
created_creative_ids.append(creative['id'])
logger.info(u'Created creative with name "{name}".'.format(name=creative['name']))
return created_creative_ids<|docstring|>Creates creatives in DFP.
Args:
creatives (arr): an array of objects, each a creative configuration
Returns:
an array: an array of created creative IDs<|endoftext|> |
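A hedged sketch of the expected input: a list of config dicts shaped like the output of create_creative_config below (the advertiser ID and snippet are placeholders):
configs = [{'xsi_type': 'ThirdPartyCreative',
            'name': 'prebid: HB My Order, #1',
            'advertiserId': 12345678,  # placeholder
            'size': {'width': 1, 'height': 1},
            'snippet': '<script>...</script>',  # placeholder
            'isSafeFrameCompatible': False}]
# creative_ids = create_creatives(configs)  # calls the live DFP CreativeService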
68f13bb474926436e8e943d8bae212b1a26482038511d80302aed32101630569 | def create_creative_config(name, advertiser_id, size=None, creative_file=None, safe_frame=False):
'\n Creates a creative config object.\n\n Args:\n name (str): the name of the creative\n advertiser_id (int): the ID of the advertiser in DFP\n sizes (string array): size for the creative\n creative_file (string): the name of the file containing creative\n safe_frame (bool): Flag to indicate Whether the Creative is compatible for SafeFrame rendering.\n\n\n Returns:\n an object: the line item config\n '
if (creative_file == None):
creative_file = 'creative_snippet.html'
snippet_file_path = os.path.join(os.path.dirname(__file__), creative_file)
with open(snippet_file_path, 'r') as snippet_file:
snippet = snippet_file.read()
if (size == None):
size = {'width': 1, 'height': 1}
config = {'xsi_type': 'ThirdPartyCreative', 'name': name, 'advertiserId': advertiser_id, 'size': size, 'snippet': snippet, 'isSafeFrameCompatible': safe_frame}
return config | Creates a creative config object.
Args:
name (str): the name of the creative
advertiser_id (int): the ID of the advertiser in DFP
sizes (string array): size for the creative
creative_file (string): the name of the file containing creative
safe_frame (bool): Flag to indicate Whether the Creative is compatible for SafeFrame rendering.
Returns:
an object: the line item config | dfp/create_creatives.py | create_creative_config | Pubmatic-Dhruv-Sonone/dfp-prebid-setup | 5 | python | def create_creative_config(name, advertiser_id, size=None, creative_file=None, safe_frame=False):
'\n Creates a creative config object.\n\n Args:\n name (str): the name of the creative\n advertiser_id (int): the ID of the advertiser in DFP\n sizes (string array): size for the creative\n creative_file (string): the name of the file containing creative\n safe_frame (bool): Flag to indicate Whether the Creative is compatible for SafeFrame rendering.\n\n\n Returns:\n an object: the line item config\n '
if (creative_file == None):
creative_file = 'creative_snippet.html'
snippet_file_path = os.path.join(os.path.dirname(__file__), creative_file)
with open(snippet_file_path, 'r') as snippet_file:
snippet = snippet_file.read()
if (size == None):
size = {'width': 1, 'height': 1}
config = {'xsi_type': 'ThirdPartyCreative', 'name': name, 'advertiserId': advertiser_id, 'size': size, 'snippet': snippet, 'isSafeFrameCompatible': safe_frame}
return config | def create_creative_config(name, advertiser_id, size=None, creative_file=None, safe_frame=False):
'\n Creates a creative config object.\n\n Args:\n name (str): the name of the creative\n advertiser_id (int): the ID of the advertiser in DFP\n sizes (string array): size for the creative\n creative_file (string): the name of the file containing creative\n safe_frame (bool): Flag to indicate Whether the Creative is compatible for SafeFrame rendering.\n\n\n Returns:\n an object: the line item config\n '
if (creative_file == None):
creative_file = 'creative_snippet.html'
snippet_file_path = os.path.join(os.path.dirname(__file__), creative_file)
with open(snippet_file_path, 'r') as snippet_file:
snippet = snippet_file.read()
if (size == None):
size = {'width': 1, 'height': 1}
config = {'xsi_type': 'ThirdPartyCreative', 'name': name, 'advertiserId': advertiser_id, 'size': size, 'snippet': snippet, 'isSafeFrameCompatible': safe_frame}
return config<|docstring|>Creates a creative config object.
Args:
name (str): the name of the creative
advertiser_id (int): the ID of the advertiser in DFP
sizes (string array): size for the creative
creative_file (string): the name of the file containing creative
safe_frame (bool): Flag to indicate Whether the Creative is compatible for SafeFrame rendering.
Returns:
an object: the line item config<|endoftext|> |
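With no size argument the helper defaults to a 1x1 third-party creative; a sketch (assumes the bundled creative_snippet.html is readable, and the advertiser ID is a placeholder):
cfg = create_creative_config(name='prebid: HB My Order, #1', advertiser_id=12345678)
assert cfg['size'] == {'width': 1, 'height': 1}
assert cfg['xsi_type'] == 'ThirdPartyCreative'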
6fd44e83b9c1a9345a11d44b40c0a94e35326d5b39b42687d84d16e67bc7b470 | def build_creative_name(bidder_code, order_name, creative_num, size=None, prefix=None):
'\n    Returns a name for a creative.\n\n    Args:\n      bidder_code (str): the bidder code for the header bidding partner\n      order_name (int): the name of the order in DFP\n      creative_num (int): the num_creatives distinguishing this creative from any\n        duplicates\n    Returns:\n      a string\n  '
if (prefix != None):
if (size == None):
return '{prefix}_1x1'.format(prefix=prefix)
return '{prefix}_{width}x{height}'.format(prefix=prefix, width=size['width'], height=size['height'])
if (size == None):
return '{bidder_code}: HB {order_name}, #{num}'.format(bidder_code=bidder_code, order_name=order_name, num=creative_num)
else:
return '{bidder_code}: HB {order_name}, {width}x{height} #{num}'.format(bidder_code=bidder_code, order_name=order_name, width=size['width'], height=size['height'], num=creative_num) | Returns a name for a creative.
Args:
bidder_code (str): the bidder code for the header bidding partner
order_name (int): the name of the order in DFP
creative_num (int): the num_creatives distinguishing this creative from any
duplicates
Returns:
a string | dfp/create_creatives.py | build_creative_name | Pubmatic-Dhruv-Sonone/dfp-prebid-setup | 5 | python | def build_creative_name(bidder_code, order_name, creative_num, size=None, prefix=None):
'\n    Returns a name for a creative.\n\n    Args:\n      bidder_code (str): the bidder code for the header bidding partner\n      order_name (int): the name of the order in DFP\n      creative_num (int): the num_creatives distinguishing this creative from any\n        duplicates\n    Returns:\n      a string\n  '
if (prefix != None):
if (size == None):
return '{prefix}_1x1'.format(prefix=prefix)
return '{prefix}_{width}x{height}'.format(prefix=prefix, width=size['width'], height=size['height'])
if (size == None):
return '{bidder_code}: HB {order_name}, #{num}'.format(bidder_code=bidder_code, order_name=order_name, num=creative_num)
else:
return '{bidder_code}: HB {order_name}, {width}x{height} #{num}'.format(bidder_code=bidder_code, order_name=order_name, width=size['width'], height=size['height'], num=creative_num) | def build_creative_name(bidder_code, order_name, creative_num, size=None, prefix=None):
'\n    Returns a name for a creative.\n\n    Args:\n      bidder_code (str): the bidder code for the header bidding partner\n      order_name (int): the name of the order in DFP\n      creative_num (int): the num_creatives distinguishing this creative from any\n        duplicates\n    Returns:\n      a string\n  '
if (prefix != None):
if (size == None):
return '{prefix}_1x1'.format(prefix=prefix)
return '{prefix}_{width}x{height}'.format(prefix=prefix, width=size['width'], height=size['height'])
if (size == None):
return '{bidder_code}: HB {order_name}, #{num}'.format(bidder_code=bidder_code, order_name=order_name, num=creative_num)
else:
return '{bidder_code}: HB {order_name}, {width}x{height} #{num}'.format(bidder_code=bidder_code, order_name=order_name, width=size['width'], height=size['height'], num=creative_num)<|docstring|>Returns a name for a creative.
Args:
bidder_code (str): the bidder code for the header bidding partner
order_name (int): the name of the order in DFP
creative_num (int): the num_creatives distinguishing this creative from any
duplicates
Returns:
a string<|endoftext|> |
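Naming examples derived directly from the format strings above (bidder code and order name are placeholders):
build_creative_name('prebid', 'My Order', 1)
# -> 'prebid: HB My Order, #1'
build_creative_name('prebid', 'My Order', 2, size={'width': 300, 'height': 250})
# -> 'prebid: HB My Order, 300x250 #2'
build_creative_name('prebid', 'My Order', 1, prefix='HB')
# -> 'HB_1x1'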
b7ae80f3d825b6d5b855762dc822b30d52cd886470b576985a1fca7ea71ab774 | def create_duplicate_creative_configs(bidder_code, order_name, advertiser_id, sizes=None, num_creatives=1, creative_file=None, safe_frame=False, prefix=None):
'\n Returns an array of creative config object.\n\n Args:\n bidder_code (str): the bidder code for the header bidding partner\n order_name (int): the name of the order in DFP\n advertiser_id (int): the ID of the advertiser in DFP\n sizes(String array): sizes for creative\n num_creatives (int): how many creative configs to generate\n creative_file: (string) file name containing creative content\n safe_frame (bool): to enable safe_frame option\n prefix (string): creative name prefix\n Returns:\n an array: an array of length `num_creatives`, each item a line item config\n '
creative_configs = []
if (sizes == None):
for creative_num in range(1, (num_creatives + 1)):
config = create_creative_config(name=build_creative_name(bidder_code, order_name, creative_num, prefix=prefix), advertiser_id=advertiser_id, creative_file=creative_file, safe_frame=safe_frame)
creative_configs.append(config)
else:
for size in sizes:
for creative_num in range(1, (num_creatives + 1)):
config = create_creative_config(name=build_creative_name(bidder_code, order_name, creative_num, size, prefix), advertiser_id=advertiser_id, size=size, creative_file=creative_file, safe_frame=safe_frame)
creative_configs.append(config)
return creative_configs | Returns an array of creative config object.
Args:
bidder_code (str): the bidder code for the header bidding partner
order_name (int): the name of the order in DFP
advertiser_id (int): the ID of the advertiser in DFP
sizes(String array): sizes for creative
num_creatives (int): how many creative configs to generate
creative_file: (string) file name containing creative content
safe_frame (bool): to enable safe_frame option
prefix (string): creative name prefix
Returns:
an array: an array of length `num_creatives`, each item a line item config | dfp/create_creatives.py | create_duplicate_creative_configs | Pubmatic-Dhruv-Sonone/dfp-prebid-setup | 5 | python | def create_duplicate_creative_configs(bidder_code, order_name, advertiser_id, sizes=None, num_creatives=1, creative_file=None, safe_frame=False, prefix=None):
'\n Returns an array of creative config object.\n\n Args:\n bidder_code (str): the bidder code for the header bidding partner\n order_name (int): the name of the order in DFP\n advertiser_id (int): the ID of the advertiser in DFP\n sizes(String array): sizes for creative\n num_creatives (int): how many creative configs to generate\n creative_file: (string) file name containing creative content\n safe_frame (bool): to enable safe_frame option\n prefix (string): creative name prefix\n Returns:\n an array: an array of length `num_creatives`, each item a line item config\n '
creative_configs = []
if (sizes == None):
for creative_num in range(1, (num_creatives + 1)):
config = create_creative_config(name=build_creative_name(bidder_code, order_name, creative_num, prefix=prefix), advertiser_id=advertiser_id, creative_file=creative_file, safe_frame=safe_frame)
creative_configs.append(config)
else:
for size in sizes:
for creative_num in range(1, (num_creatives + 1)):
config = create_creative_config(name=build_creative_name(bidder_code, order_name, creative_num, size, prefix), advertiser_id=advertiser_id, size=size, creative_file=creative_file, safe_frame=safe_frame)
creative_configs.append(config)
return creative_configs | def create_duplicate_creative_configs(bidder_code, order_name, advertiser_id, sizes=None, num_creatives=1, creative_file=None, safe_frame=False, prefix=None):
'\n Returns an array of creative config object.\n\n Args:\n bidder_code (str): the bidder code for the header bidding partner\n order_name (int): the name of the order in DFP\n advertiser_id (int): the ID of the advertiser in DFP\n sizes(String array): sizes for creative\n num_creatives (int): how many creative configs to generate\n creative_file: (string) file name containing creative content\n safe_frame (bool): to enable safe_frame option\n prefix (string): creative name prefix\n Returns:\n an array: an array of length `num_creatives`, each item a line item config\n '
creative_configs = []
if (sizes == None):
for creative_num in range(1, (num_creatives + 1)):
config = create_creative_config(name=build_creative_name(bidder_code, order_name, creative_num, prefix=prefix), advertiser_id=advertiser_id, creative_file=creative_file, safe_frame=safe_frame)
creative_configs.append(config)
else:
for size in sizes:
for creative_num in range(1, (num_creatives + 1)):
config = create_creative_config(name=build_creative_name(bidder_code, order_name, creative_num, size, prefix), advertiser_id=advertiser_id, size=size, creative_file=creative_file, safe_frame=safe_frame)
creative_configs.append(config)
return creative_configs<|docstring|>Returns an array of creative config object.
Args:
bidder_code (str): the bidder code for the header bidding partner
order_name (int): the name of the order in DFP
advertiser_id (int): the ID of the advertiser in DFP
sizes(String array): sizes for creative
num_creatives (int): how many creative configs to generate
creative_file: (string) file name containing creative content
safe_frame (bool): to enable safe_frame option
prefix (string): creative name prefix
Returns:
an array: an array of length `num_creatives`, each item a line item config<|endoftext|> |
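Putting the helpers together, a sketch (the advertiser ID is a placeholder):
configs = create_duplicate_creative_configs(
    bidder_code='prebid', order_name='My Order', advertiser_id=12345678,
    sizes=[{'width': 300, 'height': 250}], num_creatives=2)
# yields two configs named 'prebid: HB My Order, 300x250 #1' and '... #2'
# creative_ids = create_creatives(configs)  # then create them in DFP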
9020d8d0ffa70f2f63b367ff9800defc707224a86b292245474c3b32fe911f3f | def _is_debugger(self):
'\n Am I a debugger?\n '
return (self.target_state == _DEBUGGER_TARGET) | Am I a debugger? | fud/fud/stages/interpreter.py | _is_debugger | crystalhu26/calyx | 0 | python | def _is_debugger(self):
'\n \n '
return (self.target_state == _DEBUGGER_TARGET) | def _is_debugger(self):
'\n \n '
return (self.target_state == _DEBUGGER_TARGET)<|docstring|>Am I a debugger?<|endoftext|> |
c671a797eee19f1ecec681a9bf8f6245d70502f5ea95a7ac9d9821e14cd11198 | @builder.step()
def mktmp() -> SourceType.Directory:
'\n Make temporary directory to store Verilator build files.\n '
return TmpDir() | Make temporary directory to store Verilator build files. | fud/fud/stages/interpreter.py | mktmp | crystalhu26/calyx | 0 | python | @builder.step()
def mktmp() -> SourceType.Directory:
'\n \n '
return TmpDir() | @builder.step()
def mktmp() -> SourceType.Directory:
'\n \n '
return TmpDir()<|docstring|>Make temporary directory to store Verilator build files.<|endoftext|> |
f1c9459383fca3ed01a9e4e4adf9101a13c7cf3239b2389411cde2675e3544e1 | @builder.step()
def convert_json_to_interp_json(tmpdir: SourceType.Directory, json_path: SourceType.Stream):
'\n Creates a data file to initialize the interpreter memories\n '
round_float_to_fixed = config[('stages', self.name, 'round_float_to_fixed')]
convert_to_json(tmpdir.name, sjson.load(json_path, use_decimal=True), round_float_to_fixed) | Creates a data file to initialize the interpreter memories | fud/fud/stages/interpreter.py | convert_json_to_interp_json | crystalhu26/calyx | 0 | python | @builder.step()
def convert_json_to_interp_json(tmpdir: SourceType.Directory, json_path: SourceType.Stream):
'\n \n '
round_float_to_fixed = config[('stages', self.name, 'round_float_to_fixed')]
convert_to_json(tmpdir.name, sjson.load(json_path, use_decimal=True), round_float_to_fixed) | @builder.step()
def convert_json_to_interp_json(tmpdir: SourceType.Directory, json_path: SourceType.Stream):
'\n \n '
round_float_to_fixed = config[('stages', self.name, 'round_float_to_fixed')]
convert_to_json(tmpdir.name, sjson.load(json_path, use_decimal=True), round_float_to_fixed)<|docstring|>Creates a data file to initialize the interpreter memories<|endoftext|>
23fb8cf4bb5e9daa14cb45744cabe4fd77bc33554a2b1a57f8ae64ffc34d1705 | @builder.step(description=cmd)
def interpret(target: SourceType.Path, tmpdir: SourceType.Directory) -> SourceType.Stream:
'\n Invoke the interpreter\n '
command = cmd.format(data_file=(Path(tmpdir.name) / _FILE_NAME), target=str(target))
return shell(command) | Invoke the interpreter | fud/fud/stages/interpreter.py | interpret | crystalhu26/calyx | 0 | python | @builder.step(description=cmd)
def interpret(target: SourceType.Path, tmpdir: SourceType.Directory) -> SourceType.Stream:
'\n \n '
command = cmd.format(data_file=(Path(tmpdir.name) / _FILE_NAME), target=str(target))
return shell(command) | @builder.step(description=cmd)
def interpret(target: SourceType.Path, tmpdir: SourceType.Directory) -> SourceType.Stream:
'\n \n '
command = cmd.format(data_file=(Path(tmpdir.name) / _FILE_NAME), target=str(target))
return shell(command)<|docstring|>Invoke the interpreter<|endoftext|> |
6b037cd1e86f6198927ec533776fe276731c781feb300d4048c1e96a6f7efae3 | @builder.step(description=cmd)
def debug(target: SourceType.Path, tmpdir: SourceType.Directory) -> SourceType.Terminal:
'\n Invoke the debugger\n '
command = cmd.format(data_file=(Path(tmpdir.name) / _FILE_NAME), target=str(target))
transparent_shell(command) | Invoke the debugger | fud/fud/stages/interpreter.py | debug | crystalhu26/calyx | 0 | python | @builder.step(description=cmd)
def debug(target: SourceType.Path, tmpdir: SourceType.Directory) -> SourceType.Terminal:
'\n \n '
command = cmd.format(data_file=(Path(tmpdir.name) / _FILE_NAME), target=str(target))
transparent_shell(command) | @builder.step(description=cmd)
def debug(target: SourceType.Path, tmpdir: SourceType.Directory) -> SourceType.Terminal:
'\n \n '
command = cmd.format(data_file=(Path(tmpdir.name) / _FILE_NAME), target=str(target))
transparent_shell(command)<|docstring|>Invoke the debugger<|endoftext|> |
82306cca72b8aed53e56b8c37a76a4a61cf82ebe36bb4424f1696df348f3eace | @builder.step()
def parse_output(output: SourceType.Stream, json_path: SourceType.UnTyped, tmpdir: SourceType.Directory) -> SourceType.Stream:
'\n Parses a raw interpreter output\n '
out_path = (Path(tmpdir.name) / 'output.json')
output = parse_from_json(output, json_path)
with out_path.open('w') as f:
sjson.dump(output, f, indent=2, sort_keys=True, use_decimal=True)
return out_path.open('rb') | Parses a raw interpreter output | fud/fud/stages/interpreter.py | parse_output | crystalhu26/calyx | 0 | python | @builder.step()
def parse_output(output: SourceType.Stream, json_path: SourceType.UnTyped, tmpdir: SourceType.Directory) -> SourceType.Stream:
'\n \n '
out_path = (Path(tmpdir.name) / 'output.json')
output = parse_from_json(output, json_path)
with out_path.open('w') as f:
sjson.dump(output, f, indent=2, sort_keys=True, use_decimal=True)
return out_path.open('rb') | @builder.step()
def parse_output(output: SourceType.Stream, json_path: SourceType.UnTyped, tmpdir: SourceType.Directory) -> SourceType.Stream:
'\n \n '
out_path = (Path(tmpdir.name) / 'output.json')
output = parse_from_json(output, json_path)
with out_path.open('w') as f:
sjson.dump(output, f, indent=2, sort_keys=True, use_decimal=True)
return out_path.open('rb')<|docstring|>Parses a raw interpreter output<|endoftext|> |
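The Decimal-preserving JSON round-trip used above, in isolation (sjson is simplejson):
import simplejson as sjson
from decimal import Decimal

text = sjson.dumps({'v': Decimal('1.5')}, use_decimal=True)  # '{"v": 1.5}'
back = sjson.loads(text, use_decimal=True)  # {'v': Decimal('1.5')}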
5c7d0db8abcfca5a45a7a3e05a3e51db6a366b270433d7fa1bc5e188a09b6827 | def query(self, filter_by: str=None, order_by: str=None, privileges: 'Optional[List[str]]'=None, full_attribution: bool=False) -> 'Optional[List[AppCondensed]]':
'\n This method queries Qlik Sense apps based on the provided criteria and provides either partial or\n full attribution of the apps based on the setting of full_attribution\n\n Args:\n filter_by: a filter string in jquery format\n order_by: an order by string\n privileges:\n full_attribution: allows the response to contain the full user attribution,\n defaults to False (limited attribution)\n\n Returns: a list of Qlik Sense Apps that meet the query_string criteria (or None)\n '
if full_attribution:
schema = AppSchema()
else:
schema = AppCondensedSchema()
return self._query(schema=schema, filter_by=filter_by, order_by=order_by, privileges=privileges, full_attribution=full_attribution) | This method queries Qlik Sense apps based on the provided criteria and provides either partial or
full attribution of the apps based on the setting of full_attribution
Args:
filter_by: a filter string in jquery format
order_by: an order by string
privileges:
full_attribution: allows the response to contain the full user attribution,
defaults to False (limited attribution)
Returns: a list of Qlik Sense Apps that meet the query_string criteria (or None) | qlik_sense/services/app.py | query | ricardolsmendes/qlik-sense | 2 | python | def query(self, filter_by: str=None, order_by: str=None, privileges: 'Optional[List[str]]'=None, full_attribution: bool=False) -> 'Optional[List[AppCondensed]]':
'\n This method queries Qlik Sense apps based on the provided criteria and provides either partial or\n full attribution of the apps based on the setting of full_attribution\n\n Args:\n filter_by: a filter string in jquery format\n order_by: an order by string\n privileges:\n full_attribution: allows the response to contain the full user attribution,\n defaults to False (limited attribution)\n\n Returns: a list of Qlik Sense Apps that meet the query_string criteria (or None)\n '
if full_attribution:
schema = AppSchema()
else:
schema = AppCondensedSchema()
return self._query(schema=schema, filter_by=filter_by, order_by=order_by, privileges=privileges, full_attribution=full_attribution) | def query(self, filter_by: str=None, order_by: str=None, privileges: 'Optional[List[str]]'=None, full_attribution: bool=False) -> 'Optional[List[AppCondensed]]':
'\n This method queries Qlik Sense apps based on the provided criteria and provides either partial or\n full attribution of the apps based on the setting of full_attribution\n\n Args:\n filter_by: a filter string in jquery format\n order_by: an order by string\n privileges:\n full_attribution: allows the response to contain the full user attribution,\n defaults to False (limited attribution)\n\n Returns: a list of Qlik Sense Apps that meet the query_string criteria (or None)\n '
if full_attribution:
schema = AppSchema()
else:
schema = AppCondensedSchema()
return self._query(schema=schema, filter_by=filter_by, order_by=order_by, privileges=privileges, full_attribution=full_attribution)<|docstring|>This method queries Qlik Sense apps based on the provided criteria and provides either partial or
full attribution of the apps based on the setting of full_attribution
Args:
filter_by: a filter string in jquery format
order_by: an order by string
privileges:
full_attribution: allows the response to contain the full user attribution,
defaults to False (limited attribution)
Returns: a list of Qlik Sense Apps that meet the query_string criteria (or None)<|endoftext|> |
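A hypothetical call against an instantiated app service (the jquery-style filter syntax matches the one used by get_by_name_and_stream below):
# apps = app_service.query(filter_by="stream.name eq 'Everyone'",
#                          full_attribution=True)  # List[App] or None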
cd7284d992c5a5a567199f77b89fbd94c04c300121d26958e0c323d013d14d01 | def get_by_name_and_stream(self, app_name: str, stream_name: str) -> 'Optional[AppCondensed]':
'\n This method is such a common use case of the query() method that it gets its own method\n\n Args:\n app_name: name of the app\n stream_name: name of the stream\n\n Returns: the Qlik Sense app(s) that fit the criteria\n '
filter_by = f"name eq '{app_name}' and stream.name eq '{stream_name}'"
apps = self.query(filter_by=filter_by)
if (isinstance(apps, list) and (len(apps) > 0)):
return apps[0]
return | This method is such a common use case of the query() method that it gets its own method
Args:
app_name: name of the app
stream_name: name of the stream
Returns: the Qlik Sense app(s) that fit the criteria | qlik_sense/services/app.py | get_by_name_and_stream | ricardolsmendes/qlik-sense | 2 | python | def get_by_name_and_stream(self, app_name: str, stream_name: str) -> 'Optional[AppCondensed]':
'\n This method is such a common use case of the query() method that it gets its own method\n\n Args:\n app_name: name of the app\n stream_name: name of the stream\n\n Returns: the Qlik Sense app(s) that fit the criteria\n '
filter_by = f"name eq '{app_name}' and stream.name eq '{stream_name}'"
apps = self.query(filter_by=filter_by)
if (isinstance(apps, list) and (len(apps) > 0)):
return apps[0]
return | def get_by_name_and_stream(self, app_name: str, stream_name: str) -> 'Optional[AppCondensed]':
'\n This method is such a common use case of the query() method that it gets its own method\n\n Args:\n app_name: name of the app\n stream_name: name of the stream\n\n Returns: the Qlik Sense app(s) that fit the criteria\n '
filter_by = f"name eq '{app_name}' and stream.name eq '{stream_name}'"
apps = self.query(filter_by=filter_by)
if (isinstance(apps, list) and (len(apps) > 0)):
return apps[0]
return<|docstring|>This method is such a common use case of the query() method that it gets its own method
Args:
app_name: name of the app
stream_name: name of the stream
Returns: the Qlik Sense app(s) that fit the criteria<|endoftext|> |
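Usage sketch (app_service is an instantiated service; the names are placeholders); the method returns the first matching app or None:
# app = app_service.get_by_name_and_stream(app_name='Sales', stream_name='Everyone')
# if app is not None:
#     print(app.id)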
b4f61516d3d1bb020c5149b45704c3f4c77d7b1f232a91424ef197d43f336570 | def get(self, id: str, privileges: 'Optional[List[str]]'=None) -> 'Optional[App]':
'\n This method returns a Qlik Sense app by its id\n\n Args:\n id: id of the app on the server in uuid format\n privileges:\n\n Returns: a Qlik Sense app\n '
return self._get(schema=AppSchema(), id=id, privileges=privileges) | This method returns a Qlik Sense app by its id
Args:
id: id of the app on the server in uuid format
privileges:
Returns: a Qlik Sense app | qlik_sense/services/app.py | get | ricardolsmendes/qlik-sense | 2 | python | def get(self, id: str, privileges: 'Optional[List[str]]'=None) -> 'Optional[App]':
'\n This method returns a Qlik Sense app by its id\n\n Args:\n id: id of the app on the server in uuid format\n privileges:\n\n Returns: a Qlik Sense app\n '
return self._get(schema=AppSchema(), id=id, privileges=privileges) | def get(self, id: str, privileges: 'Optional[List[str]]'=None) -> 'Optional[App]':
'\n This method returns a Qlik Sense app by its id\n\n Args:\n id: id of the app on the server in uuid format\n privileges:\n\n Returns: a Qlik Sense app\n '
return self._get(schema=AppSchema(), id=id, privileges=privileges)<|docstring|>This method returns a Qlik Sense app by its id
Args:
id: id of the app on the server in uuid format
privileges:
Returns: a Qlik Sense app<|endoftext|> |
937f1718290e5f418fdb5bafd05fbcd322071fc3138f82cedde3600786c1979b | def update(self, app: 'App', privileges: 'Optional[List[str]]'=None) -> 'Optional[App]':
'\n This method updates attributes of the provided app on the server\n\n Args:\n app: app to update\n privileges:\n\n Returns: a Qlik Sense app object for the updated app\n '
return self._update(schema=AppSchema(), entity=app, privileges=privileges) | This method updates attributes of the provided app on the server
Args:
app: app to update
privileges:
Returns: a Qlik Sense app object for the updated app | qlik_sense/services/app.py | update | ricardolsmendes/qlik-sense | 2 | python | def update(self, app: 'App', privileges: 'Optional[List[str]]'=None) -> 'Optional[App]':
'\n This method updates attributes of the provided app on the server\n\n Args:\n app: app to update\n privileges:\n\n Returns: a Qlik Sense app object for the updated app\n '
return self._update(schema=AppSchema(), entity=app, privileges=privileges) | def update(self, app: 'App', privileges: 'Optional[List[str]]'=None) -> 'Optional[App]':
'\n This method updates attributes of the provided app on the server\n\n Args:\n app: app to update\n privileges:\n\n Returns: a Qlik Sense app object for the updated app\n '
return self._update(schema=AppSchema(), entity=app, privileges=privileges)<|docstring|>This method updates attributes of the provided app on the server
Args:
app: app to update
privileges:
Returns: a Qlik Sense app object for the updated app<|endoftext|> |
0fcdf15280996ca32f7e865ed32422ab33a9c3f2ba1c096b9a7b3fb9e7f17615 | def delete(self, app: 'AppCondensed'):
'\n This method deletes the provided app from the server\n\n Args:\n app: app to delete\n '
self._delete(entity=app) | This method deletes the provided app from the server
Args:
app: app to delete | qlik_sense/services/app.py | delete | ricardolsmendes/qlik-sense | 2 | python | def delete(self, app: 'AppCondensed'):
'\n This method deletes the provided app from the server\n\n Args:\n app: app to delete\n '
self._delete(entity=app) | def delete(self, app: 'AppCondensed'):
'\n This method deletes the provided app from the server\n\n Args:\n app: app to delete\n '
self._delete(entity=app)<|docstring|>This method deletes the provided app from the server
Args:
app: app to delete<|endoftext|> |
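A hedged round-trip over the get/update/delete methods above; app_service and the uuid are placeholders, and treating App.name as a writable attribute is an assumption about the model.

    app = app_service.get(id='00000000-0000-0000-0000-000000000000')
    if app is not None:
        app.name = f'{app.name} (renamed)'   # assumed mutable attribute
        app = app_service.update(app=app)
        app_service.delete(app=app)
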
ec9070cc76f0c7d9711d54746b08872c3e376b01fb167a007ecdfdd600c21bc8 | def copy(self, app: 'AppCondensed', name: str=None, include_custom_properties: bool=False) -> 'Optional[App]':
'\n This method copies the provided app\n\n Args:\n app: app to copy\n name: name for the new app\n include_custom_properties: flag to include custom properties on the new app\n\n Returns: a Qlik Sense App object for the newly copied app\n '
schema = AppSchema()
params = {'name': name, 'includecustomproperties': include_custom_properties}
request = QSAPIRequest(method='POST', url=f'{self.url}/{app.id}/copy', params=params)
response = self._call(request)
if (200 <= response.status_code < 300):
return schema.loads(response.content)
return None | This method copies the provided app
Args:
app: app to copy
name: name for the new app
include_custom_properties: flag to include custom properties on the new app
Returns: a Qlik Sense App object for the newly copied app | qlik_sense/services/app.py | copy | ricardolsmendes/qlik-sense | 2 | python | def copy(self, app: 'AppCondensed', name: str=None, include_custom_properties: bool=False) -> 'Optional[App]':
'\n This method copies the provided app\n\n Args:\n app: app to copy\n name: name for the new app\n include_custom_properties: flag to include custom properties on the new app\n\n Returns: a Qlik Sense App object for the newly copied app\n '
schema = AppSchema()
params = {'name': name, 'includecustomproperties': include_custom_properties}
request = QSAPIRequest(method='POST', url=f'{self.url}/{app.id}/copy', params=params)
response = self._call(request)
if (200 <= response.status_code < 300):
return schema.loads(response.content)
return None | def copy(self, app: 'AppCondensed', name: str=None, include_custom_properties: bool=False) -> 'Optional[App]':
'\n This method copies the provided app\n\n Args:\n app: app to copy\n name: name for the new app\n include_custom_properties: flag to include custom properties on the new app\n\n Returns: a Qlik Sense App object for the newly copied app\n '
schema = AppSchema()
params = {'name': name, 'includecustomproperties': include_custom_properties}
request = QSAPIRequest(method='POST', url=f'{self.url}/{app.id}/copy', params=params)
response = self._call(request)
if (200 <= response.status_code < 300):
return schema.loads(response.content)
return None<|docstring|>This method copies the provided app
Args:
app: app to copy
name: name for the new app
include_custom_properties: flag to include custom properties on the new app
Returns: a Qlik Sense App object for the newly copied app<|endoftext|> |
8d70a0fc044fbca1af9656d989047c5005b2b3e81d7f3973b173db7c906bd31a | def replace(self, app: 'AppCondensed', app_to_replace: 'AppCondensed') -> 'Optional[App]':
'\n This method replaces the target app with the provided app\n\n .. warning ::\n This method is not working as expected. In testing, the returned app appears to be identical to the app\n to be replaced, suggesting that it was not actually replaced.\n\n Args:\n app: app to copy\n app_to_replace: app to replace\n\n Returns: a Qlik Sense App object for the new app\n '
schema = AppSchema()
request = QSAPIRequest(method='PUT', url=f'{self.url}/{app.id}/replace', params={'app': app_to_replace.id})
response = self._call(request)
if (200 <= response.status_code < 300):
return schema.loads(response.content)
return None | This method replaces the target app with the provided app
.. warning ::
This method is not working as expected. In testing, the returned app appears to be identical to the app
to be replaced, suggesting that it was not actually replaced.
Args:
app: app to copy
app_to_replace: app to replace
Returns: a Qlik Sense App object for the new app | qlik_sense/services/app.py | replace | ricardolsmendes/qlik-sense | 2 | python | def replace(self, app: 'AppCondensed', app_to_replace: 'AppCondensed') -> 'Optional[App]':
'\n This method replaces the target app with the provided app\n\n .. warning ::\n This method is not working as expected. In testing, the returned app appears to be identical to the app\n to be replaced, suggesting that it was not actually replaced.\n\n Args:\n app: app to copy\n app_to_replace: app to replace\n\n Returns: a Qlik Sense App object for the new app\n '
schema = AppSchema()
request = QSAPIRequest(method='PUT', url=f'{self.url}/{app.id}/replace', params={'app': app_to_replace.id})
response = self._call(request)
if (200 <= response.status_code < 300):
return schema.loads(response.content)
return None | def replace(self, app: 'AppCondensed', app_to_replace: 'AppCondensed') -> 'Optional[App]':
'\n This method replaces the target app with the provided app\n\n .. warning ::\n This method is not working as expected. In testing, the returned app appears to be identical to the app\n to be replaced, suggesting that it was not actually replaced.\n\n Args:\n app: app to copy\n app_to_replace: app to replace\n\n Returns: a Qlik Sense App object for the new app\n '
schema = AppSchema()
request = QSAPIRequest(method='PUT', url=f'{self.url}/{app.id}/replace', params={'app': app_to_replace.id})
response = self._call(request)
if (200 <= response.status_code < 300):
return schema.loads(response.content)
return None<|docstring|>This method replaces the target app with the provided app
.. warning ::
This method is not working as expected. In testing, the returned app appears to be identical to the app
to be replaced, suggesting that it was not actually replaced.
Args:
app: app to copy
app_to_replace: app to replace
Returns: a Qlik Sense App object for the new app<|endoftext|> |
b383850ca329daacd4d4fe4dae419dda58e5b837f2ff3d43bf4c499ea5722bbf | def reload(self, app: 'AppCondensed'):
'\n This method reloads the provided app\n\n Args:\n app: app to reload\n '
request = QSAPIRequest(method='POST', url=f'{self.url}/{app.id}/reload')
self._call(request) | This method reloads the provided app
Args:
app: app to reload | qlik_sense/services/app.py | reload | ricardolsmendes/qlik-sense | 2 | python | def reload(self, app: 'AppCondensed'):
'\n This method reloads the provided app\n\n Args:\n app: app to reload\n '
request = QSAPIRequest(method='POST', url=f'{self.url}/{app.id}/reload')
self._call(request) | def reload(self, app: 'AppCondensed'):
'\n This method reloads the provided app\n\n Args:\n app: app to reload\n '
request = QSAPIRequest(method='POST', url=f'{self.url}/{app.id}/reload')
self._call(request)<|docstring|>This method reloads the provided app
Args:
app: app to reload<|endoftext|> |
afbcb70778ba84f35420ba23f9a818241606dc4f08a87d6c04eb4a760a22ef64 | def publish(self, app: 'AppCondensed', stream: 'StreamCondensed', name: str=None) -> 'Optional[App]':
'\n This method will publish the provided app to the provided stream\n\n Args:\n app: app to publish\n stream: stream to which to publish the app\n name: name of the published app\n\n Returns: a Qlik Sense App object for the published app\n '
schema = AppSchema()
params = {'stream': stream.id, 'name': (name if name else app.name)}
request = QSAPIRequest(method='PUT', url=f'{self.url}/{app.id}/publish', params=params)
response = self._call(request)
if (200 <= response.status_code < 300):
return schema.loads(response.content)
return None | This method will publish the provided app to the provided stream
Args:
app: app to publish
stream: stream to which to publish the app
name: name of the published app
Returns: a Qlik Sense App object for the published app | qlik_sense/services/app.py | publish | ricardolsmendes/qlik-sense | 2 | python | def publish(self, app: 'AppCondensed', stream: 'StreamCondensed', name: str=None) -> 'Optional[App]':
'\n This method will publish the provided app to the provided stream\n\n Args:\n app: app to publish\n stream: stream to which to publish the app\n name: name of the published app\n\n Returns: a Qlik Sense App object for the published app\n '
schema = AppSchema()
params = {'stream': stream.id, 'name': (name if name else app.name)}
request = QSAPIRequest(method='PUT', url=f'{self.url}/{app.id}/publish', params=params)
response = self._call(request)
if (200 <= response.status_code < 300):
return schema.loads(response.content)
return None | def publish(self, app: 'AppCondensed', stream: 'StreamCondensed', name: str=None) -> 'Optional[App]':
'\n This method will publish the provided app to the provided stream\n\n Args:\n app: app to publish\n stream: stream to which to publish the app\n name: name of the published app\n\n Returns: a Qlik Sense App object for the published app\n '
schema = AppSchema()
params = {'stream': stream.id, 'name': (name if name else app.name)}
request = QSAPIRequest(method='PUT', url=f'{self.url}/{app.id}/publish', params=params)
response = self._call(request)
if (200 <= response.status_code < 300):
return schema.loads(response.content)
return None<|docstring|>This method will publish the provided app to the provided stream
Args:
app: app to publish
stream: stream to which to publish the app
name: name of the published app
Returns: a Qlik Sense App object for the published app<|endoftext|> |
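A short sketch of publish(); app and stream are assumed to have been fetched earlier (for example via the services' query() methods), and the published name is a placeholder.

    published = app_service.publish(app=app, stream=stream, name='Quarterly KPIs')
    if published is not None:
        print(f'published as {published.name} ({published.id})')
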
281794dd078efd27f8f562ad519ea6270bf4819e285e0c07882e50c55172ee25 | def unpublish(self, app: 'AppCondensed') -> 'Optional[App]':
'\n Unpublishes the provided app\n\n .. warning ::\n The current version of the API being used for development (may not be up to date) indicates that this is\n not implemented.\n\n Args:\n app: app to unpublish\n\n Returns: a Qlik Sense App object for the un-published app\n '
schema = AppSchema()
request = QSAPIRequest(method='POST', url=f'{self.url}/{app.id}/unpublish')
response = self._call(request)
if (200 <= response.status_code < 300):
return schema.loads(response.content)
return None | Unpublishes the provided app
.. warning ::
The current version of the API being used for development (may not be up to date) indicates that this is
not implemented.
Args:
app: app to unpublish
Returns: a Qlik Sense App object for the un-published app | qlik_sense/services/app.py | unpublish | ricardolsmendes/qlik-sense | 2 | python | def unpublish(self, app: 'AppCondensed') -> 'Optional[App]':
'\n Unpublishes the provided app\n\n .. warning ::\n The current version of the API being used for development (may not be up to date) indicates that this is\n not implemented.\n\n Args:\n app: app to unpublish\n\n Returns: a Qlik Sense App object for the un-published app\n '
schema = AppSchema()
request = QSAPIRequest(method='POST', url=f'{self.url}/{app.id}/unpublish')
response = self._call(request)
if (200 <= response.status_code < 300):
return schema.loads(response.content)
return None | def unpublish(self, app: 'AppCondensed') -> 'Optional[App]':
'\n Unpublishes the provided app\n\n .. warning ::\n The current version of the API being used for development (may not be up to date) indicates that this is\n not implemented.\n\n Args:\n app: app to unpublish\n\n Returns: a Qlik Sense App object for the un-published app\n '
schema = AppSchema()
request = QSAPIRequest(method='POST', url=f'{self.url}/{app.id}/unpublish')
response = self._call(request)
if (200 <= response.status_code < 300):
return schema.loads(response.content)
return None<|docstring|>Unpublishes the provided app
.. warning ::
The current version of the API being used for development (may not be up to date) indicates that this is
not implemented.
Args:
app: app to unpublish
Returns: a Qlik Sense App object for the un-published app<|endoftext|> |
8a82aa75a85ac164915de2bdd5d30d2d14f6781639d864bacd7c9eeb9479b7d3 | def get_export_token(self, app: 'AppCondensed') -> 'Optional[str]':
'\n This method returns an export token for an app\n\n Args:\n app: app to export\n\n Returns: an export token as a UUID\n '
request = QSAPIRequest(method='GET', url=f'{self.url}/{app.id}/export')
response = self._call(request)
if (200 <= response.status_code < 300):
return response.json()['value']
return | This method returns an export token for an app
Args:
app: app to export
Returns: an export token as a UUID | qlik_sense/services/app.py | get_export_token | ricardolsmendes/qlik-sense | 2 | python | def get_export_token(self, app: 'AppCondensed') -> 'Optional[str]':
'\n This method returns an export token for an app\n\n Args:\n app: app to export\n\n Returns: an export token as a UUID\n '
request = QSAPIRequest(method='GET', url=f'{self.url}/{app.id}/export')
response = self._call(request)
if (200 <= response.status_code < 300):
return response.json()['value']
return | def get_export_token(self, app: 'AppCondensed') -> 'Optional[str]':
'\n This method returns an export token for an app\n\n Args:\n app: app to export\n\n Returns: an export token as a UUID\n '
request = QSAPIRequest(method='GET', url=f'{self.url}/{app.id}/export')
response = self._call(request)
if (200 <= response.status_code < 300):
return response.json()['value']
return<|docstring|>This method returns an export token for an app
Args:
app: app to export
Returns: an export token as a UUID<|endoftext|> |
b4e29aa0d2e855528e69fba7e3c5a850daf649f3e879c6a10df62c18b8afd41d | def create_export(self, app: 'AppCondensed', keep_data: bool=False) -> 'Optional[AppExport]':
'\n This method returns a download path for the provided app. It can be passed into download_file() to obtain\n the app itself\n\n Args:\n app: app to export\n keep_data: indicates if the data should be exported with the app\n\n Returns: the app export object that contains attributes like download_path and export_token\n '
schema = AppExportSchema()
token = self.get_export_token(app=app)
if token:
request = QSAPIRequest(method='POST', url=f'{self.url}/{app.id}/export/{token}', params={'skipdata': (not keep_data)})
response = self._call(request)
if (200 <= response.status_code < 300):
return schema.loads(response.content)
return None | This method returns a download path for the provided app. It can be passed into download_file() to obtain
the app itself
Args:
app: app to export
keep_data: indicates if the data should be exported with the app
Returns: the app export object that contains attributes like download_path and export_token | qlik_sense/services/app.py | create_export | ricardolsmendes/qlik-sense | 2 | python | def create_export(self, app: 'AppCondensed', keep_data: bool=False) -> 'Optional[AppExport]':
'\n This method returns a download path for the provided app. It can be passed into download_file() to obtain\n the app itself\n\n Args:\n app: app to export\n keep_data: indicates if the data should be exported with the app\n\n Returns: the app export object that contains attributes like download_path and export_token\n '
schema = AppExportSchema()
token = self.get_export_token(app=app)
if token:
request = QSAPIRequest(method='POST', url=f'{self.url}/{app.id}/export/{token}', params={'skipdata': (not keep_data)})
response = self._call(request)
if (200 <= response.status_code < 300):
return schema.loads(response.content)
return None | def create_export(self, app: 'AppCondensed', keep_data: bool=False) -> 'Optional[AppExport]':
'\n This method returns a download path for the provided app. It can be passed into download_file() to obtain\n the app itself\n\n Args:\n app: app to export\n keep_data: indicates if the data should be exported with the app\n\n Returns: the app export object that contains attributes like download_path and export_token\n '
schema = AppExportSchema()
token = self.get_export_token(app=app)
if token:
request = QSAPIRequest(method='POST', url=f'{self.url}/{app.id}/export/{token}', params={'skipdata': (not keep_data)})
response = self._call(request)
if (200 <= response.status_code < 300):
return schema.loads(response.content)
return None<|docstring|>This method returns a download path for the provided app. It can be passed into download_file() to obtain
the app itself
Args:
app: app to export
keep_data: indicates if the data should be exported with the app
Returns: the app export object that contains attributes like download_path and export_token<|endoftext|> |
28d8efd3d652d412781ed95c0417f209a84996454b17332c8339f4a5d82145d0 | def delete_export(self, app_export: 'AppExport') -> 'Optional[AppExport]':
'\n This method cancels the export for the provided app.\n\n .. warning ::\n This method is not behaving as expected. The AppExport object that is returned has a zero-length download\n path, but indicates that the export is not cancelled (cancelled = False).\n\n Args:\n app_export: app export metadata, contains the app getting exported and the export token\n '
schema = AppExportSchema()
request = QSAPIRequest(method='DELETE', url=f'{self.url}/{app_export.app_id}/export/{app_export.export_token}')
response = self._call(request)
if (200 <= response.status_code < 300):
return schema.loads(response.content)
return None | This method cancels the export for the provided app.
.. warning ::
This method is not behaving as expected. The AppExport object that is returned has a zero-length download
path, but indicates that the export is not cancelled (cancelled = False).
Args:
app_export: app export metadata, contains the app getting exported and the export token | qlik_sense/services/app.py | delete_export | ricardolsmendes/qlik-sense | 2 | python | def delete_export(self, app_export: 'AppExport') -> 'Optional[AppExport]':
'\n This method cancels the export for the provided app.\n\n .. warning ::\n This method is not behaving as expected. The AppExport object that is returned has a zero-length download\n path, but indicates that the export is not cancelled (cancelled = False).\n\n Args:\n app_export: app export metadata, contains the app getting exported and the export token\n '
schema = AppExportSchema()
request = QSAPIRequest(method='DELETE', url=f'{self.url}/{app_export.app_id}/export/{app_export.export_token}')
response = self._call(request)
if (200 <= response.status_code < 300):
return schema.loads(response.content)
return None | def delete_export(self, app_export: 'AppExport') -> 'Optional[AppExport]':
'\n This method cancels the export for the provided app.\n\n .. warning ::\n This method is not behaving as expected. The AppExport object that is returned has a zero-length download\n path, but indicates that the export is not cancelled (cancelled = False).\n\n Args:\n app_export: app export metadata, contains the app getting exported and the export token\n '
schema = AppExportSchema()
request = QSAPIRequest(method='DELETE', url=f'{self.url}/{app_export.app_id}/export/{app_export.export_token}')
response = self._call(request)
if (200 <= response.status_code < 300):
return schema.loads(response.content)
return None<|docstring|>This method cancels the export for the provided app.
.. warning ::
This method is not behaving as expected. The AppExport object that is returned has a zero-length download
path, but indicates that the export is not cancelled (cancelled = False).
Args:
app_export: app export metadata, contains the app getting exported and the export token<|endoftext|> |
b4f2f6279d7ba6b2259ae4f9cd30b23e141ad035ce3071b1b143e03c66c9b95e | def download_file(self, app_export: 'AppExport') -> 'Optional[Iterable]':
'\n This method downloads an app given a download link\n\n .. warning ::\n This method has not been tested.\n\n Args:\n app_export: app export metadata, contains the app getting exported, the download path, and the export token\n\n Returns: the file as an iterable\n '
request = QSAPIRequest(method='GET', url=f'{self.url}/{app_export.download_path}')
response = self._call(request)
if (200 <= response.status_code < 300):
return response.iter_content(chunk_size=(512 << 10))
return None | This method downloads an app given a download link
.. warning ::
This method has not been tested.
Args:
app_export: app export metadata, contains the app getting exported, the download path, and the export token
Returns: the file as an iterable | qlik_sense/services/app.py | download_file | ricardolsmendes/qlik-sense | 2 | python | def download_file(self, app_export: 'AppExport') -> 'Optional[Iterable]':
'\n This method downloads an app given a download link\n\n .. warning ::\n This method has not been tested.\n\n Args:\n app_export: app export metadata, contains the app getting exported, the download path, and the export token\n\n Returns: the file as an iterable\n '
request = QSAPIRequest(method='GET', url=f'{self.url}/{app_export.download_path}')
response = self._call(request)
if (200 <= response.status_code < 300):
return response.iter_content(chunk_size=(512 << 10))
return None | def download_file(self, app_export: 'AppExport') -> 'Optional[Iterable]':
'\n This method downloads an app given a download link\n\n .. warning ::\n This method has not been tested.\n\n Args:\n app_export: app export metadata, contains the app getting exported, the download path, and the export token\n\n Returns: the file as an iterable\n '
request = QSAPIRequest(method='GET', url=f'{self.url}/{app_export.download_path}')
response = self._call(request)
if (200 <= response.status_code < 300):
return response.iter_content(chunk_size=(512 << 10))
return None<|docstring|>This method downloads an app given a download link
.. warning ::
This method has not been tested.
Args:
app_export: app export metadata, contains the app getting exported, the download path, and the export token
Returns: the file as an iterable<|endoftext|> |
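A hedged end-to-end sketch combining create_export() and download_file() above; app_service, the app object, and the output filename are assumptions, and the export-token handshake happens inside create_export() as shown.

    export = app_service.create_export(app=app, keep_data=True)
    if export is not None:
        chunks = app_service.download_file(app_export=export)
        if chunks is not None:
            with open('exported_app.qvf', 'wb') as fh:
                for chunk in chunks:
                    fh.write(chunk)
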
fc5753c234d7cd5f87231eddfcbaaa88de2866ff111653542bd3dac99c2cdcf4 | def parse_arguments():
'\n Prepare the arguments that are specific to this application.\n '
parser = argparse.ArgumentParser()
parser.add_argument('--file', dest='filename', action='store', help='The name of the HDF5 file.')
return parser.parse_args() | Prepare the arguments that are specific to this application. | src/h5ui.py | parse_arguments | rhuygen/hdf | 0 | python | def parse_arguments():
'\n \n '
parser = argparse.ArgumentParser()
parser.add_argument('--file', dest='filename', action='store', help='The name of the HDF5 file.')
return parser.parse_args() | def parse_arguments():
'\n \n '
parser = argparse.ArgumentParser()
parser.add_argument('--file', dest='filename', action='store', help='The name of the HDF5 file.')
return parser.parse_args()<|docstring|>Prepare the arguments that are specific to this application.<|endoftext|>
792efa4c71ab79fc241ce765c61515647f3b006aea4971327f6818bab1fb3494 | def hidden_to_idx(self, hidden, dropout=False):
'Converts hidden state vectors into indices into the dictionary.'
if (hidden.size(0) > 1):
raise RuntimeError('Bad dimensions of tensor:', hidden)
hidden = hidden.squeeze(0)
scores = self.h2o(hidden)
if dropout:
scores = self.dropout(scores)
(_, idx) = scores.max(1)
idx.unsqueeze_(1)
return (idx, scores) | Converts hidden state vectors into indices into the dictionary. | parlai/agents/legacy_agents/memnn/modules_v0.py | hidden_to_idx | isaacmg/ParlAI | 258 | python | def hidden_to_idx(self, hidden, dropout=False):
if (hidden.size(0) > 1):
raise RuntimeError('Bad dimensions of tensor:', hidden)
hidden = hidden.squeeze(0)
scores = self.h2o(hidden)
if dropout:
scores = self.dropout(scores)
(_, idx) = scores.max(1)
idx.unsqueeze_(1)
return (idx, scores) | def hidden_to_idx(self, hidden, dropout=False):
if (hidden.size(0) > 1):
raise RuntimeError('Bad dimensions of tensor:', hidden)
hidden = hidden.squeeze(0)
scores = self.h2o(hidden)
if dropout:
scores = self.dropout(scores)
(_, idx) = scores.max(1)
idx.unsqueeze_(1)
return (idx, scores)<|docstring|>Converts hidden state vectors into indices into the dictionary.<|endoftext|> |
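A self-contained sketch of the decoding step in hidden_to_idx, with a plain nn.Linear standing in for self.h2o (the layer sizes are illustrative assumptions):

    import torch
    import torch.nn as nn

    h2o = nn.Linear(64, 1000)        # hidden size 64, vocabulary size 1000
    hidden = torch.randn(1, 8, 64)   # (1, batch, hidden), as the guard expects
    scores = h2o(hidden.squeeze(0))  # (batch, vocab)
    _, idx = scores.max(1)
    idx = idx.unsqueeze(1)           # (batch, 1) indices into the dictionary
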
1c1d0a0ce319de5104eeb0b3269c8a413de850cbf0f96ea25fea24dc94eba5b7 | @pkg.command()
@click.option('--isolate/--no-isolate', default=False, help='Isolate all test runs in virtual environments. This greatly increases the duration for tests to run as the environment needs to be created and packages installed first, but it ensures that the unit tests work for a vanilla installation (default: false)')
@click.option('--keep-test-env', is_flag=True, help='Do not delete the virtual environment created when testing with --isolate.')
@click.option('--capture/--no-capture', default=True, help='Capture the output of the underlying testing framework. If set to false, the output will be routed to stderr (default: true)')
def test(isolate: bool, keep_test_env: bool, capture: bool) -> None:
"\n Run the package's unit tests.\n "
package = project.load_or_exit(expect=PackageModel)
test_run = test_package(package, isolate, keep_test_env, capture)
print_test_run(test_run)
sys.exit((0 if (test_run.status == TestStatus.PASSED) else 1)) | Run the package's unit tests. | src/shut/commands/pkg/test.py | test | ndejong/shut | 0 | python | @pkg.command()
@click.option('--isolate/--no-isolate', default=False, help='Isolate all test runs in virtual environments. This greatly increases the duration for tests to run as the environment needs to be created and packages installed first, but it ensures that the unit tests work for a vanilla installation (default: false)')
@click.option('--keep-test-env', is_flag=True, help='Do not delete the virtual environment created when testing with --isolate.')
@click.option('--capture/--no-capture', default=True, help='Capture the output of the underlying testing framework. If set to false, the output will be routed to stderr (default: true)')
def test(isolate: bool, keep_test_env: bool, capture: bool) -> None:
"\n \n "
package = project.load_or_exit(expect=PackageModel)
test_run = test_package(package, isolate, keep_test_env, capture)
print_test_run(test_run)
sys.exit((0 if (test_run.status == TestStatus.PASSED) else 1)) | @pkg.command()
@click.option('--isolate/--no-isolate', default=False, help='Isolate all test runs in virtual environments. This greatly increases the duration for tests to run as the environment needs to be created and packages installed first, but it ensures that the unit tests work for a vanilla installation (default: false)')
@click.option('--keep-test-env', is_flag=True, help='Do not delete the virtual environment created when testing with --isolate.')
@click.option('--capture/--no-capture', default=True, help='Capture the output of the underlying testing framework. If set to false, the output will be routed to stderr (default: true)')
def test(isolate: bool, keep_test_env: bool, capture: bool) -> None:
"\n \n "
package = project.load_or_exit(expect=PackageModel)
test_run = test_package(package, isolate, keep_test_env, capture)
print_test_run(test_run)
sys.exit((0 if (test_run.status == TestStatus.PASSED) else 1))<|docstring|>Run the package's unit tests.<|endoftext|> |
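Given the options registered above, a typical invocation would look like shut pkg test --isolate --no-capture; the executable name assumes shut's usual console entry point.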
9bbb7fd838df59d3bdf342da8f1b0be6d4563e4980671ae2d57864f43017b6d8 | def __init__(self, channel):
'Constructor.\n\n Args:\n channel: A grpc.Channel.\n '
self.ImportGraph = channel.unary_unary('/devrpc.Dev/ImportGraph', request_serializer=lndgrpc_dot_compiled_dot_lightning__pb2.ChannelGraph.SerializeToString, response_deserializer=lndgrpc_dot_compiled_dot_dev__pb2.ImportGraphResponse.FromString) | Constructor.
Args:
channel: A grpc.Channel. | lndgrpc/compiled/dev_pb2_grpc.py | __init__ | kornpow/lnd-grpc-client | 19 | python | def __init__(self, channel):
'Constructor.\n\n Args:\n channel: A grpc.Channel.\n '
self.ImportGraph = channel.unary_unary('/devrpc.Dev/ImportGraph', request_serializer=lndgrpc_dot_compiled_dot_lightning__pb2.ChannelGraph.SerializeToString, response_deserializer=lndgrpc_dot_compiled_dot_dev__pb2.ImportGraphResponse.FromString) | def __init__(self, channel):
'Constructor.\n\n Args:\n channel: A grpc.Channel.\n '
self.ImportGraph = channel.unary_unary('/devrpc.Dev/ImportGraph', request_serializer=lndgrpc_dot_compiled_dot_lightning__pb2.ChannelGraph.SerializeToString, response_deserializer=lndgrpc_dot_compiled_dot_dev__pb2.ImportGraphResponse.FromString)<|docstring|>Constructor.
Args:
channel: A grpc.Channel.<|endoftext|> |
a57f76fd41913179295d01654c01e219f472614a4203464c529b51295e1539a4 | def ImportGraph(self, request, context):
'\n ImportGraph imports a ChannelGraph into the graph database. Should only be\n used for development.\n '
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!') | ImportGraph imports a ChannelGraph into the graph database. Should only be
used for development. | lndgrpc/compiled/dev_pb2_grpc.py | ImportGraph | kornpow/lnd-grpc-client | 19 | python | def ImportGraph(self, request, context):
'\n ImportGraph imports a ChannelGraph into the graph database. Should only be\n used for development.\n '
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!') | def ImportGraph(self, request, context):
'\n ImportGraph imports a ChannelGraph into the graph database. Should only be\n used for development.\n '
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')<|docstring|>ImportGraph imports a ChannelGraph into the graph database. Should only be
used for development.<|endoftext|> |
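A hypothetical client call for the ImportGraph RPC above; the DevStub class name, the insecure localhost endpoint, and the empty ChannelGraph payload are assumptions (a real lnd endpoint also needs TLS and macaroon credentials).

    import grpc
    from lndgrpc.compiled import dev_pb2_grpc, lightning_pb2

    channel = grpc.insecure_channel('localhost:10009')
    stub = dev_pb2_grpc.DevStub(channel)
    reply = stub.ImportGraph(lightning_pb2.ChannelGraph())
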
a7d17138fa3106a53c4bd09cbfc015b0ffae1f4f7951b58a90e9bd1faa589134 | def __init__(self, action, context):
'\n Initializes the superclass and retrieves the configuration parameters or sets defaults.\n '
BasePrimitive.__init__(self, action, context)
cfg = self.config.fits2png
self.sigmas = eval(cfg.get('DEFAULT', 'denoise_sigmas'))
self.sizes = cfg.get('DEFAULT', 'denoise_sizes') | Initializes the superclass and retrieves the configuration parameters or sets defaults. | keckdrpframework/primitives/noise_removal.py | __init__ | Keck-DataReductionPipelines/keckdrpframework | 0 | python | def __init__(self, action, context):
'\n \n '
BasePrimitive.__init__(self, action, context)
cfg = self.config.fits2png
self.sigmas = eval(cfg.get('DEFAULT', 'denoise_sigmas'))
self.sizes = cfg.get('DEFAULT', 'denoise_sizes') | def __init__(self, action, context):
'\n \n '
BasePrimitive.__init__(self, action, context)
cfg = self.config.fits2png
self.sigmas = eval(cfg.get('DEFAULT', 'denoise_sigmas'))
self.sizes = cfg.get('DEFAULT', 'denoise_sizes')<|docstring|>Initializes the superclass and retrieves the configuration parameters or sets defaults.<|endoftext|> |
a32874d9a9c9a4771babcb90713ed4463b63ee0c5ba0e25f65090e5e096d9d5f | def _denoise(self, _img, size=3, sigmas=3):
'\n Finds mean and std.\n For pixels outside mean +/- std *3, finds median and replace pixel\n Returns new image\n '
def f(x, y):
x = max(min(w, x), 0)
y = max(min(h, y), 0)
return medf(_img[y:y + size, x:x + size])
medf = np.median
(h, w) = _img.shape
mean0 = np.mean(_img)
std0 = np.std(_img)
std1 = (std0 * sigmas)
idc = np.vstack((np.argwhere((_img > (mean0 + std1))), np.argwhere((_img < (mean0 - std1)))))
out = np.copy(_img)
half = (size // 2)
for (a, b) in idc:
out[(a, b)] = f((b - half), (a - half))
return out | Finds mean and std.
For pixels outside mean +/- std *3, finds median and replace pixel
Returns new image | keckdrpframework/primitives/noise_removal.py | _denoise | Keck-DataReductionPipelines/keckdrpframework | 0 | python | def _denoise(self, _img, size=3, sigmas=3):
'\n Finds mean and std.\n For pixels outside mean +/- std *3, finds median and replace pixel\n Returns new image\n '
def f(x, y):
x = max(min(w, x), 0)
y = max(min(h, y), 0)
return medf(_img[y:y + size, x:x + size])
medf = np.median
(h, w) = _img.shape
mean0 = np.mean(_img)
std0 = np.std(_img)
std1 = (std0 * sigmas)
idc = np.vstack((np.argwhere((_img > (mean0 + std1))), np.argwhere((_img < (mean0 - std1)))))
out = np.copy(_img)
half = (size // 2)
for (a, b) in idc:
out[(a, b)] = f((b - half), (a - half))
return out | def _denoise(self, _img, size=3, sigmas=3):
'\n Finds mean and std.\n For pixels outside mean +/- std *3, finds median and replace pixel\n Returns new image\n '
def f(x, y):
x = max(min(w, x), 0)
y = max(min(h, y), 0)
return medf(_img[y:y + size, x:x + size])
medf = np.median
(h, w) = _img.shape
mean0 = np.mean(_img)
std0 = np.std(_img)
std1 = (std0 * sigmas)
idc = np.vstack((np.argwhere((_img > (mean0 + std1))), np.argwhere((_img < (mean0 - std1)))))
out = np.copy(_img)
half = (size // 2)
for (a, b) in idc:
out[(a, b)] = f((b - half), (a - half))
return out<|docstring|>Finds mean and std.
For pixels outside mean +/- std *3, finds median and replace pixel
Returns new image<|endoftext|> |
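A self-contained sketch of the outlier-replacement idea in _denoise, applied to a toy grayscale array (the 3x3 window and 3-sigma cut mirror the defaults above):

    import numpy as np

    img = np.random.normal(100.0, 5.0, size=(32, 32))
    img[10, 10] = 10000.0                    # inject a hot pixel
    mean0, std0 = img.mean(), img.std()
    mask = np.abs(img - mean0) > 3 * std0    # outside mean +/- 3*std
    out = img.copy()
    for y, x in np.argwhere(mask):
        y0, x0 = max(y - 1, 0), max(x - 1, 0)
        out[y, x] = np.median(img[y0:y0 + 3, x0:x0 + 3])
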
87503081ab190fae66db6670461cd1011c22edb98dff79846b262914ccfa8e06 | def get_num_connections() -> int:
'\n Connect to the "postgres" database to see pg_stat_activity data.\n\n We need raw psycopg2 cursor access here, because the test-runner creates a separate\n test database otherwise.\n '
handler = ConnectionHandler(databases={'default': settings.POSTGRES_CONN_PARAMS})
handler.ensure_defaults('default')
connection = handler['default']
app_user = settings.DATABASES['default']['USER']
time.sleep(1)
try:
with connection.cursor() as cursor:
cursor.execute(SHOW_CONNECTIONS, [app_user])
rows = cursor.fetchall()
headers = [x.name for x in cursor.description]
tabulated = tabulate(rows, headers=headers)
print('Open connections:')
print(tabulated)
unexpected = [row for row in rows if (row[(- 1)] == TEST_QUERY)]
count = len(unexpected)
finally:
connection.close()
return count | Connect to the "postgres" database to see pg_stat_activity data.
We need raw psycopg2 cursor access here, because the test-runner creates a separate
test database otherwise. | tests/test_concurrency.py | get_num_connections | damm89/zgw-consumers | 2 | python | def get_num_connections() -> int:
'\n Connect to the "postgres" database to see pg_stat_activity data.\n\n We need raw psycopg2 cursor access here, because the test-runner creates a separate\n test database otherwise.\n '
handler = ConnectionHandler(databases={'default': settings.POSTGRES_CONN_PARAMS})
handler.ensure_defaults('default')
connection = handler['default']
app_user = settings.DATABASES['default']['USER']
time.sleep(1)
try:
with connection.cursor() as cursor:
cursor.execute(SHOW_CONNECTIONS, [app_user])
rows = cursor.fetchall()
headers = [x.name for x in cursor.description]
tabulated = tabulate(rows, headers=headers)
print('Open connections:')
print(tabulated)
unexpected = [row for row in rows if (row[(- 1)] == TEST_QUERY)]
count = len(unexpected)
finally:
connection.close()
return count | def get_num_connections() -> int:
'\n Connect to the "postgres" database to see pg_stat_activity data.\n\n We need raw psycopg2 cursor access here, because the test-runner creates a separate\n test database otherwise.\n '
handler = ConnectionHandler(databases={'default': settings.POSTGRES_CONN_PARAMS})
handler.ensure_defaults('default')
connection = handler['default']
app_user = settings.DATABASES['default']['USER']
time.sleep(1)
try:
with connection.cursor() as cursor:
cursor.execute(SHOW_CONNECTIONS, [app_user])
rows = cursor.fetchall()
headers = [x.name for x in cursor.description]
tabulated = tabulate(rows, headers=headers)
print('Open connections:')
print(tabulated)
unexpected = [row for row in rows if (row[(- 1)] == TEST_QUERY)]
count = len(unexpected)
finally:
connection.close()
return count<|docstring|>Connect to the "postgres" database to see pg_stat_activity data.
We need raw psycopg2 cursor access here, because the test-runner creates a separate
test database otherwise.<|endoftext|> |
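SHOW_CONNECTIONS itself is defined outside this excerpt; given the usename parameter and the query column read from row[-1], a plausible shape (an assumption, not the source constant) is:

    SHOW_CONNECTIONS = (
        "SELECT pid, state, query "
        "FROM pg_stat_activity "
        "WHERE usename = %s"
    )
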
f5836188d7a69657e4bdf93e6a4ce4b389d6eff9f969bfd9e5609f5974861583 | def test_create(self):
'\n Ensure we can create a package if user has permission.\n '
self.client.force_authenticate(user=self.admin)
data = {'name': 'basic_package', 'details': '10 reservations package', 'available': True, 'price': 50, 'reservations': 10, 'exclusive_memberships': [reverse('membership-detail', args=[self.membership.id])]}
response = self.client.post(reverse('package-list'), data, format='json')
content = {'details': '10 reservations package', 'exclusive_memberships': [f'http://testserver/memberships/{self.membership.id}'], 'available': True, 'name': 'basic_package', 'price': '50.00', 'reservations': 10, 'available_on_product_types': [], 'available_on_products': [], 'options': []}
response_content = json.loads(response.content)
del response_content['id']
del response_content['url']
self.assertEqual(remove_translation_fields(response_content), content)
self.assertEqual(response.status_code, status.HTTP_201_CREATED) | Ensure we can create a package if user has permission. | store/tests/tests_viewset_Package.py | test_create | RignonNoel/Blitz-API | 3 | python | def test_create(self):
'\n \n '
self.client.force_authenticate(user=self.admin)
data = {'name': 'basic_package', 'details': '10 reservations package', 'available': True, 'price': 50, 'reservations': 10, 'exclusive_memberships': [reverse('membership-detail', args=[self.membership.id])]}
response = self.client.post(reverse('package-list'), data, format='json')
content = {'details': '10 reservations package', 'exclusive_memberships': [f'http://testserver/memberships/{self.membership.id}'], 'available': True, 'name': 'basic_package', 'price': '50.00', 'reservations': 10, 'available_on_product_types': [], 'available_on_products': [], 'options': []}
response_content = json.loads(response.content)
del response_content['id']
del response_content['url']
self.assertEqual(remove_translation_fields(response_content), content)
self.assertEqual(response.status_code, status.HTTP_201_CREATED) | def test_create(self):
'\n \n '
self.client.force_authenticate(user=self.admin)
data = {'name': 'basic_package', 'details': '10 reservations package', 'available': True, 'price': 50, 'reservations': 10, 'exclusive_memberships': [reverse('membership-detail', args=[self.membership.id])]}
response = self.client.post(reverse('package-list'), data, format='json')
content = {'details': '10 reservations package', 'exclusive_memberships': [f'http://testserver/memberships/{self.membership.id}'], 'available': True, 'name': 'basic_package', 'price': '50.00', 'reservations': 10, 'available_on_product_types': [], 'available_on_products': [], 'options': []}
response_content = json.loads(response.content)
del response_content['id']
del response_content['url']
self.assertEqual(remove_translation_fields(response_content), content)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)<|docstring|>Ensure we can create a package if user has permission.<|endoftext|> |
5b402a84e3356c749958fe0f85bebc06074ba671b292cc8c6036e84703b915a8 | def test_create_without_permission(self):
"\n Ensure we can't create a package if user has no permission.\n "
self.client.force_authenticate(user=self.user)
data = {'name': 'basic_package', 'details': '10 reservations package', 'price': 50, 'reservations': 10}
response = self.client.post(reverse('package-list'), data, format='json')
content = {'detail': 'You do not have permission to perform this action.'}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) | Ensure we can't create a package if user has no permission. | store/tests/tests_viewset_Package.py | test_create_without_permission | RignonNoel/Blitz-API | 3 | python | def test_create_without_permission(self):
"\n \n "
self.client.force_authenticate(user=self.user)
data = {'name': 'basic_package', 'details': '10 reservations package', 'price': 50, 'reservations': 10}
response = self.client.post(reverse('package-list'), data, format='json')
content = {'detail': 'You do not have permission to perform this action.'}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) | def test_create_without_permission(self):
"\n \n "
self.client.force_authenticate(user=self.user)
data = {'name': 'basic_package', 'details': '10 reservations package', 'price': 50, 'reservations': 10}
response = self.client.post(reverse('package-list'), data, format='json')
content = {'detail': 'You do not have permission to perform this action.'}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)<|docstring|>Ensure we can't create a package if user has no permission.<|endoftext|> |
8632cdf0ebd6a04d109969152f1df11f661761b0108cfed320e81a5b8b3f7e5d | def test_create_missing_field(self):
"\n Ensure we can't create a package when required field are missing.\n "
self.client.force_authenticate(user=self.admin)
data = {}
response = self.client.post(reverse('package-list'), data, format='json')
content = {'price': ['This field is required.'], 'reservations': ['This field is required.'], 'available': ['This field is required.']}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) | Ensure we can't create a package when required field are missing. | store/tests/tests_viewset_Package.py | test_create_missing_field | RignonNoel/Blitz-API | 3 | python | def test_create_missing_field(self):
"\n \n "
self.client.force_authenticate(user=self.admin)
data = {}
response = self.client.post(reverse('package-list'), data, format='json')
content = {'price': ['This field is required.'], 'reservations': ['This field is required.'], 'available': ['This field is required.']}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) | def test_create_missing_field(self):
"\n \n "
self.client.force_authenticate(user=self.admin)
data = {}
response = self.client.post(reverse('package-list'), data, format='json')
content = {'price': ['This field is required.'], 'reservations': ['This field is required.'], 'available': ['This field is required.']}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)<|docstring|>Ensure we can't create a package when required field are missing.<|endoftext|> |
be61001b2137c67d521a97751a160e0057a6eff6d3cde200884dbd7f9e6c70a2 | def test_create_null_field(self):
"\n Ensure we can't create a package when required field are null.\n "
self.client.force_authenticate(user=self.admin)
data = {'name': None, 'details': None, 'price': None, 'reservations': None, 'exclusive_memberships': None, 'available': None}
response = self.client.post(reverse('package-list'), data, format='json')
content = {'exclusive_memberships': ['This field may not be null.'], 'name': ['This field may not be null.'], 'price': ['This field may not be null.'], 'reservations': ['This field may not be null.'], 'available': ['This field may not be null.']}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) | Ensure we can't create a package when required field are null. | store/tests/tests_viewset_Package.py | test_create_null_field | RignonNoel/Blitz-API | 3 | python | def test_create_null_field(self):
"\n \n "
self.client.force_authenticate(user=self.admin)
data = {'name': None, 'details': None, 'price': None, 'reservations': None, 'exclusive_memberships': None, 'available': None}
response = self.client.post(reverse('package-list'), data, format='json')
content = {'exclusive_memberships': ['This field may not be null.'], 'name': ['This field may not be null.'], 'price': ['This field may not be null.'], 'reservations': ['This field may not be null.'], 'available': ['This field may not be null.']}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) | def test_create_null_field(self):
"\n \n "
self.client.force_authenticate(user=self.admin)
data = {'name': None, 'details': None, 'price': None, 'reservations': None, 'exclusive_memberships': None, 'available': None}
response = self.client.post(reverse('package-list'), data, format='json')
content = {'exclusive_memberships': ['This field may not be null.'], 'name': ['This field may not be null.'], 'price': ['This field may not be null.'], 'reservations': ['This field may not be null.'], 'available': ['This field may not be null.']}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)<|docstring|>Ensure we can't create a package when required field are null.<|endoftext|> |
129165f16c5d9138598274cf5d82f98da44f798ca47fe58a44a7aa6bd6bbd71f | def test_create_invalid_field(self):
"\n Ensure we can't create a package when required field are invalid.\n "
self.client.force_authenticate(user=self.admin)
data = {'name': (1,), 'details': (1,), 'price': '', 'reservations': '', 'exclusive_memberships': (1,), 'available': (1,)}
response = self.client.post(reverse('package-list'), data, format='json')
content = {'details': ['Not a valid string.'], 'exclusive_memberships': ['Incorrect type. Expected URL string, received int.'], 'name': ['Not a valid string.'], 'price': ['A valid number is required.'], 'reservations': ['A valid integer is required.'], 'available': ['Must be a valid boolean.']}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) | Ensure we can't create a package when required field are invalid. | store/tests/tests_viewset_Package.py | test_create_invalid_field | RignonNoel/Blitz-API | 3 | python | def test_create_invalid_field(self):
"\n \n "
self.client.force_authenticate(user=self.admin)
data = {'name': (1,), 'details': (1,), 'price': '', 'reservations': '', 'exclusive_memberships': (1,), 'available': (1,)}
response = self.client.post(reverse('package-list'), data, format='json')
content = {'details': ['Not a valid string.'], 'exclusive_memberships': ['Incorrect type. Expected URL string, received int.'], 'name': ['Not a valid string.'], 'price': ['A valid number is required.'], 'reservations': ['A valid integer is required.'], 'available': ['Must be a valid boolean.']}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) | def test_create_invalid_field(self):
"\n \n "
self.client.force_authenticate(user=self.admin)
data = {'name': (1,), 'details': (1,), 'price': '', 'reservations': '', 'exclusive_memberships': (1,), 'available': (1,)}
response = self.client.post(reverse('package-list'), data, format='json')
content = {'details': ['Not a valid string.'], 'exclusive_memberships': ['Incorrect type. Expected URL string, received int.'], 'name': ['Not a valid string.'], 'price': ['A valid number is required.'], 'reservations': ['A valid integer is required.'], 'available': ['Must be a valid boolean.']}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)<|docstring|>Ensure we can't create a package when required field are invalid.<|endoftext|> |
2893aac59bec654eb5bbc43f63ca4f6096965ba6859a9f4e3f4e09ff53a35bf3 | def test_create_negative_price_and_reservations(self):
"\n Ensure we can't create a package with a negative price.\n "
self.client.force_authenticate(user=self.admin)
data = {'name': 'basic_package', 'details': '10 reservations package', 'price': (- 1), 'reservations': (- 10), 'exclusive_memberships': [reverse('membership-detail', args=[self.membership.id])], 'available': True}
response = self.client.post(reverse('package-list'), data, format='json')
content = {'price': ['Ensure this value is greater than or equal to 0.1.'], 'reservations': ['Ensure this value is greater than or equal to 1.']}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) | Ensure we can't create a package with a negative price. | store/tests/tests_viewset_Package.py | test_create_negative_price_and_reservations | RignonNoel/Blitz-API | 3 | python | def test_create_negative_price_and_reservations(self):
"\n \n "
self.client.force_authenticate(user=self.admin)
data = {'name': 'basic_package', 'details': '10 reservations package', 'price': (- 1), 'reservations': (- 10), 'exclusive_memberships': [reverse('membership-detail', args=[self.membership.id])], 'available': True}
response = self.client.post(reverse('package-list'), data, format='json')
content = {'price': ['Ensure this value is greater than or equal to 0.1.'], 'reservations': ['Ensure this value is greater than or equal to 1.']}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) | def test_create_negative_price_and_reservations(self):
"\n \n "
self.client.force_authenticate(user=self.admin)
data = {'name': 'basic_package', 'details': '10 reservations package', 'price': (- 1), 'reservations': (- 10), 'exclusive_memberships': [reverse('membership-detail', args=[self.membership.id])], 'available': True}
response = self.client.post(reverse('package-list'), data, format='json')
content = {'price': ['Ensure this value is greater than or equal to 0.1.'], 'reservations': ['Ensure this value is greater than or equal to 1.']}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)<|docstring|>Ensure we can't create a package with a negative price.<|endoftext|> |
f4f9d3fae6074e672c6f697f63bc0e0fdad5d8a61b1c0c87afce763eee940715 | def test_update(self):
'\n Ensure we can update a package.\n '
self.client.force_authenticate(user=self.admin)
data = {'name': 'extreme_package_updated', 'details': '999 reservations package', 'price': 1, 'reservations': 999, 'available': True}
response = self.client.put(reverse('package-detail', args=[self.package.id]), data, format='json')
content = {'available': True, 'details': '999 reservations package', 'exclusive_memberships': [], 'id': self.package.id, 'name': 'extreme_package_updated', 'price': '1.00', 'reservations': 999, 'url': f'http://testserver/packages/{self.package.id}', 'available_on_product_types': [], 'available_on_products': [], 'options': []}
self.assertEqual(remove_translation_fields(json.loads(response.content)), content)
self.assertEqual(response.status_code, status.HTTP_200_OK) | Ensure we can update a package. | store/tests/tests_viewset_Package.py | test_update | RignonNoel/Blitz-API | 3 | python | def test_update(self):
'\n \n '
self.client.force_authenticate(user=self.admin)
data = {'name': 'extreme_package_updated', 'details': '999 reservations package', 'price': 1, 'reservations': 999, 'available': True}
response = self.client.put(reverse('package-detail', args=[self.package.id]), data, format='json')
content = {'available': True, 'details': '999 reservations package', 'exclusive_memberships': [], 'id': self.package.id, 'name': 'extreme_package_updated', 'price': '1.00', 'reservations': 999, 'url': f'http://testserver/packages/{self.package.id}', 'available_on_product_types': [], 'available_on_products': [], 'options': []}
self.assertEqual(remove_translation_fields(json.loads(response.content)), content)
self.assertEqual(response.status_code, status.HTTP_200_OK) | def test_update(self):
'\n \n '
self.client.force_authenticate(user=self.admin)
data = {'name': 'extreme_package_updated', 'details': '999 reservations package', 'price': 1, 'reservations': 999, 'available': True}
response = self.client.put(reverse('package-detail', args=[self.package.id]), data, format='json')
content = {'available': True, 'details': '999 reservations package', 'exclusive_memberships': [], 'id': self.package.id, 'name': 'extreme_package_updated', 'price': '1.00', 'reservations': 999, 'url': f'http://testserver/packages/{self.package.id}', 'available_on_product_types': [], 'available_on_products': [], 'options': []}
self.assertEqual(remove_translation_fields(json.loads(response.content)), content)
self.assertEqual(response.status_code, status.HTTP_200_OK)<|docstring|>Ensure we can update a package.<|endoftext|> |
f9766e87581a62deeae119aaf2ba0f923425b59f70c5ef77887988bccf69182c | def test_update_partial(self):
'\n Ensure we can partially update a package.\n '
self.client.force_authenticate(user=self.admin)
data = {'details': 'New very cool details', 'price': 99}
response = self.client.patch(reverse('package-detail', args=[self.package.id]), data, format='json')
content = {'available': True, 'details': 'New very cool details', 'exclusive_memberships': [], 'id': self.package.id, 'name': 'extreme_package', 'price': '99.00', 'reservations': 100, 'url': f'http://testserver/packages/{self.package.id}', 'available_on_product_types': [], 'available_on_products': [], 'options': []}
self.assertEqual(remove_translation_fields(json.loads(response.content)), content)
self.assertEqual(response.status_code, status.HTTP_200_OK) | Ensure we can partially update a package. | store/tests/tests_viewset_Package.py | test_update_partial | RignonNoel/Blitz-API | 3 | python | def test_update_partial(self):
'\n \n '
self.client.force_authenticate(user=self.admin)
data = {'details': 'New very cool details', 'price': 99}
response = self.client.patch(reverse('package-detail', args=[self.package.id]), data, format='json')
content = {'available': True, 'details': 'New very cool details', 'exclusive_memberships': [], 'id': self.package.id, 'name': 'extreme_package', 'price': '99.00', 'reservations': 100, 'url': f'http://testserver/packages/{self.package.id}', 'available_on_product_types': [], 'available_on_products': [], 'options': []}
self.assertEqual(remove_translation_fields(json.loads(response.content)), content)
self.assertEqual(response.status_code, status.HTTP_200_OK) | def test_update_partial(self):
'\n \n '
self.client.force_authenticate(user=self.admin)
data = {'details': 'New very cool details', 'price': 99}
response = self.client.patch(reverse('package-detail', args=[self.package.id]), data, format='json')
content = {'available': True, 'details': 'New very cool details', 'exclusive_memberships': [], 'id': self.package.id, 'name': 'extreme_package', 'price': '99.00', 'reservations': 100, 'url': f'http://testserver/packages/{self.package.id}', 'available_on_product_types': [], 'available_on_products': [], 'options': []}
self.assertEqual(remove_translation_fields(json.loads(response.content)), content)
self.assertEqual(response.status_code, status.HTTP_200_OK)<|docstring|>Ensure we can partially update a package.<|endoftext|> |
fe2a23f767272b8fcec3b928ae0cbfc625642c950582d0b296d7c35acb244527 | def test_delete_as_admin(self):
'\n Ensure we can make a package unavailable as an admin.\n '
self.client.force_authenticate(user=self.admin)
response = self.client.delete(reverse('package-detail', args=[self.package.id]))
package = self.package
package.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertFalse(self.package.available) | Ensure we can make a package unavailable as an admin. | store/tests/tests_viewset_Package.py | test_delete_as_admin | RignonNoel/Blitz-API | 3 | python | def test_delete_as_admin(self):
'\n \n '
self.client.force_authenticate(user=self.admin)
response = self.client.delete(reverse('package-detail', args=[self.package.id]))
package = self.package
package.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertFalse(self.package.available) | def test_delete_as_admin(self):
'\n \n '
self.client.force_authenticate(user=self.admin)
response = self.client.delete(reverse('package-detail', args=[self.package.id]))
package = self.package
package.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertFalse(self.package.available)<|docstring|>Ensure we can make a package unavailable as an admin.<|endoftext|> |
6b55b177ad6f494321133ce6b3c84454cb1351422db32ea3ba20d4dd142098bc | def test_delete_as_user(self):
"\n Ensure that a user can't make a package unavailable.\n "
self.client.force_authenticate(user=self.user)
response = self.client.delete(reverse('package-detail', args=[self.package.id]))
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) | Ensure that a user can't make a package unavailable. | store/tests/tests_viewset_Package.py | test_delete_as_user | RignonNoel/Blitz-API | 3 | python | def test_delete_as_user(self):
"\n \n "
self.client.force_authenticate(user=self.user)
response = self.client.delete(reverse('package-detail', args=[self.package.id]))
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) | def test_delete_as_user(self):
"\n \n "
self.client.force_authenticate(user=self.user)
response = self.client.delete(reverse('package-detail', args=[self.package.id]))
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)<|docstring|>Ensure that a user can't make a package unavailable.<|endoftext|> |
58f79fd1ce88854e8902d769a91da06be2d6f8b464a4c10961a2c35a162a2294 | def test_delete_inexistent(self):
'\n Ensure that deleting a non-existent package does nothing.\n '
self.client.force_authenticate(user=self.admin)
response = self.client.delete(reverse('package-detail', kwargs={'pk': 999}))
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) | Ensure that deleting a non-existent package does nothing. | store/tests/tests_viewset_Package.py | test_delete_inexistent | RignonNoel/Blitz-API | 3 | python | def test_delete_inexistent(self):
'\n \n '
self.client.force_authenticate(user=self.admin)
response = self.client.delete(reverse('package-detail', kwargs={'pk': 999}))
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) | def test_delete_inexistent(self):
'\n \n '
self.client.force_authenticate(user=self.admin)
response = self.client.delete(reverse('package-detail', kwargs={'pk': 999}))
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)<|docstring|>Ensure that deleting a non-existent package does nothing.<|endoftext|> |
dad9bcd585a29aa42cb57ccdb0f20bf62ccac1b6afdf81c7fb1c195d6cb7f5b6 | def test_list(self):
'\n Ensure we can list packages as an unauthenticated user.\n '
response = self.client.get(reverse('package-list'), format='json')
data = json.loads(response.content)
content = {'count': 1, 'next': None, 'previous': None, 'results': [{'available': True, 'details': '100 reservations package', 'exclusive_memberships': [], 'id': self.package.id, 'name': 'extreme_package', 'price': '400.00', 'reservations': 100, 'url': f'http://testserver/packages/{self.package.id}', 'available_on_product_types': [], 'available_on_products': [], 'options': []}]}
self.assertEqual(data, content)
self.assertEqual(response.status_code, status.HTTP_200_OK) | Ensure we can list packages as an unauthenticated user. | store/tests/tests_viewset_Package.py | test_list | RignonNoel/Blitz-API | 3 | python | def test_list(self):
'\n \n '
response = self.client.get(reverse('package-list'), format='json')
data = json.loads(response.content)
content = {'count': 1, 'next': None, 'previous': None, 'results': [{'available': True, 'details': '100 reservations package', 'exclusive_memberships': [], 'id': self.package.id, 'name': 'extreme_package', 'price': '400.00', 'reservations': 100, 'url': f'http://testserver/packages/{self.package.id}', 'available_on_product_types': [], 'available_on_products': [], 'options': []}]}
self.assertEqual(data, content)
self.assertEqual(response.status_code, status.HTTP_200_OK) | def test_list(self):
'\n \n '
response = self.client.get(reverse('package-list'), format='json')
data = json.loads(response.content)
content = {'count': 1, 'next': None, 'previous': None, 'results': [{'available': True, 'details': '100 reservations package', 'exclusive_memberships': [], 'id': self.package.id, 'name': 'extreme_package', 'price': '400.00', 'reservations': 100, 'url': f'http://testserver/packages/{self.package.id}', 'available_on_product_types': [], 'available_on_products': [], 'options': []}]}
self.assertEqual(data, content)
self.assertEqual(response.status_code, status.HTTP_200_OK)<|docstring|>Ensure we can list packages as an unauthenticated user.<|endoftext|> |
b4f1f005d2856c939b629c1c96cece1383fe5e94f102732420d55bd9ea1c67a8 | def test_list_as_admin(self):
'\n Ensure we can list all packages as an admin.\n '
self.client.force_authenticate(user=self.admin)
response = self.client.get(reverse('package-list'), format='json')
data = remove_translation_fields(json.loads(response.content))
data['results'] = [remove_translation_fields(m) for m in data['results']]
content = {'count': 2, 'next': None, 'previous': None, 'results': [{'available': True, 'details': '100 reservations package', 'exclusive_memberships': [], 'id': self.package.id, 'name': 'extreme_package', 'price': '400.00', 'reservations': 100, 'url': f'http://testserver/packages/{self.package.id}', 'available_on_product_types': [], 'available_on_products': [], 'options': []}, {'available': False, 'details': 'todo', 'exclusive_memberships': [], 'id': self.package_unavailable.id, 'name': 'pending_package', 'price': '400.00', 'reservations': 100, 'url': f'http://testserver/packages/{self.package_unavailable.id}', 'available_on_product_types': [], 'available_on_products': [], 'options': []}]}
self.assertEqual(data, content)
self.assertEqual(response.status_code, status.HTTP_200_OK) | Ensure we can list all packages as an admin. | store/tests/tests_viewset_Package.py | test_list_as_admin | RignonNoel/Blitz-API | 3 | python | def test_list_as_admin(self):
'\n \n '
self.client.force_authenticate(user=self.admin)
response = self.client.get(reverse('package-list'), format='json')
data = remove_translation_fields(json.loads(response.content))
data['results'] = [remove_translation_fields(m) for m in data['results']]
content = {'count': 2, 'next': None, 'previous': None, 'results': [{'available': True, 'details': '100 reservations package', 'exclusive_memberships': [], 'id': self.package.id, 'name': 'extreme_package', 'price': '400.00', 'reservations': 100, 'url': f'http://testserver/packages/{self.package.id}', 'available_on_product_types': [], 'available_on_products': [], 'options': []}, {'available': False, 'details': 'todo', 'exclusive_memberships': [], 'id': self.package_unavailable.id, 'name': 'pending_package', 'price': '400.00', 'reservations': 100, 'url': f'http://testserver/packages/{self.package_unavailable.id}', 'available_on_product_types': [], 'available_on_products': [], 'options': []}]}
self.assertEqual(data, content)
self.assertEqual(response.status_code, status.HTTP_200_OK) | def test_list_as_admin(self):
'\n \n '
self.client.force_authenticate(user=self.admin)
response = self.client.get(reverse('package-list'), format='json')
data = remove_translation_fields(json.loads(response.content))
data['results'] = [remove_translation_fields(m) for m in data['results']]
content = {'count': 2, 'next': None, 'previous': None, 'results': [{'available': True, 'details': '100 reservations package', 'exclusive_memberships': [], 'id': self.package.id, 'name': 'extreme_package', 'price': '400.00', 'reservations': 100, 'url': f'http://testserver/packages/{self.package.id}', 'available_on_product_types': [], 'available_on_products': [], 'options': []}, {'available': False, 'details': 'todo', 'exclusive_memberships': [], 'id': self.package_unavailable.id, 'name': 'pending_package', 'price': '400.00', 'reservations': 100, 'url': f'http://testserver/packages/{self.package_unavailable.id}', 'available_on_product_types': [], 'available_on_products': [], 'options': []}]}
self.assertEqual(data, content)
self.assertEqual(response.status_code, status.HTTP_200_OK)<|docstring|>Ensure we can list all packages as an admin.<|endoftext|> |
8a4bc919f40b07973408d1b2e30e5d151a22d57edf8355c4bba46f251eae14d8 | def test_read(self):
'\n Ensure we can read a package as an unauthenticated user.\n '
response = self.client.get(reverse('package-detail', args=[self.package.id]))
content = {'available': True, 'details': '100 reservations package', 'exclusive_memberships': [], 'id': self.package.id, 'name': 'extreme_package', 'price': '400.00', 'reservations': 100, 'url': f'http://testserver/packages/{self.package.id}', 'available_on_product_types': [], 'available_on_products': [], 'options': []}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_200_OK) | Ensure we can read a package as an unauthenticated user. | store/tests/tests_viewset_Package.py | test_read | RignonNoel/Blitz-API | 3 | python | def test_read(self):
'\n \n '
response = self.client.get(reverse('package-detail', args=[self.package.id]))
content = {'available': True, 'details': '100 reservations package', 'exclusive_memberships': [], 'id': self.package.id, 'name': 'extreme_package', 'price': '400.00', 'reservations': 100, 'url': f'http://testserver/packages/{self.package.id}', 'available_on_product_types': [], 'available_on_products': [], 'options': []}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_200_OK) | def test_read(self):
'\n \n '
response = self.client.get(reverse('package-detail', args=[self.package.id]))
content = {'available': True, 'details': '100 reservations package', 'exclusive_memberships': [], 'id': self.package.id, 'name': 'extreme_package', 'price': '400.00', 'reservations': 100, 'url': f'http://testserver/packages/{self.package.id}', 'available_on_product_types': [], 'available_on_products': [], 'options': []}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_200_OK)<|docstring|>Ensure we can read a package as an unauthenticated user.<|endoftext|> |
663c4099b8b5538d51a0e9b99c9c7d0653dea048814604020c7fac92cc947eea | def test_read_admin(self):
"\n Ensure we can read a package's order lines as an admin.\n "
self.client.force_authenticate(user=self.admin)
response = self.client.get(reverse('package-detail', args=[self.package.id]))
content = {'available': True, 'details': '100 reservations package', 'exclusive_memberships': [], 'id': self.package.id, 'name': 'extreme_package', 'price': '400.00', 'reservations': 100, 'url': f'http://testserver/packages/{self.package.id}', 'available_on_product_types': [], 'available_on_products': [], 'options': []}
self.assertEqual(remove_translation_fields(json.loads(response.content)), content)
self.assertEqual(response.status_code, status.HTTP_200_OK) | Ensure we can read a package's order lines as an admin. | store/tests/tests_viewset_Package.py | test_read_admin | RignonNoel/Blitz-API | 3 | python | def test_read_admin(self):
"\n \n "
self.client.force_authenticate(user=self.admin)
response = self.client.get(reverse('package-detail', args=[self.package.id]))
content = {'available': True, 'details': '100 reservations package', 'exclusive_memberships': [], 'id': self.package.id, 'name': 'extreme_package', 'price': '400.00', 'reservations': 100, 'url': f'http://testserver/packages/{self.package.id}', 'available_on_product_types': [], 'available_on_products': [], 'options': []}
self.assertEqual(remove_translation_fields(json.loads(response.content)), content)
self.assertEqual(response.status_code, status.HTTP_200_OK) | def test_read_admin(self):
"\n \n "
self.client.force_authenticate(user=self.admin)
response = self.client.get(reverse('package-detail', args=[self.package.id]))
content = {'available': True, 'details': '100 reservations package', 'exclusive_memberships': [], 'id': self.package.id, 'name': 'extreme_package', 'price': '400.00', 'reservations': 100, 'url': f'http://testserver/packages/{self.package.id}', 'available_on_product_types': [], 'available_on_products': [], 'options': []}
self.assertEqual(remove_translation_fields(json.loads(response.content)), content)
self.assertEqual(response.status_code, status.HTTP_200_OK)<|docstring|>Ensure we can read a package's order lines as an admin.<|endoftext|> |
c5555399dc00ec3add0754d35b98285e79eb480a8aa2edca3f8c1ad16c8e864c | def test_read_non_existent(self):
"\n Ensure we get not found when asking for a package that doesn't\n exist.\n "
response = self.client.get(reverse('package-detail', kwargs={'pk': 999}))
content = {'detail': 'Not found.'}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) | Ensure we get not found when asking for a package that doesn't
exist. | store/tests/tests_viewset_Package.py | test_read_non_existent | RignonNoel/Blitz-API | 3 | python | def test_read_non_existent(self):
"\n Ensure we get not found when asking for a package that doesn't\n exist.\n "
response = self.client.get(reverse('package-detail', kwargs={'pk': 999}))
content = {'detail': 'Not found.'}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) | def test_read_non_existent(self):
"\n Ensure we get not found when asking for a package that doesn't\n exist.\n "
response = self.client.get(reverse('package-detail', kwargs={'pk': 999}))
content = {'detail': 'Not found.'}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)<|docstring|>Ensure we get not found when asking for a package that doesn't
exist.<|endoftext|> |
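Editor's note: the tests above repeat the same request-decode-assert pattern. A minimal sketch of a helper the suite could share (hypothetical, not part of the Blitz-API repository; it assumes the json and status imports already used by these tests):

def get_json(client, url, expected_status=status.HTTP_200_OK):
    # Hypothetical helper: issue a GET request and return the decoded JSON body.
    response = client.get(url, format='json')
    assert response.status_code == expected_status
    return json.loads(response.content)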
e42cb61dd576b0ad0eb3ea92402136c17e1aab9aa93d5075f97a24665d48d002 | def get_estimation_options(protocol_replacements):
'Returns the estimation options which describe the absolute uncertainties\n to within which properties should be estimated.\n\n Parameters\n ----------\n protocol_replacements: dict of str and str\n A dictionary with keys of the protocol classes to replace, and\n values of the protocol class to use as a replacement.\n\n Returns\n -------\n options: PropertyEstimatorOptions\n The estimation options.\n '
options = PropertyEstimatorOptions()
options.allowed_calculation_layers = ['SimulationLayer']
options.workflow_options = {'Density': {'SimulationLayer': WorkflowOptions(convergence_mode=WorkflowOptions.ConvergenceMode.AbsoluteUncertainty, absolute_uncertainty=((0.45 * unit.kilogram) * (unit.meter ** (- 3))), protocol_replacements=protocol_replacements)}, 'DielectricConstant': {'SimulationLayer': WorkflowOptions(convergence_mode=WorkflowOptions.ConvergenceMode.AbsoluteUncertainty, absolute_uncertainty=(1.5 * unit.dimensionless), protocol_replacements=protocol_replacements)}, 'EnthalpyOfVaporization': {'SimulationLayer': WorkflowOptions(convergence_mode=WorkflowOptions.ConvergenceMode.AbsoluteUncertainty, absolute_uncertainty=((0.65 * unit.kilojoule) / unit.mole), protocol_replacements=protocol_replacements)}, 'EnthalpyOfMixing': {'SimulationLayer': WorkflowOptions(convergence_mode=WorkflowOptions.ConvergenceMode.AbsoluteUncertainty, absolute_uncertainty=((0.02 * unit.kilojoule) / unit.mole), protocol_replacements=protocol_replacements)}, 'ExcessMolarVolume': {'SimulationLayer': WorkflowOptions(convergence_mode=WorkflowOptions.ConvergenceMode.AbsoluteUncertainty, absolute_uncertainty=((2e-08 * (unit.meter ** 3)) / unit.mole), protocol_replacements=protocol_replacements)}}
return options | Returns the estimation options which describe the absolute uncertainties
to within which properties should be estimated.
Parameters
----------
protocol_replacements: dict of str and str
A dictionary with keys of the protocol classes to replace, and
values of the protocol class to use as a replacement.
Returns
-------
options: PropertyEstimatorOptions
The estimation options.
'Returns the estimation options which describe the absolute uncertainties\n to within which properties should be estimated.\n\n Parameters\n ----------\n protocol_replacements: dict of str and str\n A dictionary with keys of the protocol classes to replace, and\n values of the protocol class to use as a replacement.\n\n Returns\n -------\n options: PropertyEstimatorOptions\n The estimation options.\n '
options = PropertyEstimatorOptions()
options.allowed_calculation_layers = ['SimulationLayer']
options.workflow_options = {'Density': {'SimulationLayer': WorkflowOptions(convergence_mode=WorkflowOptions.ConvergenceMode.AbsoluteUncertainty, absolute_uncertainty=((0.45 * unit.kilogram) * (unit.meter ** (- 3))), protocol_replacements=protocol_replacements)}, 'DielectricConstant': {'SimulationLayer': WorkflowOptions(convergence_mode=WorkflowOptions.ConvergenceMode.AbsoluteUncertainty, absolute_uncertainty=(1.5 * unit.dimensionless), protocol_replacements=protocol_replacements)}, 'EnthalpyOfVaporization': {'SimulationLayer': WorkflowOptions(convergence_mode=WorkflowOptions.ConvergenceMode.AbsoluteUncertainty, absolute_uncertainty=((0.65 * unit.kilojoule) / unit.mole), protocol_replacements=protocol_replacements)}, 'EnthalpyOfMixing': {'SimulationLayer': WorkflowOptions(convergence_mode=WorkflowOptions.ConvergenceMode.AbsoluteUncertainty, absolute_uncertainty=((0.02 * unit.kilojoule) / unit.mole), protocol_replacements=protocol_replacements)}, 'ExcessMolarVolume': {'SimulationLayer': WorkflowOptions(convergence_mode=WorkflowOptions.ConvergenceMode.AbsoluteUncertainty, absolute_uncertainty=((2e-08 * (unit.meter ** 3)) / unit.mole), protocol_replacements=protocol_replacements)}}
return options | def get_estimation_options(protocol_replacements):
'Returns the estimation options which describe the absolute uncertainties\n to within which properties should be estimated.\n\n Parameters\n ----------\n protocol_replacements: dict of str and str\n A dictionary with keys of the protocol classes to replace, and\n values of the protocol class to use as a replacement.\n\n Returns\n -------\n options: PropertyEstimatorOptions\n The estimation options.\n '
options = PropertyEstimatorOptions()
options.allowed_calculation_layers = ['SimulationLayer']
options.workflow_options = {'Density': {'SimulationLayer': WorkflowOptions(convergence_mode=WorkflowOptions.ConvergenceMode.AbsoluteUncertainty, absolute_uncertainty=((0.45 * unit.kilogram) * (unit.meter ** (- 3))), protocol_replacements=protocol_replacements)}, 'DielectricConstant': {'SimulationLayer': WorkflowOptions(convergence_mode=WorkflowOptions.ConvergenceMode.AbsoluteUncertainty, absolute_uncertainty=(1.5 * unit.dimensionless), protocol_replacements=protocol_replacements)}, 'EnthalpyOfVaporization': {'SimulationLayer': WorkflowOptions(convergence_mode=WorkflowOptions.ConvergenceMode.AbsoluteUncertainty, absolute_uncertainty=((0.65 * unit.kilojoule) / unit.mole), protocol_replacements=protocol_replacements)}, 'EnthalpyOfMixing': {'SimulationLayer': WorkflowOptions(convergence_mode=WorkflowOptions.ConvergenceMode.AbsoluteUncertainty, absolute_uncertainty=((0.02 * unit.kilojoule) / unit.mole), protocol_replacements=protocol_replacements)}, 'ExcessMolarVolume': {'SimulationLayer': WorkflowOptions(convergence_mode=WorkflowOptions.ConvergenceMode.AbsoluteUncertainty, absolute_uncertainty=((2e-08 * (unit.meter ** 3)) / unit.mole), protocol_replacements=protocol_replacements)}}
return options<|docstring|>Returns the estimation options which describe the absolute uncertainties
to within which properties should be estimated.
Parameters
----------
protocol_replacements: dict of str and str
A dictionary with keys of the protocol classes to replace, and
values of the protocol class to use as a replacement.
Returns
-------
options: PropertyEstimatorOptions
The estimation options.<|endoftext|>
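Editor's note: the workflow_options mapping above constructs the same WorkflowOptions five times with only the target uncertainty changing. A sketch of an equivalent loop-based construction; the TARGET_UNCERTAINTIES name is hypothetical, and the import paths are assumptions matching how this script uses unit, PropertyEstimatorOptions and WorkflowOptions:

from propertyestimator import unit  # assumed import path
from propertyestimator.client import PropertyEstimatorOptions  # assumed import path
from propertyestimator.workflow import WorkflowOptions  # assumed import path

# Same target absolute uncertainties as in get_estimation_options above.
TARGET_UNCERTAINTIES = {
    'Density': (0.45 * unit.kilogram) * (unit.meter ** (-3)),
    'DielectricConstant': 1.5 * unit.dimensionless,
    'EnthalpyOfVaporization': (0.65 * unit.kilojoule) / unit.mole,
    'EnthalpyOfMixing': (0.02 * unit.kilojoule) / unit.mole,
    'ExcessMolarVolume': (2e-08 * (unit.meter ** 3)) / unit.mole,
}

def get_estimation_options_compact(protocol_replacements):
    # Builds the same options object as get_estimation_options, one entry per property type.
    options = PropertyEstimatorOptions()
    options.allowed_calculation_layers = ['SimulationLayer']
    options.workflow_options = {
        property_type: {'SimulationLayer': WorkflowOptions(
            convergence_mode=WorkflowOptions.ConvergenceMode.AbsoluteUncertainty,
            absolute_uncertainty=uncertainty,
            protocol_replacements=protocol_replacements)}
        for property_type, uncertainty in TARGET_UNCERTAINTIES.items()}
    return options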
0034252778977d7c520f6e511f4268610e5263889c4f88d6083985d541e1bbb4 | def setup_server(max_number_of_workers=1, conda_environment='propertyestimator', worker_memory=(5 * unit.gigabyte), port=8000):
'Sets up an estimation server which will take advantage of\n an LSF based HPC cluster with access to nVidia GPUs.\n\n Parameters\n ----------\n max_number_of_workers: int\n The maximum number of workers to adaptively insert into\n the queuing system.\n conda_environment: str\n The name of the conda environment in which the propertyestimator\n package is installed.\n worker_memory: Quantity\n The maximum amount of memory to request per worker.\n port: int\n The port that the server should listen for estimation requests on.\n '
working_directory = 'working_directory'
storage_directory = 'storage_directory'
if os.path.isdir(working_directory):
shutil.rmtree(working_directory)
queue_resources = QueueWorkerResources(number_of_threads=1, number_of_gpus=1, preferred_gpu_toolkit=QueueWorkerResources.GPUToolkit.CUDA, per_thread_memory_limit=worker_memory, wallclock_time_limit='05:59')
worker_script_commands = [f'conda activate {conda_environment}', f'module load cuda/9.2']
calculation_backend = DaskLSFBackend(minimum_number_of_workers=1, maximum_number_of_workers=max_number_of_workers, resources_per_worker=queue_resources, queue_name='gpuqueue', setup_script_commands=worker_script_commands, adaptive_interval='1000ms')
storage_backend = LocalFileStorage(storage_directory)
PropertyEstimatorServer(calculation_backend=calculation_backend, storage_backend=storage_backend, port=port, working_directory=working_directory) | Sets up an estimation server which will take advantage of
an LSF based HPC cluster with access to nVidia GPUs.
Parameters
----------
max_number_of_workers: int
The maximum number of workers to adaptively insert into
the queuing system.
conda_environment: str
The name of the conda environment in which the propertyestimator
package is installed.
worker_memory: Quantity
The maximum amount of memory to request per worker.
port: int
The port that the server should listen for estimation requests on. | 3_physical_property_datasets/physical_properties/estimation/run.py | setup_server | hyejang/Parsley1.0_SI | 6 | python | def setup_server(max_number_of_workers=1, conda_environment='propertyestimator', worker_memory=(5 * unit.gigabyte), port=8000):
'Sets up an estimation server which will take advantage of\n an LSF based HPC cluster with access to nVidia GPUs.\n\n Parameters\n ----------\n max_number_of_workers: int\n The maximum number of workers to adaptively insert into\n the queuing system.\n conda_environment: str\n The name of the conda environment in which the propertyestimator\n package is installed.\n worker_memory: Quantity\n The maximum amount of memory to request per worker.\n port: int\n The port that the server should listen for estimation requests on.\n '
working_directory = 'working_directory'
storage_directory = 'storage_directory'
if os.path.isdir(working_directory):
shutil.rmtree(working_directory)
queue_resources = QueueWorkerResources(number_of_threads=1, number_of_gpus=1, preferred_gpu_toolkit=QueueWorkerResources.GPUToolkit.CUDA, per_thread_memory_limit=worker_memory, wallclock_time_limit='05:59')
worker_script_commands = [f'conda activate {conda_environment}', f'module load cuda/9.2']
calculation_backend = DaskLSFBackend(minimum_number_of_workers=1, maximum_number_of_workers=max_number_of_workers, resources_per_worker=queue_resources, queue_name='gpuqueue', setup_script_commands=worker_script_commands, adaptive_interval='1000ms')
storage_backend = LocalFileStorage(storage_directory)
PropertyEstimatorServer(calculation_backend=calculation_backend, storage_backend=storage_backend, port=port, working_directory=working_directory) | def setup_server(max_number_of_workers=1, conda_environment='propertyestimator', worker_memory=(5 * unit.gigabyte), port=8000):
'Sets up an estimation server which will take advantage of\n an LSF based HPC cluster with access to nVidia GPUs.\n\n Parameters\n ----------\n max_number_of_workers: int\n The maximum number of workers to adaptively insert into\n the queuing system.\n conda_environment: str\n The name of the conda environment in which the propertyestimator\n package is installed.\n worker_memory: Quantity\n The maximum amount of memory to request per worker.\n port: int\n The port that the server should listen for estimation requests on.\n '
working_directory = 'working_directory'
storage_directory = 'storage_directory'
if os.path.isdir(working_directory):
shutil.rmtree(working_directory)
queue_resources = QueueWorkerResources(number_of_threads=1, number_of_gpus=1, preferred_gpu_toolkit=QueueWorkerResources.GPUToolkit.CUDA, per_thread_memory_limit=worker_memory, wallclock_time_limit='05:59')
worker_script_commands = [f'conda activate {conda_environment}', f'module load cuda/9.2']
calculation_backend = DaskLSFBackend(minimum_number_of_workers=1, maximum_number_of_workers=max_number_of_workers, resources_per_worker=queue_resources, queue_name='gpuqueue', setup_script_commands=worker_script_commands, adaptive_interval='1000ms')
storage_backend = LocalFileStorage(storage_directory)
PropertyEstimatorServer(calculation_backend=calculation_backend, storage_backend=storage_backend, port=port, working_directory=working_directory)<|docstring|>Sets up an estimation server which will take advantage of
an LSF based HPC cluster with access to nVidia GPUs.
Parameters
----------
max_number_of_workers: int
The maximum number of workers to adaptively insert into
the queuing system.
conda_environment: str
The name of the conda environment in which the propertyestimator
package is installed.
worker_memory: Quantity
The maximum amount of memory to request per worker.
port: int
The port that the server should listen for estimation requests on.<|endoftext|> |
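Editor's note: a sketch of how setup_server might be launched for a small test run; the argument values are illustrative and assume the unit registry imported by this script:

if __name__ == '__main__':
    # Keep the LSF footprint small while testing; the client must target the same port.
    setup_server(max_number_of_workers=2,
                 conda_environment='propertyestimator',
                 worker_memory=(5 * unit.gigabyte),
                 port=8000)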
d9961f3561e74fe4da7ee9dc12a0a7b77ba48e3d415cc7529d377bfff83e3f20 | def save_results(force_field_key, results):
'Saves the estimated results to disk.\n\n Parameters\n ----------\n force_field_key: str\n The key of the force field which these results were\n estimated for.\n results: PropertyEstimatorResult\n The results of an estimation request.\n '
with open(f'{force_field_key} results.json', 'w') as file:
json.dump(results, file, sort_keys=True, indent=2, separators=(',', ': '), cls=TypedJSONEncoder)
estimated_data_set = PhysicalPropertyDataSet()
unsuccessful_data_set = PhysicalPropertyDataSet()
for substance_id in results.estimated_properties:
estimated_properties = results.estimated_properties[substance_id]
for estimated_property in estimated_properties:
if (substance_id not in estimated_data_set.properties):
estimated_data_set.properties[substance_id] = []
estimated_property.source.provenance = {}
estimated_data_set.properties[substance_id].append(estimated_property)
estimated_data_set.to_pandas().to_csv(f'{force_field_key}.csv')
with open(f'{force_field_key}.json', 'w') as file:
json.dump(estimated_data_set, file, sort_keys=True, indent=2, separators=(',', ': '), cls=TypedJSONEncoder)
for substance_id in results.unsuccessful_properties:
unsuccessful_properties = results.unsuccessful_properties[substance_id][0]
for unsuccessful_property in unsuccessful_properties:
if (substance_id not in unsuccessful_data_set.properties):
unsuccessful_data_set.properties[substance_id] = []
unsuccessful_property.source.provenance = None
unsuccessful_data_set.properties[substance_id].append(unsuccessful_property)
with open(f'{force_field_key} unsuccessful.json', 'w') as file:
json.dump(unsuccessful_data_set, file, sort_keys=True, indent=2, separators=(',', ': '), cls=TypedJSONEncoder)
with open(f'{force_field_key} exceptions.txt', 'w') as file:
for (index, exception) in enumerate(results.exceptions):
file.write(f'''
{exception.directory}
''')
file.write(exception.message.replace('\\n', '\n')) | Saves the estimated results to disk.
Parameters
----------
force_field_key: str
The key of the force field which these results were
estimated for.
results: PropertyEstimatorResult
The results of an estimation request. | 3_physical_property_datasets/physical_properties/estimation/run.py | save_results | hyejang/Parsley1.0_SI | 6 | python | def save_results(force_field_key, results):
'Saves the estimated results to disk.\n\n Parameters\n ----------\n force_field_key: str\n The key of the force field which these results were\n estimated for.\n results: PropertyEstimatorResult\n The results of an estimation request.\n '
with open(f'{force_field_key} results.json', 'w') as file:
json.dump(results, file, sort_keys=True, indent=2, separators=(',', ': '), cls=TypedJSONEncoder)
estimated_data_set = PhysicalPropertyDataSet()
unsuccessful_data_set = PhysicalPropertyDataSet()
for substance_id in results.estimated_properties:
estimated_properties = results.estimated_properties[substance_id]
for estimated_property in estimated_properties:
if (substance_id not in estimated_data_set.properties):
estimated_data_set.properties[substance_id] = []
estimated_property.source.provenance = {}
estimated_data_set.properties[substance_id].append(estimated_property)
estimated_data_set.to_pandas().to_csv(f'{force_field_key}.csv')
with open(f'{force_field_key}.json', 'w') as file:
json.dump(estimated_data_set, file, sort_keys=True, indent=2, separators=(',', ': '), cls=TypedJSONEncoder)
for substance_id in results.unsuccessful_properties:
unsuccessful_properties = results.unsuccessful_properties[substance_id][0]
for unsuccessful_property in unsuccessful_properties:
if (substance_id not in unsuccessful_data_set.properties):
unsuccessful_data_set.properties[substance_id] = []
unsuccessful_property.source.provenance = None
unsuccessful_data_set.properties[substance_id].append(unsuccessful_property)
with open(f'{force_field_key} unsuccessful.json', 'w') as file:
json.dump(unsuccessful_data_set, file, sort_keys=True, indent=2, separators=(',', ': '), cls=TypedJSONEncoder)
with open(f'{force_field_key} exceptions.txt', 'w') as file:
for (index, exception) in enumerate(results.exceptions):
file.write(f'''
{exception.directory}
''')
file.write(exception.message.replace('\\n', '\n')) | def save_results(force_field_key, results):
'Saves the estimated results to disk.\n\n Parameters\n ----------\n force_field_key: str\n The key of the force field which these results were\n estimated for.\n results: PropertyEstimatorResult\n The results of an estimation request.\n '
with open(f'{force_field_key} results.json', 'w') as file:
json.dump(results, file, sort_keys=True, indent=2, separators=(',', ': '), cls=TypedJSONEncoder)
estimated_data_set = PhysicalPropertyDataSet()
unsuccessful_data_set = PhysicalPropertyDataSet()
for substance_id in results.estimated_properties:
estimated_properties = results.estimated_properties[substance_id]
for estimated_property in estimated_properties:
if (substance_id not in estimated_data_set.properties):
estimated_data_set.properties[substance_id] = []
estimated_property.source.provenance = {}
estimated_data_set.properties[substance_id].append(estimated_property)
estimated_data_set.to_pandas().to_csv(f'{force_field_key}.csv')
with open(f'{force_field_key}.json', 'w') as file:
json.dump(estimated_data_set, file, sort_keys=True, indent=2, separators=(',', ': '), cls=TypedJSONEncoder)
for substance_id in results.unsuccessful_properties:
unsuccessful_properties = results.unsuccessful_properties[substance_id][0]
for unsuccessful_property in unsuccessful_properties:
if (substance_id not in unsuccessful_data_set.properties):
unsuccessful_data_set.properties[substance_id] = []
unsuccessful_property.source.provenance = None
unsuccessful_data_set.properties[substance_id].append(unsuccessful_property)
with open(f'{force_field_key} unsuccessful.json', 'w') as file:
json.dump(unsuccessful_data_set, file, sort_keys=True, indent=2, separators=(',', ': '), cls=TypedJSONEncoder)
with open(f'{force_field_key} exceptions.txt', 'w') as file:
for (index, exception) in enumerate(results.exceptions):
file.write(f'''
{exception.directory}
''')
file.write(exception.message.replace('\\n', '\n'))<|docstring|>Saves the estimated results to disk.
Parameters
----------
force_field_key: str
The key of the force field which these results were
estimated for.
results: PropertyEstimatorResult
The results of an estimation request.<|endoftext|> |
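Editor's note: the JSON files written above can be read back with the decoder paired with TypedJSONEncoder. A sketch, assuming propertyestimator ships a TypedJSONDecoder in the same serialization module as the encoder used here:

import json

from propertyestimator.utils.serialization import TypedJSONDecoder  # assumed counterpart of TypedJSONEncoder

def load_estimated_data_set(force_field_key):
    # Reloads the PhysicalPropertyDataSet written by save_results.
    with open(f'{force_field_key}.json') as file:
        return json.load(file, cls=TypedJSONDecoder)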
2281fd99eda8f2ff9d4787ecdbca3584e6fa0594d3e5ef0e3d65a7b8d4e70330 | def main():
'The main script which will create an estimation server, request\n the curated data set be estimated for each force field of interest,\n wait for the calculations to be complete, and save the results.\n '
setup_timestamp_logging()
logger = logging.getLogger()
force_field_sources = {'smirnoff99frosst 1.1.0': SmirnoffForceFieldSource.from_path('smirnoff99Frosst-1.1.0.offxml'), 'parsley 0.0.9': SmirnoffForceFieldSource.from_path('smirnoff_release_1_v0_0_9.offxml'), 'parsley rc 1': SmirnoffForceFieldSource.from_path('openff_hbonds-1.0.0-RC1.offxml'), 'gaff 1.81': TLeapForceFieldSource(leap_source='leaprc.gaff'), 'gaff 2.11': TLeapForceFieldSource(leap_source='leaprc.gaff2')}
setup_server(max_number_of_workers=50)
estimator_client = PropertyEstimatorClient()
with open('curated_data_set.json') as file:
data_set = PhysicalPropertyDataSet.parse_json(file.read())
protocol_replacements = {'gaff 1.81': {'BuildSmirnoffSystem': 'BuildTLeapSystem'}, 'gaff 2.11': {'BuildSmirnoffSystem': 'BuildTLeapSystem'}}
requests = {}
for force_field_key in force_field_sources:
force_field_source = force_field_sources[force_field_key]
options = get_estimation_options(protocol_replacements.get(force_field_key, {}))
requests[force_field_key] = estimator_client.request_estimate(property_set=data_set, force_field_source=force_field_source, options=options)
should_run = True
finished_force_fields = []
while should_run:
sleep(60)
for force_field_key in force_field_sources:
if (force_field_key in finished_force_fields):
continue
results = requests[force_field_key].results(False)
if (isinstance(results, PropertyEstimatorResult) and (len(results.queued_properties) > 0)):
continue
logger.info(f'The server has completed {force_field_key}.')
save_results(force_field_key, results)
finished_force_fields.append(force_field_key)
if (len(finished_force_fields) == len(force_field_sources)):
should_run = False | The main script which will create an estimation server, request
the curated data set be estimated for each force field of interest,
wait for the calculations to be complete, and save the results. | 3_physical_property_datasets/physical_properties/estimation/run.py | main | hyejang/Parsley1.0_SI | 6 | python | def main():
'The main script which will create an estimation server, request\n the curated data set be estimated for each force field of interest,\n wait for the calculations to be complete, and save the results.\n '
setup_timestamp_logging()
logger = logging.getLogger()
force_field_sources = {'smirnoff99frosst 1.1.0': SmirnoffForceFieldSource.from_path('smirnoff99Frosst-1.1.0.offxml'), 'parsley 0.0.9': SmirnoffForceFieldSource.from_path('smirnoff_release_1_v0_0_9.offxml'), 'parsley rc 1': SmirnoffForceFieldSource.from_path('openff_hbonds-1.0.0-RC1.offxml'), 'gaff 1.81': TLeapForceFieldSource(leap_source='leaprc.gaff'), 'gaff 2.11': TLeapForceFieldSource(leap_source='leaprc.gaff2')}
setup_server(max_number_of_workers=50)
estimator_client = PropertyEstimatorClient()
with open('curated_data_set.json') as file:
data_set = PhysicalPropertyDataSet.parse_json(file.read())
protocol_replacements = {'gaff 1.81': {'BuildSmirnoffSystem': 'BuildTLeapSystem'}, 'gaff 2.11': {'BuildSmirnoffSystem': 'BuildTLeapSystem'}}
requests = {}
for force_field_key in force_field_sources:
force_field_source = force_field_sources[force_field_key]
options = get_estimation_options(protocol_replacements.get(force_field_key, {}))
requests[force_field_key] = estimator_client.request_estimate(property_set=data_set, force_field_source=force_field_source, options=options)
should_run = True
finished_force_fields = []
while should_run:
sleep(60)
for force_field_key in force_field_sources:
if (force_field_key in finished_force_fields):
continue
results = requests[force_field_key].results(False)
if (isinstance(results, PropertyEstimatorResult) and (len(results.queued_properties) > 0)):
continue
logger.info(f'The server has completed {force_field_key}.')
save_results(force_field_key, results)
finished_force_fields.append(force_field_key)
if (len(finished_force_fields) == len(force_field_sources)):
should_run = False | def main():
'The main script which will create an estimation server, request\n the curated data set be estimated for each force field of interest,\n wait for the calculations to be complete, and save the results.\n '
setup_timestamp_logging()
logger = logging.getLogger()
force_field_sources = {'smirnoff99frosst 1.1.0': SmirnoffForceFieldSource.from_path('smirnoff99Frosst-1.1.0.offxml'), 'parsley 0.0.9': SmirnoffForceFieldSource.from_path('smirnoff_release_1_v0_0_9.offxml'), 'parsley rc 1': SmirnoffForceFieldSource.from_path('openff_hbonds-1.0.0-RC1.offxml'), 'gaff 1.81': TLeapForceFieldSource(leap_source='leaprc.gaff'), 'gaff 2.11': TLeapForceFieldSource(leap_source='leaprc.gaff2')}
setup_server(max_number_of_workers=50)
estimator_client = PropertyEstimatorClient()
with open('curated_data_set.json') as file:
data_set = PhysicalPropertyDataSet.parse_json(file.read())
protocol_replacements = {'gaff 1.81': {'BuildSmirnoffSystem': 'BuildTLeapSystem'}, 'gaff 2.11': {'BuildSmirnoffSystem': 'BuildTLeapSystem'}}
requests = {}
for force_field_key in force_field_sources:
force_field_source = force_field_sources[force_field_key]
options = get_estimation_options(protocol_replacements.get(force_field_key, {}))
requests[force_field_key] = estimator_client.request_estimate(property_set=data_set, force_field_source=force_field_source, options=options)
should_run = True
finished_force_fields = []
while should_run:
sleep(60)
for force_field_key in force_field_sources:
if (force_field_key in finished_force_fields):
continue
results = requests[force_field_key].results(False)
if (isinstance(results, PropertyEstimatorResult) and (len(results.queued_properties) > 0)):
continue
logger.info(f'The server has completed {force_field_key}.')
save_results(force_field_key, results)
finished_force_fields.append(force_field_key)
if (len(finished_force_fields) == len(force_field_sources)):
should_run = False<|docstring|>The main script which will create an estimation server, request
the curated data set be estimated for each force field of interest,
wait for the calculations to be complete, and save the results.<|endoftext|> |
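Editor's note: the polling loop in main can be factored into a helper that blocks on a single request. A sketch under the same assumptions as the code above (request.results accepts a synchronous flag, results expose queued_properties):

from time import sleep

from propertyestimator.client import PropertyEstimatorResult  # assumed import path

def wait_for_results(request, poll_seconds=60):
    # Polls one estimation request until no properties remain queued.
    while True:
        results = request.results(False)
        if isinstance(results, PropertyEstimatorResult) and (len(results.queued_properties) == 0):
            return results
        sleep(poll_seconds)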
a436048961041ec88d15c10040d838a49f27211e616e1d042370cf3ba027a55d | def get_mass_matrix_ver1():
'\n VERSION 1\n\n NOTE: checking out alternative implementation\n according to https://mediatum.ub.tum.de/doc/1072355/file.pdf\n the description and implementation seem correct\n NOTE: find out where the formulation for the mass comes from, stiffness seems standard\n '
M11 = np.zeros((6, 6))
M11[0][0] = (1 / 3)
M11[1][1] = ((13 / 35) + ((6 * Iz) / ((5 * A) * (l ** 2))))
M11[2][2] = ((13 / 35) + ((6 * Iy) / ((5 * A) * (l ** 2))))
M11[3][3] = (Ip / (3 * A))
M11[4][4] = (((l ** 2) / 105) + ((2 * Iy) / (15 * A)))
M11[5][5] = (((l ** 2) / 105) + ((2 * Iz) / (15 * A)))
M11[5][1] = (((11 * l) / 210) + (Iz / ((10 * A) * l)))
M11[1][5] = M11[5][1]
M11[4][2] = ((((- 11) * l) / 210) - (Iy / ((10 * A) * l)))
M11[2][4] = M11[4][2]
M22 = ((- M11) + (2 * np.diag(np.diag(M11))))
M21 = np.zeros((6, 6))
M21[0][0] = (1 / 6)
M21[1][1] = ((9 / 70) - ((6 * Iz) / ((5 * A) * (l ** 2))))
M21[2][2] = ((9 / 70) - ((6 * Iy) / ((5 * A) * (l ** 2))))
M21[3][3] = (Ip / (6 * A))
M21[4][4] = (((- (l ** 2)) / 140) - (Iy / (30 * A)))
M21[5][5] = (((- (l ** 2)) / 140) - (Iz / (30 * A)))
M21[5][1] = ((((- 13) * l) / 420) + (Iz / ((10 * A) * l)))
M21[1][5] = (- M21[5][1])
M21[4][2] = (((13 * l) / 420) - (Iy / ((10 * A) * l)))
M21[2][4] = (- M21[4][2])
length = l
m_const = ((rho * A) * length)
m_el = np.zeros(((2 * 6), (2 * 6)))
m_el[0:6, 0:6] += (m_const * M11)
m_el[6:12, 0:6] += (m_const * M21)
m_el[0:6, 6:12] += (m_const * np.transpose(M21))
m_el[6:12, 6:12] += (m_const * M22)
return m_el | VERSION 1
NOTE: checking out alternative implementation
according to https://mediatum.ub.tum.de/doc/1072355/file.pdf
the description and implementation seem correct
NOTE: find out where the formulation for the mass comes from, stiffness seems standard | test_scripts/test_and_compare_element_matrices.py | get_mass_matrix_ver1 | JoZimmer/ParOptBeam | 1 | python | def get_mass_matrix_ver1():
'\n VERSION 1\n\n NOTE: checking out alternative implementation\n according to https://mediatum.ub.tum.de/doc/1072355/file.pdf\n the description and implementation seem correct\n NOTE: find out where the formulation for the mass comes from, stiffness seems standard\n '
M11 = np.zeros((6, 6))
M11[0][0] = (1 / 3)
M11[1][1] = ((13 / 35) + ((6 * Iz) / ((5 * A) * (l ** 2))))
M11[2][2] = ((13 / 35) + ((6 * Iy) / ((5 * A) * (l ** 2))))
M11[3][3] = (Ip / (3 * A))
M11[4][4] = (((l ** 2) / 105) + ((2 * Iy) / (15 * A)))
M11[5][5] = (((l ** 2) / 105) + ((2 * Iz) / (15 * A)))
M11[5][1] = (((11 * l) / 210) + (Iz / ((10 * A) * l)))
M11[1][5] = M11[5][1]
M11[4][2] = ((((- 11) * l) / 210) - (Iy / ((10 * A) * l)))
M11[2][4] = M11[4][2]
M22 = ((- M11) + (2 * np.diag(np.diag(M11))))
M21 = np.zeros((6, 6))
M21[0][0] = (1 / 6)
M21[1][1] = ((9 / 70) - ((6 * Iz) / ((5 * A) * (l ** 2))))
M21[2][2] = ((9 / 70) - ((6 * Iy) / ((5 * A) * (l ** 2))))
M21[3][3] = (Ip / (6 * A))
M21[4][4] = (((- (l ** 2)) / 140) - (Iy / (30 * A)))
M21[5][5] = (((- (l ** 2)) / 140) - (Iz / (30 * A)))
M21[5][1] = ((((- 13) * l) / 420) + (Iz / ((10 * A) * l)))
M21[1][5] = (- M21[5][1])
M21[4][2] = (((13 * l) / 420) - (Iy / ((10 * A) * l)))
M21[2][4] = (- M21[4][2])
length = l
m_const = ((rho * A) * length)
m_el = np.zeros(((2 * 6), (2 * 6)))
m_el[(0:6, 0:6)] += (m_const * M11)
m_el[(6:12, 0:6)] += (m_const * M21)
m_el[(0:6, 6:12)] += (m_const * np.transpose(M21))
m_el[(6:12, 6:12)] += (m_const * M22)
return m_el | def get_mass_matrix_ver1():
'\n VERSION 1\n\n NOTE: checking out alternative implementation\n according to https://mediatum.ub.tum.de/doc/1072355/file.pdf\n the description and implementation seem correct\n NOTE: find out where the formulation for the mass comes from, stiffness seems standard\n '
M11 = np.zeros((6, 6))
M11[0][0] = (1 / 3)
M11[1][1] = ((13 / 35) + ((6 * Iz) / ((5 * A) * (l ** 2))))
M11[2][2] = ((13 / 35) + ((6 * Iy) / ((5 * A) * (l ** 2))))
M11[3][3] = (Ip / (3 * A))
M11[4][4] = (((l ** 2) / 105) + ((2 * Iy) / (15 * A)))
M11[5][5] = (((l ** 2) / 105) + ((2 * Iz) / (15 * A)))
M11[5][1] = (((11 * l) / 210) + (Iz / ((10 * A) * l)))
M11[1][5] = M11[5][1]
M11[4][2] = ((((- 11) * l) / 210) - (Iy / ((10 * A) * l)))
M11[2][4] = M11[4][2]
M22 = ((- M11) + (2 * np.diag(np.diag(M11))))
M21 = np.zeros((6, 6))
M21[0][0] = (1 / 6)
M21[1][1] = ((9 / 70) - ((6 * Iz) / ((5 * A) * (l ** 2))))
M21[2][2] = ((9 / 70) - ((6 * Iy) / ((5 * A) * (l ** 2))))
M21[3][3] = (Ip / (6 * A))
M21[4][4] = (((- (l ** 2)) / 140) - (Iy / (30 * A)))
M21[5][5] = (((- (l ** 2)) / 140) - (Iz / (30 * A)))
M21[5][1] = ((((- 13) * l) / 420) + (Iz / ((10 * A) * l)))
M21[1][5] = (- M21[5][1])
M21[4][2] = (((13 * l) / 420) - (Iy / ((10 * A) * l)))
M21[2][4] = (- M21[4][2])
length = l
m_const = ((rho * A) * length)
m_el = np.zeros(((2 * 6), (2 * 6)))
m_el[(0:6, 0:6)] += (m_const * M11)
m_el[(6:12, 0:6)] += (m_const * M21)
m_el[(0:6, 6:12)] += (m_const * np.transpose(M21))
m_el[(6:12, 6:12)] += (m_const * M22)
return m_el<|docstring|>VERSION 1
NOTE: checking out alternative implementation
according to https://mediatum.ub.tum.de/doc/1072355/file.pdf
the description and implementation seem correct
NOTE: find out where the formulation for the mass comes from, stiffness seems standard<|endoftext|> |
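Editor's note: as a cross-check on the entries above, the axial (translational) block built from M11[0][0] = 1/3 and M21[0][0] = 1/6, scaled by rho*A*l, reduces to the classic consistent mass matrix of a two-node bar element:

% Axial block of the consistent mass matrix of a two-node bar element.
M_{\mathrm{axial}} = \rho A l \begin{bmatrix} 1/3 & 1/6 \\ 1/6 & 1/3 \end{bmatrix}
                   = \frac{\rho A l}{6} \begin{bmatrix} 2 & 1 \\ 1 & 2 \end{bmatrix}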
93fa6e946ff3180682381e700775a1bc6253e769ee303c128ea511e780897127 | def get_mass_matrix_ver2():
'\n VERSION 2\n\n NOTE: from http://homes.civil.aau.dk/jc/FemteSemester/Beams3D.pdf\n there seems to be a typo in Eqs. 1-105 and 1-106: the division uses the wrong power of l\n implemented mass matrices similar to the stiffness one\n '
m_x = (m_const / 6.0)
m_x_11 = 2.0
m_x_12 = 1.0
m_el_x = (m_x * np.array([[m_x_11, m_x_12], [m_x_12, m_x_11]]))
m_a = (((m_const * Ip) / A) / 6.0)
m_a_11 = 2
m_a_12 = 1
m_el_a = (m_a * np.array([[m_a_11, m_a_12], [m_a_12, m_a_11]]))
m_yg = (m_const / 420)
m_yg_11 = 156.0
m_yg_12 = (22.0 * length)
m_yg_13 = 54.0
m_yg_14 = ((- 13.0) * length)
m_yg_22 = (4 * (length ** 2))
m_yg_23 = (- m_yg_14)
m_yg_24 = ((- 3) * (length ** 2))
m_yg_33 = m_yg_11
m_yg_34 = (- m_yg_12)
m_yg_44 = m_yg_22
m_el_yg = (m_yg * np.array([[m_yg_11, m_yg_12, m_yg_13, m_yg_14], [m_yg_12, m_yg_22, m_yg_23, m_yg_24], [m_yg_13, m_yg_23, m_yg_33, m_yg_34], [m_yg_14, m_yg_24, m_yg_34, m_yg_44]]))
m_zb = (m_const / 420)
m_zb_11 = 156.0
m_zb_12 = ((- 22.0) * length)
m_zb_13 = 54.0
m_zb_14 = (13.0 * length)
m_zb_22 = (4.0 * (length ** 2))
m_zb_23 = (- m_zb_14)
m_zb_24 = ((- 3) * (length ** 2))
m_zb_33 = m_zb_11
m_zb_34 = (- m_zb_12)
m_zb_44 = m_zb_22
m_el_zb = (m_zb * np.array([[m_zb_11, m_zb_12, m_zb_13, m_zb_14], [m_zb_12, m_zb_22, m_zb_23, m_zb_24], [m_zb_13, m_zb_23, m_zb_33, m_zb_34], [m_zb_14, m_zb_24, m_zb_34, m_zb_44]]))
m_el = np.array([[m_el_x[0][0], 0.0, 0.0, 0.0, 0.0, 0.0, m_el_x[0][1], 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, m_el_yg[0][0], 0.0, 0.0, 0.0, m_el_yg[0][1], 0.0, m_el_yg[0][2], 0.0, 0.0, 0.0, m_el_yg[0][3]], [0.0, 0.0, m_el_zb[0][0], 0.0, m_el_zb[0][1], 0.0, 0.0, 0.0, m_el_zb[0][2], 0.0, m_el_zb[0][3], 0.0], [0.0, 0.0, 0.0, m_el_a[0][0], 0.0, 0.0, 0.0, 0.0, 0.0, m_el_a[0][1], 0.0, 0.0], [0.0, 0.0, m_el_zb[0][1], 0.0, m_el_zb[1][1], 0.0, 0.0, 0.0, m_el_zb[1][2], 0.0, m_el_zb[1][3], 0.0], [0.0, m_el_yg[0][1], 0.0, 0.0, 0.0, m_el_yg[1][1], 0.0, m_el_yg[1][2], 0.0, 0.0, 0.0, m_el_yg[1][3]], [m_el_x[1][0], 0.0, 0.0, 0.0, 0.0, 0.0, m_el_x[1][1], 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, m_el_yg[0][2], 0.0, 0.0, 0.0, m_el_yg[1][2], 0.0, m_el_yg[2][2], 0.0, 0.0, 0.0, m_el_yg[2][3]], [0.0, 0.0, m_el_zb[0][2], 0.0, m_el_zb[1][2], 0.0, 0.0, 0.0, m_el_zb[2][2], 0.0, m_el_zb[2][3], 0.0], [0.0, 0.0, 0.0, m_el_a[1][0], 0.0, 0.0, 0.0, 0.0, 0.0, m_el_a[1][1], 0.0, 0.0], [0.0, 0.0, m_el_zb[0][3], 0.0, m_el_zb[1][3], 0.0, 0.0, 0.0, m_el_zb[2][3], 0.0, m_el_zb[3][3], 0.0], [0.0, m_el_yg[0][3], 0.0, 0.0, 0.0, m_el_yg[1][3], 0.0, m_el_yg[2][3], 0.0, 0.0, 0.0, m_el_yg[3][3]]])
return m_el | VERSION 2
NOTE: from http://homes.civil.aau.dk/jc/FemteSemester/Beams3D.pdf
there seems to be a typo in Eqs. 1-105 and 1-106: the division uses the wrong power of l
implemented mass matrices similar to the stiffness one | test_scripts/test_and_compare_element_matrices.py | get_mass_matrix_ver2 | JoZimmer/ParOptBeam | 1 | python | def get_mass_matrix_ver2():
'\n VERSION 2\n\n NOTE: from http://homes.civil.aau.dk/jc/FemteSemester/Beams3D.pdf\n there seems to be a typo in Eqs. 1-105 and 1-106: the division uses the wrong power of l\n implemented mass matrices similar to the stiffness one\n '
m_x = (m_const / 6.0)
m_x_11 = 2.0
m_x_12 = 1.0
m_el_x = (m_x * np.array([[m_x_11, m_x_12], [m_x_12, m_x_11]]))
m_a = (((m_const * Ip) / A) / 6.0)
m_a_11 = 2
m_a_12 = 1
m_el_a = (m_a * np.array([[m_a_11, m_a_12], [m_a_12, m_a_11]]))
m_yg = (m_const / 420)
m_yg_11 = 156.0
m_yg_12 = (22.0 * length)
m_yg_13 = 54.0
m_yg_14 = ((- 13.0) * length)
m_yg_22 = (4 * (length ** 2))
m_yg_23 = (- m_yg_14)
m_yg_24 = ((- 3) * (length ** 2))
m_yg_33 = m_yg_11
m_yg_34 = (- m_yg_12)
m_yg_44 = m_yg_22
m_el_yg = (m_yg * np.array([[m_yg_11, m_yg_12, m_yg_13, m_yg_14], [m_yg_12, m_yg_22, m_yg_23, m_yg_24], [m_yg_13, m_yg_23, m_yg_33, m_yg_34], [m_yg_14, m_yg_24, m_yg_34, m_yg_44]]))
m_zb = (m_const / 420)
m_zb_11 = 156.0
m_zb_12 = ((- 22.0) * length)
m_zb_13 = 54.0
m_zb_14 = (13.0 * length)
m_zb_22 = (4.0 * (length ** 2))
m_zb_23 = (- m_zb_14)
m_zb_24 = ((- 3) * (length ** 2))
m_zb_33 = m_zb_11
m_zb_34 = (- m_zb_12)
m_zb_44 = m_zb_22
m_el_zb = (m_zb * np.array([[m_zb_11, m_zb_12, m_zb_13, m_zb_14], [m_zb_12, m_zb_22, m_zb_23, m_zb_24], [m_zb_13, m_zb_23, m_zb_33, m_zb_34], [m_zb_14, m_zb_24, m_zb_34, m_zb_44]]))
m_el = np.array([[m_el_x[0][0], 0.0, 0.0, 0.0, 0.0, 0.0, m_el_x[0][1], 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, m_el_yg[0][0], 0.0, 0.0, 0.0, m_el_yg[0][1], 0.0, m_el_yg[0][2], 0.0, 0.0, 0.0, m_el_yg[0][3]], [0.0, 0.0, m_el_zb[0][0], 0.0, m_el_zb[0][1], 0.0, 0.0, 0.0, m_el_zb[0][2], 0.0, m_el_zb[0][3], 0.0], [0.0, 0.0, 0.0, m_el_a[0][0], 0.0, 0.0, 0.0, 0.0, 0.0, m_el_a[0][1], 0.0, 0.0], [0.0, 0.0, m_el_zb[0][1], 0.0, m_el_zb[1][1], 0.0, 0.0, 0.0, m_el_zb[1][2], 0.0, m_el_zb[1][3], 0.0], [0.0, m_el_yg[0][1], 0.0, 0.0, 0.0, m_el_yg[1][1], 0.0, m_el_yg[1][2], 0.0, 0.0, 0.0, m_el_yg[1][3]], [m_el_x[1][0], 0.0, 0.0, 0.0, 0.0, 0.0, m_el_x[1][1], 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, m_el_yg[0][2], 0.0, 0.0, 0.0, m_el_yg[1][2], 0.0, m_el_yg[2][2], 0.0, 0.0, 0.0, m_el_yg[2][3]], [0.0, 0.0, m_el_zb[0][2], 0.0, m_el_zb[1][2], 0.0, 0.0, 0.0, m_el_zb[2][2], 0.0, m_el_zb[2][3], 0.0], [0.0, 0.0, 0.0, m_el_a[1][0], 0.0, 0.0, 0.0, 0.0, 0.0, m_el_a[1][1], 0.0, 0.0], [0.0, 0.0, m_el_zb[0][3], 0.0, m_el_zb[1][3], 0.0, 0.0, 0.0, m_el_zb[2][3], 0.0, m_el_zb[3][3], 0.0], [0.0, m_el_yg[0][3], 0.0, 0.0, 0.0, m_el_yg[1][3], 0.0, m_el_yg[2][3], 0.0, 0.0, 0.0, m_el_yg[3][3]]])
return m_el | def get_mass_matrix_ver2():
'\n VERSION 2\n\n NOTE: from http://homes.civil.aau.dk/jc/FemteSemester/Beams3D.pdf\n there seems to be a typo in Eqs. 1-105 and 1-106: the division uses the wrong power of l\n implemented mass matrices similar to the stiffness one\n '
m_x = (m_const / 6.0)
m_x_11 = 2.0
m_x_12 = 1.0
m_el_x = (m_x * np.array([[m_x_11, m_x_12], [m_x_12, m_x_11]]))
m_a = (((m_const * Ip) / A) / 6.0)
m_a_11 = 2
m_a_12 = 1
m_el_a = (m_a * np.array([[m_a_11, m_a_12], [m_a_12, m_a_11]]))
m_yg = (m_const / 420)
m_yg_11 = 156.0
m_yg_12 = (22.0 * length)
m_yg_13 = 54.0
m_yg_14 = ((- 13.0) * length)
m_yg_22 = (4 * (length ** 2))
m_yg_23 = (- m_yg_14)
m_yg_24 = ((- 3) * (length ** 2))
m_yg_33 = m_yg_11
m_yg_34 = (- m_yg_12)
m_yg_44 = m_yg_22
m_el_yg = (m_yg * np.array([[m_yg_11, m_yg_12, m_yg_13, m_yg_14], [m_yg_12, m_yg_22, m_yg_23, m_yg_24], [m_yg_13, m_yg_23, m_yg_33, m_yg_34], [m_yg_14, m_yg_24, m_yg_34, m_yg_44]]))
m_zb = (m_const / 420)
m_zb_11 = 156.0
m_zb_12 = ((- 22.0) * length)
m_zb_13 = 54.0
m_zb_14 = (13.0 * length)
m_zb_22 = (4.0 * (length ** 2))
m_zb_23 = (- m_zb_14)
m_zb_24 = ((- 3) * (length ** 2))
m_zb_33 = m_zb_11
m_zb_34 = (- m_zb_12)
m_zb_44 = m_zb_22
m_el_zb = (m_zb * np.array([[m_zb_11, m_zb_12, m_zb_13, m_zb_14], [m_zb_12, m_zb_22, m_zb_23, m_zb_24], [m_zb_13, m_zb_23, m_zb_33, m_zb_34], [m_zb_14, m_zb_24, m_zb_34, m_zb_44]]))
m_el = np.array([[m_el_x[0][0], 0.0, 0.0, 0.0, 0.0, 0.0, m_el_x[0][1], 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, m_el_yg[0][0], 0.0, 0.0, 0.0, m_el_yg[0][1], 0.0, m_el_yg[0][2], 0.0, 0.0, 0.0, m_el_yg[0][3]], [0.0, 0.0, m_el_zb[0][0], 0.0, m_el_zb[0][1], 0.0, 0.0, 0.0, m_el_zb[0][2], 0.0, m_el_zb[0][3], 0.0], [0.0, 0.0, 0.0, m_el_a[0][0], 0.0, 0.0, 0.0, 0.0, 0.0, m_el_a[0][1], 0.0, 0.0], [0.0, 0.0, m_el_zb[0][1], 0.0, m_el_zb[1][1], 0.0, 0.0, 0.0, m_el_zb[1][2], 0.0, m_el_zb[1][3], 0.0], [0.0, m_el_yg[0][1], 0.0, 0.0, 0.0, m_el_yg[1][1], 0.0, m_el_yg[1][2], 0.0, 0.0, 0.0, m_el_yg[1][3]], [m_el_x[1][0], 0.0, 0.0, 0.0, 0.0, 0.0, m_el_x[1][1], 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, m_el_yg[0][2], 0.0, 0.0, 0.0, m_el_yg[1][2], 0.0, m_el_yg[2][2], 0.0, 0.0, 0.0, m_el_yg[2][3]], [0.0, 0.0, m_el_zb[0][2], 0.0, m_el_zb[1][2], 0.0, 0.0, 0.0, m_el_zb[2][2], 0.0, m_el_zb[2][3], 0.0], [0.0, 0.0, 0.0, m_el_a[1][0], 0.0, 0.0, 0.0, 0.0, 0.0, m_el_a[1][1], 0.0, 0.0], [0.0, 0.0, m_el_zb[0][3], 0.0, m_el_zb[1][3], 0.0, 0.0, 0.0, m_el_zb[2][3], 0.0, m_el_zb[3][3], 0.0], [0.0, m_el_yg[0][3], 0.0, 0.0, 0.0, m_el_yg[1][3], 0.0, m_el_yg[2][3], 0.0, 0.0, 0.0, m_el_yg[3][3]]])
return m_el<|docstring|>VERSION 2
NOTE: from http://homes.civil.aau.dk/jc/FemteSemester/Beams3D.pdf
there seems to be a typo in Eqs. 1-105 and 1-106: the division uses the wrong power of l
implemented mass matrices similar to the stiffness one<|endoftext|> |
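Editor's note: a sketch of a quick numerical comparison between the two variants above, assuming the module-level section properties (rho, A, l, length, m_const, Iy, Iz, Ip) from test_scripts/test_and_compare_element_matrices.py are defined; ver1 includes rotary-inertia corrections, so a nonzero difference is expected:

import numpy as np

m_ver1 = get_mass_matrix_ver1()
m_ver2 = get_mass_matrix_ver2()
# Consistent mass matrices must be symmetric.
assert np.allclose(m_ver1, m_ver1.T) and np.allclose(m_ver2, m_ver2.T)
print('max abs difference ver1 vs ver2:', np.abs(m_ver1 - m_ver2).max())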
c9cd73ac4354185905918ab0dc71eac5c13d67ab4a15bdaa4c38422300d2b66e | def get_mass_matrix_ver3():
'\n VERSION 3\n\n NOTE: from Appendix A - Straight Beam Element Matrices - page 228\n https://link.springer.com/content/pdf/bbm%3A978-3-319-56493-7%2F1.pdf\n\n '
m_x = (m_const / 6.0)
m_x_11 = 2.0
m_x_12 = 1.0
m_el_x = (m_x * np.array([[m_x_11, m_x_12], [m_x_12, m_x_11]]))
m_a = (((m_const * Ip) / A) / 6.0)
m_a_11 = 2
m_a_12 = 1
m_el_a = (m_a * np.array([[m_a_11, m_a_12], [m_a_12, m_a_11]]))
m_yg = ((m_const / 210) / ((1 + Py) ** 2))
m_yg_11 = (((70 * (Py ** 2)) + (147 * Py)) + 78)
m_yg_12 = (((((35 * (Py ** 2)) + (77 * Py)) + 44) * length) / 4)
m_yg_13 = (((35 * (Py ** 2)) + (63 * Py)) + 27)
m_yg_14 = (((- (((35 * (Py ** 2)) + (63 * Py)) + 26)) * length) / 4)
m_yg_22 = (((((7 * (Py ** 2)) + (14 * Py)) + 8) * (length ** 2)) / 4)
m_yg_23 = (- m_yg_14)
m_yg_24 = (((- (((7 * (Py ** 2)) + (14 * Py)) + 6)) * (length ** 2)) / 4)
m_yg_33 = m_yg_11
m_yg_34 = (- m_yg_12)
m_yg_44 = m_yg_22
m_el_yg_trans = (m_yg * np.array([[m_yg_11, m_yg_12, m_yg_13, m_yg_14], [m_yg_12, m_yg_22, m_yg_23, m_yg_24], [m_yg_13, m_yg_23, m_yg_33, m_yg_34], [m_yg_14, m_yg_24, m_yg_34, m_yg_44]]))
m_yg = ((((rho * Iz) / 30) / ((1 + Py) ** 2)) / length)
m_yg_11 = 36
m_yg_12 = ((- ((15 * Py) - 3)) * length)
m_yg_13 = (- m_yg_11)
m_yg_14 = m_yg_12
m_yg_22 = ((((10 * (Py ** 2)) + (5 * Py)) + 4) * (length ** 2))
m_yg_23 = (- m_yg_12)
m_yg_24 = ((((5 * (Py ** 2)) - (5 * Py)) - 1) * (length ** 2))
m_yg_33 = m_yg_11
m_yg_34 = (- m_yg_12)
m_yg_44 = m_yg_22
m_el_yg_rot = (m_yg * np.array([[m_yg_11, m_yg_12, m_yg_13, m_yg_14], [m_yg_12, m_yg_22, m_yg_23, m_yg_24], [m_yg_13, m_yg_23, m_yg_33, m_yg_34], [m_yg_14, m_yg_24, m_yg_34, m_yg_44]]))
m_el_yg = (m_el_yg_trans + m_el_yg_rot)
m_zb = ((m_const / 210) / ((1 + Pz) ** 2))
m_zb_11 = (((70 * (Pz ** 2)) + (147 * Pz)) + 78)
m_zb_12 = (((- (((35 * (Pz ** 2)) + (77 * Pz)) + 44)) * length) / 4)
m_zb_13 = (((35 * (Pz ** 2)) + (63 * Pz)) + 27)
m_zb_14 = (((((35 * (Pz ** 2)) + (63 * Pz)) + 26) * length) / 4)
m_zb_22 = (((((7 * (Pz ** 2)) + (14 * Pz)) + 8) * (length ** 2)) / 4)
m_zb_23 = (- m_zb_14)
m_zb_24 = (((- (((7 * (Pz ** 2)) + (14 * Pz)) + 6)) * (length ** 2)) / 4)
m_zb_33 = m_zb_11
m_zb_34 = (- m_zb_12)
m_zb_44 = m_zb_22
m_el_zb_trans = (m_zb * np.array([[m_zb_11, m_zb_12, m_zb_13, m_zb_14], [m_zb_12, m_zb_22, m_zb_23, m_zb_24], [m_zb_13, m_zb_23, m_zb_33, m_zb_34], [m_zb_14, m_zb_24, m_zb_34, m_zb_44]]))
m_zb = ((((rho * Iy) / 30) / ((1 + Pz) ** 2)) / length)
m_zb_11 = 36
m_zb_12 = (((15 * Pz) - 3) * length)
m_zb_13 = (- m_zb_11)
m_zb_14 = m_zb_12
m_zb_22 = ((((10 * (Pz ** 2)) + (5 * Pz)) + 4) * (length ** 2))
m_zb_23 = (- m_zb_12)
m_zb_24 = ((((5 * (Pz ** 2)) - (5 * Pz)) - 1) * (length ** 2))
m_zb_33 = m_zb_11
m_zb_34 = (- m_zb_12)
m_zb_44 = m_zb_22
m_el_zb_rot = (m_zb * np.array([[m_zb_11, m_zb_12, m_zb_13, m_zb_14], [m_zb_12, m_zb_22, m_zb_23, m_zb_24], [m_zb_13, m_zb_23, m_zb_33, m_zb_34], [m_zb_14, m_zb_24, m_zb_34, m_zb_44]]))
m_el_zb = (m_el_zb_trans + m_el_zb_rot)
m_el = np.array([[m_el_x[0][0], 0.0, 0.0, 0.0, 0.0, 0.0, m_el_x[0][1], 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, m_el_yg[0][0], 0.0, 0.0, 0.0, m_el_yg[0][1], 0.0, m_el_yg[0][2], 0.0, 0.0, 0.0, m_el_yg[0][3]], [0.0, 0.0, m_el_zb[0][0], 0.0, m_el_zb[0][1], 0.0, 0.0, 0.0, m_el_zb[0][2], 0.0, m_el_zb[0][3], 0.0], [0.0, 0.0, 0.0, m_el_a[0][0], 0.0, 0.0, 0.0, 0.0, 0.0, m_el_a[0][1], 0.0, 0.0], [0.0, 0.0, m_el_zb[0][1], 0.0, m_el_zb[1][1], 0.0, 0.0, 0.0, m_el_zb[1][2], 0.0, m_el_zb[1][3], 0.0], [0.0, m_el_yg[0][1], 0.0, 0.0, 0.0, m_el_yg[1][1], 0.0, m_el_yg[1][2], 0.0, 0.0, 0.0, m_el_yg[1][3]], [m_el_x[1][0], 0.0, 0.0, 0.0, 0.0, 0.0, m_el_x[1][1], 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, m_el_yg[0][2], 0.0, 0.0, 0.0, m_el_yg[1][2], 0.0, m_el_yg[2][2], 0.0, 0.0, 0.0, m_el_yg[2][3]], [0.0, 0.0, m_el_zb[0][2], 0.0, m_el_zb[1][2], 0.0, 0.0, 0.0, m_el_zb[2][2], 0.0, m_el_zb[2][3], 0.0], [0.0, 0.0, 0.0, m_el_a[1][0], 0.0, 0.0, 0.0, 0.0, 0.0, m_el_a[1][1], 0.0, 0.0], [0.0, 0.0, m_el_zb[0][3], 0.0, m_el_zb[1][3], 0.0, 0.0, 0.0, m_el_zb[2][3], 0.0, m_el_zb[3][3], 0.0], [0.0, m_el_yg[0][3], 0.0, 0.0, 0.0, m_el_yg[1][3], 0.0, m_el_yg[2][3], 0.0, 0.0, 0.0, m_el_yg[3][3]]])
return m_el | VERSION 3
NOTE: from Appendix A - Straight Beam Element Matrices - page 228
https://link.springer.com/content/pdf/bbm%3A978-3-319-56493-7%2F1.pdf | test_scripts/test_and_compare_element_matrices.py | get_mass_matrix_ver3 | JoZimmer/ParOptBeam | 1 | python | def get_mass_matrix_ver3():
'\n VERSION 3\n\n NOTE: from Appendix A - Straight Beam Element Matrices - page 228\n https://link.springer.com/content/pdf/bbm%3A978-3-319-56493-7%2F1.pdf\n\n '
m_x = (m_const / 6.0)
m_x_11 = 2.0
m_x_12 = 1.0
m_el_x = (m_x * np.array([[m_x_11, m_x_12], [m_x_12, m_x_11]]))
m_a = (((m_const * Ip) / A) / 6.0)
m_a_11 = 2
m_a_12 = 1
m_el_a = (m_a * np.array([[m_a_11, m_a_12], [m_a_12, m_a_11]]))
m_yg = ((m_const / 210) / ((1 + Py) ** 2))
m_yg_11 = (((70 * (Py ** 2)) + (147 * Py)) + 78)
m_yg_12 = (((((35 * (Py ** 2)) + (77 * Py)) + 44) * length) / 4)
m_yg_13 = (((35 * (Py ** 2)) + (63 * Py)) + 27)
m_yg_14 = (((- (((35 * (Py ** 2)) + (63 * Py)) + 26)) * length) / 4)
m_yg_22 = (((((7 * (Py ** 2)) + (14 * Py)) + 8) * (length ** 2)) / 4)
m_yg_23 = (- m_yg_14)
m_yg_24 = (((- (((7 * (Py ** 2)) + (14 * Py)) + 6)) * (length ** 2)) / 4)
m_yg_33 = m_yg_11
m_yg_34 = (- m_yg_12)
m_yg_44 = m_yg_22
m_el_yg_trans = (m_yg * np.array([[m_yg_11, m_yg_12, m_yg_13, m_yg_14], [m_yg_12, m_yg_22, m_yg_23, m_yg_24], [m_yg_13, m_yg_23, m_yg_33, m_yg_34], [m_yg_14, m_yg_24, m_yg_34, m_yg_44]]))
m_yg = ((((rho * Iz) / 30) / ((1 + Py) ** 2)) / length)
m_yg_11 = 36
m_yg_12 = ((- ((15 * Py) - 3)) * length)
m_yg_13 = (- m_yg_11)
m_yg_14 = m_yg_12
m_yg_22 = ((((10 * (Py ** 2)) + (5 * Py)) + 4) * (length ** 2))
m_yg_23 = (- m_yg_12)
m_yg_24 = ((((5 * (Py ** 2)) - (5 * Py)) - 1) * (length ** 2))
m_yg_33 = m_yg_11
m_yg_34 = (- m_yg_12)
m_yg_44 = m_yg_22
m_el_yg_rot = (m_yg * np.array([[m_yg_11, m_yg_12, m_yg_13, m_yg_14], [m_yg_12, m_yg_22, m_yg_23, m_yg_24], [m_yg_13, m_yg_23, m_yg_33, m_yg_34], [m_yg_14, m_yg_24, m_yg_34, m_yg_44]]))
m_el_yg = (m_el_yg_trans + m_el_yg_rot)
m_zb = ((m_const / 210) / ((1 + Pz) ** 2))
m_zb_11 = (((70 * (Pz ** 2)) + (147 * Pz)) + 78)
m_zb_12 = (((- (((35 * (Pz ** 2)) + (77 * Pz)) + 44)) * length) / 4)
m_zb_13 = (((35 * (Pz ** 2)) + (63 * Pz)) + 27)
m_zb_14 = (((((35 * (Pz ** 2)) + (63 * Pz)) + 26) * length) / 4)
m_zb_22 = (((((7 * (Pz ** 2)) + (14 * Pz)) + 8) * (length ** 2)) / 4)
m_zb_23 = (- m_zb_14)
m_zb_24 = (((- (((7 * (Pz ** 2)) + (14 * Pz)) + 6)) * (length ** 2)) / 4)
m_zb_33 = m_zb_11
m_zb_34 = (- m_zb_12)
m_zb_44 = m_zb_22
m_el_zb_trans = (m_zb * np.array([[m_zb_11, m_zb_12, m_zb_13, m_zb_14], [m_zb_12, m_zb_22, m_zb_23, m_zb_24], [m_zb_13, m_zb_23, m_zb_33, m_zb_34], [m_zb_14, m_zb_24, m_zb_34, m_zb_44]]))
m_zb = ((((rho * Iy) / 30) / ((1 + Pz) ** 2)) / length)
m_zb_11 = 36
m_zb_12 = (((15 * Pz) - 3) * length)
m_zb_13 = (- m_zb_11)
m_zb_14 = m_zb_12
m_zb_22 = ((((10 * (Pz ** 2)) + (5 * Pz)) + 4) * (length ** 2))
m_zb_23 = (- m_zb_12)
m_zb_24 = ((((5 * (Pz ** 2)) - (5 * Pz)) - 1) * (length ** 2))
m_zb_33 = m_zb_11
m_zb_34 = (- m_zb_12)
m_zb_44 = m_zb_22
m_el_zb_rot = (m_zb * np.array([[m_zb_11, m_zb_12, m_zb_13, m_zb_14], [m_zb_12, m_zb_22, m_zb_23, m_zb_24], [m_zb_13, m_zb_23, m_zb_33, m_zb_34], [m_zb_14, m_zb_24, m_zb_34, m_zb_44]]))
m_el_zb = (m_el_zb_trans + m_el_zb_rot)
m_el = np.array([[m_el_x[0][0], 0.0, 0.0, 0.0, 0.0, 0.0, m_el_x[0][1], 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, m_el_yg[0][0], 0.0, 0.0, 0.0, m_el_yg[0][1], 0.0, m_el_yg[0][2], 0.0, 0.0, 0.0, m_el_yg[0][3]], [0.0, 0.0, m_el_zb[0][0], 0.0, m_el_zb[0][1], 0.0, 0.0, 0.0, m_el_zb[0][2], 0.0, m_el_zb[0][3], 0.0], [0.0, 0.0, 0.0, m_el_a[0][0], 0.0, 0.0, 0.0, 0.0, 0.0, m_el_a[0][1], 0.0, 0.0], [0.0, 0.0, m_el_zb[0][1], 0.0, m_el_zb[1][1], 0.0, 0.0, 0.0, m_el_zb[1][2], 0.0, m_el_zb[1][3], 0.0], [0.0, m_el_yg[0][1], 0.0, 0.0, 0.0, m_el_yg[1][1], 0.0, m_el_yg[1][2], 0.0, 0.0, 0.0, m_el_yg[1][3]], [m_el_x[1][0], 0.0, 0.0, 0.0, 0.0, 0.0, m_el_x[1][1], 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, m_el_yg[0][2], 0.0, 0.0, 0.0, m_el_yg[1][2], 0.0, m_el_yg[2][2], 0.0, 0.0, 0.0, m_el_yg[2][3]], [0.0, 0.0, m_el_zb[0][2], 0.0, m_el_zb[1][2], 0.0, 0.0, 0.0, m_el_zb[2][2], 0.0, m_el_zb[2][3], 0.0], [0.0, 0.0, 0.0, m_el_a[1][0], 0.0, 0.0, 0.0, 0.0, 0.0, m_el_a[1][1], 0.0, 0.0], [0.0, 0.0, m_el_zb[0][3], 0.0, m_el_zb[1][3], 0.0, 0.0, 0.0, m_el_zb[2][3], 0.0, m_el_zb[3][3], 0.0], [0.0, m_el_yg[0][3], 0.0, 0.0, 0.0, m_el_yg[1][3], 0.0, m_el_yg[2][3], 0.0, 0.0, 0.0, m_el_yg[3][3]]])
return m_el | def get_mass_matrix_ver3():
'\n VERSION 3\n\n NOTE: from Appendix A - Straight Beam Element Matrices - page 228\n https://link.springer.com/content/pdf/bbm%3A978-3-319-56493-7%2F1.pdf\n\n '
m_x = (m_const / 6.0)
m_x_11 = 2.0
m_x_12 = 1.0
m_el_x = (m_x * np.array([[m_x_11, m_x_12], [m_x_12, m_x_11]]))
m_a = (((m_const * Ip) / A) / 6.0)
m_a_11 = 2
m_a_12 = 1
m_el_a = (m_a * np.array([[m_a_11, m_a_12], [m_a_12, m_a_11]]))
m_yg = ((m_const / 210) / ((1 + Py) ** 2))
m_yg_11 = (((70 * (Py ** 2)) + (147 * Py)) + 78)
m_yg_12 = (((((35 * (Py ** 2)) + (77 * Py)) + 44) * length) / 4)
m_yg_13 = (((35 * (Py ** 2)) + (63 * Py)) + 27)
m_yg_14 = (((- (((35 * (Py ** 2)) + (63 * Py)) + 26)) * length) / 4)
m_yg_22 = (((((7 * (Py ** 2)) + (14 * Py)) + 8) * (length ** 2)) / 4)
m_yg_23 = (- m_yg_14)
m_yg_24 = (((- (((7 * (Py ** 2)) + (14 * Py)) + 6)) * (length ** 2)) / 4)
m_yg_33 = m_yg_11
m_yg_34 = (- m_yg_12)
m_yg_44 = m_yg_22
m_el_yg_trans = (m_yg * np.array([[m_yg_11, m_yg_12, m_yg_13, m_yg_14], [m_yg_12, m_yg_22, m_yg_23, m_yg_24], [m_yg_13, m_yg_23, m_yg_33, m_yg_34], [m_yg_14, m_yg_24, m_yg_34, m_yg_44]]))
m_yg = ((((rho * Iz) / 30) / ((1 + Py) ** 2)) / length)
m_yg_11 = 36
m_yg_12 = ((- ((15 * Py) - 3)) * length)
m_yg_13 = (- m_yg_11)
m_yg_14 = m_yg_12
m_yg_22 = ((((10 * (Py ** 2)) + (5 * Py)) + 4) * (length ** 2))
m_yg_23 = (- m_yg_12)
m_yg_24 = ((((5 * (Py ** 2)) - (5 * Py)) - 1) * (length ** 2))
m_yg_33 = m_yg_11
m_yg_34 = (- m_yg_12)
m_yg_44 = m_yg_22
m_el_yg_rot = (m_yg * np.array([[m_yg_11, m_yg_12, m_yg_13, m_yg_14], [m_yg_12, m_yg_22, m_yg_23, m_yg_24], [m_yg_13, m_yg_23, m_yg_33, m_yg_34], [m_yg_14, m_yg_24, m_yg_34, m_yg_44]]))
m_el_yg = (m_el_yg_trans + m_el_yg_rot)
m_zb = ((m_const / 210) / ((1 + Pz) ** 2))
m_zb_11 = (((70 * (Pz ** 2)) + (147 * Pz)) + 78)
m_zb_12 = (((- (((35 * (Pz ** 2)) + (77 * Pz)) + 44)) * length) / 4)
m_zb_13 = (((35 * (Pz ** 2)) + (63 * Pz)) + 27)
m_zb_14 = (((((35 * (Pz ** 2)) + (63 * Pz)) + 26) * length) / 4)
m_zb_22 = (((((7 * (Pz ** 2)) + (14 * Pz)) + 8) * (length ** 2)) / 4)
m_zb_23 = (- m_zb_14)
m_zb_24 = (((- (((7 * (Pz ** 2)) + (14 * Pz)) + 6)) * (length ** 2)) / 4)
m_zb_33 = m_zb_11
m_zb_34 = (- m_zb_12)
m_zb_44 = m_zb_22
m_el_zb_trans = (m_zb * np.array([[m_zb_11, m_zb_12, m_zb_13, m_zb_14], [m_zb_12, m_zb_22, m_zb_23, m_zb_24], [m_zb_13, m_zb_23, m_zb_33, m_zb_34], [m_zb_14, m_zb_24, m_zb_34, m_zb_44]]))
m_zb = ((((rho * Iy) / 30) / ((1 + Pz) ** 2)) / length)
m_zb_11 = 36
m_zb_12 = (((15 * Pz) - 3) * length)
m_zb_13 = (- m_zb_11)
m_zb_14 = m_zb_12
m_zb_22 = ((((10 * (Pz ** 2)) + (5 * Pz)) + 4) * (length ** 2))
m_zb_23 = (- m_zb_12)
m_zb_24 = ((((5 * (Pz ** 2)) - (5 * Pz)) - 1) * (length ** 2))
m_zb_33 = m_zb_11
m_zb_34 = (- m_zb_12)
m_zb_44 = m_zb_22
m_el_zb_rot = (m_zb * np.array([[m_zb_11, m_zb_12, m_zb_13, m_zb_14], [m_zb_12, m_zb_22, m_zb_23, m_zb_24], [m_zb_13, m_zb_23, m_zb_33, m_zb_34], [m_zb_14, m_zb_24, m_zb_34, m_zb_44]]))
m_el_zb = (m_el_zb_trans + m_el_zb_rot)
m_el = np.array([[m_el_x[0][0], 0.0, 0.0, 0.0, 0.0, 0.0, m_el_x[0][1], 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, m_el_yg[0][0], 0.0, 0.0, 0.0, m_el_yg[0][1], 0.0, m_el_yg[0][2], 0.0, 0.0, 0.0, m_el_yg[0][3]], [0.0, 0.0, m_el_zb[0][0], 0.0, m_el_zb[0][1], 0.0, 0.0, 0.0, m_el_zb[0][2], 0.0, m_el_zb[0][3], 0.0], [0.0, 0.0, 0.0, m_el_a[0][0], 0.0, 0.0, 0.0, 0.0, 0.0, m_el_a[0][1], 0.0, 0.0], [0.0, 0.0, m_el_zb[0][1], 0.0, m_el_zb[1][1], 0.0, 0.0, 0.0, m_el_zb[1][2], 0.0, m_el_zb[1][3], 0.0], [0.0, m_el_yg[0][1], 0.0, 0.0, 0.0, m_el_yg[1][1], 0.0, m_el_yg[1][2], 0.0, 0.0, 0.0, m_el_yg[1][3]], [m_el_x[1][0], 0.0, 0.0, 0.0, 0.0, 0.0, m_el_x[1][1], 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, m_el_yg[0][2], 0.0, 0.0, 0.0, m_el_yg[1][2], 0.0, m_el_yg[2][2], 0.0, 0.0, 0.0, m_el_yg[2][3]], [0.0, 0.0, m_el_zb[0][2], 0.0, m_el_zb[1][2], 0.0, 0.0, 0.0, m_el_zb[2][2], 0.0, m_el_zb[2][3], 0.0], [0.0, 0.0, 0.0, m_el_a[1][0], 0.0, 0.0, 0.0, 0.0, 0.0, m_el_a[1][1], 0.0, 0.0], [0.0, 0.0, m_el_zb[0][3], 0.0, m_el_zb[1][3], 0.0, 0.0, 0.0, m_el_zb[2][3], 0.0, m_el_zb[3][3], 0.0], [0.0, m_el_yg[0][3], 0.0, 0.0, 0.0, m_el_yg[1][3], 0.0, m_el_yg[2][3], 0.0, 0.0, 0.0, m_el_yg[3][3]]])
return m_el<|docstring|>VERSION 3
NOTE: from Appendix A - Straight Beam Element Matrices - page 228
https://link.springer.com/content/pdf/bbm%3A978-3-319-56493-7%2F1.pdf<|endoftext|> |
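Editor's note: the row above assembles a consistent Timoshenko beam-element mass matrix from a translational and a rotary-inertia part. A minimal sanity check, assuming (as the m_el_x block implies) that m_const is the total element mass rho * A * length, is that the axial sub-block sums back to that mass. The helper below is an illustration, not part of the dataset row.

import numpy as np

def check_axial_mass(m_el, m_const, tol=1e-10):
    # axial-translation DOFs of a two-node element sit at indices 0 and 6
    axial = m_el[np.ix_([0, 6], [0, 6])]
    # consistent-mass property: (m_const / 6) * [[2, 1], [1, 2]] sums to m_const
    return abs(axial.sum() - m_const) < tol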
8ce050f5bf505370356f0c9cf2d48619ecf051cbe5e8c4304e0d9ec9f619275f | def get_stiffness_matrix_ver1():
' \n VERSION 1\n\n NOTE: checking out alternative implementation\n according to https://mediatum.ub.tum.de/doc/1072355/file.pdf\n description and implementation seem correct\n NOTE: find out where the formulation for the mass comes from, stiffness seems standard\n '
K11 = np.zeros((6, 6))
K11[0][0] = ((E * A) / l)
K11[1][1] = (((12 * E) * Iz) / ((l ** 3) * (1 + Py)))
K11[2][2] = (((12 * E) * Iy) / ((l ** 3) * (1 + Pz)))
K11[3][3] = ((G * It) / l)
K11[4][4] = ((((4 + Pz) * E) * Iy) / (l * (1 + Pz)))
K11[5][5] = ((((4 + Py) * E) * Iz) / (l * (1 + Py)))
K11[1][5] = (((6 * E) * Iz) / ((l ** 2) * (1 + Py)))
K11[5][1] = K11[1][5]
K11[2][4] = ((((- 6) * E) * Iy) / ((l ** 2) * (1 + Pz)))
K11[4][2] = K11[2][4]
K22 = ((- K11) + (2 * np.diag(np.diag(K11))))
K21 = (K11 - (2 * np.diag(np.diag(K11))))
K21[4][4] = ((((2 - Pz) * E) * Iy) / (l * (1 + Pz)))
K21[5][5] = ((((2 - Py) * E) * Iz) / (l * (1 + Py)))
K21[1][5] = (- K21[5][1])
K21[2][4] = (- K21[4][2])
k_el = np.zeros(((2 * 6), (2 * 6)))
k_el[0:6, 0:6] += K11
k_el[6:12, 0:6] += K21
k_el[0:6, 6:12] += np.transpose(K21)
k_el[6:12, 6:12] += K22
return k_el | VERSION 1
NOTE: checking out alternative implementation
according to https://mediatum.ub.tum.de/doc/1072355/file.pdf
description and implementation seem correct
NOTE: find out where the formulation for the mass comes from, stiffness seems standard | test_scripts/test_and_compare_element_matrices.py | get_stiffness_matrix_ver1 | JoZimmer/ParOptBeam | 1 | python | def get_stiffness_matrix_ver1():
' \n VERSION 1\n\n NOTE: checking out alternative implementation\n according to https://mediatum.ub.tum.de/doc/1072355/file.pdf\n description and implementation seem correct\n NOTE: find out where the formulation for the mass comes from, stiffness seems standard\n '
K11 = np.zeros((6, 6))
K11[0][0] = ((E * A) / l)
K11[1][1] = (((12 * E) * Iz) / ((l ** 3) * (1 + Py)))
K11[2][2] = (((12 * E) * Iy) / ((l ** 3) * (1 + Pz)))
K11[3][3] = ((G * It) / l)
K11[4][4] = ((((4 + Pz) * E) * Iy) / (l * (1 + Pz)))
K11[5][5] = ((((4 + Py) * E) * Iz) / (l * (1 + Py)))
K11[1][5] = (((6 * E) * Iz) / ((l ** 2) * (1 + Py)))
K11[5][1] = K11[1][5]
K11[2][4] = ((((- 6) * E) * Iy) / ((l ** 2) * (1 + Pz)))
K11[4][2] = K11[2][4]
K22 = ((- K11) + (2 * np.diag(np.diag(K11))))
K21 = (K11 - (2 * np.diag(np.diag(K11))))
K21[4][4] = ((((2 - Pz) * E) * Iy) / (l * (1 + Pz)))
K21[5][5] = ((((2 - Py) * E) * Iz) / (l * (1 + Py)))
K21[1][5] = (- K21[5][1])
K21[2][4] = (- K21[4][2])
k_el = np.zeros(((2 * 6), (2 * 6)))
k_el[0:6, 0:6] += K11
k_el[6:12, 0:6] += K21
k_el[0:6, 6:12] += np.transpose(K21)
k_el[6:12, 6:12] += K22
return k_el | def get_stiffness_matrix_ver1():
' \n VERSION 1\n\n NOTE: checking out alternative implementation\n according to https://mediatum.ub.tum.de/doc/1072355/file.pdf\n description and implementation seem correct\n NOTE: find out where the formulation for the mass comes from, stiffness seems standard\n '
K11 = np.zeros((6, 6))
K11[0][0] = ((E * A) / l)
K11[1][1] = (((12 * E) * Iz) / ((l ** 3) * (1 + Py)))
K11[2][2] = (((12 * E) * Iy) / ((l ** 3) * (1 + Pz)))
K11[3][3] = ((G * It) / l)
K11[4][4] = ((((4 + Pz) * E) * Iy) / (l * (1 + Pz)))
K11[5][5] = ((((4 + Py) * E) * Iz) / (l * (1 + Py)))
K11[1][5] = (((6 * E) * Iz) / ((l ** 2) * (1 + Py)))
K11[5][1] = K11[1][5]
K11[2][4] = ((((- 6) * E) * Iy) / ((l ** 2) * (1 + Pz)))
K11[4][2] = K11[2][4]
K22 = ((- K11) + (2 * np.diag(np.diag(K11))))
K21 = (K11 - (2 * np.diag(np.diag(K11))))
K21[4][4] = ((((2 - Pz) * E) * Iy) / (l * (1 + Pz)))
K21[5][5] = ((((2 - Py) * E) * Iz) / (l * (1 + Py)))
K21[1][5] = (- K21[5][1])
K21[2][4] = (- K21[4][2])
k_el = np.zeros(((2 * 6), (2 * 6)))
k_el[0:6, 0:6] += K11
k_el[6:12, 0:6] += K21
k_el[0:6, 6:12] += np.transpose(K21)
k_el[6:12, 6:12] += K22
return k_el<|docstring|>VERSION 1
NOTE: checking out alternative implementation
according to https://mediatum.ub.tum.de/doc/1072355/file.pdf
description and implementation seem correct
NOTE: find out where the formulation for the mass comes from, stiffness seems standard<|endoftext|> |
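Editor's note: an element stiffness matrix assembled from the K11/K21/K22 blocks above should be symmetric and should map rigid-body translations to zero forces. The sketch below is illustrative only; k_el stands for the 12x12 array the function returns.

import numpy as np

def validate_stiffness(k_el, tol=1e-8):
    symmetric = np.allclose(k_el, k_el.T, atol=tol)
    rigid = np.zeros(12)
    rigid[[0, 6]] = 1.0            # equal axial translation at both nodes
    force_free = np.allclose(k_el @ rigid, 0.0, atol=tol)
    return symmetric and force_free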
10db3434d60c301574d113376bbb604ff8ec080f744597a8f3b58074fb060c9f | def get_stiffness_matrix_ver2():
'\n VERSION 2\n\n NOTE: from http://homes.civil.aau.dk/jc/FemteSemester/Beams3D.pdf\n seems to be a typo in 1-105 and 1-106: the division should be by l**3\n implemented mass matrices similar to the stiffness one\n '
length = l
k_x = ((E * A) / l)
k_x_11 = 1.0
k_x_12 = (- 1.0)
k_el_x = (k_x * np.array([[k_x_11, k_x_12], [k_x_12, k_x_11]]))
k_a = ((G * It) / l)
k_a_11 = 1.0
k_a_12 = (- 1.0)
k_el_a = (k_a * np.array([[k_a_11, k_a_12], [k_a_12, k_a_11]]))
beta_yg = Py
k_yg = (((E * Iz) / (1 + beta_yg)) / (l ** 3))
k_yg_11 = 12.0
k_yg_12 = (6.0 * length)
k_yg_13 = (- k_yg_11)
k_yg_14 = k_yg_12
k_yg_22 = ((4.0 + beta_yg) * (length ** 2))
k_yg_23 = (- k_yg_12)
k_yg_24 = ((2 - beta_yg) * (length ** 2))
k_yg_33 = k_yg_11
k_yg_34 = (- k_yg_12)
k_yg_44 = k_yg_22
k_el_yg = (k_yg * np.array([[k_yg_11, k_yg_12, k_yg_13, k_yg_14], [k_yg_12, k_yg_22, k_yg_23, k_yg_24], [k_yg_13, k_yg_23, k_yg_33, k_yg_34], [k_yg_14, k_yg_24, k_yg_34, k_yg_44]]))
beta_zb = Pz
k_zb = (((E * Iy) / (1 + beta_zb)) / (l ** 3))
k_zb_11 = 12.0
k_zb_12 = ((- 6.0) * length)
k_zb_13 = (- 12.0)
k_zb_14 = k_zb_12
k_zb_22 = ((4.0 + beta_zb) * (length ** 2))
k_zb_23 = (- k_zb_12)
k_zb_24 = ((2 - beta_zb) * (length ** 2))
k_zb_33 = k_zb_11
k_zb_34 = (- k_zb_12)
k_zb_44 = k_zb_22
k_el_zb = (k_zb * np.array([[k_zb_11, k_zb_12, k_zb_13, k_zb_14], [k_zb_12, k_zb_22, k_zb_23, k_zb_24], [k_zb_13, k_zb_23, k_zb_33, k_zb_34], [k_zb_14, k_zb_24, k_zb_34, k_zb_44]]))
k_el = np.array([[k_el_x[0][0], 0.0, 0.0, 0.0, 0.0, 0.0, k_el_x[0][1], 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, k_el_yg[0][0], 0.0, 0.0, 0.0, k_el_yg[0][1], 0.0, k_el_yg[0][2], 0.0, 0.0, 0.0, k_el_yg[0][3]], [0.0, 0.0, k_el_zb[0][0], 0.0, k_el_zb[0][1], 0.0, 0.0, 0.0, k_el_zb[0][2], 0.0, k_el_zb[0][3], 0.0], [0.0, 0.0, 0.0, k_el_a[0][0], 0.0, 0.0, 0.0, 0.0, 0.0, k_el_a[0][1], 0.0, 0.0], [0.0, 0.0, k_el_zb[0][1], 0.0, k_el_zb[1][1], 0.0, 0.0, 0.0, k_el_zb[1][2], 0.0, k_el_zb[1][3], 0.0], [0.0, k_el_yg[0][1], 0.0, 0.0, 0.0, k_el_yg[1][1], 0.0, k_el_yg[1][2], 0.0, 0.0, 0.0, k_el_yg[1][3]], [k_el_x[1][0], 0.0, 0.0, 0.0, 0.0, 0.0, k_el_x[1][1], 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, k_el_yg[0][2], 0.0, 0.0, 0.0, k_el_yg[1][2], 0.0, k_el_yg[2][2], 0.0, 0.0, 0.0, k_el_yg[2][3]], [0.0, 0.0, k_el_zb[0][2], 0.0, k_el_zb[1][2], 0.0, 0.0, 0.0, k_el_zb[2][2], 0.0, k_el_zb[2][3], 0.0], [0.0, 0.0, 0.0, k_el_a[1][0], 0.0, 0.0, 0.0, 0.0, 0.0, k_el_a[1][1], 0.0, 0.0], [0.0, 0.0, k_el_zb[0][3], 0.0, k_el_zb[1][3], 0.0, 0.0, 0.0, k_el_zb[2][3], 0.0, k_el_zb[3][3], 0.0], [0.0, k_el_yg[0][3], 0.0, 0.0, 0.0, k_el_yg[1][3], 0.0, k_el_yg[2][3], 0.0, 0.0, 0.0, k_el_yg[3][3]]])
return k_el | VERSION 2
NOTE: from http://homes.civil.aau.dk/jc/FemteSemester/Beams3D.pdf
seems to be a typo in 1-105 and 1-106: the division should be by l**3
implemented mass matrices similar to the stiffness one | test_scripts/test_and_compare_element_matrices.py | get_stiffness_matrix_ver2 | JoZimmer/ParOptBeam | 1 | python | def get_stiffness_matrix_ver2():
'\n VERSION 2\n\n NOTE: from http://homes.civil.aau.dk/jc/FemteSemester/Beams3D.pdf\n seems to be a typo in 1-105 and 1-106: the division should be by l**3\n implemented mass matrices similar to the stiffness one\n '
length = l
k_x = ((E * A) / l)
k_x_11 = 1.0
k_x_12 = (- 1.0)
k_el_x = (k_x * np.array([[k_x_11, k_x_12], [k_x_12, k_x_11]]))
k_a = ((G * It) / l)
k_a_11 = 1.0
k_a_12 = (- 1.0)
k_el_a = (k_a * np.array([[k_a_11, k_a_12], [k_a_12, k_a_11]]))
beta_yg = Py
k_yg = (((E * Iz) / (1 + beta_yg)) / (l ** 3))
k_yg_11 = 12.0
k_yg_12 = (6.0 * length)
k_yg_13 = (- k_yg_11)
k_yg_14 = k_yg_12
k_yg_22 = ((4.0 + beta_yg) * (length ** 2))
k_yg_23 = (- k_yg_12)
k_yg_24 = ((2 - beta_yg) * (length ** 2))
k_yg_33 = k_yg_11
k_yg_34 = (- k_yg_12)
k_yg_44 = k_yg_22
k_el_yg = (k_yg * np.array([[k_yg_11, k_yg_12, k_yg_13, k_yg_14], [k_yg_12, k_yg_22, k_yg_23, k_yg_24], [k_yg_13, k_yg_23, k_yg_33, k_yg_34], [k_yg_14, k_yg_24, k_yg_34, k_yg_44]]))
beta_zb = Pz
k_zb = (((E * Iy) / (1 + beta_zb)) / (l ** 3))
k_zb_11 = 12.0
k_zb_12 = ((- 6.0) * length)
k_zb_13 = (- 12.0)
k_zb_14 = k_zb_12
k_zb_22 = ((4.0 + beta_zb) * (length ** 2))
k_zb_23 = (- k_zb_12)
k_zb_24 = ((2 - beta_zb) * (length ** 2))
k_zb_33 = k_zb_11
k_zb_34 = (- k_zb_12)
k_zb_44 = k_zb_22
k_el_zb = (k_zb * np.array([[k_zb_11, k_zb_12, k_zb_13, k_zb_14], [k_zb_12, k_zb_22, k_zb_23, k_zb_24], [k_zb_13, k_zb_23, k_zb_33, k_zb_34], [k_zb_14, k_zb_24, k_zb_34, k_zb_44]]))
k_el = np.array([[k_el_x[0][0], 0.0, 0.0, 0.0, 0.0, 0.0, k_el_x[0][1], 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, k_el_yg[0][0], 0.0, 0.0, 0.0, k_el_yg[0][1], 0.0, k_el_yg[0][2], 0.0, 0.0, 0.0, k_el_yg[0][3]], [0.0, 0.0, k_el_zb[0][0], 0.0, k_el_zb[0][1], 0.0, 0.0, 0.0, k_el_zb[0][2], 0.0, k_el_zb[0][3], 0.0], [0.0, 0.0, 0.0, k_el_a[0][0], 0.0, 0.0, 0.0, 0.0, 0.0, k_el_a[0][1], 0.0, 0.0], [0.0, 0.0, k_el_zb[0][1], 0.0, k_el_zb[1][1], 0.0, 0.0, 0.0, k_el_zb[1][2], 0.0, k_el_zb[1][3], 0.0], [0.0, k_el_yg[0][1], 0.0, 0.0, 0.0, k_el_yg[1][1], 0.0, k_el_yg[1][2], 0.0, 0.0, 0.0, k_el_yg[1][3]], [k_el_x[1][0], 0.0, 0.0, 0.0, 0.0, 0.0, k_el_x[1][1], 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, k_el_yg[0][2], 0.0, 0.0, 0.0, k_el_yg[1][2], 0.0, k_el_yg[2][2], 0.0, 0.0, 0.0, k_el_yg[2][3]], [0.0, 0.0, k_el_zb[0][2], 0.0, k_el_zb[1][2], 0.0, 0.0, 0.0, k_el_zb[2][2], 0.0, k_el_zb[2][3], 0.0], [0.0, 0.0, 0.0, k_el_a[1][0], 0.0, 0.0, 0.0, 0.0, 0.0, k_el_a[1][1], 0.0, 0.0], [0.0, 0.0, k_el_zb[0][3], 0.0, k_el_zb[1][3], 0.0, 0.0, 0.0, k_el_zb[2][3], 0.0, k_el_zb[3][3], 0.0], [0.0, k_el_yg[0][3], 0.0, 0.0, 0.0, k_el_yg[1][3], 0.0, k_el_yg[2][3], 0.0, 0.0, 0.0, k_el_yg[3][3]]])
return k_el | def get_stiffness_matrix_ver2():
'\n VERSION 2\n\n NOTE: from http://homes.civil.aau.dk/jc/FemteSemester/Beams3D.pdf\n seems to be a typo in 1-105 and 1-106: the division should be by l**3\n implemented mass matrices similar to the stiffness one\n '
length = l
k_x = ((E * A) / l)
k_x_11 = 1.0
k_x_12 = (- 1.0)
k_el_x = (k_x * np.array([[k_x_11, k_x_12], [k_x_12, k_x_11]]))
k_a = ((G * It) / l)
k_a_11 = 1.0
k_a_12 = (- 1.0)
k_el_a = (k_a * np.array([[k_a_11, k_a_12], [k_a_12, k_a_11]]))
beta_yg = Py
k_yg = (((E * Iz) / (1 + beta_yg)) / (l ** 3))
k_yg_11 = 12.0
k_yg_12 = (6.0 * length)
k_yg_13 = (- k_yg_11)
k_yg_14 = k_yg_12
k_yg_22 = ((4.0 + beta_yg) * (length ** 2))
k_yg_23 = (- k_yg_12)
k_yg_24 = ((2 - beta_yg) * (length ** 2))
k_yg_33 = k_yg_11
k_yg_34 = (- k_yg_12)
k_yg_44 = k_yg_22
k_el_yg = (k_yg * np.array([[k_yg_11, k_yg_12, k_yg_13, k_yg_14], [k_yg_12, k_yg_22, k_yg_23, k_yg_24], [k_yg_13, k_yg_23, k_yg_33, k_yg_34], [k_yg_14, k_yg_24, k_yg_34, k_yg_44]]))
beta_zb = Pz
k_zb = (((E * Iy) / (1 + beta_zb)) / (l ** 3))
k_zb_11 = 12.0
k_zb_12 = ((- 6.0) * length)
k_zb_13 = (- 12.0)
k_zb_14 = k_zb_12
k_zb_22 = ((4.0 + beta_zb) * (length ** 2))
k_zb_23 = (- k_zb_12)
k_zb_24 = ((2 - beta_zb) * (length ** 2))
k_zb_33 = k_zb_11
k_zb_34 = (- k_zb_12)
k_zb_44 = k_zb_22
k_el_zb = (k_zb * np.array([[k_zb_11, k_zb_12, k_zb_13, k_zb_14], [k_zb_12, k_zb_22, k_zb_23, k_zb_24], [k_zb_13, k_zb_23, k_zb_33, k_zb_34], [k_zb_14, k_zb_24, k_zb_34, k_zb_44]]))
k_el = np.array([[k_el_x[0][0], 0.0, 0.0, 0.0, 0.0, 0.0, k_el_x[0][1], 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, k_el_yg[0][0], 0.0, 0.0, 0.0, k_el_yg[0][1], 0.0, k_el_yg[0][2], 0.0, 0.0, 0.0, k_el_yg[0][3]], [0.0, 0.0, k_el_zb[0][0], 0.0, k_el_zb[0][1], 0.0, 0.0, 0.0, k_el_zb[0][2], 0.0, k_el_zb[0][3], 0.0], [0.0, 0.0, 0.0, k_el_a[0][0], 0.0, 0.0, 0.0, 0.0, 0.0, k_el_a[0][1], 0.0, 0.0], [0.0, 0.0, k_el_zb[0][1], 0.0, k_el_zb[1][1], 0.0, 0.0, 0.0, k_el_zb[1][2], 0.0, k_el_zb[1][3], 0.0], [0.0, k_el_yg[0][1], 0.0, 0.0, 0.0, k_el_yg[1][1], 0.0, k_el_yg[1][2], 0.0, 0.0, 0.0, k_el_yg[1][3]], [k_el_x[1][0], 0.0, 0.0, 0.0, 0.0, 0.0, k_el_x[1][1], 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, k_el_yg[0][2], 0.0, 0.0, 0.0, k_el_yg[1][2], 0.0, k_el_yg[2][2], 0.0, 0.0, 0.0, k_el_yg[2][3]], [0.0, 0.0, k_el_zb[0][2], 0.0, k_el_zb[1][2], 0.0, 0.0, 0.0, k_el_zb[2][2], 0.0, k_el_zb[2][3], 0.0], [0.0, 0.0, 0.0, k_el_a[1][0], 0.0, 0.0, 0.0, 0.0, 0.0, k_el_a[1][1], 0.0, 0.0], [0.0, 0.0, k_el_zb[0][3], 0.0, k_el_zb[1][3], 0.0, 0.0, 0.0, k_el_zb[2][3], 0.0, k_el_zb[3][3], 0.0], [0.0, k_el_yg[0][3], 0.0, 0.0, 0.0, k_el_yg[1][3], 0.0, k_el_yg[2][3], 0.0, 0.0, 0.0, k_el_yg[3][3]]])
return k_el<|docstring|>VERSION 2
NOTE: from http://homes.civil.aau.dk/jc/FemteSemester/Beams3D.pdf
seems to be a typo in 1-105 and 1-106: the division should be by l**3
implemented mass matrices similar to the stiffness one<|endoftext|> |
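Editor's note: since the script collects several formulations of the same element, the natural test is a direct comparison. The snippet below assumes the module-level section properties (E, A, l, Iy, Iz, It, Py, Pz) are defined, as they are in the source file.

import numpy as np

k1 = get_stiffness_matrix_ver1()
k2 = get_stiffness_matrix_ver2()
print('max abs deviation:', np.abs(k1 - k2).max())
assert np.allclose(k1, k2), 'the two stiffness formulations disagree'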
225d5ccf0d6fb093ef1f7ad7019c76077de41bdc2d3fe93dc51d778409fb2b95 | def __init__(self, work_folder: str, system_name: str, in_smiles: str=None, in_top_path: str=None, in_cnf_path: str=None, in_imd_path: str=None, in_disres_path: str=None, in_ptp_path: str=None, in_posres_path: str=None, in_refpos_path: str=None, in_gromosXX_bin_dir: str=None, in_gromosPP_bin_dir: str=None, rdkitMol: Chem.rdchem.Mol=None, readIn=True, Forcefield: forcefield_system=forcefield_system(), auto_convert: bool=False, adapt_imd_automatically: bool=True, verbose: bool=True):
'\n\n Parameters\n ----------\n work_folder\n system_name\n in_smiles\n in_top_path\n in_cnf_path\n in_imd_path\n in_disres_path\n in_ptp_path\n in_posres_path\n in_refpos_path\n in_gromosXX_bin_dir\n in_gromosPP_bin_dir\n rdkitMol\n readIn\n Forcefield\n auto_convert\n adapt_imd_automatically\n '
self.hasData = False
self._name = system_name
self._work_folder = work_folder
self.smiles = in_smiles
self.Forcefield = Forcefield
self.mol = Chem.Mol()
self.checkpoint_path = None
self.verbose = verbose
self._gromosPP_bin_dir = in_gromosPP_bin_dir
self._gromosXX_bin_dir = in_gromosXX_bin_dir
self._gromosPP = GromosPP(gromosPP_bin_dir=in_gromosPP_bin_dir)
self._gromosXX = GromosXX(gromosXX_bin_dir=in_gromosXX_bin_dir)
self.__bind_gromosPPFuncs()
self._future_promise = False
self._future_promised_files = []
if (((in_smiles == None) and (rdkitMol == None)) or (readIn == False)):
if verbose:
warnings.warn('No data provided to gromos_system\nmanual work needed')
file_mapping = {'imd': in_imd_path, 'top': in_top_path, 'ptp': in_ptp_path, 'cnf': in_cnf_path, 'disres': in_disres_path, 'posres': in_posres_path, 'refpos': in_refpos_path}
self.parse_attribute_files(file_mapping, readIn=readIn, verbose=verbose)
if (not self._cnf._future_file):
(self.residue_list, self.solute_info, self.protein_info, self.non_ligand_info, self.solvent_info) = self._cnf.get_system_information(not_ligand_residues=[])
else:
self.residue_list = None
self.solute_info = None
self.protein_info = None
self.non_ligand_info = None
if in_smiles:
self.mol = Chem.MolFromSmiles(in_smiles)
self.mol = Chem.AddHs(self.mol)
AllChem.EmbedMolecule(self.mol)
AllChem.UFFOptimizeMolecule(self.mol)
self.hasData = True
if rdkitMol:
self.mol = rdkitMol
self.smiles = Chem.MolToSmiles(self.mol)
self.hasData = True
if auto_convert:
if self.hasData:
self.auto_convert()
else:
raise Warning('auto_convert active but no data provided -> auto_convert NOT done!')
if ((in_cnf_path is None) and (type(self.mol) == Chem.rdchem.Mol) and (self.mol.GetNumAtoms() >= 1)):
self.cnf = Cnf(in_value=self.mol)
if (adapt_imd_automatically and (not self._cnf._future_file) and (not self.imd._future_file)):
self.adapt_imd()
self._all_files_key = list(map((lambda x: ('_' + x)), self.required_files.keys()))
self._all_files_key.extend(list(map((lambda x: ('_' + x)), self.optional_files.keys())))
self._all_files = copy.copy(self.required_files)
self._all_files.update(copy.copy(self.optional_files)) | Parameters
----------
work_folder
system_name
in_smiles
in_top_path
in_cnf_path
in_imd_path
in_disres_path
in_ptp_path
in_posres_path
in_refpos_path
in_gromosXX_bin_dir
in_gromosPP_bin_dir
rdkitMol
readIn
Forcefield
auto_convert
adapt_imd_automatically | pygromos/files/gromos_system/gromos_system.py | __init__ | pultar/PyGromosTools | 13 | python | def __init__(self, work_folder: str, system_name: str, in_smiles: str=None, in_top_path: str=None, in_cnf_path: str=None, in_imd_path: str=None, in_disres_path: str=None, in_ptp_path: str=None, in_posres_path: str=None, in_refpos_path: str=None, in_gromosXX_bin_dir: str=None, in_gromosPP_bin_dir: str=None, rdkitMol: Chem.rdchem.Mol=None, readIn=True, Forcefield: forcefield_system=forcefield_system(), auto_convert: bool=False, adapt_imd_automatically: bool=True, verbose: bool=True):
'\n\n Parameters\n ----------\n work_folder\n system_name\n in_smiles\n in_top_path\n in_cnf_path\n in_imd_path\n in_disres_path\n in_ptp_path\n in_posres_path\n in_refpos_path\n in_gromosXX_bin_dir\n in_gromosPP_bin_dir\n rdkitMol\n readIn\n Forcefield\n auto_convert\n adapt_imd_automatically\n '
self.hasData = False
self._name = system_name
self._work_folder = work_folder
self.smiles = in_smiles
self.Forcefield = Forcefield
self.mol = Chem.Mol()
self.checkpoint_path = None
self.verbose = verbose
self._gromosPP_bin_dir = in_gromosPP_bin_dir
self._gromosXX_bin_dir = in_gromosXX_bin_dir
self._gromosPP = GromosPP(gromosPP_bin_dir=in_gromosPP_bin_dir)
self._gromosXX = GromosXX(gromosXX_bin_dir=in_gromosXX_bin_dir)
self.__bind_gromosPPFuncs()
self._future_promise = False
self._future_promised_files = []
if (((in_smiles == None) and (rdkitMol == None)) or (readIn == False)):
if verbose:
warnings.warn('No data provided to gromos_system\nmanual work needed')
file_mapping = {'imd': in_imd_path, 'top': in_top_path, 'ptp': in_ptp_path, 'cnf': in_cnf_path, 'disres': in_disres_path, 'posres': in_posres_path, 'refpos': in_refpos_path}
self.parse_attribute_files(file_mapping, readIn=readIn, verbose=verbose)
if (not self._cnf._future_file):
(self.residue_list, self.solute_info, self.protein_info, self.non_ligand_info, self.solvent_info) = self._cnf.get_system_information(not_ligand_residues=[])
else:
self.residue_list = None
self.solute_info = None
self.protein_info = None
self.non_ligand_info = None
if in_smiles:
self.mol = Chem.MolFromSmiles(in_smiles)
self.mol = Chem.AddHs(self.mol)
AllChem.EmbedMolecule(self.mol)
AllChem.UFFOptimizeMolecule(self.mol)
self.hasData = True
if rdkitMol:
self.mol = rdkitMol
self.smiles = Chem.MolToSmiles(self.mol)
self.hasData = True
if auto_convert:
if self.hasData:
self.auto_convert()
else:
raise Warning('auto_convert active but no data provided -> auto_convert NOT done!')
if ((in_cnf_path is None) and (type(self.mol) == Chem.rdchem.Mol) and (self.mol.GetNumAtoms() >= 1)):
self.cnf = Cnf(in_value=self.mol)
if (adapt_imd_automatically and (not self._cnf._future_file) and (not self.imd._future_file)):
self.adapt_imd()
self._all_files_key = list(map((lambda x: ('_' + x)), self.required_files.keys()))
self._all_files_key.extend(list(map((lambda x: ('_' + x)), self.optional_files.keys())))
self._all_files = copy.copy(self.required_files)
self._all_files.update(copy.copy(self.optional_files)) | def __init__(self, work_folder: str, system_name: str, in_smiles: str=None, in_top_path: str=None, in_cnf_path: str=None, in_imd_path: str=None, in_disres_path: str=None, in_ptp_path: str=None, in_posres_path: str=None, in_refpos_path: str=None, in_gromosXX_bin_dir: str=None, in_gromosPP_bin_dir: str=None, rdkitMol: Chem.rdchem.Mol=None, readIn=True, Forcefield: forcefield_system=forcefield_system(), auto_convert: bool=False, adapt_imd_automatically: bool=True, verbose: bool=True):
'\n\n Parameters\n ----------\n work_folder\n system_name\n in_smiles\n in_top_path\n in_cnf_path\n in_imd_path\n in_disres_path\n in_ptp_path\n in_posres_path\n in_refpos_path\n in_gromosXX_bin_dir\n in_gromosPP_bin_dir\n rdkitMol\n readIn\n Forcefield\n auto_convert\n adapt_imd_automatically\n '
self.hasData = False
self._name = system_name
self._work_folder = work_folder
self.smiles = in_smiles
self.Forcefield = Forcefield
self.mol = Chem.Mol()
self.checkpoint_path = None
self.verbose = verbose
self._gromosPP_bin_dir = in_gromosPP_bin_dir
self._gromosXX_bin_dir = in_gromosXX_bin_dir
self._gromosPP = GromosPP(gromosPP_bin_dir=in_gromosPP_bin_dir)
self._gromosXX = GromosXX(gromosXX_bin_dir=in_gromosXX_bin_dir)
self.__bind_gromosPPFuncs()
self._future_promise = False
self._future_promised_files = []
if (((in_smiles == None) and (rdkitMol == None)) or (readIn == False)):
if verbose:
warnings.warn('No data provided to gromos_system\nmanual work needed')
file_mapping = {'imd': in_imd_path, 'top': in_top_path, 'ptp': in_ptp_path, 'cnf': in_cnf_path, 'disres': in_disres_path, 'posres': in_posres_path, 'refpos': in_refpos_path}
self.parse_attribute_files(file_mapping, readIn=readIn, verbose=verbose)
if (not self._cnf._future_file):
(self.residue_list, self.solute_info, self.protein_info, self.non_ligand_info, self.solvent_info) = self._cnf.get_system_information(not_ligand_residues=[])
else:
self.residue_list = None
self.solute_info = None
self.protein_info = None
self.non_ligand_info = None
if in_smiles:
self.mol = Chem.MolFromSmiles(in_smiles)
self.mol = Chem.AddHs(self.mol)
AllChem.EmbedMolecule(self.mol)
AllChem.UFFOptimizeMolecule(self.mol)
self.hasData = True
if rdkitMol:
self.mol = rdkitMol
self.smiles = Chem.MolToSmiles(self.mol)
self.hasData = True
if auto_convert:
if self.hasData:
self.auto_convert()
else:
raise Warning('auto_convert active but no data provided -> auto_convert NOT done!')
if ((in_cnf_path is None) and (type(self.mol) == Chem.rdchem.Mol) and (self.mol.GetNumAtoms() >= 1)):
self.cnf = Cnf(in_value=self.mol)
if (adapt_imd_automatically and (not self._cnf._future_file) and (not self.imd._future_file)):
self.adapt_imd()
self._all_files_key = list(map((lambda x: ('_' + x)), self.required_files.keys()))
self._all_files_key.extend(list(map((lambda x: ('_' + x)), self.optional_files.keys())))
self._all_files = copy.copy(self.required_files)
self._all_files.update(copy.copy(self.optional_files))<|docstring|>Parameters
----------
work_folder
system_name
in_smiles
in_top_path
in_cnf_path
in_imd_path
in_disres_path
in_ptp_path
in_posres_path
in_refpos_path
in_gromosXX_bin_dir
in_gromosPP_bin_dir
rdkitMol
readIn
Forcefield
auto_convert
adapt_imd_automatically<|endoftext|> |
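Editor's note: a hypothetical construction call for the class documented above, assuming it is exported as Gromos_System; the folder, name and SMILES are placeholder values, and auto_convert=True would additionally require the configured force field and GROMOS binaries.

from pygromos.files.gromos_system.gromos_system import Gromos_System

system = Gromos_System(work_folder='/tmp/gromos_demo',
                       system_name='benzene',
                       in_smiles='c1ccccc1',
                       auto_convert=False)
print(system.smiles, system.hasData)   # c1ccccc1 True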
d8584f22d72f8d80d7fa820120a3d13b76897487915900e612892d720c600bd8 | def __getstate__(self):
'\n preparation for pickling:\n remove the non-trivial pickling parts\n '
attribute_dict = self.__dict__
new_dict = {}
for key in attribute_dict.keys():
if ((not isinstance(attribute_dict[key], Callable)) and (not (key in skip))):
new_dict.update({key: attribute_dict[key]})
elif ((not (attribute_dict[key] is None)) and (key in skip)):
new_dict.update({key: attribute_dict[key]._asdict()})
else:
new_dict.update({key: None})
return new_dict | preparation for pickling:
remove the non-trivial pickling parts | pygromos/files/gromos_system/gromos_system.py | __getstate__ | pultar/PyGromosTools | 13 | python | def __getstate__(self):
'\n preparation for pickling:\n remove the non-trivial pickling parts\n '
attribute_dict = self.__dict__
new_dict = {}
for key in attribute_dict.keys():
if ((not isinstance(attribute_dict[key], Callable)) and (not (key in skip))):
new_dict.update({key: attribute_dict[key]})
elif ((not (attribute_dict[key] is None)) and (key in skip)):
new_dict.update({key: attribute_dict[key]._asdict()})
else:
new_dict.update({key: None})
return new_dict | def __getstate__(self):
'\n preparation for pickling:\n remove the non-trivial pickling parts\n '
attribute_dict = self.__dict__
new_dict = {}
for key in attribute_dict.keys():
if ((not isinstance(attribute_dict[key], Callable)) and (not (key in skip))):
new_dict.update({key: attribute_dict[key]})
elif ((not (attribute_dict[key] is None)) and (key in skip)):
new_dict.update({key: attribute_dict[key]._asdict()})
else:
new_dict.update({key: None})
return new_dict<|docstring|>preparation for pickling:
remove the non-trivial pickling parts<|endoftext|>
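Editor's note: __getstate__ above customizes pickling by nulling the attributes listed in the module-level skip collection. A generic, self-contained illustration of the same pattern follows; all names in it are invented.

import os
import pickle

class Holder:
    def __init__(self):
        self.data = [1, 2, 3]
        self.handle = open(os.devnull, 'w')    # open file handles cannot be pickled
    def __getstate__(self):
        state = dict(self.__dict__)
        state['handle'] = None                 # drop the non-trivial part
        return state

clone = pickle.loads(pickle.dumps(Holder()))
print(clone.data, clone.handle)                # [1, 2, 3] None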
96093ef9d396b5a9e7d37c547033c36d4250d80366d2dfc0bccc3ed079dc488e | @property
def all_files(self) -> Dict[(str, _general_gromos_file)]:
'\n\n Returns\n -------\n\n '
self._all_files_key = list(self.required_files.keys())
self._all_files_key.extend(list(self.optional_files.keys()))
self._all_files = {key: getattr(self, key) for key in self._all_files_key if (hasattr(self, key) and (not (getattr(self, key) is None)))}
return self._all_files | Returns
------- | pygromos/files/gromos_system/gromos_system.py | all_files | pultar/PyGromosTools | 13 | python | @property
def all_files(self) -> Dict[(str, _general_gromos_file)]:
'\n\n Returns\n -------\n\n '
self._all_files_key = list(self.required_files.keys())
self._all_files_key.extend(list(self.optional_files.keys()))
self._all_files = {key: getattr(self, key) for key in self._all_files_key if (hasattr(self, key) and (not (getattr(self, key) is None)))}
return self._all_files | @property
def all_files(self) -> Dict[(str, _general_gromos_file)]:
'\n\n Returns\n -------\n\n '
self._all_files_key = list(self.required_files.keys())
self._all_files_key.extend(list(self.optional_files.keys()))
self._all_files = {key: getattr(self, key) for key in self._all_files_key if (hasattr(self, key) and (not (getattr(self, key) is None)))}
return self._all_files<|docstring|>Returns
-------<|endoftext|> |
441f03c6fa953a34415844afd04f663a2cfc406100ba437fe85390662690f06d | @property
def all_file_paths(self) -> Dict[(str, str)]:
'\n\n Returns\n -------\n\n '
self._all_files_key = list(self.required_files.keys())
self._all_files_key.extend(list(self.optional_files.keys()))
self._all_file_paths = {key: getattr(self, key).path for key in self._all_files_key if (hasattr(self, key) and (not (getattr(self, key) is None)))}
return self._all_file_paths | Returns
------- | pygromos/files/gromos_system/gromos_system.py | all_file_paths | pultar/PyGromosTools | 13 | python | @property
def all_file_paths(self) -> Dict[(str, str)]:
'\n\n Returns\n -------\n\n '
self._all_files_key = list(self.required_files.keys())
self._all_files_key.extend(list(self.optional_files.keys()))
self._all_file_paths = {key: getattr(self, key).path for key in self._all_files_key if (hasattr(self, key) and (not (getattr(self, key) is None)))}
return self._all_file_paths | @property
def all_file_paths(self) -> Dict[(str, str)]:
'\n\n Returns\n -------\n\n '
self._all_files_key = list(self.required_files.keys())
self._all_files_key.extend(list(self.optional_files.keys()))
self._all_file_paths = {key: getattr(self, key).path for key in self._all_files_key if (hasattr(self, key) and (not (getattr(self, key) is None)))}
return self._all_file_paths<|docstring|>Returns
-------<|endoftext|> |
d9504e4c116ea537a5682b0639968bf8f6a2a34f79f809fb900cac072d2dc062 | def parse_attribute_files(self, file_mapping: Dict[(str, str)], readIn: bool=True, verbose: bool=False):
'\n This function dynamically builds the output folder, sets the file objs of this class, and checks their dependencies.\n\n Parameters\n ----------\n file_mapping: Dict[str, Union[str, None]]\n attribute name: input path\n\n Returns\n -------\n\n '
check_file_paths = []
if (not os.path.exists(self._work_folder)):
bash.make_folder(self._work_folder)
check_file_paths.append(self._work_folder)
all_files = {key: val for (key, val) in self.required_files.items()}
all_files.update(self.optional_files)
[check_file_paths.append(x) for (k, x) in file_mapping.items() if (not (x is None))]
if (len(check_file_paths) > 0):
bash.check_path_dependencies(check_file_paths, verbose=verbose)
for (attribute_name, file_path) in file_mapping.items():
if (readIn and (not (file_path is None))):
if verbose:
print('Parsing File: ', attribute_name)
obj = all_files[attribute_name](file_path)
elif (attribute_name in self.required_files):
if verbose:
print('Generate Empty: ', attribute_name)
obj = all_files[attribute_name](None, _future_file=True)
obj.path = file_path
if (not (file_path is None)):
self._future_promise = True
self._future_promised_files.append(attribute_name)
else:
obj = None
setattr(self, ('_' + attribute_name), obj) | This function dynamically builds the output folder, sets the file objs of this class, and checks their dependencies.
Parameters
----------
file_mapping: Dict[str, Union[str, None]]
attribute name: input path
Returns
------- | pygromos/files/gromos_system/gromos_system.py | parse_attribute_files | pultar/PyGromosTools | 13 | python | def parse_attribute_files(self, file_mapping: Dict[(str, str)], readIn: bool=True, verbose: bool=False):
'\n This function dynamically builds the output folder, sets the file objs of this class, and checks their dependencies.\n\n Parameters\n ----------\n file_mapping: Dict[str, Union[str, None]]\n attribute name: input path\n\n Returns\n -------\n\n '
check_file_paths = []
if (not os.path.exists(self._work_folder)):
bash.make_folder(self._work_folder)
check_file_paths.append(self._work_folder)
all_files = {key: val for (key, val) in self.required_files.items()}
all_files.update(self.optional_files)
[check_file_paths.append(x) for (k, x) in file_mapping.items() if (not (x is None))]
if (len(check_file_paths) > 0):
bash.check_path_dependencies(check_file_paths, verbose=verbose)
for (attribute_name, file_path) in file_mapping.items():
if (readIn and (not (file_path is None))):
if verbose:
print('Parsing File: ', attribute_name)
obj = all_files[attribute_name](file_path)
elif (attribute_name in self.required_files):
if verbose:
print('Generate Empty: ', attribute_name)
obj = all_files[attribute_name](None, _future_file=True)
obj.path = file_path
if (not (file_path is None)):
self._future_promise = True
self._future_promised_files.append(attribute_name)
else:
obj = None
setattr(self, ('_' + attribute_name), obj) | def parse_attribute_files(self, file_mapping: Dict[(str, str)], readIn: bool=True, verbose: bool=False):
'\n This function dynamically builds the output folder, sets the file objs of this class, and checks their dependencies.\n\n Parameters\n ----------\n file_mapping: Dict[str, Union[str, None]]\n attribute name: input path\n\n Returns\n -------\n\n '
check_file_paths = []
if (not os.path.exists(self._work_folder)):
bash.make_folder(self._work_folder)
check_file_paths.append(self._work_folder)
all_files = {key: val for (key, val) in self.required_files.items()}
all_files.update(self.optional_files)
[check_file_paths.append(x) for (k, x) in file_mapping.items() if (not (x is None))]
if (len(check_file_paths) > 0):
bash.check_path_dependencies(check_file_paths, verbose=verbose)
for (attribute_name, file_path) in file_mapping.items():
if (readIn and (not (file_path is None))):
if verbose:
print('Parsing File: ', attribute_name)
obj = all_files[attribute_name](file_path)
elif (attribute_name in self.required_files):
if verbose:
print('Generate Empty: ', attribute_name)
obj = all_files[attribute_name](None, _future_file=True)
obj.path = file_path
if (not (file_path is None)):
self._future_promise = True
self._future_promised_files.append(attribute_name)
else:
obj = None
setattr(self, ('_' + attribute_name), obj)<|docstring|>This function dynamically builds the output folder, sets the file objs of this class, and checks their dependencies.
Parameters
----------
file_mapping: Dict[str, Union[str, None]]
attribute name: input path
Returns
-------<|endoftext|> |
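Editor's note: an assumed call shape for the method above, reusing the placeholder system object from the constructor sketch; the paths are placeholders. As the code shows, required files mapped to None become empty future objects.

file_mapping = {'imd': '/data/sim.imd',
                'top': '/data/sim.top',
                'cnf': None}
system.parse_attribute_files(file_mapping, readIn=True, verbose=True)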
b9ebea9632107a7a4cda59c65aeb173b11ab17a862832ae291b1e584dcba8c95 | def get_file_paths(self) -> Dict[(str, str)]:
'\n get the paths of the files in a dict.\n Returns\n -------\n Dict[str, str]\n returns all file paths, with attribute file name as key.\n '
return {x: file_obj.path for (x, file_obj) in self.all_files.items()} | get the paths of the files in a dict.
Returns
-------
Dict[str, str]
returns all file paths, with attribute file name as key. | pygromos/files/gromos_system/gromos_system.py | get_file_paths | pultar/PyGromosTools | 13 | python | def get_file_paths(self) -> Dict[(str, str)]:
'\n get the paths of the files in a dict.\n Returns\n -------\n Dict[str, str]\n returns all file paths, with attribute file name as key.\n '
return {x: file_obj.path for (x, file_obj) in self.all_files.items()} | def get_file_paths(self) -> Dict[(str, str)]:
'\n get the paths of the files in a dict.\n Returns\n -------\n Dict[str, str]\n returns all file paths, with attribute file name as key.\n '
return {x: file_obj.path for (x, file_obj) in self.all_files.items()}<|docstring|>get the paths of the files in a dict.
Returns
-------
Dict[str, str]
returns all file paths, with attribute file name as key.<|endoftext|>
027be767d37cc944047d4ad277f0b11919506a9c903cb071494b240a845ce1d7 | def save(self, path: Union[(str, io.FileIO)]=None, safe: bool=True) -> str:
'\n This method stores the Class as binary obj to a given path or fileBuffer.\n '
safe_skip = False
if isinstance(path, str):
if (os.path.exists(path) and safe):
warnings.warn('FOUND ALREADY A FILE! SKIPPING!')
safe_skip = True
else:
bufferdWriter = open(path, 'wb')
elif isinstance(path, io.BufferedWriter):
bufferdWriter = path
path = bufferdWriter.name
else:
raise IOError((('Please give as parameter a path:str or a File Buffer. To ' + str(self.__class__)) + '.save'))
if (not safe_skip):
pickle.dump(obj=self, file=bufferdWriter)
bufferdWriter.close()
self.checkpoint_path = path
return path | This method stores the Class as binary obj to a given path or fileBuffer. | pygromos/files/gromos_system/gromos_system.py | save | pultar/PyGromosTools | 13 | python | def save(self, path: Union[(str, io.FileIO)]=None, safe: bool=True) -> str:
'\n \n '
safe_skip = False
if isinstance(path, str):
if (os.path.exists(path) and safe):
warnings.warn('FOUND ALREADY A FILE! SKIPPING!')
safe_skip = True
else:
bufferdWriter = open(path, 'wb')
elif isinstance(path, io.BufferedWriter):
bufferdWriter = path
path = bufferdWriter.name
else:
raise IOError((('Please give as parameter a path:str or a File Buffer. To ' + str(self.__class__)) + '.save'))
if (not safe_skip):
pickle.dump(obj=self, file=bufferdWriter)
bufferdWriter.close()
self.checkpoint_path = path
return path | def save(self, path: Union[(str, io.FileIO)]=None, safe: bool=True) -> str:
'\n \n '
safe_skip = False
if isinstance(path, str):
if (os.path.exists(path) and safe):
warnings.warn('FOUND ALREADY A FILE! SKIPPING!')
safe_skip = True
else:
bufferdWriter = open(path, 'wb')
elif isinstance(path, io.BufferedWriter):
bufferdWriter = path
path = bufferdWriter.name
else:
raise IOError((('Please give as parameter a path:str or a File Buffer. To ' + str(self.__class__)) + '.save'))
if (not safe_skip):
pickle.dump(obj=self, file=bufferdWriter)
bufferdWriter.close()
self.checkpoint_path = path
return path<|docstring|>This method stores the Class as binary obj to a given path or fileBuffer.<|endoftext|> |
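Editor's note: assumed usage of save() as documented above, continuing the placeholder system from the constructor sketch; with the default safe=True an existing checkpoint is not overwritten.

ckpt = system.save('/tmp/gromos_demo/system.obj', safe=False)
print('checkpoint written to', ckpt)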
7369426cdbaa0cfc9cb25df9ec6590181f2a64a69d274043a16eca18bdbf6018 | @classmethod
def load(cls, path: Union[(str, io.FileIO)]=None) -> object:
'\n This method loads the Class from a binary obj at a given path or fileBuffer.\n '
if isinstance(path, str):
bufferedReader = open(path, 'rb')
elif isinstance(path, io.BufferedReader):
bufferedReader = path
else:
raise IOError('Please give as parameter a path:str or a File Buffer.')
obj = pickle.load(file=bufferedReader)
bufferedReader.close()
if (hasattr(obj, 'cnf') and hasattr(obj.cnf, 'POSITION')):
(obj.residue_list, obj.solute_info, obj.protein_info, obj.non_ligand_info, obj.solvent_info) = obj._cnf.get_system_information()
obj.checkpoint_path = path
return obj | This method loads the Class from a binary obj at a given path or fileBuffer. | pygromos/files/gromos_system/gromos_system.py | load | pultar/PyGromosTools | 13 | python | @classmethod
def load(cls, path: Union[(str, io.FileIO)]=None) -> object:
'\n \n '
if isinstance(path, str):
bufferedReader = open(path, 'rb')
elif isinstance(path, io.BufferedReader):
bufferedReader = path
else:
raise IOError('Please give as parameter a path:str or a File Buffer.')
obj = pickle.load(file=bufferedReader)
bufferedReader.close()
if (hasattr(obj, 'cnf') and hasattr(obj.cnf, 'POSITION')):
(obj.residue_list, obj.solute_info, obj.protein_info, obj.non_ligand_info, obj.solvent_info) = obj._cnf.get_system_information()
obj.checkpoint_path = path
return obj | @classmethod
def load(cls, path: Union[(str, io.FileIO)]=None) -> object:
'\n \n '
if isinstance(path, str):
bufferedReader = open(path, 'rb')
elif isinstance(path, io.BufferedReader):
bufferedReader = path
else:
raise IOError('Please give as parameter a path:str or a File Buffer.')
obj = pickle.load(file=bufferedReader)
bufferedReader.close()
if (hasattr(obj, 'cnf') and hasattr(obj.cnf, 'POSITION')):
(obj.residue_list, obj.solute_info, obj.protein_info, obj.non_ligand_info, obj.solvent_info) = obj._cnf.get_system_information()
obj.checkpoint_path = path
return obj<|docstring|>This method loads the Class from a binary obj at a given path or fileBuffer.<|endoftext|>
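Editor's note: the classmethod above restores a pickled system, so a round trip with the placeholder path from the save() sketch looks as follows.

from pygromos.files.gromos_system.gromos_system import Gromos_System

restored = Gromos_System.load('/tmp/gromos_demo/system.obj')
print(restored.checkpoint_path)        # /tmp/gromos_demo/system.obj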
9c51903a54b7c93292da16b79de6bbea25eb1dcd0bf3fec9d86a8b38409266c7 | def __SystemConstructionAttributeFinder(self, func: callable) -> callable:
'\n ** DECORATOR **\n\n This decorator tries to find input parameters of the function in the gromossystem and will automatically assign those to the function call!\n functional programming\n\n Parameters\n ----------\n func : callable\n\n Returns\n -------\n callable\n returns the wrapped function\n\n '
@functools.wraps(func)
def findGromosSystemAttributes(*args, **kwargs):
tmp_files = []
for k in inspect.signature(func).parameters:
attr_key = k.replace('in_', '').replace('_path', '')
if (('in' in k) and ('path' in k) and (attr_key in dir(self))):
grom_obj = getattr(self, attr_key)
if (grom_obj.path is None):
tmp_file_path = ((self.work_folder + '/tmp_file.') + grom_obj._gromos_file_ending)
grom_obj.write(tmp_file_path)
kwargs.update({k: tmp_file_path})
tmp_files.append(tmp_file_path)
else:
grom_obj.write(grom_obj.path)
kwargs.update({k: grom_obj.path})
r = func(*args, **kwargs)
[bash.remove_file(p) for p in tmp_files]
return r
return findGromosSystemAttributes | ** DECORATOR **
This decorator tries to find input parameters of the function in the gromossystem and will automatically assign those to the function call!
functional programming
Parameters
----------
func : callable
Returns
-------
callable
returns the wrapped function | pygromos/files/gromos_system/gromos_system.py | __SystemConstructionAttributeFinder | pultar/PyGromosTools | 13 | python | def __SystemConstructionAttributeFinder(self, func: callable) -> callable:
'\n ** DECORATOR **\n\n This decorator tries to find input parameters of the function in the gromossystem and will automatically assign those to the function call!\n functional programming\n\n Parameters\n ----------\n func : callable\n\n Returns\n -------\n callable\n returns the wrapped function\n\n '
@functools.wraps(func)
def findGromosSystemAttributes(*args, **kwargs):
tmp_files = []
for k in inspect.signature(func).parameters:
attr_key = k.replace('in_', '').replace('_path', '')
if (('in' in k) and ('path' in k) and (attr_key in dir(self))):
grom_obj = getattr(self, attr_key)
if (grom_obj.path is None):
tmp_file_path = ((self.work_folder + '/tmp_file.') + grom_obj._gromos_file_ending)
grom_obj.write(tmp_file_path)
kwargs.update({k: tmp_file_path})
tmp_files.append(tmp_file_path)
else:
grom_obj.write(grom_obj.path)
kwargs.update({k: grom_obj.path})
r = func(*args, **kwargs)
[bash.remove_file(p) for p in tmp_files]
return r
return findGromosSystemAttributes | def __SystemConstructionAttributeFinder(self, func: callable) -> callable:
'\n ** DECORATOR **\n\n This decorator tries to find input parameters of the function in the gromossystem and will automatically assign those to the function call!\n functional programming\n\n Parameters\n ----------\n func : callable\n\n Returns\n -------\n callable\n returns the wrapped function\n\n '
@functools.wraps(func)
def findGromosSystemAttributes(*args, **kwargs):
tmp_files = []
for k in inspect.signature(func).parameters:
attr_key = k.replace('in_', '').replace('_path', '')
if (('in' in k) and ('path' in k) and (attr_key in dir(self))):
grom_obj = getattr(self, attr_key)
if (grom_obj.path is None):
tmp_file_path = ((self.work_folder + '/tmp_file.') + grom_obj._gromos_file_ending)
grom_obj.write(tmp_file_path)
kwargs.update({k: tmp_file_path})
tmp_files.append(tmp_file_path)
else:
grom_obj.write(grom_obj.path)
kwargs.update({k: grom_obj.path})
r = func(*args, **kwargs)
[bash.remove_file(p) for p in tmp_files]
return r
return findGromosSystemAttributes<|docstring|>** DECORATOR **
This decorator tries to find input parameters of the function in the gromossystem and will automatically assign those to the function call!
functional programming
Parameters
----------
func : callable
Returns
-------
callable
returns the wrapped function<|endoftext|> |
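Editor's note: the decorator above injects call arguments by matching 'in_*_path' parameter names against attributes of the owning object. Below is a self-contained, simplified version of that signature-driven injection pattern; the names are invented and this is not the library API.

import functools
import inspect

def inject_from(source):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            for name in inspect.signature(func).parameters:
                if name in source and name not in kwargs:
                    kwargs[name] = source[name]
            return func(*args, **kwargs)
        return wrapper
    return decorator

@inject_from({'in_top_path': '/data/sim.top'})
def run(in_top_path=None):
    return in_top_path

print(run())   # /data/sim.top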
14fa0bae3f5f05614773a6d5077fea251b9559c15a3607e07eb18ccd9d0bc7cb | def __SystemConstructionUpdater(self, func: callable) -> callable:
'\n ** DECORATOR **\n This decorator tries to find output parameters of the function in the gromossystem and will automatically update the state of those attributes!\n functional programming\n\n Parameters\n ----------\n func: callable\n the function to be wrapped\n\n Returns\n -------\n func\n\n '
@functools.wraps(func)
def updateGromosSystem(*args, **kwargs):
update_dict = {}
for k in inspect.signature(func).parameters:
if (('out' in k) and ('path' in k)):
attr_key = k.replace('out_', '').replace('_path', '')
kwargs.update({k: ((self.work_folder + '/tmp_file.') + attr_key)})
update_dict.update({k: attr_key})
r = func(*args, **kwargs)
for k in update_dict:
setattr(self, update_dict[k], kwargs[k])
getattr(self, update_dict[k]).path = None
bash.remove_file(kwargs[k])
return r
return updateGromosSystem | ** DECORATOR **
This decorator tries to find output parameters of the function in the gromossystem and will automatically update the state of those attributes!
functional programming
Parameters
----------
func: callable
the function to be wrapped
Returns
-------
func | pygromos/files/gromos_system/gromos_system.py | __SystemConstructionUpdater | pultar/PyGromosTools | 13 | python | def __SystemConstructionUpdater(self, func: callable) -> callable:
'\n ** DECORATOR **\n This decorator tries to find output parameters of the function in the gromossystem and will automatically update the state of those attributes!\n functional programming\n\n Parameters\n ----------\n func: callable\n the function to be wrapped\n\n Returns\n -------\n func\n\n '
@functools.wraps(func)
def updateGromosSystem(*args, **kwargs):
update_dict = {}
for k in inspect.signature(func).parameters:
if (('out' in k) and ('path' in k)):
attr_key = k.replace('out_', '').replace('_path', '')
kwargs.update({k: ((self.work_folder + '/tmp_file.') + attr_key)})
update_dict.update({k: attr_key})
r = func(*args, **kwargs)
for k in update_dict:
setattr(self, update_dict[k], kwargs[k])
getattr(self, update_dict[k]).path = None
bash.remove_file(kwargs[k])
return r
return updateGromosSystem | def __SystemConstructionUpdater(self, func: callable) -> callable:
'\n ** DECORATOR **\n This decorator tries to find output parameters of the function in the gromossystem and will automatically update the state of those attributes!\n functional programming\n\n Parameters\n ----------\n func: callable\n the function to be wrapped\n\n Returns\n -------\n func\n\n '
@functools.wraps(func)
def updateGromosSystem(*args, **kwargs):
update_dict = {}
for k in inspect.signature(func).parameters:
if (('out' in k) and ('path' in k)):
attr_key = k.replace('out_', '').replace('_path', '')
kwargs.update({k: ((self.work_folder + '/tmp_file.') + attr_key)})
update_dict.update({k: attr_key})
r = func(*args, **kwargs)
for k in update_dict:
setattr(self, update_dict[k], kwargs[k])
getattr(self, update_dict[k]).path = None
bash.remove_file(kwargs[k])
return r
return updateGromosSystem<|docstring|>** DECORATOR **
This decorator tries to find output parameters of the function in the gromossystem and will automatically update the state of those attributes!
functional programming
Parameters
----------
func: callable
the function to be wrapped
Returns
-------
func<|endoftext|> |
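Editor's note: the companion decorator above performs the reverse mapping: 'out_*_path' parameters are routed to temporary files whose contents then update the owning object. A compact restatement of just the naming convention, stripped of the file handling and with hypothetical values:

def out_keys_to_attrs(kwargs):
    # 'out_cnf_path' becomes the attribute name 'cnf', mirroring the replace() calls above
    return {k: k.replace('out_', '').replace('_path', '')
            for k in kwargs if k.startswith('out_') and k.endswith('_path')}

print(out_keys_to_attrs({'out_cnf_path': '/tmp/x.cnf', 'verbose': True}))
# {'out_cnf_path': 'cnf'}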
cb6e5750cd02382d066c8ab5ebdd576bc5106145499c817672852ce9dd3ffd63 | def run_epoch_test(session, model, verbose=False):
'run the given model over its data'
start_time = time.time()
losses = 0.0
iters = 0
state = session.run(model.initial_state)
feed_dict_masks = {}
fetches = {'loss': model.loss, 'final_state': model.final_state}
if config.dynamic_eval:
fetches['update_op'] = model.dynamic_eval.update_op()
for step in range(model.input.epoch_size):
feed_dict = dict(feed_dict_masks.items())
for (j, (c, h)) in enumerate(model.initial_state):
feed_dict[c] = state[j].c
feed_dict[h] = state[j].h
feed_dict.update(model.input.get_batch((step * model.input.time_steps)))
vals = session.run(fetches, feed_dict)
loss = vals['loss']
state = vals['final_state']
losses += loss
iters += 1
if (verbose and ((step % (model.input.epoch_size // 10)) == 10)):
logger.info(('%.3f perplexity: %.3f bits: %.3f speed: %.0f wps' % (((step * 1.0) / model.input.epoch_size), np.exp((losses / iters)), np.log2(np.exp((losses / iters))), (((iters * model.input.batch_size) * model.input.time_steps) / (time.time() - start_time)))))
return np.exp((losses / iters)) | run the given model over its data | tensorflow_impl/model_estimator.py | run_epoch_test | zivaharoni/gradual-learning-rnn | 10 | python | def run_epoch_test(session, model, verbose=False):
start_time = time.time()
losses = 0.0
iters = 0
state = session.run(model.initial_state)
feed_dict_masks = {}
fetches = {'loss': model.loss, 'final_state': model.final_state}
if config.dynamic_eval:
fetches['update_op'] = model.dynamic_eval.update_op()
for step in range(model.input.epoch_size):
feed_dict = dict(feed_dict_masks.items())
for (j, (c, h)) in enumerate(model.initial_state):
feed_dict[c] = state[j].c
feed_dict[h] = state[j].h
feed_dict.update(model.input.get_batch((step * model.input.time_steps)))
vals = session.run(fetches, feed_dict)
loss = vals['loss']
state = vals['final_state']
losses += loss
iters += 1
if (verbose and ((step % (model.input.epoch_size // 10)) == 10)):
logger.info(('%.3f perplexity: %.3f bits: %.3f speed: %.0f wps' % (((step * 1.0) / model.input.epoch_size), np.exp((losses / iters)), np.log2(np.exp((losses / iters))), (((iters * model.input.batch_size) * model.input.time_steps) / (time.time() - start_time)))))
return np.exp((losses / iters)) | def run_epoch_test(session, model, verbose=False):
start_time = time.time()
losses = 0.0
iters = 0
state = session.run(model.initial_state)
feed_dict_masks = {}
fetches = {'loss': model.loss, 'final_state': model.final_state}
if config.dynamic_eval:
fetches['update_op'] = model.dynamic_eval.update_op()
for step in range(model.input.epoch_size):
feed_dict = dict(feed_dict_masks.items())
for (j, (c, h)) in enumerate(model.initial_state):
feed_dict[c] = state[j].c
feed_dict[h] = state[j].h
feed_dict.update(model.input.get_batch((step * model.input.time_steps)))
vals = session.run(fetches, feed_dict)
loss = vals['loss']
state = vals['final_state']
losses += loss
iters += 1
if (verbose and ((step % (model.input.epoch_size // 10)) == 10)):
logger.info(('%.3f perplexity: %.3f bits: %.3f speed: %.0f wps' % (((step * 1.0) / model.input.epoch_size), np.exp((losses / iters)), np.log2(np.exp((losses / iters))), (((iters * model.input.batch_size) * model.input.time_steps) / (time.time() - start_time)))))
return np.exp((losses / iters))<|docstring|>run the given model over its data<|endoftext|> |
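Editor's note: the values logged above are perplexity and bits per token, both derived from the running mean cross-entropy; a tiny standalone restatement of the relationship, with 4.5 as an arbitrary example loss.

import numpy as np

mean_nll = 4.5                     # average per-token loss in nats
perplexity = np.exp(mean_nll)      # about 90.0
bits = np.log2(perplexity)         # about 6.49 bits per token
print(perplexity, bits)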