field                   type           range
body_hash               stringlengths  64 – 64
body                    stringlengths  23 – 109k
docstring               stringlengths  1 – 57k
path                    stringlengths  4 – 198
name                    stringlengths  1 – 115
repository_name         stringlengths  7 – 111
repository_stars        float64        0 – 191k
lang                    stringclasses  1 value
body_without_docstring  stringlengths  14 – 108k
unified                 stringlengths  45 – 133k
fb6173465dbe16ed88857c96387fafdabb6637fcc350ae47125bb136dc4702a3
def compute_network_pass_size(model): 'Computes the size of a network pass in bytes using cached\n parameters as well as gradients' num_bytes = 0 print('Adding layer caches for forward pass:') for layer in model.cache.keys(): key_num_bytes = 0 for value in model.cache[layer]: value_num_bytes = sys.getsizeof(value) key_num_bytes += value_num_bytes num_bytes += key_num_bytes print(layer, key_num_bytes) print('\nAdding layer gradients for backward pass:') for key in model.grads.keys(): key_num_bytes = sys.getsizeof(model.grads[key]) num_bytes += key_num_bytes print(key, key_num_bytes) return num_bytes
Computes the size of a network pass in bytes using cached parameters as well as gradients
exercise_05/exercise_code/networks/compute_network_size.py
compute_network_pass_size
Sihifu/i2dl
0
python
def compute_network_pass_size(model): 'Computes the size of a network pass in bytes using cached\n parameters as well as gradients' num_bytes = 0 print('Adding layer caches for forward pass:') for layer in model.cache.keys(): key_num_bytes = 0 for value in model.cache[layer]: value_num_bytes = sys.getsizeof(value) key_num_bytes += value_num_bytes num_bytes += key_num_bytes print(layer, key_num_bytes) print('\nAdding layer gradients for backward pass:') for key in model.grads.keys(): key_num_bytes = sys.getsizeof(model.grads[key]) num_bytes += key_num_bytes print(key, key_num_bytes) return num_bytes
def compute_network_pass_size(model): 'Computes the size of a network pass in bytes using cached\n parameters as well as gradients' num_bytes = 0 print('Adding layer caches for forward pass:') for layer in model.cache.keys(): key_num_bytes = 0 for value in model.cache[layer]: value_num_bytes = sys.getsizeof(value) key_num_bytes += value_num_bytes num_bytes += key_num_bytes print(layer, key_num_bytes) print('\nAdding layer gradients for backward pass:') for key in model.grads.keys(): key_num_bytes = sys.getsizeof(model.grads[key]) num_bytes += key_num_bytes print(key, key_num_bytes) return num_bytes<|docstring|>Computes the size of a network pass in bytes using cached parameters as well as gradients<|endoftext|>
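Editorial note on the row above: `sys.getsizeof` counts a NumPy array's data buffer only when the array owns it; for views and slices it returns just the small object header, so byte totals computed this way can undercount cached activations. A minimal sketch of the pitfall, independent of the exercise's `model` object:

```python
import sys
import numpy as np

owner = np.zeros((1000, 1000))   # owns its 8 MB buffer
view = owner[:500]               # shares the owner's buffer

print(sys.getsizeof(owner))  # ~8_000_000+ bytes: header plus the owned buffer
print(sys.getsizeof(view))   # ~100 bytes: header only, buffer not counted
print(view.nbytes)           # 4_000_000: the bytes the view actually addresses
```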
7424f22f09aeba10cd68a6ecbe1f6de1ea06851e2c06747f767523a39379ef30
def __init__(self, intermediate_directory='intermediates'): '\n :param intermediate_directory: Directory, where the\n intermediate pandas dataframe should be persisted\n to.\n ' super(NumpyNullPreprocessor, self).__init__() self._intermediate_directory = intermediate_directory self._cached = False self._cached_object = None
:param intermediate_directory: Directory, where the intermediate pandas dataframe should be persisted to.
brewPipe/preprocess/numpy_null.py
__init__
meyerd/brewPipe
0
python
def __init__(self, intermediate_directory='intermediates'): '\n :param intermediate_directory: Directory, where the\n intermediate pandas dataframe should be persisted\n to.\n ' super(NumpyNullPreprocessor, self).__init__() self._intermediate_directory = intermediate_directory self._cached = False self._cached_object = None
def __init__(self, intermediate_directory='intermediates'): '\n :param intermediate_directory: Directory, where the\n intermediate pandas dataframe should be persisted\n to.\n ' super(NumpyNullPreprocessor, self).__init__() self._intermediate_directory = intermediate_directory self._cached = False self._cached_object = None<|docstring|>:param intermediate_directory: Directory, where the intermediate pandas dataframe should be persisted to.<|endoftext|>
50e92de0ef9f73bd74a30030912580551c4dd71ffe70dee2598709f6c2b8b715
def fix_queryselector(elems): "Workaround for web components breaking querySelector.\n\n Because someone thought it was a good idea to just yeet the moral equivalent\n of iframes everywhere over a single page 🤦\n\n Shadow DOM was a terrible idea and everyone involved should feel professionally\n ashamed of themselves. Every problem it tried to solve could and should have\n been solved in better ways that don't break the DOM.\n " selectors = '").shadowRoot.querySelector("'.join(elems) return (('return document.querySelector("' + selectors) + '")')
Workaround for web components breaking querySelector. Because someone thought it was a good idea to just yeet the moral equivalent of iframes everywhere over a single page 🤦 Shadow DOM was a terrible idea and everyone involved should feel professionally ashamed of themselves. Every problem it tried to solve could and should have been solved in better ways that don't break the DOM.
tests/integration/test_charm.py
fix_queryselector
VariableDeclared/kubeflow-dashboard-operator
0
python
def fix_queryselector(elems): "Workaround for web components breaking querySelector.\n\n Because someone thought it was a good idea to just yeet the moral equivalent\n of iframes everywhere over a single page 🤦\n\n Shadow DOM was a terrible idea and everyone involved should feel professionally\n ashamed of themselves. Every problem it tried to solve could and should have\n been solved in better ways that don't break the DOM.\n " selectors = '").shadowRoot.querySelector("'.join(elems) return (('return document.querySelector("' + selectors) + '")')
def fix_queryselector(elems): "Workaround for web components breaking querySelector.\n\n Because someone thought it was a good idea to just yeet the moral equivalent\n of iframes everywhere over a single page 🤦\n\n Shadow DOM was a terrible idea and everyone involved should feel professionally\n ashamed of themselves. Every problem it tried to solve could and should have\n been solved in better ways that don't break the DOM.\n " selectors = '").shadowRoot.querySelector("'.join(elems) return (('return document.querySelector("' + selectors) + '")')<|docstring|>Workaround for web components breaking querySelector. Because someone thought it was a good idea to just yeet the moral equivalent of iframes everywhere over a single page 🤦 Shadow DOM was a terrible idea and everyone involved should feel professionally ashamed of themselves. Every problem it tried to solve could and should have been solved in better ways that don't break the DOM.<|endoftext|>
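To make the string-building concrete, here is the same helper with a sample call; the element names are hypothetical:

```python
def fix_queryselector(elems):
    # Chain shadowRoot.querySelector calls so the selector pierces each shadow boundary.
    selectors = '").shadowRoot.querySelector("'.join(elems)
    return 'return document.querySelector("' + selectors + '")'

print(fix_queryselector(['main-page', 'side-panel', 'a.login']))
# return document.querySelector("main-page").shadowRoot.querySelector("side-panel").shadowRoot.querySelector("a.login")
```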
e93a67e923afe1f55526c988fa95ac28a4f5624f31ed3e52abb9ddb38db92e1e
def __init__(self, audit_reason_id=None, comment=None): 'GetEdocWithAuditReasonRequest - a model defined in Swagger' self._audit_reason_id = None self._comment = None self.discriminator = None if (audit_reason_id is not None): self.audit_reason_id = audit_reason_id if (comment is not None): self.comment = comment
GetEdocWithAuditReasonRequest - a model defined in Swagger
laserfiche_api/models/get_edoc_with_audit_reason_request.py
__init__
Layer8Err/laserfiche_api
1
python
def __init__(self, audit_reason_id=None, comment=None): self._audit_reason_id = None self._comment = None self.discriminator = None if (audit_reason_id is not None): self.audit_reason_id = audit_reason_id if (comment is not None): self.comment = comment
def __init__(self, audit_reason_id=None, comment=None): self._audit_reason_id = None self._comment = None self.discriminator = None if (audit_reason_id is not None): self.audit_reason_id = audit_reason_id if (comment is not None): self.comment = comment<|docstring|>GetEdocWithAuditReasonRequest - a model defined in Swagger<|endoftext|>
296aa5d2830efa69b740ef9966359e342a0f8779b51cf412a6050527c098c9b0
@property def audit_reason_id(self): 'Gets the audit_reason_id of this GetEdocWithAuditReasonRequest. # noqa: E501\n\n The reason id for this audit event. # noqa: E501\n\n :return: The audit_reason_id of this GetEdocWithAuditReasonRequest. # noqa: E501\n :rtype: int\n ' return self._audit_reason_id
Gets the audit_reason_id of this GetEdocWithAuditReasonRequest. # noqa: E501 The reason id for this audit event. # noqa: E501 :return: The audit_reason_id of this GetEdocWithAuditReasonRequest. # noqa: E501 :rtype: int
laserfiche_api/models/get_edoc_with_audit_reason_request.py
audit_reason_id
Layer8Err/laserfiche_api
1
python
@property def audit_reason_id(self): 'Gets the audit_reason_id of this GetEdocWithAuditReasonRequest. # noqa: E501\n\n The reason id for this audit event. # noqa: E501\n\n :return: The audit_reason_id of this GetEdocWithAuditReasonRequest. # noqa: E501\n :rtype: int\n ' return self._audit_reason_id
@property def audit_reason_id(self): 'Gets the audit_reason_id of this GetEdocWithAuditReasonRequest. # noqa: E501\n\n The reason id for this audit event. # noqa: E501\n\n :return: The audit_reason_id of this GetEdocWithAuditReasonRequest. # noqa: E501\n :rtype: int\n ' return self._audit_reason_id<|docstring|>Gets the audit_reason_id of this GetEdocWithAuditReasonRequest. # noqa: E501 The reason id for this audit event. # noqa: E501 :return: The audit_reason_id of this GetEdocWithAuditReasonRequest. # noqa: E501 :rtype: int<|endoftext|>
c0e6f73a8c29b475fb9f152fd7d2db0b9562f54df1e924b8ab267e6d12a8f7b2
@audit_reason_id.setter def audit_reason_id(self, audit_reason_id): 'Sets the audit_reason_id of this GetEdocWithAuditReasonRequest.\n\n The reason id for this audit event. # noqa: E501\n\n :param audit_reason_id: The audit_reason_id of this GetEdocWithAuditReasonRequest. # noqa: E501\n :type: int\n ' self._audit_reason_id = audit_reason_id
Sets the audit_reason_id of this GetEdocWithAuditReasonRequest. The reason id for this audit event. # noqa: E501 :param audit_reason_id: The audit_reason_id of this GetEdocWithAuditReasonRequest. # noqa: E501 :type: int
laserfiche_api/models/get_edoc_with_audit_reason_request.py
audit_reason_id
Layer8Err/laserfiche_api
1
python
@audit_reason_id.setter def audit_reason_id(self, audit_reason_id): 'Sets the audit_reason_id of this GetEdocWithAuditReasonRequest.\n\n The reason id for this audit event. # noqa: E501\n\n :param audit_reason_id: The audit_reason_id of this GetEdocWithAuditReasonRequest. # noqa: E501\n :type: int\n ' self._audit_reason_id = audit_reason_id
@audit_reason_id.setter def audit_reason_id(self, audit_reason_id): 'Sets the audit_reason_id of this GetEdocWithAuditReasonRequest.\n\n The reason id for this audit event. # noqa: E501\n\n :param audit_reason_id: The audit_reason_id of this GetEdocWithAuditReasonRequest. # noqa: E501\n :type: int\n ' self._audit_reason_id = audit_reason_id<|docstring|>Sets the audit_reason_id of this GetEdocWithAuditReasonRequest. The reason id for this audit event. # noqa: E501 :param audit_reason_id: The audit_reason_id of this GetEdocWithAuditReasonRequest. # noqa: E501 :type: int<|endoftext|>
c93eaf8be37260e8c680a7eef89900148ff12ff0211c134c6068abc5022108e1
@property def comment(self): 'Gets the comment of this GetEdocWithAuditReasonRequest. # noqa: E501\n\n The comment for this audit event. # noqa: E501\n\n :return: The comment of this GetEdocWithAuditReasonRequest. # noqa: E501\n :rtype: str\n ' return self._comment
Gets the comment of this GetEdocWithAuditReasonRequest. # noqa: E501 The comment for this audit event. # noqa: E501 :return: The comment of this GetEdocWithAuditReasonRequest. # noqa: E501 :rtype: str
laserfiche_api/models/get_edoc_with_audit_reason_request.py
comment
Layer8Err/laserfiche_api
1
python
@property def comment(self): 'Gets the comment of this GetEdocWithAuditReasonRequest. # noqa: E501\n\n The comment for this audit event. # noqa: E501\n\n :return: The comment of this GetEdocWithAuditReasonRequest. # noqa: E501\n :rtype: str\n ' return self._comment
@property def comment(self): 'Gets the comment of this GetEdocWithAuditReasonRequest. # noqa: E501\n\n The comment for this audit event. # noqa: E501\n\n :return: The comment of this GetEdocWithAuditReasonRequest. # noqa: E501\n :rtype: str\n ' return self._comment<|docstring|>Gets the comment of this GetEdocWithAuditReasonRequest. # noqa: E501 The comment for this audit event. # noqa: E501 :return: The comment of this GetEdocWithAuditReasonRequest. # noqa: E501 :rtype: str<|endoftext|>
7e61abd25102d2ee68ae1a2d6fcc4f9e5341ef6f4f9a2abce26bf40036d3b6f0
@comment.setter def comment(self, comment): 'Sets the comment of this GetEdocWithAuditReasonRequest.\n\n The comment for this audit event. # noqa: E501\n\n :param comment: The comment of this GetEdocWithAuditReasonRequest. # noqa: E501\n :type: str\n ' self._comment = comment
Sets the comment of this GetEdocWithAuditReasonRequest. The comment for this audit event. # noqa: E501 :param comment: The comment of this GetEdocWithAuditReasonRequest. # noqa: E501 :type: str
laserfiche_api/models/get_edoc_with_audit_reason_request.py
comment
Layer8Err/laserfiche_api
1
python
@comment.setter def comment(self, comment): 'Sets the comment of this GetEdocWithAuditReasonRequest.\n\n The comment for this audit event. # noqa: E501\n\n :param comment: The comment of this GetEdocWithAuditReasonRequest. # noqa: E501\n :type: str\n ' self._comment = comment
@comment.setter def comment(self, comment): 'Sets the comment of this GetEdocWithAuditReasonRequest.\n\n The comment for this audit event. # noqa: E501\n\n :param comment: The comment of this GetEdocWithAuditReasonRequest. # noqa: E501\n :type: str\n ' self._comment = comment<|docstring|>Sets the comment of this GetEdocWithAuditReasonRequest. The comment for this audit event. # noqa: E501 :param comment: The comment of this GetEdocWithAuditReasonRequest. # noqa: E501 :type: str<|endoftext|>
79d8de7adf9d153041da991026d253b6af9d943dd3b83a4e0bf3d7156619d035
def to_dict(self): 'Returns the model properties as a dict' result = {} for (attr, _) in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value)) elif hasattr(value, 'to_dict'): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items())) else: result[attr] = value if issubclass(GetEdocWithAuditReasonRequest, dict): for (key, value) in self.items(): result[key] = value return result
Returns the model properties as a dict
laserfiche_api/models/get_edoc_with_audit_reason_request.py
to_dict
Layer8Err/laserfiche_api
1
python
def to_dict(self): result = {} for (attr, _) in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value)) elif hasattr(value, 'to_dict'): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items())) else: result[attr] = value if issubclass(GetEdocWithAuditReasonRequest, dict): for (key, value) in self.items(): result[key] = value return result
def to_dict(self): result = {} for (attr, _) in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value)) elif hasattr(value, 'to_dict'): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items())) else: result[attr] = value if issubclass(GetEdocWithAuditReasonRequest, dict): for (key, value) in self.items(): result[key] = value return result<|docstring|>Returns the model properties as a dict<|endoftext|>
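The `to_dict` above is stock swagger-codegen output. A condensed, self-contained sketch of the same recursion, with a hypothetical two-field model standing in for the generated class:

```python
class MiniModel:
    swagger_types = {'audit_reason_id': 'int', 'comment': 'str'}

    def __init__(self, audit_reason_id=None, comment=None):
        self.audit_reason_id = audit_reason_id
        self.comment = comment

    def to_dict(self):
        # Recurse into anything exposing to_dict(), mirroring the generated code.
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [v.to_dict() if hasattr(v, 'to_dict') else v for v in value]
            elif hasattr(value, 'to_dict'):
                result[attr] = value.to_dict()
            else:
                result[attr] = value
        return result

print(MiniModel(audit_reason_id=7, comment='ok').to_dict())
# {'audit_reason_id': 7, 'comment': 'ok'}
```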
cbb19eaa2fc8a113d9e32f924ef280a7e97563f8915f94f65dab438997af2e99
def to_str(self): 'Returns the string representation of the model' return pprint.pformat(self.to_dict())
Returns the string representation of the model
laserfiche_api/models/get_edoc_with_audit_reason_request.py
to_str
Layer8Err/laserfiche_api
1
python
def to_str(self): return pprint.pformat(self.to_dict())
def to_str(self): return pprint.pformat(self.to_dict())<|docstring|>Returns the string representation of the model<|endoftext|>
772243a2c2b3261a9b954d07aaf295e3c1242a579a495e2d6a5679c677861703
def __repr__(self): 'For `print` and `pprint`' return self.to_str()
For `print` and `pprint`
laserfiche_api/models/get_edoc_with_audit_reason_request.py
__repr__
Layer8Err/laserfiche_api
1
python
def __repr__(self): return self.to_str()
def __repr__(self): return self.to_str()<|docstring|>For `print` and `pprint`<|endoftext|>
d2789e8921f8d5f33322b814bad6f6781119573d482862c089e132669d798049
def __eq__(self, other): 'Returns true if both objects are equal' if (not isinstance(other, GetEdocWithAuditReasonRequest)): return False return (self.__dict__ == other.__dict__)
Returns true if both objects are equal
laserfiche_api/models/get_edoc_with_audit_reason_request.py
__eq__
Layer8Err/laserfiche_api
1
python
def __eq__(self, other): if (not isinstance(other, GetEdocWithAuditReasonRequest)): return False return (self.__dict__ == other.__dict__)
def __eq__(self, other): if (not isinstance(other, GetEdocWithAuditReasonRequest)): return False return (self.__dict__ == other.__dict__)<|docstring|>Returns true if both objects are equal<|endoftext|>
43dc6740163eb9fc1161d09cb2208a64c7ad0cc8d9c8637ac3264522d3ec7e42
def __ne__(self, other): 'Returns true if both objects are not equal' return (not (self == other))
Returns true if both objects are not equal
laserfiche_api/models/get_edoc_with_audit_reason_request.py
__ne__
Layer8Err/laserfiche_api
1
python
def __ne__(self, other): return (not (self == other))
def __ne__(self, other): return (not (self == other))<|docstring|>Returns true if both objects are not equal<|endoftext|>
c6a8159b49b2c14bf77106850830e466d51330f2c72d5f5740e78c626a8d66e5
@property def numpy_dtype(self): 'The NumPy dtype this PandasDtype wraps.' return self._dtype
The NumPy dtype this PandasDtype wraps.
extern_libs/Python27/lib/python2.7/site-packages/pandas/core/arrays/numpy_.py
numpy_dtype
onceawaken/MKL-DNN_Eigen_Boost_OpenMPI_GoogleTests_Examples
6989
python
@property def numpy_dtype(self): return self._dtype
@property def numpy_dtype(self): return self._dtype<|docstring|>The NumPy dtype this PandasDtype wraps.<|endoftext|>
d0551daf5369537e56a9419fbc596457608ccf2143fad7e0f1aae9c70f69a9f7
@property def itemsize(self): 'The element size of this data-type object.' return self._dtype.itemsize
The element size of this data-type object.
extern_libs/Python27/lib/python2.7/site-packages/pandas/core/arrays/numpy_.py
itemsize
onceawaken/MKL-DNN_Eigen_Boost_OpenMPI_GoogleTests_Examples
6989
python
@property def itemsize(self): return self._dtype.itemsize
@property def itemsize(self): return self._dtype.itemsize<|docstring|>The element size of this data-type object.<|endoftext|>
94c9100fb267bc8d3e436e2996ceb11dff2409ee53babe8847c56cead83ac75f
def to_numpy(self, dtype=None, copy=False): '\n Convert the PandasArray to a :class:`numpy.ndarray`.\n\n By default, this requires no coercion or copying of data.\n\n Parameters\n ----------\n dtype : numpy.dtype\n The NumPy dtype to pass to :func:`numpy.asarray`.\n copy : bool, default False\n Whether to copy the underlying data.\n\n Returns\n -------\n ndarray\n ' result = np.asarray(self._ndarray, dtype=dtype) if (copy and (result is self._ndarray)): result = result.copy() return result
Convert the PandasArray to a :class:`numpy.ndarray`. By default, this requires no coercion or copying of data. Parameters ---------- dtype : numpy.dtype The NumPy dtype to pass to :func:`numpy.asarray`. copy : bool, default False Whether to copy the underlying data. Returns ------- ndarray
extern_libs/Python27/lib/python2.7/site-packages/pandas/core/arrays/numpy_.py
to_numpy
onceawaken/MKL-DNN_Eigen_Boost_OpenMPI_GoogleTests_Examples
6,989
python
def to_numpy(self, dtype=None, copy=False): '\n Convert the PandasArray to a :class:`numpy.ndarray`.\n\n By default, this requires no coercion or copying of data.\n\n Parameters\n ----------\n dtype : numpy.dtype\n The NumPy dtype to pass to :func:`numpy.asarray`.\n copy : bool, default False\n Whether to copy the underlying data.\n\n Returns\n -------\n ndarray\n ' result = np.asarray(self._ndarray, dtype=dtype) if (copy and (result is self._ndarray)): result = result.copy() return result
def to_numpy(self, dtype=None, copy=False): '\n Convert the PandasArray to a :class:`numpy.ndarray`.\n\n By default, this requires no coercion or copying of data.\n\n Parameters\n ----------\n dtype : numpy.dtype\n The NumPy dtype to pass to :func:`numpy.asarray`.\n copy : bool, default False\n Whether to copy the underlying data.\n\n Returns\n -------\n ndarray\n ' result = np.asarray(self._ndarray, dtype=dtype) if (copy and (result is self._ndarray)): result = result.copy() return result<|docstring|>Convert the PandasArray to a :class:`numpy.ndarray`. By default, this requires no coercion or copying of data. Parameters ---------- dtype : numpy.dtype The NumPy dtype to pass to :func:`numpy.asarray`. copy : bool, default False Whether to copy the underlying data. Returns ------- ndarray<|endoftext|>
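The `copy` handling above relies on `numpy.asarray` returning its input unchanged when no dtype conversion is needed, so the identity check detects whether a defensive copy is still required. A quick demonstration:

```python
import numpy as np

arr = np.arange(4)
same = np.asarray(arr)                        # no conversion: the same object comes back
print(same is arr)                            # True -> to_numpy copies only if copy=True
converted = np.asarray(arr, dtype=np.float64) # dtype change already made a new array
print(converted is arr)                       # False -> no extra copy needed
```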
f0933f100bb3b8025c0c4ac5276195ef43e3adbbe25e43c7c742f05cfb5e844e
def main(): 'Main program entry point' qApp = createQApp() ee = EventEngine() me = MainEngine(ee) me.addGateway(secGateway) me.addGateway(ctpGateway) me.addGateway(ctpsecGateway) me.addApp(riskManager) me.addApp(optionMaster) mw = MainWindow(me, ee) mw.showMaximized() sys.exit(qApp.exec_())
Main program entry point
examples/OptionMaster/run.py
main
ChetWang1993/vnpy
5
python
def main(): qApp = createQApp() ee = EventEngine() me = MainEngine(ee) me.addGateway(secGateway) me.addGateway(ctpGateway) me.addGateway(ctpsecGateway) me.addApp(riskManager) me.addApp(optionMaster) mw = MainWindow(me, ee) mw.showMaximized() sys.exit(qApp.exec_())
def main(): qApp = createQApp() ee = EventEngine() me = MainEngine(ee) me.addGateway(secGateway) me.addGateway(ctpGateway) me.addGateway(ctpsecGateway) me.addApp(riskManager) me.addApp(optionMaster) mw = MainWindow(me, ee) mw.showMaximized() sys.exit(qApp.exec_())<|docstring|>Main program entry point<|endoftext|>
6f093eb6df3e55a942982c0fe7df9fa9b460683952e621f66e82cdfed7ca3b04
def __repr__(self) -> str: 'Non-literal text representation.' return '<{cls}: {prediction} for pixel ({n_x}|{n_y}) at {start_at}>'.format(cls=self.__class__.__name__, prediction=self.prediction, n_x=self.pixel.n_x, n_y=self.pixel.n_y, start_at=self.start_at)
Non-literal text representation.
src/urban_meal_delivery/db/forecasts.py
__repr__
webartifex/urban-meal-delivery
1
python
def __repr__(self) -> str: return '<{cls}: {prediction} for pixel ({n_x}|{n_y}) at {start_at}>'.format(cls=self.__class__.__name__, prediction=self.prediction, n_x=self.pixel.n_x, n_y=self.pixel.n_y, start_at=self.start_at)
def __repr__(self) -> str: return '<{cls}: {prediction} for pixel ({n_x}|{n_y}) at {start_at}>'.format(cls=self.__class__.__name__, prediction=self.prediction, n_x=self.pixel.n_x, n_y=self.pixel.n_y, start_at=self.start_at)<|docstring|>Non-literal text representation.<|endoftext|>
951d4b7d4d4ccb4db33c7685a3599f42cedb9eb717a1aabacb174ab701cf9c0f
@classmethod def from_dataframe(cls, pixel: db.Pixel, time_step: int, train_horizon: int, model: str, data: pd.DataFrame) -> List[db.Forecast]: 'Convert results from the forecasting `*Model`s into `Forecast` objects.\n\n This is an alternative constructor method.\n\n Background: The functions in `urban_meal_delivery.forecasts.methods`\n return `pd.DataFrame`s with "start_at" (i.e., `pd.Timestamp` objects)\n values in the index and five columns "prediction", "low80", "high80",\n "low95", and "high95" with `np.float` values. The `*Model.predict()`\n methods in `urban_meal_delivery.forecasts.models` then add an "actual"\n column. This constructor converts these results into ORM models.\n Also, the `np.float` values are cast as plain `float` ones as\n otherwise SQLAlchemy and the database would complain.\n\n Args:\n pixel: in which the forecast is made\n time_step: length of one time step in minutes\n train_horizon: length of the training horizon in weeks\n model: name of the forecasting model\n data: a `pd.DataFrame` as described above (i.e.,\n with the six columns holding `float`s)\n\n Returns:\n forecasts: the `data` as `Forecast` objects\n ' forecasts = [] for timestamp_idx in data.index: start_at = timestamp_idx.to_pydatetime() actual = int(data.loc[(timestamp_idx, 'actual')]) prediction = round(data.loc[(timestamp_idx, 'prediction')], 5) low80 = data.loc[(timestamp_idx, 'low80')] high80 = data.loc[(timestamp_idx, 'high80')] low95 = data.loc[(timestamp_idx, 'low95')] high95 = data.loc[(timestamp_idx, 'high95')] if math.isnan(low80): low80 = None else: low80 = round(low80, 5) if math.isnan(high80): high80 = None else: high80 = round(high80, 5) if math.isnan(low95): low95 = None else: low95 = round(low95, 5) if math.isnan(high95): high95 = None else: high95 = round(high95, 5) forecasts.append(cls(pixel=pixel, start_at=start_at, time_step=time_step, train_horizon=train_horizon, model=model, actual=actual, prediction=prediction, low80=low80, high80=high80, low95=low95, high95=high95)) return forecasts
Convert results from the forecasting `*Model`s into `Forecast` objects. This is an alternative constructor method. Background: The functions in `urban_meal_delivery.forecasts.methods` return `pd.DataFrame`s with "start_at" (i.e., `pd.Timestamp` objects) values in the index and five columns "prediction", "low80", "high80", "low95", and "high95" with `np.float` values. The `*Model.predict()` methods in `urban_meal_delivery.forecasts.models` then add an "actual" column. This constructor converts these results into ORM models. Also, the `np.float` values are cast as plain `float` ones as otherwise SQLAlchemy and the database would complain. Args: pixel: in which the forecast is made time_step: length of one time step in minutes train_horizon: length of the training horizon in weeks model: name of the forecasting model data: a `pd.DataFrame` as described above (i.e., with the six columns holding `float`s) Returns: forecasts: the `data` as `Forecast` objects
src/urban_meal_delivery/db/forecasts.py
from_dataframe
webartifex/urban-meal-delivery
1
python
@classmethod def from_dataframe(cls, pixel: db.Pixel, time_step: int, train_horizon: int, model: str, data: pd.DataFrame) -> List[db.Forecast]: 'Convert results from the forecasting `*Model`s into `Forecast` objects.\n\n This is an alternative constructor method.\n\n Background: The functions in `urban_meal_delivery.forecasts.methods`\n return `pd.DataFrame`s with "start_at" (i.e., `pd.Timestamp` objects)\n values in the index and five columns "prediction", "low80", "high80",\n "low95", and "high95" with `np.float` values. The `*Model.predict()`\n methods in `urban_meal_delivery.forecasts.models` then add an "actual"\n column. This constructor converts these results into ORM models.\n Also, the `np.float` values are cast as plain `float` ones as\n otherwise SQLAlchemy and the database would complain.\n\n Args:\n pixel: in which the forecast is made\n time_step: length of one time step in minutes\n train_horizon: length of the training horizon in weeks\n model: name of the forecasting model\n data: a `pd.DataFrame` as described above (i.e.,\n with the six columns holding `float`s)\n\n Returns:\n forecasts: the `data` as `Forecast` objects\n ' forecasts = [] for timestamp_idx in data.index: start_at = timestamp_idx.to_pydatetime() actual = int(data.loc[(timestamp_idx, 'actual')]) prediction = round(data.loc[(timestamp_idx, 'prediction')], 5) low80 = data.loc[(timestamp_idx, 'low80')] high80 = data.loc[(timestamp_idx, 'high80')] low95 = data.loc[(timestamp_idx, 'low95')] high95 = data.loc[(timestamp_idx, 'high95')] if math.isnan(low80): low80 = None else: low80 = round(low80, 5) if math.isnan(high80): high80 = None else: high80 = round(high80, 5) if math.isnan(low95): low95 = None else: low95 = round(low95, 5) if math.isnan(high95): high95 = None else: high95 = round(high95, 5) forecasts.append(cls(pixel=pixel, start_at=start_at, time_step=time_step, train_horizon=train_horizon, model=model, actual=actual, prediction=prediction, low80=low80, high80=high80, low95=low95, high95=high95)) return forecasts
@classmethod def from_dataframe(cls, pixel: db.Pixel, time_step: int, train_horizon: int, model: str, data: pd.DataFrame) -> List[db.Forecast]: 'Convert results from the forecasting `*Model`s into `Forecast` objects.\n\n This is an alternative constructor method.\n\n Background: The functions in `urban_meal_delivery.forecasts.methods`\n return `pd.DataFrame`s with "start_at" (i.e., `pd.Timestamp` objects)\n values in the index and five columns "prediction", "low80", "high80",\n "low95", and "high95" with `np.float` values. The `*Model.predict()`\n methods in `urban_meal_delivery.forecasts.models` then add an "actual"\n column. This constructor converts these results into ORM models.\n Also, the `np.float` values are cast as plain `float` ones as\n otherwise SQLAlchemy and the database would complain.\n\n Args:\n pixel: in which the forecast is made\n time_step: length of one time step in minutes\n train_horizon: length of the training horizon in weeks\n model: name of the forecasting model\n data: a `pd.DataFrame` as described above (i.e.,\n with the six columns holding `float`s)\n\n Returns:\n forecasts: the `data` as `Forecast` objects\n ' forecasts = [] for timestamp_idx in data.index: start_at = timestamp_idx.to_pydatetime() actual = int(data.loc[(timestamp_idx, 'actual')]) prediction = round(data.loc[(timestamp_idx, 'prediction')], 5) low80 = data.loc[(timestamp_idx, 'low80')] high80 = data.loc[(timestamp_idx, 'high80')] low95 = data.loc[(timestamp_idx, 'low95')] high95 = data.loc[(timestamp_idx, 'high95')] if math.isnan(low80): low80 = None else: low80 = round(low80, 5) if math.isnan(high80): high80 = None else: high80 = round(high80, 5) if math.isnan(low95): low95 = None else: low95 = round(low95, 5) if math.isnan(high95): high95 = None else: high95 = round(high95, 5) forecasts.append(cls(pixel=pixel, start_at=start_at, time_step=time_step, train_horizon=train_horizon, model=model, actual=actual, prediction=prediction, low80=low80, high80=high80, low95=low95, high95=high95)) return forecasts<|docstring|>Convert results from the forecasting `*Model`s into `Forecast` objects. This is an alternative constructor method. Background: The functions in `urban_meal_delivery.forecasts.methods` return `pd.DataFrame`s with "start_at" (i.e., `pd.Timestamp` objects) values in the index and five columns "prediction", "low80", "high80", "low95", and "high95" with `np.float` values. The `*Model.predict()` methods in `urban_meal_delivery.forecasts.models` then add an "actual" column. This constructor converts these results into ORM models. Also, the `np.float` values are cast as plain `float` ones as otherwise SQLAlchemy and the database would complain. Args: pixel: in which the forecast is made time_step: length of one time step in minutes train_horizon: length of the training horizon in weeks model: name of the forecasting model data: a `pd.DataFrame` as described above (i.e., with the six columns holding `float`s) Returns: forecasts: the `data` as `Forecast` objects<|endoftext|>
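The four identical `isnan`/`round` blocks in `from_dataframe` could collapse into one helper; a small sketch of that refactor (the helper name is hypothetical):

```python
import math

def _round_or_none(value, ndigits=5):
    # None keeps SQLAlchemy happy where a forecast interval bound is undefined (NaN).
    return None if math.isnan(value) else round(value, ndigits)

print(_round_or_none(float('nan')))  # None
print(_round_or_none(3.1415926))     # 3.14159
```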
61018fd11306fa2939e2783bd5b428982893768619022a82bc19bc812295dec1
def result_noraise(future, flat=True): 'Extracts result from future, never raising an exception.\n\n If `flat` is True -- returns result or exception instance (including\n CancelledError), if `flat` is False -- returns tuple of (`result`,\n `exception` object).\n\n If traceback is needed -- just re-raise returned exception.' try: res = future.result() return (res if flat else (res, None)) except Exception as exc: return (exc if flat else (None, exc))
Extracts result from future, never raising an exception. If `flat` is True -- returns result or exception instance (including CancelledError), if `flat` is False -- returns tuple of (`result`, `exception` object). If traceback is needed -- just re-raise returned exception.
asyncio_pool/results.py
result_noraise
jtojnar/asyncio-pool
0
python
def result_noraise(future, flat=True): 'Extracts result from future, never raising an exception.\n\n If `flat` is True -- returns result or exception instance (including\n CancelledError), if `flat` is False -- returns tuple of (`result`,\n `exception` object).\n\n If traceback is needed -- just re-raise returned exception.' try: res = future.result() return (res if flat else (res, None)) except Exception as exc: return (exc if flat else (None, exc))
def result_noraise(future, flat=True): 'Extracts result from future, never raising an exception.\n\n If `flat` is True -- returns result or exception instance (including\n CancelledError), if `flat` is False -- returns tuple of (`result`,\n `exception` object).\n\n If traceback is needed -- just re-raise returned exception.' try: res = future.result() return (res if flat else (res, None)) except Exception as exc: return (exc if flat else (None, exc))<|docstring|>Extracts result from future, never raising an exception. If `flat` is True -- returns result or exception instance (including CancelledError), if `flat` is False -- returns tuple of (`result`, `exception` object). If traceback is needed -- just re-raise returned exception.<|endoftext|>
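A runnable sketch of the helper above applied to one succeeding and one failing task. One caveat: since Python 3.8, `asyncio.CancelledError` subclasses `BaseException`, so the `except Exception` clause no longer catches cancellation the way the docstring suggests.

```python
import asyncio

def result_noraise(future, flat=True):
    # Same logic as the row above: return the exception instead of raising it.
    try:
        res = future.result()
        return res if flat else (res, None)
    except Exception as exc:
        return exc if flat else (None, exc)

async def ok():
    return 42

async def boom():
    raise ValueError('nope')

async def main():
    tasks = [asyncio.ensure_future(c()) for c in (ok, boom)]
    await asyncio.gather(*tasks, return_exceptions=True)
    for t in tasks:
        print(result_noraise(t, flat=False))
    # (42, None)
    # (None, ValueError('nope'))

asyncio.run(main())
```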
30f97c3d1b8643decd6523bec6f8b2e0bfd7e29d9f31fda66ead3e889ef5b96a
def test_not_a_git_repo(): " Run 'git up' being not on a git repo " os.chdir(repo_path) from PyGitUp.gitup import GitUp with pytest.raises(GitError): GitUp(testing=True)
Run 'git up' being not on a git repo
PyGitUp/tests/test_not_on_a_git_repo.py
test_not_a_git_repo
hugovk/PyGitUp
431
python
def test_not_a_git_repo(): os.chdir(repo_path) from PyGitUp.gitup import GitUp with pytest.raises(GitError): GitUp(testing=True)
def test_not_a_git_repo(): os.chdir(repo_path) from PyGitUp.gitup import GitUp with pytest.raises(GitError): GitUp(testing=True)<|docstring|>Run 'git up' being not on a git repo<|endoftext|>
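The test above is the standard `pytest.raises` context-manager pattern; a generic, self-contained version for reference:

```python
import pytest

def divide(a, b):
    return a / b

def test_divide_by_zero():
    # The test passes only if the expected exception is actually raised inside the block.
    with pytest.raises(ZeroDivisionError):
        divide(1, 0)
```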
9ded6f9d9591505e951055063978d790bf0e11819aae08fec30420713c1fc42a
def create(self, request): ' First check that an authorized user posted the request. Then validate the API request body. Next convert\n the request body into a format suitable for the database. Finally, store the new OLTPBench test result in the\n database. ' user = BasicAuthentication().authenticate(request) if (user is None): logger.debug('Invalid authentication') return Response({'message': 'Forbidden'}, status=status.HTTP_403_FORBIDDEN) data = JSONParser().parse(request) api_serializer = OLTPBenchSerializer(data=data) if (not api_serializer.is_valid()): logger.debug(f'Bad Request: {api_serializer.errors}') return Response(api_serializer.errors, status=status.HTTP_400_BAD_REQUEST) api_serializer.save() db_serializer = OLTPBenchResultSerializer(data=api_serializer.instance.convert_to_db_json()) db_serializer.smudge_timestamp() if (not db_serializer.is_valid()): return Response(db_serializer.errors, status=status.HTTP_500_INTERNAL_SERVER_ERROR) try: db_serializer.save() except Exception as err: logger.error(f'OLTPBenchViewSet create failed: {err}') return Response({'message': str(err)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR) return Response(api_serializer.validated_data, status=status.HTTP_201_CREATED)
First check that an authorized user posted the request. Then validate the API request body. Next convert the request body into a format suitable for the database. Finally, store the new OLTPBench test result in the database.
performance-storage-service/pss_project/api/views/oltpbench.py
create
cmu-db/noisepage-stats
23
python
def create(self, request): ' First check that an authorized user posted the request. Then validate the API request body. Next convert\n the request body into a format suitable for the database. Finally, store the new OLTPBench test result in the\n database. ' user = BasicAuthentication().authenticate(request) if (user is None): logger.debug('Invalid authentication') return Response({'message': 'Forbidden'}, status=status.HTTP_403_FORBIDDEN) data = JSONParser().parse(request) api_serializer = OLTPBenchSerializer(data=data) if (not api_serializer.is_valid()): logger.debug(f'Bad Request: {api_serializer.errors}') return Response(api_serializer.errors, status=status.HTTP_400_BAD_REQUEST) api_serializer.save() db_serializer = OLTPBenchResultSerializer(data=api_serializer.instance.convert_to_db_json()) db_serializer.smudge_timestamp() if (not db_serializer.is_valid()): return Response(db_serializer.errors, status=status.HTTP_500_INTERNAL_SERVER_ERROR) try: db_serializer.save() except Exception as err: logger.error(f'OLTPBenchViewSet create failed: {err}') return Response({'message': str(err)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR) return Response(api_serializer.validated_data, status=status.HTTP_201_CREATED)
def create(self, request): ' First check that an authorized user posted the request. Then validate the API request body. Next convert\n the request body into a format suitable for the database. Finally, store the new OLTPBench test result in the\n database. ' user = BasicAuthentication().authenticate(request) if (user is None): logger.debug('Invalid authentication') return Response({'message': 'Forbidden'}, status=status.HTTP_403_FORBIDDEN) data = JSONParser().parse(request) api_serializer = OLTPBenchSerializer(data=data) if (not api_serializer.is_valid()): logger.debug(f'Bad Request: {api_serializer.errors}') return Response(api_serializer.errors, status=status.HTTP_400_BAD_REQUEST) api_serializer.save() db_serializer = OLTPBenchResultSerializer(data=api_serializer.instance.convert_to_db_json()) db_serializer.smudge_timestamp() if (not db_serializer.is_valid()): return Response(db_serializer.errors, status=status.HTTP_500_INTERNAL_SERVER_ERROR) try: db_serializer.save() except Exception as err: logger.error(f'OLTPBenchViewSet create failed: {err}') return Response({'message': str(err)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR) return Response(api_serializer.validated_data, status=status.HTTP_201_CREATED)<|docstring|>First check that an authorized user posted the request. Then validate the API request body. Next convert the request body into a format suitable for the database. Finally, store the new OLTPBench test result in the database.<|endoftext|>
4e28ba1d336dc8971982d969e2fb3a5d4b4fd259478047127b03de0fe9604303
def mish(x): 'Mish: A Self Regularized Non-Monotonic Neural Activation Function (https://arxiv.org/abs/1908.08681)' return (x * nn.Tanh()(f.softplus(x)))
Mish: A Self Regularized Non-Monotonic Neural Activation Function (https://arxiv.org/abs/1908.08681)
models/wideresnet.py
mish
WangChen0902/FixMatch-Paddle
0
python
def mish(x): return (x * nn.Tanh()(f.softplus(x)))
def mish(x): return (x * nn.Tanh()(f.softplus(x)))<|docstring|>Mish: A Self Regularized Non-Monotonic Neural Activation Function (https://arxiv.org/abs/1908.08681)<|endoftext|>
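The row's `mish` comes from a Paddle codebase (`nn.Tanh()(f.softplus(x))`); a framework-free NumPy sketch of the same formula, mish(x) = x * tanh(softplus(x)):

```python
import numpy as np

def mish(x):
    # softplus(x) = log(1 + e^x); log1p keeps precision for small x.
    return x * np.tanh(np.log1p(np.exp(x)))

print(mish(np.array([-2.0, 0.0, 2.0])))  # [-0.2525  0.      1.944 ]
```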
60b6c7187a373b529a9c59d6d2b57dccc02aa13ed6b70f14bca3af7206e2830a
def chebyshev_nodes(n: int) -> numpy.ndarray: 'Generate N Chebyshev nodes in the range [-1,1].' d = (math.pi * 0.5) return numpy.sin(numpy.linspace((- d), d, ((2 * n) + 1))[1::2])
Generate N Chebyshev nodes in the range [-1,1].
math/coeffs/calc.py
chebyshev_nodes
depp/ultrafxr
9
python
def chebyshev_nodes(n: int) -> numpy.ndarray: d = (math.pi * 0.5) return numpy.sin(numpy.linspace((- d), d, ((2 * n) + 1))[1::2])
def chebyshev_nodes(n: int) -> numpy.ndarray: d = (math.pi * 0.5) return numpy.sin(numpy.linspace((- d), d, ((2 * n) + 1))[1::2])<|docstring|>Generate N Chebyshev nodes in the range [-1,1].<|endoftext|>
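As a sanity check of the node construction above: for n = 3 the sine-spaced points reduce to the classical Chebyshev nodes cos((2k - 1)π/6):

```python
import math
import numpy

def chebyshev_nodes(n: int) -> numpy.ndarray:
    # Sine spacing gives points symmetric about 0, exactly as in the row above.
    d = math.pi * 0.5
    return numpy.sin(numpy.linspace(-d, d, 2 * n + 1)[1::2])

print(chebyshev_nodes(3))  # [-0.8660254  0.         0.8660254]
```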
347dcf97c1fc90ed7b66db4b8b328cdb6989c5df028d64dcc34a708d371b16c2
def rescale(x, xrange): 'Rescale the x array so it covers xrange exactly.' (x0, x1) = xrange xmin = numpy.min(x) xmax = numpy.max(x) xspan = (xmax - xmin) return (((x - xmin) * (x1 / xspan)) + ((xmax - x) * (x0 / xspan)))
Rescale the x array so it covers xrange exactly.
math/coeffs/calc.py
rescale
depp/ultrafxr
9
python
def rescale(x, xrange): (x0, x1) = xrange xmin = numpy.min(x) xmax = numpy.max(x) xspan = (xmax - xmin) return (((x - xmin) * (x1 / xspan)) + ((xmax - x) * (x0 / xspan)))
def rescale(x, xrange): (x0, x1) = xrange xmin = numpy.min(x) xmax = numpy.max(x) xspan = (xmax - xmin) return (((x - xmin) * (x1 / xspan)) + ((xmax - x) * (x0 / xspan)))<|docstring|>Rescale the x array so it covers xrange exactly.<|endoftext|>
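And a check that `rescale` hits the target endpoints exactly, as the coefficient solvers below depend on:

```python
import numpy

def rescale(x, xrange):
    # Affine map sending [min(x), max(x)] onto [x0, x1] exactly.
    x0, x1 = xrange
    xmin, xmax = numpy.min(x), numpy.max(x)
    xspan = xmax - xmin
    return (x - xmin) * (x1 / xspan) + (xmax - x) * (x0 / xspan)

print(rescale(numpy.array([-0.9, 0.0, 0.9]), (0.0, 0.25)))  # [0.    0.125 0.25 ]
```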
65af6aa0907c7711ca2f2a221f539a4051a7c3efc08fc30fca3e79bd8e978d77
@function(name='exp2', min_order=2) def exp2_coeffs(order: int) -> numpy.ndarray: 'Coefficients for 2^x on (-0.5, 0.5).\n \n Coefficients are chosen to minimize maximum equivalent input error.\n ' xrange = ((- 0.5), 0.5) (x0, x1) = xrange signs = numpy.zeros(((order + 2),)) signs[0::2] = 1 signs[1::2] = (- 1) x = chebyshev_nodes((order + 2)) x = rescale(x, xrange) y = numpy.exp2(x) last_rel_err = math.inf last_poly_coeffs = None for i in range(100): error_coeffs = (signs * y) lin_coeffs = numpy.append(numpy.power(x[:, None], numpy.arange(0, (order + 1))[None, :]), error_coeffs.reshape(((order + 2), 1)), axis=1) poly_coeffs = numpy.linalg.solve(lin_coeffs, y) poly_coeffs = poly_coeffs[:(- 1)] rel_coeffs = (numpy.log(2) * poly_coeffs) rel_coeffs[:(- 1)] -= (numpy.arange(1, (order + 1)) * poly_coeffs[1:]) roots = numpy.roots(rel_coeffs[::(- 1)]) if numpy.any(numpy.iscomplex(roots)): raise SolverError('Roots are complex') roots.sort() if ((numpy.min(roots) <= x0) or (x1 <= numpy.max(roots))): raise SolverError('Roots are too large') x[0] = x0 x[1:(- 1)] = roots x[(- 1)] = x1 y = numpy.exp2(x) rel_err = numpy.max(numpy.abs(((polynomial.Polynomial(poly_coeffs)(x) - y) / y))) if (not math.isinf(last_rel_err)): improvement = ((last_rel_err - rel_err) / last_rel_err) if (improvement <= 0): (rel_err, poly_coeffs) = (last_rel_err, last_poly_coeffs) break elif (improvement < 1e-06): break last_rel_err = rel_err last_poly_coeffs = poly_coeffs return poly_coeffs
Coefficients for 2^x on (-0.5, 0.5). Coefficients are chosen to minimize maximum equivalent input error.
math/coeffs/calc.py
exp2_coeffs
depp/ultrafxr
9
python
@function(name='exp2', min_order=2) def exp2_coeffs(order: int) -> numpy.ndarray: 'Coefficients for 2^x on (-0.5, 0.5).\n \n Coefficients are chosen to minimize maximum equivalent input error.\n ' xrange = ((- 0.5), 0.5) (x0, x1) = xrange signs = numpy.zeros(((order + 2),)) signs[0::2] = 1 signs[1::2] = (- 1) x = chebyshev_nodes((order + 2)) x = rescale(x, xrange) y = numpy.exp2(x) last_rel_err = math.inf last_poly_coeffs = None for i in range(100): error_coeffs = (signs * y) lin_coeffs = numpy.append(numpy.power(x[:, None], numpy.arange(0, (order + 1))[None, :]), error_coeffs.reshape(((order + 2), 1)), axis=1) poly_coeffs = numpy.linalg.solve(lin_coeffs, y) poly_coeffs = poly_coeffs[:(- 1)] rel_coeffs = (numpy.log(2) * poly_coeffs) rel_coeffs[:(- 1)] -= (numpy.arange(1, (order + 1)) * poly_coeffs[1:]) roots = numpy.roots(rel_coeffs[::(- 1)]) if numpy.any(numpy.iscomplex(roots)): raise SolverError('Roots are complex') roots.sort() if ((numpy.min(roots) <= x0) or (x1 <= numpy.max(roots))): raise SolverError('Roots are too large') x[0] = x0 x[1:(- 1)] = roots x[(- 1)] = x1 y = numpy.exp2(x) rel_err = numpy.max(numpy.abs(((polynomial.Polynomial(poly_coeffs)(x) - y) / y))) if (not math.isinf(last_rel_err)): improvement = ((last_rel_err - rel_err) / last_rel_err) if (improvement <= 0): (rel_err, poly_coeffs) = (last_rel_err, last_poly_coeffs) break elif (improvement < 1e-06): break last_rel_err = rel_err last_poly_coeffs = poly_coeffs return poly_coeffs
@function(name='exp2', min_order=2) def exp2_coeffs(order: int) -> numpy.ndarray: 'Coefficients for 2^x on (-0.5, 0.5).\n \n Coefficients are chosen to minimize maximum equivalent input error.\n ' xrange = ((- 0.5), 0.5) (x0, x1) = xrange signs = numpy.zeros(((order + 2),)) signs[0::2] = 1 signs[1::2] = (- 1) x = chebyshev_nodes((order + 2)) x = rescale(x, xrange) y = numpy.exp2(x) last_rel_err = math.inf last_poly_coeffs = None for i in range(100): error_coeffs = (signs * y) lin_coeffs = numpy.append(numpy.power(x[:, None], numpy.arange(0, (order + 1))[None, :]), error_coeffs.reshape(((order + 2), 1)), axis=1) poly_coeffs = numpy.linalg.solve(lin_coeffs, y) poly_coeffs = poly_coeffs[:(- 1)] rel_coeffs = (numpy.log(2) * poly_coeffs) rel_coeffs[:(- 1)] -= (numpy.arange(1, (order + 1)) * poly_coeffs[1:]) roots = numpy.roots(rel_coeffs[::(- 1)]) if numpy.any(numpy.iscomplex(roots)): raise SolverError('Roots are complex') roots.sort() if ((numpy.min(roots) <= x0) or (x1 <= numpy.max(roots))): raise SolverError('Roots are too large') x[0] = x0 x[1:(- 1)] = roots x[(- 1)] = x1 y = numpy.exp2(x) rel_err = numpy.max(numpy.abs(((polynomial.Polynomial(poly_coeffs)(x) - y) / y))) if (not math.isinf(last_rel_err)): improvement = ((last_rel_err - rel_err) / last_rel_err) if (improvement <= 0): (rel_err, poly_coeffs) = (last_rel_err, last_poly_coeffs) break elif (improvement < 1e-06): break last_rel_err = rel_err last_poly_coeffs = poly_coeffs return poly_coeffs<|docstring|>Coefficients for 2^x on (-0.5, 0.5). Coefficients are chosen to minimize maximum equivalent input error.<|endoftext|>
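`exp2_coeffs` runs a Remez-style exchange: solve for an equioscillating polynomial, move the sample points to the extrema of the relative error (roots of its derivative), and repeat until improvement stalls. The returned array holds ascending-order coefficients; a sketch of how they would be consumed, with Taylor coefficients as a hypothetical stand-in for the solver's output:

```python
import numpy
from numpy.polynomial import polynomial

# Stand-in ascending-order coefficients for 2^x; the real ones come from exp2_coeffs(order).
log2 = numpy.log(2)
coeffs = [1.0, log2, log2 ** 2 / 2, log2 ** 3 / 6]

x = numpy.linspace(-0.5, 0.5, 101)
approx = polynomial.polyval(x, coeffs)
print(numpy.max(numpy.abs((approx - numpy.exp2(x)) / numpy.exp2(x))))  # max relative error
```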
a1be48fb0a8535bf06ebd9002e48da431328d1fd1f18f2c2e30e8d87c3a8a3e6
@function(name='sin1_smooth', min_order=1) def sin1_smooth_coeffs(order: int) -> numpy.ndarray: 'Coefficients for sin(2 pi x) on (-0.25, 0.25).\n\n Coefficients are chosen to make higher order derivatives smooth. Only\n odd-numbered coefficients are included.\n ' mat_coeffs = numpy.zeros((order, order)) vec_coeffs = numpy.zeros(order) poly = numpy.ones((order,)) powers = ((numpy.arange(order) * 2) + 1) for n in range((order - 1)): poly *= powers powers -= 1 mat_coeffs[n] = poly poly *= powers powers -= 1 mat_coeffs[(order - 1)] = 1 vec_coeffs[(order - 1)] = 1 poly_coeffs = numpy.linalg.solve(mat_coeffs, vec_coeffs) poly_coeffs *= (4 ** numpy.arange(1, ((2 * order) + 1), 2)) return poly_coeffs
Coefficients for sin(2 pi x) on (-0.25, 0.25). Coefficients are chosen to make higher order derivatives smooth. Only odd-numbered coefficients are included.
math/coeffs/calc.py
sin1_smooth_coeffs
depp/ultrafxr
9
python
@function(name='sin1_smooth', min_order=1) def sin1_smooth_coeffs(order: int) -> numpy.ndarray: 'Coefficients for sin(2 pi x) on (-0.25, 0.25).\n\n Coefficients are chosen to make higher order derivatives smooth. Only\n odd-numbered coefficients are included.\n ' mat_coeffs = numpy.zeros((order, order)) vec_coeffs = numpy.zeros(order) poly = numpy.ones((order,)) powers = ((numpy.arange(order) * 2) + 1) for n in range((order - 1)): poly *= powers powers -= 1 mat_coeffs[n] = poly poly *= powers powers -= 1 mat_coeffs[(order - 1)] = 1 vec_coeffs[(order - 1)] = 1 poly_coeffs = numpy.linalg.solve(mat_coeffs, vec_coeffs) poly_coeffs *= (4 ** numpy.arange(1, ((2 * order) + 1), 2)) return poly_coeffs
@function(name='sin1_smooth', min_order=1) def sin1_smooth_coeffs(order: int) -> numpy.ndarray: 'Coefficients for sin(2 pi x) on (-0.25, 0.25).\n\n Coefficients are chosen to make higher order derivatives smooth. Only\n odd-numbered coefficients are included.\n ' mat_coeffs = numpy.zeros((order, order)) vec_coeffs = numpy.zeros(order) poly = numpy.ones((order,)) powers = ((numpy.arange(order) * 2) + 1) for n in range((order - 1)): poly *= powers powers -= 1 mat_coeffs[n] = poly poly *= powers powers -= 1 mat_coeffs[(order - 1)] = 1 vec_coeffs[(order - 1)] = 1 poly_coeffs = numpy.linalg.solve(mat_coeffs, vec_coeffs) poly_coeffs *= (4 ** numpy.arange(1, ((2 * order) + 1), 2)) return poly_coeffs<|docstring|>Coefficients for sin(2 pi x) on (-0.25, 0.25). Coefficients are chosen to make higher order derivatives smooth. Only odd-numbered coefficients are included.<|endoftext|>
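Because `sin1_smooth_coeffs` returns only odd-power coefficients, evaluation expands them against x, x³, x⁵, and so on. For order = 1 the construction above yields exactly [4.0], i.e. the approximation 4x, which matches sin(2πx) at x = 0 and x = ±0.25:

```python
import numpy

coeffs = numpy.array([4.0])  # sin1_smooth_coeffs(1), by the construction above
x = numpy.array([0.0, 0.125, 0.25])
powers = x[:, None] ** numpy.arange(1, 2 * len(coeffs) + 1, 2)[None, :]
print(powers @ coeffs)               # [0.   0.5  1. ]
print(numpy.sin(2 * numpy.pi * x))   # [0.   0.707 1. ]
```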
bdf777076cf6cb1d872a0c9b550c28da870018df7a5748cc4eb83882f6691320
@function(name='sin1_l1', min_order=2) def sin1_l1_coeffs(order: int) -> numpy.ndarray: 'Coefficients for sin(2 pi x) on (0, 0.25).\n\n Constant coefficient is chosen to be zero, and omitted from result. Maximum\n error is minimized.\n ' signs = numpy.zeros(((order + 2),)) signs[0::2] = 1 signs[1::2] = (- 1) signs[0] = 0 x = chebyshev_nodes((order + 2)) x = rescale(x, (0.0, 0.25)) tau = (2 * numpy.pi) last_error = math.inf last_poly_coeffs = None for _ in range(100): lin_coeffs = numpy.append(numpy.power(x[:, None], numpy.arange(0, (order + 1))[None, :]), signs[:, None], axis=1) poly_coeffs = numpy.linalg.solve(lin_coeffs, numpy.sin((tau * x)))[:(- 1)] poly_coeffs[0] = 0 extrema = x[1:(- 1)].copy() dpoly_coeffs = (poly_coeffs[1:] * numpy.arange(1, (order + 1))) ddpoly_coeffs = (dpoly_coeffs[1:] * numpy.arange(1, order)) for _ in range(10): powers = numpy.power(extrema[:, None], numpy.arange(0, order)[None, :]) fx = ((powers @ dpoly_coeffs) - (tau * numpy.cos((tau * extrema)))) dfx = ((powers[:, :(- 1)] @ ddpoly_coeffs) + ((tau * tau) * numpy.sin((tau * extrema)))) deltax = (fx / dfx) extrema -= deltax maxdelta = numpy.max(numpy.abs(deltax)) if (maxdelta < 1e-10): break x[1:(- 1)] = extrema if (not numpy.all((x[:(- 1)] < x[1:]))): raise SolverError('extrema not ascending') powers = numpy.power(extrema[:, None], numpy.arange(0, (order + 1))[None, :]) error = numpy.max(numpy.abs(((powers @ poly_coeffs) - numpy.sin((tau * extrema))))) if (not math.isinf(last_error)): improvement = ((last_error - error) / last_error) if (improvement <= 0): (error, poly_coeffs) = (last_error, last_poly_coeffs) break elif (improvement < 1e-06): break last_error = error last_poly_coeffs = poly_coeffs return poly_coeffs[1:]
Coefficients for sin(2 pi x) on (0, 0.25). Constant coefficient is chosen to be zero, and omitted from result. Maximum error is minimized.
math/coeffs/calc.py
sin1_l1_coeffs
depp/ultrafxr
9
python
@function(name='sin1_l1', min_order=2) def sin1_l1_coeffs(order: int) -> numpy.ndarray: 'Coefficients for sin(2 pi x) on (0, 0.25).\n\n Constant coefficient is chosen to be zero, and omitted from result. Maximum\n error is minimized.\n ' signs = numpy.zeros(((order + 2),)) signs[0::2] = 1 signs[1::2] = (- 1) signs[0] = 0 x = chebyshev_nodes((order + 2)) x = rescale(x, (0.0, 0.25)) tau = (2 * numpy.pi) last_error = math.inf last_poly_coeffs = None for _ in range(100): lin_coeffs = numpy.append(numpy.power(x[:, None], numpy.arange(0, (order + 1))[None, :]), signs[:, None], axis=1) poly_coeffs = numpy.linalg.solve(lin_coeffs, numpy.sin((tau * x)))[:(- 1)] poly_coeffs[0] = 0 extrema = x[1:(- 1)].copy() dpoly_coeffs = (poly_coeffs[1:] * numpy.arange(1, (order + 1))) ddpoly_coeffs = (dpoly_coeffs[1:] * numpy.arange(1, order)) for _ in range(10): powers = numpy.power(extrema[:, None], numpy.arange(0, order)[None, :]) fx = ((powers @ dpoly_coeffs) - (tau * numpy.cos((tau * extrema)))) dfx = ((powers[:, :(- 1)] @ ddpoly_coeffs) + ((tau * tau) * numpy.sin((tau * extrema)))) deltax = (fx / dfx) extrema -= deltax maxdelta = numpy.max(numpy.abs(deltax)) if (maxdelta < 1e-10): break x[1:(- 1)] = extrema if (not numpy.all((x[:(- 1)] < x[1:]))): raise SolverError('extrema not ascending') powers = numpy.power(extrema[:, None], numpy.arange(0, (order + 1))[None, :]) error = numpy.max(numpy.abs(((powers @ poly_coeffs) - numpy.sin((tau * extrema))))) if (not math.isinf(last_error)): improvement = ((last_error - error) / last_error) if (improvement <= 0): (error, poly_coeffs) = (last_error, last_poly_coeffs) break elif (improvement < 1e-06): break last_error = error last_poly_coeffs = poly_coeffs return poly_coeffs[1:]
@function(name='sin1_l1', min_order=2) def sin1_l1_coeffs(order: int) -> numpy.ndarray: 'Coefficients for sin(2 pi x) on (0, 0.25).\n\n Constant coefficient is chosen to be zero, and omitted from result. Maximum\n error is minimized.\n ' signs = numpy.zeros(((order + 2),)) signs[0::2] = 1 signs[1::2] = (- 1) signs[0] = 0 x = chebyshev_nodes((order + 2)) x = rescale(x, (0.0, 0.25)) tau = (2 * numpy.pi) last_error = math.inf last_poly_coeffs = None for _ in range(100): lin_coeffs = numpy.append(numpy.power(x[:, None], numpy.arange(0, (order + 1))[None, :]), signs[:, None], axis=1) poly_coeffs = numpy.linalg.solve(lin_coeffs, numpy.sin((tau * x)))[:(- 1)] poly_coeffs[0] = 0 extrema = x[1:(- 1)].copy() dpoly_coeffs = (poly_coeffs[1:] * numpy.arange(1, (order + 1))) ddpoly_coeffs = (dpoly_coeffs[1:] * numpy.arange(1, order)) for _ in range(10): powers = numpy.power(extrema[:, None], numpy.arange(0, order)[None, :]) fx = ((powers @ dpoly_coeffs) - (tau * numpy.cos((tau * extrema)))) dfx = ((powers[:, :(- 1)] @ ddpoly_coeffs) + ((tau * tau) * numpy.sin((tau * extrema)))) deltax = (fx / dfx) extrema -= deltax maxdelta = numpy.max(numpy.abs(deltax)) if (maxdelta < 1e-10): break x[1:(- 1)] = extrema if (not numpy.all((x[:(- 1)] < x[1:]))): raise SolverError('extrema not ascending') powers = numpy.power(extrema[:, None], numpy.arange(0, (order + 1))[None, :]) error = numpy.max(numpy.abs(((powers @ poly_coeffs) - numpy.sin((tau * extrema))))) if (not math.isinf(last_error)): improvement = ((last_error - error) / last_error) if (improvement <= 0): (error, poly_coeffs) = (last_error, last_poly_coeffs) break elif (improvement < 1e-06): break last_error = error last_poly_coeffs = poly_coeffs return poly_coeffs[1:]<|docstring|>Coefficients for sin(2 pi x) on (0, 0.25). Constant coefficient is chosen to be zero, and omitted from result. Maximum error is minimized.<|endoftext|>
02df1f1bdccf046719f93f77e35f08b2d530f97bfcd0f702696616a67ea867d3
def create_cell_conv(input_nodes): 'Create a cell with convolution.\n\n Args:\n input_nodes (list(Node)): a list of input_nodes for this cell.\n\n Returns:\n Cell: the corresponding cell.\n ' cell = Cell(input_nodes) n1 = ConstantNode(op=Conv1D(filter_size=20, num_filters=128), name='N1') cell.graph.add_edge(input_nodes[0], n1) n2 = ConstantNode(op=Activation(activation='relu'), name='N2') n3 = ConstantNode(op=MaxPooling1D(pool_size=1, padding='same'), name='N3') n4 = ConstantNode(op=Conv1D(filter_size=10, num_filters=128), name='N4') n5 = ConstantNode(op=Activation(activation='relu'), name='N5') n6 = ConstantNode(op=MaxPooling1D(pool_size=10, padding='same'), name='N6') n7 = ConstantNode(op=Flatten(), name='N7') n8 = ConstantNode(op=Dense(units=200), name='N8') n9 = ConstantNode(op=Activation(activation='relu'), name='N9') n10 = ConstantNode(op=Dropout(rate=0.1), name='N10') n11 = ConstantNode(op=Dense(units=20), name='N11') n12 = ConstantNode(op=Activation(activation='relu'), name='N12') n13 = ConstantNode(op=Dropout(rate=0.1), name='N13') block = Block() block.add_node(n1) block.add_node(n2) block.add_node(n3) block.add_node(n4) block.add_node(n5) block.add_node(n6) block.add_node(n7) block.add_node(n8) block.add_node(n9) block.add_node(n10) block.add_node(n11) block.add_node(n12) block.add_node(n13) block.add_edge(n1, n2) block.add_edge(n2, n3) block.add_edge(n3, n4) block.add_edge(n4, n5) block.add_edge(n5, n6) block.add_edge(n6, n7) block.add_edge(n7, n8) block.add_edge(n8, n9) block.add_edge(n9, n10) block.add_edge(n10, n11) block.add_edge(n11, n12) block.add_edge(n12, n13) cell.add_block(block) cell.set_outputs() return cell
Create a cell with convolution. Args: input_nodes (list(Node)): a list of input_nodes for this cell. Returns: Cell: the corresponding cell.
nas4candle/candle/NT3/models/candle_conv_mlp_baseline.py
create_cell_conv
scrlnas2019/nas4candle
1
python
def create_cell_conv(input_nodes): 'Create a cell with convolution.\n\n Args:\n input_nodes (list(Node)): a list of input_nodes for this cell.\n\n Returns:\n Cell: the corresponding cell.\n ' cell = Cell(input_nodes) n1 = ConstantNode(op=Conv1D(filter_size=20, num_filters=128), name='N1') cell.graph.add_edge(input_nodes[0], n1) n2 = ConstantNode(op=Activation(activation='relu'), name='N2') n3 = ConstantNode(op=MaxPooling1D(pool_size=1, padding='same'), name='N3') n4 = ConstantNode(op=Conv1D(filter_size=10, num_filters=128), name='N4') n5 = ConstantNode(op=Activation(activation='relu'), name='N5') n6 = ConstantNode(op=MaxPooling1D(pool_size=10, padding='same'), name='N6') n7 = ConstantNode(op=Flatten(), name='N7') n8 = ConstantNode(op=Dense(units=200), name='N8') n9 = ConstantNode(op=Activation(activation='relu'), name='N9') n10 = ConstantNode(op=Dropout(rate=0.1), name='N10') n11 = ConstantNode(op=Dense(units=20), name='N11') n12 = ConstantNode(op=Activation(activation='relu'), name='N12') n13 = ConstantNode(op=Dropout(rate=0.1), name='N13') block = Block() block.add_node(n1) block.add_node(n2) block.add_node(n3) block.add_node(n4) block.add_node(n5) block.add_node(n6) block.add_node(n7) block.add_node(n8) block.add_node(n9) block.add_node(n10) block.add_node(n11) block.add_node(n12) block.add_node(n13) block.add_edge(n1, n2) block.add_edge(n2, n3) block.add_edge(n3, n4) block.add_edge(n4, n5) block.add_edge(n5, n6) block.add_edge(n6, n7) block.add_edge(n7, n8) block.add_edge(n8, n9) block.add_edge(n9, n10) block.add_edge(n10, n11) block.add_edge(n11, n12) block.add_edge(n12, n13) cell.add_block(block) cell.set_outputs() return cell
def create_cell_conv(input_nodes): 'Create a cell with convolution.\n\n Args:\n input_nodes (list(Node)): a list of input_nodes for this cell.\n\n Returns:\n Cell: the corresponding cell.\n ' cell = Cell(input_nodes) n1 = ConstantNode(op=Conv1D(filter_size=20, num_filters=128), name='N1') cell.graph.add_edge(input_nodes[0], n1) n2 = ConstantNode(op=Activation(activation='relu'), name='N2') n3 = ConstantNode(op=MaxPooling1D(pool_size=1, padding='same'), name='N3') n4 = ConstantNode(op=Conv1D(filter_size=10, num_filters=128), name='N4') n5 = ConstantNode(op=Activation(activation='relu'), name='N5') n6 = ConstantNode(op=MaxPooling1D(pool_size=10, padding='same'), name='N6') n7 = ConstantNode(op=Flatten(), name='N7') n8 = ConstantNode(op=Dense(units=200), name='N8') n9 = ConstantNode(op=Activation(activation='relu'), name='N9') n10 = ConstantNode(op=Dropout(rate=0.1), name='N10') n11 = ConstantNode(op=Dense(units=20), name='N11') n12 = ConstantNode(op=Activation(activation='relu'), name='N12') n13 = ConstantNode(op=Dropout(rate=0.1), name='N13') block = Block() block.add_node(n1) block.add_node(n2) block.add_node(n3) block.add_node(n4) block.add_node(n5) block.add_node(n6) block.add_node(n7) block.add_node(n8) block.add_node(n9) block.add_node(n10) block.add_node(n11) block.add_node(n12) block.add_node(n13) block.add_edge(n1, n2) block.add_edge(n2, n3) block.add_edge(n3, n4) block.add_edge(n4, n5) block.add_edge(n5, n6) block.add_edge(n6, n7) block.add_edge(n7, n8) block.add_edge(n8, n9) block.add_edge(n9, n10) block.add_edge(n10, n11) block.add_edge(n11, n12) block.add_edge(n12, n13) cell.add_block(block) cell.set_outputs() return cell<|docstring|>Create a cell with convolution. Args: input_nodes (list(Node)): a list of input_nodes for this cell. Returns: Cell: the corresponding cell.<|endoftext|>
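A side note on the create_cell_conv record above: its thirteen add_node calls and twelve add_edge calls describe a plain chain, so the same Block can be assembled with a loop. The sketch below is a hypothetical refactor, not code from the source repository; it assumes the ConstantNode and Block classes behave exactly as used in the record.

# Hypothetical helper; assumes ConstantNode and Block from the record above.
def chain_block(ops):
    nodes = [ConstantNode(op=op, name='N%d' % (i + 1)) for i, op in enumerate(ops)]
    block = Block()
    for node in nodes:
        block.add_node(node)            # register every node
    for prev, nxt in zip(nodes, nodes[1:]):
        block.add_edge(prev, nxt)       # connect consecutive nodes into a chain
    return nodes, block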
e83ea4323e567578e9e0260b129c51e336d21ee74bba8b5586eaca0a75fcc9e2
def _no_grad_trunc_normal_(tensor: Tensor, mean: float, std: float, a: float, b: float) -> Tensor: "Cut & paste from PyTorch official master until it's in a few official\n releases - RW Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf\n \n Args:\n tensor (Tensor):\n An n-dimensional `Tensor`.\n mean (float):\n Mean of the normal distribution.\n std (float):\n Standard deviation of the normal distribution.\n a (float):\n Minimum cutoff value.\n b (float):\n Maximum cutoff value.\n " def norm_cdf(x): return ((1.0 + math.erf((x / math.sqrt(2.0)))) / 2.0) if ((mean < (a - (2 * std))) or (mean > (b + (2 * std)))): error_console.log('mean is more than 2 std from [a, b] in nn.init.trunc_normal_. The distribution of values may be incorrect.', stacklevel=2) with torch.no_grad(): l = norm_cdf(((a - mean) / std)) u = norm_cdf(((b - mean) / std)) tensor.uniform_(((2 * l) - 1), ((2 * u) - 1)) tensor.erfinv_() tensor.mul_((std * math.sqrt(2.0))) tensor.add_(mean) tensor.clamp_(min=a, max=b) return tensor
Cut & paste from PyTorch official master until it's in a few official releases - RW Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf Args: tensor (Tensor): An n-dimensional `Tensor`. mean (float): Mean of the normal distribution. std (float): Standard deviation of the normal distribution. a (float): Minimum cutoff value. b (float): Maximum cutoff value.
src/onevision/nn/layer/weight_init.py
_no_grad_trunc_normal_
phlong3105/onevision
2
python
def _no_grad_trunc_normal_(tensor: Tensor, mean: float, std: float, a: float, b: float) -> Tensor: "Cut & paste from PyTorch official master until it's in a few official\n releases - RW Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf\n \n Args:\n tensor (Tensor):\n An n-dimensional `Tensor`.\n mean (float):\n Mean of the normal distribution.\n std (float):\n Standard deviation of the normal distribution.\n a (float):\n Minimum cutoff value.\n b (float):\n Maximum cutoff value.\n " def norm_cdf(x): return ((1.0 + math.erf((x / math.sqrt(2.0)))) / 2.0) if ((mean < (a - (2 * std))) or (mean > (b + (2 * std)))): error_console.log('mean is more than 2 std from [a, b] in nn.init.trunc_normal_. The distribution of values may be incorrect.', stacklevel=2) with torch.no_grad(): l = norm_cdf(((a - mean) / std)) u = norm_cdf(((b - mean) / std)) tensor.uniform_(((2 * l) - 1), ((2 * u) - 1)) tensor.erfinv_() tensor.mul_((std * math.sqrt(2.0))) tensor.add_(mean) tensor.clamp_(min=a, max=b) return tensor
def _no_grad_trunc_normal_(tensor: Tensor, mean: float, std: float, a: float, b: float) -> Tensor: "Cut & paste from PyTorch official master until it's in a few official\n releases - RW Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf\n \n Args:\n tensor (Tensor):\n An n-dimensional `Tensor`.\n mean (float):\n Mean of the normal distribution.\n std (float):\n Standard deviation of the normal distribution.\n a (float):\n Minimum cutoff value.\n b (float):\n Maximum cutoff value.\n " def norm_cdf(x): return ((1.0 + math.erf((x / math.sqrt(2.0)))) / 2.0) if ((mean < (a - (2 * std))) or (mean > (b + (2 * std)))): error_console.log('mean is more than 2 std from [a, b] in nn.init.trunc_normal_. The distribution of values may be incorrect.', stacklevel=2) with torch.no_grad(): l = norm_cdf(((a - mean) / std)) u = norm_cdf(((b - mean) / std)) tensor.uniform_(((2 * l) - 1), ((2 * u) - 1)) tensor.erfinv_() tensor.mul_((std * math.sqrt(2.0))) tensor.add_(mean) tensor.clamp_(min=a, max=b) return tensor<|docstring|>Cut & paste from PyTorch official master until it's in a few official
releases - RW Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf

Args:
 tensor (Tensor):
 An n-dimensional `Tensor`.
 mean (float):
 Mean of the normal distribution.
 std (float):
 Standard deviation of the normal distribution.
 a (float):
 Minimum cutoff value.
 b (float):
 Maximum cutoff value.<|endoftext|>
46c7f578fa7c55e6c0222385949952829ceaca1d1f8426434e6a48ba53864361
def trunc_normal_(tensor: Tensor, mean: float=0.0, std: float=1.0, a: float=(- 2.0), b: float=2.0) -> Tensor: 'Fills the input Tensor with values drawn from a truncated normal\n distribution. The values are effectively drawn from the normal\n distribution :math:`\\mathcal{N}(\\text{mean}, \\text{std}^2)` with values\n outside :math:`[a, b]` redrawn until they are within the bounds. The method\n used for generating the random values works best when\n :math:`a \\leq \\text{mean} \\leq b`.\n \n Args:\n tensor (Tensor):\n An n-dimensional `Tensor`.\n mean (float):\n Mean of the normal distribution.\n std (float):\n Standard deviation of the normal distribution.\n a (float):\n Minimum cutoff value.\n b (float):\n Maximum cutoff value.\n \n Examples:\n >>> w = torch.empty(3, 5)\n >>> nn.init.trunc_normal_(w)\n ' return _no_grad_trunc_normal_(tensor, mean, std, a, b)
Fills the input Tensor with values drawn from a truncated normal
distribution. The values are effectively drawn from the normal
distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` with values
outside :math:`[a, b]` redrawn until they are within the bounds. The method
used for generating the random values works best when
:math:`a \leq \text{mean} \leq b`.

Args:
 tensor (Tensor):
 An n-dimensional `Tensor`.
 mean (float):
 Mean of the normal distribution.
 std (float):
 Standard deviation of the normal distribution.
 a (float):
 Minimum cutoff value.
 b (float):
 Maximum cutoff value.

Examples:
 >>> w = torch.empty(3, 5)
 >>> nn.init.trunc_normal_(w)
src/onevision/nn/layer/weight_init.py
trunc_normal_
phlong3105/onevision
2
python
def trunc_normal_(tensor: Tensor, mean: float=0.0, std: float=1.0, a: float=(- 2.0), b: float=2.0) -> Tensor: 'Fills the input Tensor with values drawn from a truncated normal\n distribution. The values are effectively drawn from the normal\n distribution :math:`\\mathcal{N}(\\text{mean}, \\text{std}^2)` with values\n outside :math:`[a, b]` redrawn until they are within the bounds. The method\n used for generating the random values works best when\n :math:`a \\leq \\text{mean} \\leq b`.\n \n Args:\n tensor (Tensor):\n An n-dimensional `Tensor`.\n mean (float):\n Mean of the normal distribution.\n std (float):\n Standard deviation of the normal distribution.\n a (float):\n Minimum cutoff value.\n b (float):\n Maximum cutoff value.\n \n Examples:\n >>> w = torch.empty(3, 5)\n >>> nn.init.trunc_normal_(w)\n ' return _no_grad_trunc_normal_(tensor, mean, std, a, b)
def trunc_normal_(tensor: Tensor, mean: float=0.0, std: float=1.0, a: float=(- 2.0), b: float=2.0) -> Tensor: 'Fills the input Tensor with values drawn from a truncated normal\n distribution. The values are effectively drawn from the normal\n distribution :math:`\\mathcal{N}(\\text{mean}, \\text{std}^2)` with values\n outside :math:`[a, b]` redrawn until they are within the bounds. The method\n used for generating the random values works best when\n :math:`a \\leq \\text{mean} \\leq b`.\n \n Args:\n tensor (Tensor):\n An n-dimensional `Tensor`.\n mean (float):\n Mean of the normal distribution.\n std (float):\n Standard deviation of the normal distribution.\n a (float):\n Minimum cutoff value.\n b (float):\n Maximum cutoff value.\n \n Examples:\n >>> w = torch.empty(3, 5)\n >>> nn.init.trunc_normal_(w)\n ' return _no_grad_trunc_normal_(tensor, mean, std, a, b)<|docstring|>Fills the input Tensor with values drawn from a truncated normal
distribution. The values are effectively drawn from the normal
distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` with values
outside :math:`[a, b]` redrawn until they are within the bounds. The method
used for generating the random values works best when
:math:`a \leq \text{mean} \leq b`.

Args:
 tensor (Tensor):
 An n-dimensional `Tensor`.
 mean (float):
 Mean of the normal distribution.
 std (float):
 Standard deviation of the normal distribution.
 a (float):
 Minimum cutoff value.
 b (float):
 Maximum cutoff value.

Examples:
 >>> w = torch.empty(3, 5)
 >>> nn.init.trunc_normal_(w)<|endoftext|>
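The two weight_init records above implement truncated-normal initialization by inverse-CDF sampling: draw uniformly between norm_cdf((a - mean) / std) and norm_cdf((b - mean) / std), map back through erfinv, rescale by std, shift by mean, and clamp. Since they mirror torch.nn.init.trunc_normal_, a quick sanity check can be run against the built-in directly; the tensor size below is arbitrary.

import torch

w = torch.empty(1000, 1000)
torch.nn.init.trunc_normal_(w, mean=0.0, std=1.0, a=-2.0, b=2.0)
assert w.min().item() >= -2.0           # no sample below the lower cutoff
assert w.max().item() <= 2.0            # none above the upper cutoff
print(w.mean().item(), w.std().item())  # std lands slightly under 1.0: the tails are cut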
434e3b1e530d2378e3036b7b7debcda1013934ed2d6febe4a9a3232961cc852e
def before_validate(self): '\n Sets the number of inputs in `self.dls`\n ' x = self.dl.one_batch() self.learn.dls.n_inp = len(x)
Sets the number of inputs in `self.dls`
adaptnlp/callback.py
before_validate
mfredriksz/adaptnlp
410
python
def before_validate(self): '\n \n ' x = self.dl.one_batch() self.learn.dls.n_inp = len(x)
def before_validate(self): '\n \n ' x = self.dl.one_batch() self.learn.dls.n_inp = len(x)<|docstring|>Sets the number of inputs in `self.dls`<|endoftext|>
2f354f98fda9becd33fe1887327adabe675161c47685227c2593d78d90eb8416
def before_batch(self): '\n Turns `self.xb` from a tuple to a dictionary of either\n `{"input_ids", "attention_masks", "token_type_ids"}`\n or\n `{"input_ids", "attention_masks"}`\n ' inputs = {'input_ids': self.learn.xb[0], 'attention_mask': self.learn.xb[1]} if (len(self.learn.xb) > 2): inputs['token_type_ids'] = self.learn.xb[2] self.learn.inputs = inputs
Turns `self.xb` from a tuple to a dictionary of either
`{"input_ids", "attention_masks", "token_type_ids"}`
or
`{"input_ids", "attention_masks"}`
adaptnlp/callback.py
before_batch
mfredriksz/adaptnlp
410
python
def before_batch(self): '\n Turns `self.xb` from a tuple to a dictionary of either\n `{"input_ids", "attention_masks", "token_type_ids"}`\n or\n `{"input_ids", "attention_masks"}`\n ' inputs = {'input_ids': self.learn.xb[0], 'attention_mask': self.learn.xb[1]} if (len(self.learn.xb) > 2): inputs['token_type_ids'] = self.learn.xb[2] self.learn.inputs = inputs
def before_batch(self): '\n Turns `self.xb` from a tuple to a dictionary of either\n `{"input_ids", "attention_masks", "token_type_ids"}`\n or\n `{"input_ids", "attention_masks"}`\n ' inputs = {'input_ids': self.learn.xb[0], 'attention_mask': self.learn.xb[1]} if (len(self.learn.xb) > 2): inputs['token_type_ids'] = self.learn.xb[2] self.learn.inputs = inputs<|docstring|>Turns `self.xb` from a tuple to a dictionary of either
`{"input_ids", "attention_masks", "token_type_ids"}`
or
`{"input_ids", "attention_masks"}`<|endoftext|>
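The before_batch record above is the glue between fastai's positional batch tuple and the keyword arguments Hugging Face models expect. Outside the callback machinery, the same repacking looks like this; the tensors are dummies invented for the sketch.

import torch

batch = (torch.ones(2, 8, dtype=torch.long),   # stands in for input_ids
         torch.ones(2, 8, dtype=torch.long))   # stands in for attention_mask
inputs = {'input_ids': batch[0], 'attention_mask': batch[1]}
if len(batch) > 2:                             # token_type_ids only if present
    inputs['token_type_ids'] = batch[2]
# a transformers model could now be called as model(**inputs)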
19b098f1e04b7e62e4e460dd3a4a9021bb93b3c282490a2bd3c74aa5cf2ed894
def before_batch(self): '\n Set `self.learn.xb` to `self.learn.inputs.values()`\n ' if (not self.as_dict): self.learn.xb = list(self.learn.inputs.values()) else: self.learn.xb = self.learn.inputs
Set `self.learn.xb` to `self.learn.inputs.values()`
adaptnlp/callback.py
before_batch
mfredriksz/adaptnlp
410
python
def before_batch(self): '\n \n ' if (not self.as_dict): self.learn.xb = list(self.learn.inputs.values()) else: self.learn.xb = self.learn.inputs
def before_batch(self): '\n \n ' if (not self.as_dict): self.learn.xb = list(self.learn.inputs.values()) else: self.learn.xb = self.learn.inputs<|docstring|>Set `self.learn.xb` to `self.learn.inputs.values()`<|endoftext|>
5391f37a50220c97e66b55b1f155e6750d5d1036e80133d89dc6e0a79e3858fe
def before_batch(self): '\n Run model-specific inference\n ' pred = self.learn.model.generate(input_ids=self.xb['input_ids'], attention_mask=self.xb['attention_mask'], num_beams=self.num_beams, min_length=self.min_length, max_length=self.max_length, early_stopping=self.early_stopping, **self.kwargs) self.learn.pred = pred raise CancelBatchException
Run model-specific inference
adaptnlp/callback.py
before_batch
mfredriksz/adaptnlp
410
python
def before_batch(self): '\n \n ' pred = self.learn.model.generate(input_ids=self.xb['input_ids'], attention_mask=self.xb['attention_mask'], num_beams=self.num_beams, min_length=self.min_length, max_length=self.max_length, early_stopping=self.early_stopping, **self.kwargs) self.learn.pred = pred raise CancelBatchException
def before_batch(self): '\n \n ' pred = self.learn.model.generate(input_ids=self.xb['input_ids'], attention_mask=self.xb['attention_mask'], num_beams=self.num_beams, min_length=self.min_length, max_length=self.max_length, early_stopping=self.early_stopping, **self.kwargs) self.learn.pred = pred raise CancelBatchException<|docstring|>Run model-specific inference<|endoftext|>
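The generate-based before_batch record above short-circuits the training loop with CancelBatchException so that seq2seq decoding replaces the forward pass. The same generate call outside fastai, using the public transformers API; the checkpoint name is only an example, not one the record prescribes.

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained('t5-small')       # example checkpoint
model = AutoModelForSeq2SeqLM.from_pretrained('t5-small')
enc = tok('summarize: the quick brown fox', return_tensors='pt')
pred = model.generate(input_ids=enc['input_ids'],
                      attention_mask=enc['attention_mask'],
                      num_beams=4, min_length=2, max_length=20,
                      early_stopping=True)
print(tok.decode(pred[0], skip_special_tokens=True))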
ed623fda61d27f87be29b5690aabbedf0699ca8fdd52077fdfb6038b0d7defcf
def bar_compare(run_path, test_path): '\n compare whether two csv files are the same\n only compare H/C/L/O but drop the first column\n ' run_data = pd.read_csv(run_path) test_data = pd.read_csv(test_path) run_subset = run_data[['close', 'high', 'open', 'low']] test_subset = test_data[['C', 'H', 'O', 'L']] for i in range(run_subset.shape[1]): tmp_run = run_subset.iloc[(:, i)] tmp_test = test_subset.iloc[(:, i)] diff = np.where(((tmp_run - tmp_test) != 0)) if (len(diff[0]) != 0): return False return True
compare whether two csv files are the same
only compare H/C/L/O but drop the first column
tests/preprocess/bar_test.py
bar_compare
crazywiden/fmlpy
3
python
def bar_compare(run_path, test_path): '\n compare whether two csv files are the same\n only compare H/C/L/O but drop the first column\n ' run_data = pd.read_csv(run_path) test_data = pd.read_csv(test_path) run_subset = run_data[['close', 'high', 'open', 'low']] test_subset = test_data[['C', 'H', 'O', 'L']] for i in range(run_subset.shape[1]): tmp_run = run_subset.iloc[(:, i)] tmp_test = test_subset.iloc[(:, i)] diff = np.where(((tmp_run - tmp_test) != 0)) if (len(diff[0]) != 0): return False return True
def bar_compare(run_path, test_path): '\n compare whether two csv files are the same\n only compare H/C/L/O but drop the first column\n ' run_data = pd.read_csv(run_path) test_data = pd.read_csv(test_path) run_subset = run_data[['close', 'high', 'open', 'low']] test_subset = test_data[['C', 'H', 'O', 'L']] for i in range(run_subset.shape[1]): tmp_run = run_subset.iloc[(:, i)] tmp_test = test_subset.iloc[(:, i)] diff = np.where(((tmp_run - tmp_test) != 0)) if (len(diff[0]) != 0): return False return True<|docstring|>compare whether two csv files are the same
only compare H/C/L/O but drop the first column<|endoftext|>
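bar_compare above walks the columns one by one; pandas can express the same position-wise equality in a single vectorized comparison. A sketch under the record's own assumptions about the two CSV layouts:

import pandas as pd

def bar_compare_vectorized(run_path, test_path):
    run = pd.read_csv(run_path)[['close', 'high', 'open', 'low']]
    test = pd.read_csv(test_path)[['C', 'H', 'O', 'L']]
    # compare values position-wise, ignoring the differing column names
    return bool((run.to_numpy() == test.to_numpy()).all())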
1951359a6a72d295f920e8779fcdc94414365ec2019a417cf80358b2a96791c2
def __init__(self, map, lap=0, flagLMPC=0): 'Initialization\n map: map\n lap: number of laps to run. If set to 0 then the simulation is completed when ClosedLoopData is full\n flagLMPC: set to 0 for standard controller. Set to 1 for LMPC --> at iteration j add data to SS^{j-1} (look line 9999)\n ' self.map = map self.laps = lap self.flagLMPC = flagLMPC
Initialization
map: map
lap: number of laps to run. If set to 0 then the simulation is completed when ClosedLoopData is full
flagLMPC: set to 0 for standard controller. Set to 1 for LMPC --> at iteration j add data to SS^{j-1} (look line 9999)
src/fnc/SysModel.py
__init__
SSubhnil/RacingLMPC
1
python
def __init__(self, map, lap=0, flagLMPC=0): 'Initialization\n map: map\n lap: number of laps to run. If set to 0 then the simulation is completed when ClosedLoopData is full\n flagLMPC: set to 0 for standard controller. Set to 1 for LMPC --> at iteration j add data to SS^{j-1} (look line 9999)\n ' self.map = map self.laps = lap self.flagLMPC = flagLMPC
def __init__(self, map, lap=0, flagLMPC=0): 'Initialization\n map: map\n lap: number of laps to run. If set to 0 then the simulation is completed when ClosedLoopData is full\n flagLMPC: set to 0 for standard controller. Set to 1 for LMPC --> at iteration j add data to SS^{j-1} (look line 9999)\n ' self.map = map self.laps = lap self.flagLMPC = flagLMPC<|docstring|>Initialization
map: map
lap: number of laps to run. If set to 0 then the simulation is completed when ClosedLoopData is full
flagLMPC: set to 0 for standard controller. Set to 1 for LMPC --> at iteration j add data to SS^{j-1} (look line 9999)<|endoftext|>
c61e7f799a05810b945fae35de3c3d60450d45f189ae553ba9e4166d217846b6
def Sim(self, ClosedLoopData, Controller, LMPCprediction=0): 'Simulate closed-loop system\n ClosedLoopData: object where the closed-loop data are written\n Controller: controller used in the closed-loop\n LMPCprediction: object where the open-loop predictions and safe set are stored\n ' x = ClosedLoopData.x x_glob = ClosedLoopData.x_glob u = ClosedLoopData.u SimulationTime = 0 for i in range(0, int(ClosedLoopData.Points)): Controller.solve(x[(i, :)]) u[(i, :)] = Controller.uPred[(0, :)] if (LMPCprediction != 0): Controller.LapTime = i LMPCprediction.PredictedStates[(:, :, i, Controller.it)] = Controller.xPred LMPCprediction.PredictedInputs[(:, :, i, Controller.it)] = Controller.uPred LMPCprediction.SSused[(:, :, i, Controller.it)] = Controller.SS_PointSelectedTot LMPCprediction.Qfunused[(:, i, Controller.it)] = Controller.Qfun_SelectedTot (x[((i + 1), :)], x_glob[((i + 1), :)]) = _DynModel(x[(i, :)], x_glob[(i, :)], u[(i, :)], np, ClosedLoopData.dt, self.map.PointAndTangent) SimulationTime = (i + 1) if (i <= 5): print(('Linearization time: %.4fs Solver time: %.4fs' % (Controller.linearizationTime.total_seconds(), Controller.solverTime.total_seconds()))) print('Time: ', (i * ClosedLoopData.dt), 'Current State and Input: ', x[(i, :)], u[(i, :)]) if (Controller.feasible == 0): print('Unfeasible at time ', (i * ClosedLoopData.dt)) print('Cur State: ', x[(i, :)], 'Iteration ', Controller.it) break if (self.flagLMPC == 1): Controller.addPoint(x[(i, :)], u[(i, :)]) if ((self.laps == 1) and (int(np.floor((x[((i + 1), 4)] / self.map.TrackLength))) > 0)): print('Simulation terminated: Lap completed') break ClosedLoopData.SimTime = SimulationTime print('Number of laps completed: ', int(np.floor((x[((- 1), 4)] / self.map.TrackLength))))
Simulate closed-loop system ClosedLoopData: object where the closed-loop data are written Controller: controller used in the closed-loop LMPCprediction: object where the open-loop predictions and safe set are stored
src/fnc/SysModel.py
Sim
SSubhnil/RacingLMPC
1
python
def Sim(self, ClosedLoopData, Controller, LMPCprediction=0): 'Simulate closed-loop system\n ClosedLoopData: object where the closed-loop data are written\n Controller: controller used in the closed-loop\n LMPCprediction: object where the open-loop predictions and safe set are stored\n ' x = ClosedLoopData.x x_glob = ClosedLoopData.x_glob u = ClosedLoopData.u SimulationTime = 0 for i in range(0, int(ClosedLoopData.Points)): Controller.solve(x[(i, :)]) u[(i, :)] = Controller.uPred[(0, :)] if (LMPCprediction != 0): Controller.LapTime = i LMPCprediction.PredictedStates[(:, :, i, Controller.it)] = Controller.xPred LMPCprediction.PredictedInputs[(:, :, i, Controller.it)] = Controller.uPred LMPCprediction.SSused[(:, :, i, Controller.it)] = Controller.SS_PointSelectedTot LMPCprediction.Qfunused[(:, i, Controller.it)] = Controller.Qfun_SelectedTot (x[((i + 1), :)], x_glob[((i + 1), :)]) = _DynModel(x[(i, :)], x_glob[(i, :)], u[(i, :)], np, ClosedLoopData.dt, self.map.PointAndTangent) SimulationTime = (i + 1) if (i <= 5): print(('Linearization time: %.4fs Solver time: %.4fs' % (Controller.linearizationTime.total_seconds(), Controller.solverTime.total_seconds()))) print('Time: ', (i * ClosedLoopData.dt), 'Current State and Input: ', x[(i, :)], u[(i, :)]) if (Controller.feasible == 0): print('Unfeasible at time ', (i * ClosedLoopData.dt)) print('Cur State: ', x[(i, :)], 'Iteration ', Controller.it) break if (self.flagLMPC == 1): Controller.addPoint(x[(i, :)], u[(i, :)]) if ((self.laps == 1) and (int(np.floor((x[((i + 1), 4)] / self.map.TrackLength))) > 0)): print('Simulation terminated: Lap completed') break ClosedLoopData.SimTime = SimulationTime print('Number of laps completed: ', int(np.floor((x[((- 1), 4)] / self.map.TrackLength))))
def Sim(self, ClosedLoopData, Controller, LMPCprediction=0): 'Simulate closed-loop system\n ClosedLoopData: object where the closed-loop data are written\n Controller: controller used in the closed-loop\n LMPCprediction: object where the open-loop predictions and safe set are stored\n ' x = ClosedLoopData.x x_glob = ClosedLoopData.x_glob u = ClosedLoopData.u SimulationTime = 0 for i in range(0, int(ClosedLoopData.Points)): Controller.solve(x[(i, :)]) u[(i, :)] = Controller.uPred[(0, :)] if (LMPCprediction != 0): Controller.LapTime = i LMPCprediction.PredictedStates[(:, :, i, Controller.it)] = Controller.xPred LMPCprediction.PredictedInputs[(:, :, i, Controller.it)] = Controller.uPred LMPCprediction.SSused[(:, :, i, Controller.it)] = Controller.SS_PointSelectedTot LMPCprediction.Qfunused[(:, i, Controller.it)] = Controller.Qfun_SelectedTot (x[((i + 1), :)], x_glob[((i + 1), :)]) = _DynModel(x[(i, :)], x_glob[(i, :)], u[(i, :)], np, ClosedLoopData.dt, self.map.PointAndTangent) SimulationTime = (i + 1) if (i <= 5): print(('Linearization time: %.4fs Solver time: %.4fs' % (Controller.linearizationTime.total_seconds(), Controller.solverTime.total_seconds()))) print('Time: ', (i * ClosedLoopData.dt), 'Current State and Input: ', x[(i, :)], u[(i, :)]) if (Controller.feasible == 0): print('Unfeasible at time ', (i * ClosedLoopData.dt)) print('Cur State: ', x[(i, :)], 'Iteration ', Controller.it) break if (self.flagLMPC == 1): Controller.addPoint(x[(i, :)], u[(i, :)]) if ((self.laps == 1) and (int(np.floor((x[((i + 1), 4)] / self.map.TrackLength))) > 0)): print('Simulation terminated: Lap completed') break ClosedLoopData.SimTime = SimulationTime print('Number of laps completed: ', int(np.floor((x[((- 1), 4)] / self.map.TrackLength))))<|docstring|>Simulate closed-loop system ClosedLoopData: object where the closed-loop data are written Controller: controller used in the closed-loop LMPCprediction: object where the open-loop predictions and safe set are stored<|endoftext|>
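The Sim record above follows the standard closed-loop recipe: query the controller, apply the input to the plant, record the new state, and bail out on infeasibility or lap completion. Stripped of the LMPC bookkeeping and logging, the skeleton is roughly as follows; step_dynamics is a hypothetical stand-in for the record's _DynModel.

import numpy as np

def closed_loop(x0, controller, step_dynamics, n_steps, dt):
    x = np.zeros((n_steps + 1, x0.size))   # state trajectory
    x[0] = x0
    u = np.zeros((n_steps, 2))             # applied inputs
    for i in range(n_steps):
        controller.solve(x[i])             # compute the next input
        u[i] = controller.uPred[0]
        x[i + 1] = step_dynamics(x[i], u[i], dt)
        if controller.feasible == 0:       # stop early, like the record does
            break
    return x, u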
aca8fdcbe989f6a3bdee8d8e1e9ac7b0ba68c656627e6b7bd0e031d51521273a
def __init__(self, vt): 'Initialization\n Arguments:\n vt: target velocity\n ' self.vt = vt self.uPred = np.zeros([1, 2]) startTimer = datetime.datetime.now() endTimer = datetime.datetime.now() deltaTimer = (endTimer - startTimer) self.solverTime = deltaTimer self.linearizationTime = deltaTimer self.feasible = 1
Initialization Arguments: vt: target velocity
src/fnc/SysModel.py
__init__
SSubhnil/RacingLMPC
1
python
def __init__(self, vt): 'Initialization\n Arguments:\n vt: target velocity\n ' self.vt = vt self.uPred = np.zeros([1, 2]) startTimer = datetime.datetime.now() endTimer = datetime.datetime.now() deltaTimer = (endTimer - startTimer) self.solverTime = deltaTimer self.linearizationTime = deltaTimer self.feasible = 1
def __init__(self, vt): 'Initialization\n Arguments:\n vt: target velocity\n ' self.vt = vt self.uPred = np.zeros([1, 2]) startTimer = datetime.datetime.now() endTimer = datetime.datetime.now() deltaTimer = (endTimer - startTimer) self.solverTime = deltaTimer self.linearizationTime = deltaTimer self.feasible = 1<|docstring|>Initialization Arguments: vt: target velocity<|endoftext|>
5c2ac1af12a53aeadb8be629a1aea43c41e1259a2b6f6925bcff428a2964a115
def solve(self, x0): 'Computes control action\n Arguments:\n x0: current state position\n ' vt = self.vt self.uPred[(0, 0)] = ((((- 0.6) * x0[5]) - (0.9 * x0[3])) + np.max([(- 0.9), np.min([(np.random.randn() * 0.25), 0.9])])) self.uPred[(0, 1)] = ((1.5 * (vt - x0[0])) + np.max([(- 0.2), np.min([(np.random.randn() * 0.1), 0.2])]))
Computes control action Arguments: x0: current state position
src/fnc/SysModel.py
solve
SSubhnil/RacingLMPC
1
python
def solve(self, x0): 'Computes control action\n Arguments:\n x0: current state position\n ' vt = self.vt self.uPred[(0, 0)] = ((((- 0.6) * x0[5]) - (0.9 * x0[3])) + np.max([(- 0.9), np.min([(np.random.randn() * 0.25), 0.9])])) self.uPred[(0, 1)] = ((1.5 * (vt - x0[0])) + np.max([(- 0.2), np.min([(np.random.randn() * 0.1), 0.2])]))
def solve(self, x0): 'Computes control action\n Arguments:\n x0: current state position\n ' vt = self.vt self.uPred[(0, 0)] = ((((- 0.6) * x0[5]) - (0.9 * x0[3])) + np.max([(- 0.9), np.min([(np.random.randn() * 0.25), 0.9])])) self.uPred[(0, 1)] = ((1.5 * (vt - x0[0])) + np.max([(- 0.2), np.min([(np.random.randn() * 0.1), 0.2])]))<|docstring|>Computes control action Arguments: x0: current state position<|endoftext|>
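The solve record above is a hand-tuned proportional law plus clipped Gaussian excitation: steering reacts to heading and lateral error (x0[3], x0[5]), throttle to the speed error vt - x0[0], and each channel adds noise clipped to a fixed band via the np.max/np.min sandwich. That clipping idiom in isolation:

import numpy as np

def clipped_noise(scale, bound):
    # one Gaussian draw, clipped to [-bound, bound]
    return max(-bound, min(np.random.randn() * scale, bound))

steer_noise = clipped_noise(0.25, 0.9)   # matches the steering channel
accel_noise = clipped_noise(0.1, 0.2)    # matches the throttle channel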
e901a6f091c900d093981502c64bfb73f746d85600e33d719dfc39a56355ab4e
def plot_connectivity(mtx, filename=None): '\n Create a connectivity matrix plot.\n\n If mtx has 3 dimensions, average first along the last axis.\n\n Parameters\n ----------\n mtx : numpy.ndarray\n A (square) array with connectivity information inside.\n filename : None, str, or os.PathLike, optional\n The path to save the plot on disk.\n\n Returns\n -------\n 0\n If there are no errors.\n\n Raises\n ------\n ImportError\n If matplotlib is not installed.\n ValueError\n If mtx has more than 3 dimensions.\n ' try: import matplotlib.pyplot as plt except ImportError: raise ImportError('matplotlib is required to plot connectivity matrices. Please see install instructions.') mtx = mtx.squeeze() if (mtx.ndim > 3): raise ValueError('Cannot plot connectivity matrices for matrix of dimensions > 3.') elif (mtx.ndim == 3): LGR.warning('Since matrix is 3D, averaging across last dimension.') mtx = mtx.mean(axis=(- 1)) if (mtx.shape[0] != mtx.shape[1]): LGR.warning('Given matrix is not a square matrix!') LGR.info('Creating connectivity plot.') plt.figure(figsize=FIGSIZE) plt.imshow(mtx, cmap='RdBu') if (filename is not None): plt.savefig(filename, dpi=SET_DPI) return 0
Create a connectivity matrix plot. If mtx has 3 dimensions, average first along the last axis. Parameters ---------- mtx : numpy.ndarray A (square) array with connectivity information inside. filename : None, str, or os.PathLike, optional The path to save the plot on disk. Returns ------- 0 If there are no errors. Raises ------ ImportError If matplotlib is not installed. ValueError If mtx has more than 3 dimensions.
nigsp/viz.py
plot_connectivity
smoia/nigsp
2
python
def plot_connectivity(mtx, filename=None): '\n Create a connectivity matrix plot.\n\n If mtx has 3 dimensions, average first along the last axis.\n\n Parameters\n ----------\n mtx : numpy.ndarray\n A (square) array with connectivity information inside.\n filename : None, str, or os.PathLike, optional\n The path to save the plot on disk.\n\n Returns\n -------\n 0\n If there are no errors.\n\n Raises\n ------\n ImportError\n If matplotlib is not installed.\n ValueError\n If mtx has more than 3 dimensions.\n ' try: import matplotlib.pyplot as plt except ImportError: raise ImportError('matplotlib is required to plot connectivity matrices. Please see install instructions.') mtx = mtx.squeeze() if (mtx.ndim > 3): raise ValueError('Cannot plot connectivity matrices for matrix of dimensions > 3.') elif (mtx.ndim == 3): LGR.warning('Since matrix is 3D, averaging across last dimension.') mtx = mtx.mean(axis=(- 1)) if (mtx.shape[0] != mtx.shape[1]): LGR.warning('Given matrix is not a square matrix!') LGR.info('Creating connectivity plot.') plt.figure(figsize=FIGSIZE) plt.imshow(mtx, cmap='RdBu') if (filename is not None): plt.savefig(filename, dpi=SET_DPI) return 0
def plot_connectivity(mtx, filename=None): '\n Create a connectivity matrix plot.\n\n If mtx has 3 dimensions, average first along the last axis.\n\n Parameters\n ----------\n mtx : numpy.ndarray\n A (square) array with connectivity information inside.\n filename : None, str, or os.PathLike, optional\n The path to save the plot on disk.\n\n Returns\n -------\n 0\n If there are no errors.\n\n Raises\n ------\n ImportError\n If matplotlib is not installed.\n ValueError\n If mtx has more than 3 dimensions.\n ' try: import matplotlib.pyplot as plt except ImportError: raise ImportError('matplotlib is required to plot connectivity matrices. Please see install instructions.') mtx = mtx.squeeze() if (mtx.ndim > 3): raise ValueError('Cannot plot connectivity matrices for matrix of dimensions > 3.') elif (mtx.ndim == 3): LGR.warning('Since matrix is 3D, averaging across last dimension.') mtx = mtx.mean(axis=(- 1)) if (mtx.shape[0] != mtx.shape[1]): LGR.warning('Given matrix is not a square matrix!') LGR.info('Creating connectivity plot.') plt.figure(figsize=FIGSIZE) plt.imshow(mtx, cmap='RdBu') if (filename is not None): plt.savefig(filename, dpi=SET_DPI) return 0<|docstring|>Create a connectivity matrix plot. If mtx has 3 dimensions, average first along the last axis. Parameters ---------- mtx : numpy.ndarray A (square) array with connectivity information inside. filename : None, str, or os.PathLike, optional The path to save the plot on disk. Returns ------- 0 If there are no errors. Raises ------ ImportError If matplotlib is not installed. ValueError If mtx has more than 3 dimensions.<|endoftext|>
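plot_connectivity above accepts any square 2-D matrix (or a 3-D stack, which it averages over the last axis). A minimal exercise with random data, assuming nigsp is installed and the module is importable as nigsp.viz per the record's path:

import numpy as np
from nigsp.viz import plot_connectivity

rng = np.random.default_rng(0)
a = rng.standard_normal((50, 50))
mtx = (a + a.T) / 2                      # symmetrize so it resembles connectivity
plot_connectivity(mtx, filename='conn.png')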
b73879a3b0446cfa81a0976df48cb6a8d37abf0a60582cf1d5b2c5c989165c74
def plot_grayplot(timeseries, filename=None): '\n Create a grayplot (a.k.a. carpet plot a.k.a. timeseries plot).\n\n If timeseries has 3 dimensions, average first along the last axis.\n\n Parameters\n ----------\n timeseries : numpy.ndarray\n An array representing a timeseries. Time has to be encoded in the\n second dimension.\n filename : None, str, or os.PathLike, optional\n The path to save the plot on disk.\n\n Returns\n -------\n 0\n If there are no errors.\n\n Raises\n ------\n ImportError\n If matplotlib is not installed.\n ValueError\n If timeseries has more than 3 dimensions.\n ' try: import matplotlib.pyplot as plt except ImportError: raise ImportError('matplotlib is required to plot grayplots. Please see install instructions.') timeseries = timeseries.squeeze() if (timeseries.ndim > 3): raise ValueError('Cannot plot grayplots for timeseries of dimensions > 3.') elif (timeseries.ndim == 3): LGR.warning('Since timeseries is 3D, averaging across last dimension.') timeseries = timeseries.mean(axis=(- 1)) LGR.info('Creating grayplot.') plt.figure(figsize=FIGSIZE) vmax = np.percentile(timeseries, 99) vmin = np.percentile(timeseries, 1) plt.imshow(timeseries, cmap='gray', vmin=vmin, vmax=vmax) if (filename is not None): plt.savefig(filename, dpi=SET_DPI) return 0
Create a grayplot (a.k.a. carpet plot a.k.a. timeseries plot). If timeseries has 3 dimensions, average first along the last axis. Parameters ---------- timeseries : numpy.ndarray An array representing a timeseries. Time has to be encoded in the second dimension. filename : None, str, or os.PathLike, optional The path to save the plot on disk. Returns ------- 0 If there are no errors. Raises ------ ImportError If matplotlib is not installed. ValueError If timeseries has more than 3 dimensions.
nigsp/viz.py
plot_grayplot
smoia/nigsp
2
python
def plot_grayplot(timeseries, filename=None): '\n Create a grayplot (a.k.a. carpet plot a.k.a. timeseries plot).\n\n If timeseries has 3 dimensions, average first along the last axis.\n\n Parameters\n ----------\n timeseries : numpy.ndarray\n An array representing a timeseries. Time has to be encoded in the\n second dimension.\n filename : None, str, or os.PathLike, optional\n The path to save the plot on disk.\n\n Returns\n -------\n 0\n If there are no errors.\n\n Raises\n ------\n ImportError\n If matplotlib is not installed.\n ValueError\n If timeseries has more than 3 dimensions.\n ' try: import matplotlib.pyplot as plt except ImportError: raise ImportError('matplotlib is required to plot grayplots. Please see install instructions.') timeseries = timeseries.squeeze() if (timeseries.ndim > 3): raise ValueError('Cannot plot grayplots for timeseries of dimensions > 3.') elif (timeseries.ndim == 3): LGR.warning('Since timeseries is 3D, averaging across last dimension.') timeseries = timeseries.mean(axis=(- 1)) LGR.info('Creating grayplot.') plt.figure(figsize=FIGSIZE) vmax = np.percentile(timeseries, 99) vmin = np.percentile(timeseries, 1) plt.imshow(timeseries, cmap='gray', vmin=vmin, vmax=vmax) if (filename is not None): plt.savefig(filename, dpi=SET_DPI) return 0
def plot_grayplot(timeseries, filename=None): '\n Create a grayplot (a.k.a. carpet plot a.k.a. timeseries plot).\n\n If timeseries has 3 dimensions, average first along the last axis.\n\n Parameters\n ----------\n timeseries : numpy.ndarray\n An array representing a timeseries. Time has to be encoded in the\n second dimension.\n filename : None, str, or os.PathLike, optional\n The path to save the plot on disk.\n\n Returns\n -------\n 0\n If there are no errors.\n\n Raises\n ------\n ImportError\n If matplotlib is not installed.\n ValueError\n If timeseries has more than 3 dimensions.\n ' try: import matplotlib.pyplot as plt except ImportError: raise ImportError('matplotlib is required to plot grayplots. Please see install instructions.') timeseries = timeseries.squeeze() if (timeseries.ndim > 3): raise ValueError('Cannot plot grayplots for timeseries of dimensions > 3.') elif (timeseries.ndim == 3): LGR.warning('Since timeseries is 3D, averaging across last dimension.') timeseries = timeseries.mean(axis=(- 1)) LGR.info('Creating grayplot.') plt.figure(figsize=FIGSIZE) vmax = np.percentile(timeseries, 99) vmin = np.percentile(timeseries, 1) plt.imshow(timeseries, cmap='gray', vmin=vmin, vmax=vmax) if (filename is not None): plt.savefig(filename, dpi=SET_DPI) return 0<|docstring|>Create a grayplot (a.k.a. carpet plot a.k.a. timeseries plot). If timeseries has 3 dimensions, average first along the last axis. Parameters ---------- timeseries : numpy.ndarray An array representing a timeseries. Time has to be encoded in the second dimension. filename : None, str, or os.PathLike, optional The path to save the plot on disk. Returns ------- 0 If there are no errors. Raises ------ ImportError If matplotlib is not installed. ValueError If timeseries has more than 3 dimensions.<|endoftext|>
f5394db6b412012d248b2c797cd2dab9fc1924564b326e2c1ec16ab6a1e8c192
def plot_nodes(ns, atlas, filename=None): "\n Create a marker plot in the MNI space.\n\n If ns has 2 dimensions, average first along last dimension.\n\n Parameters\n ----------\n ns : numpy.ndarray\n A 1- or 2- D array that contains the value of the nodes.\n atlas : str, os.PathLike, 3D Nifti1Image, or numpy.ndarray\n The 3d nifti image of an atlas, a string or path to its position,\n or a list of coordinates of the center of mass of parcels.\n filename : None, str, or os.PathLike, optional\n The path to save the plot on disk.\n\n Returns\n -------\n 0\n If there are no errors.\n\n Raises\n ------\n ImportError\n If matplotlib and/or nilearn are not installed.\n If nibabel is not installed.\n nib.filebasedimages.ImageFileError\n If given path is not an atlas.\n ValueError\n If ns has more than 2 dimensions.\n If coordinates can't be extracted from atlas.\n " try: from nilearn.plotting import find_parcellation_cut_coords, plot_markers import matplotlib.pyplot as plt except ImportError: raise ImportError('nilearn and matplotlib are required to plot node images. Please see install instructions.') ns = ns.squeeze() if (ns.ndim > 2): raise ValueError('Cannot plot connectivity matrices for matrix of dimensions > 2.') elif (ns.ndim == 2): LGR.warning('Given matrix has 2 dimensions, averaging across last dimension.') ns = ns.mean(axis=(- 1)) try: coord = find_parcellation_cut_coords(atlas) except: if (type(atlas) is np.ndarray): if ((atlas.ndim > 2) or (atlas.shape[1] != 3)): raise NotImplementedError('Only atlases in nifti format or list of coordinates are supported.') coord = atlas else: try: import nibabel as nib if os.path.isfile(atlas): img = nib.load(atlas) else: raise nib.filebasedimages.ImageFileError(f'Cannot find file {atlas}') coord = find_parcellation_cut_coords(img) except ImportError: raise ImportError('Nibabel is required to handle atlases I/O. Please see install instructions.') try: coord except NameError: raise ValueError('Could not obtain coordinates from given atlas.') if (ns.shape[0] != coord.shape[0]): raise ValueError('Node array and coordinates array have different length.') LGR.info('Creating markerplot.') plt.figure(figsize=FIGSIZE) plot_markers(ns, coord) if (filename is not None): plt.savefig(filename, dpi=SET_DPI) return 0
Create a marker plot in the MNI space. If ns has 2 dimensions, average first along last dimension. Parameters ---------- ns : numpy.ndarray A 1- or 2- D array that contains the value of the nodes. atlas : str, os.PathLike, 3D Nifti1Image, or numpy.ndarray The 3d nifti image of an atlas, a string or path to its position, or a list of coordinates of the center of mass of parcels. filename : None, str, or os.PathLike, optional The path to save the plot on disk. Returns ------- 0 If there are no errors. Raises ------ ImportError If matplotlib and/or nilearn are not installed. If nibabel is not installed. nib.filebasedimages.ImageFileError If given path is not an atlas. ValueError If ns has more than 2 dimensions. If coordinates can't be extracted from atlas.
nigsp/viz.py
plot_nodes
smoia/nigsp
2
python
def plot_nodes(ns, atlas, filename=None): "\n Create a marker plot in the MNI space.\n\n If ns has 2 dimensions, average first along last dimension.\n\n Parameters\n ----------\n ns : numpy.ndarray\n A 1- or 2- D array that contains the value of the nodes.\n atlas : str, os.PathLike, 3D Nifti1Image, or numpy.ndarray\n The 3d nifti image of an atlas, a string or path to its position,\n or a list of coordinates of the center of mass of parcels.\n filename : None, str, or os.PathLike, optional\n The path to save the plot on disk.\n\n Returns\n -------\n 0\n If there are no errors.\n\n Raises\n ------\n ImportError\n If matplotlib and/or nilearn are not installed.\n If nibabel is not installed.\n nib.filebasedimages.ImageFileError\n If given path is not an atlas.\n ValueError\n If ns has more than 2 dimensions.\n If coordinates can't be extracted from atlas.\n " try: from nilearn.plotting import find_parcellation_cut_coords, plot_markers import matplotlib.pyplot as plt except ImportError: raise ImportError('nilearn and matplotlib are required to plot node images. Please see install instructions.') ns = ns.squeeze() if (ns.ndim > 2): raise ValueError('Cannot plot connectivity matrices for matrix of dimensions > 2.') elif (ns.ndim == 2): LGR.warning('Given matrix has 2 dimensions, averaging across last dimension.') ns = ns.mean(axis=(- 1)) try: coord = find_parcellation_cut_coords(atlas) except: if (type(atlas) is np.ndarray): if ((atlas.ndim > 2) or (atlas.shape[1] != 3)): raise NotImplementedError('Only atlases in nifti format or list of coordinates are supported.') coord = atlas else: try: import nibabel as nib if os.path.isfile(atlas): img = nib.load(atlas) else: raise nib.filebasedimages.ImageFileError(f'Cannot find file {atlas}') coord = find_parcellation_cut_coords(img) except ImportError: raise ImportError('Nibabel is required to handle atlases I/O. Please see install instructions.') try: coord except NameError: raise ValueError('Could not obtain coordinates from given atlas.') if (ns.shape[0] != coord.shape[0]): raise ValueError('Node array and coordinates array have different length.') LGR.info('Creating markerplot.') plt.figure(figsize=FIGSIZE) plot_markers(ns, coord) if (filename is not None): plt.savefig(filename, dpi=SET_DPI) return 0
def plot_nodes(ns, atlas, filename=None): "\n Create a marker plot in the MNI space.\n\n If ns has 2 dimensions, average first along last dimension.\n\n Parameters\n ----------\n ns : numpy.ndarray\n A 1- or 2- D array that contains the value of the nodes.\n atlas : str, os.PathLike, 3D Nifti1Image, or numpy.ndarray\n The 3d nifti image of an atlas, a string or path to its position,\n or a list of coordinates of the center of mass of parcels.\n filename : None, str, or os.PathLike, optional\n The path to save the plot on disk.\n\n Returns\n -------\n 0\n If there are no errors.\n\n Raises\n ------\n ImportError\n If matplotlib and/or nilearn are not installed.\n If nibabel is not installed.\n nib.filebasedimages.ImageFileError\n If given path is not an atlas.\n ValueError\n If ns has more than 2 dimensions.\n If coordinates can't be extracted from atlas.\n " try: from nilearn.plotting import find_parcellation_cut_coords, plot_markers import matplotlib.pyplot as plt except ImportError: raise ImportError('nilearn and matplotlib are required to plot node images. Please see install instructions.') ns = ns.squeeze() if (ns.ndim > 2): raise ValueError('Cannot plot connectivity matrices for matrix of dimensions > 2.') elif (ns.ndim == 2): LGR.warning('Given matrix has 2 dimensions, averaging across last dimension.') ns = ns.mean(axis=(- 1)) try: coord = find_parcellation_cut_coords(atlas) except: if (type(atlas) is np.ndarray): if ((atlas.ndim > 2) or (atlas.shape[1] != 3)): raise NotImplementedError('Only atlases in nifti format or list of coordinates are supported.') coord = atlas else: try: import nibabel as nib if os.path.isfile(atlas): img = nib.load(atlas) else: raise nib.filebasedimages.ImageFileError(f'Cannot find file {atlas}') coord = find_parcellation_cut_coords(img) except ImportError: raise ImportError('Nibabel is required to handle atlases I/O. Please see install instructions.') try: coord except NameError: raise ValueError('Could not obtain coordinates from given atlas.') if (ns.shape[0] != coord.shape[0]): raise ValueError('Node array and coordinates array have different length.') LGR.info('Creating markerplot.') plt.figure(figsize=FIGSIZE) plot_markers(ns, coord) if (filename is not None): plt.savefig(filename, dpi=SET_DPI) return 0<|docstring|>Create a marker plot in the MNI space. If ns has 2 dimensions, average first along last dimension. Parameters ---------- ns : numpy.ndarray A 1- or 2- D array that contains the value of the nodes. atlas : str, os.PathLike, 3D Nifti1Image, or numpy.ndarray The 3d nifti image of an atlas, a string or path to its position, or a list of coordinates of the center of mass of parcels. filename : None, str, or os.PathLike, optional The path to save the plot on disk. Returns ------- 0 If there are no errors. Raises ------ ImportError If matplotlib and/or nilearn are not installed. If nibabel is not installed. nib.filebasedimages.ImageFileError If given path is not an atlas. ValueError If ns has more than 2 dimensions. If coordinates can't be extracted from atlas.<|endoftext|>
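plot_nodes above falls back to treating the atlas argument as a plain (n, 3) array of coordinates when nilearn cannot cut parcellation coordinates from it, so atlas I/O can be skipped entirely. A sketch with invented MNI positions, again assuming the nigsp.viz import path:

import numpy as np
from nigsp.viz import plot_nodes

coords = np.array([[0.0, -52.0, 26.0],    # made-up MNI coordinates
                   [46.0, -66.0, 30.0],
                   [-44.0, -66.0, 30.0]])
ns = np.array([1.0, 0.5, -0.5])           # one value per node
plot_nodes(ns, coords, filename='nodes.png')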
bf40e620c65a0c9d0fe8bc2cff4ac8bede41357edbc4aab263aaf95b3ba6e14b
def test_quittung_create(self): 'Test case for quittung_create\n\n Create a receipt for an energy delivery (only valid in Germany). # noqa: E501\n ' pass
Test case for quittung_create Create a receipt for an energy delivery (only valid in Germany). # noqa: E501
out/python/test/test_strom_quittung_api.py
test_quittung_create
energychain/corrently-api
0
python
def test_quittung_create(self): 'Test case for quittung_create\n\n Create a receipt for an energy delivery (only valid in Germany). # noqa: E501\n ' pass
def test_quittung_create(self): 'Test case for quittung_create\n\n Create a receipt for an energy delivery (only valid in Germany). # noqa: E501\n ' pass<|docstring|>Test case for quittung_create Create a receipt for an energy delivery (only valid in Germany). # noqa: E501<|endoftext|>
443f0d39317b339aa83507f655a7cd27830e8c07a8bfb0446e4427c98f9c8fbe
def rx_pi(self, theta, q): 'Apply Rx to q.' return self.append(CX_PIGate(theta), [q], [])
Apply Rx to q.
qiskit/extensions/standard/rx_pi.py
rx_pi
shaimach/qiskit-terra-osq
0
python
def rx_pi(self, theta, q): return self.append(CX_PIGate(theta), [q], [])
def rx_pi(self, theta, q): return self.append(CX_PIGate(theta), [q], [])<|docstring|>Apply Rx to q.<|endoftext|>
760f218466d6b117fc24c4f836309b1be8f046618a9e59a7593a29e623d934c8
def __init__(self, theta): 'Create new rx single qubit gate.' if ((theta % 1) != 0): raise QiskitError('the desired angle is not supported by the gate ') super().__init__('rx_pi/2', 1, [theta])
Create new rx single qubit gate.
qiskit/extensions/standard/rx_pi.py
__init__
shaimach/qiskit-terra-osq
0
python
def __init__(self, theta): if ((theta % 1) != 0): raise QiskitError('the desired angle is not supported by the gate ') super().__init__('rx_pi/2', 1, [theta])
def __init__(self, theta): if ((theta % 1) != 0): raise QiskitError('the desired angle is not supported by the gate ') super().__init__('rx_pi/2', 1, [theta])<|docstring|>Create new rx single qubit gate.<|endoftext|>
760a1fd6e40c489c07889d51c03562b438d7173e6661fe9d95137306ec2148f4
def _define(self): '\n gate rx(theta) a {u3(theta, -pi/2, pi/2) a;}\n ' definition = [] q = QuantumRegister(1, 'q') rule = [(U3Gate(((self.params[0] * pi) / 2), ((- pi) / 2), (pi / 2)), [q[0]], [])] for inst in rule: definition.append(inst) self.definition = definition
gate rx(theta) a {u3(theta, -pi/2, pi/2) a;}
qiskit/extensions/standard/rx_pi.py
_define
shaimach/qiskit-terra-osq
0
python
def _define(self): '\n \n ' definition = [] q = QuantumRegister(1, 'q') rule = [(U3Gate(((self.params[0] * pi) / 2), ((- pi) / 2), (pi / 2)), [q[0]], [])] for inst in rule: definition.append(inst) self.definition = definition
def _define(self): '\n \n ' definition = [] q = QuantumRegister(1, 'q') rule = [(U3Gate(((self.params[0] * pi) / 2), ((- pi) / 2), (pi / 2)), [q[0]], [])] for inst in rule: definition.append(inst) self.definition = definition<|docstring|>gate rx(theta) a {u3(theta, -pi/2, pi/2) a;}<|endoftext|>
0a19e909d02b8b355a19dcc73ab1bd4569fa7f7389ef67f15ac70640feb63e76
def inverse(self): 'Invert this gate.\n\n rx(theta)^dagger = rx(-theta)\n ' return CX_PIGate((- self.params[0]))
Invert this gate. rx(theta)^dagger = rx(-theta)
qiskit/extensions/standard/rx_pi.py
inverse
shaimach/qiskit-terra-osq
0
python
def inverse(self): 'Invert this gate.\n\n rx(theta)^dagger = rx(-theta)\n ' return CX_PIGate((- self.params[0]))
def inverse(self): 'Invert this gate.\n\n rx(theta)^dagger = rx(-theta)\n ' return CX_PIGate((- self.params[0]))<|docstring|>Invert this gate. rx(theta)^dagger = rx(-theta)<|endoftext|>
0c0f548a7b2d473bc7cde52e5dce44e21142358f2007c2b171b6188101f36a01
def to_matrix(self): 'Return a Numpy.array for the U3 gate.' lam = self.params[0] lam = float(lam) return numpy.array([[1, 0], [0, numpy.exp((1j * lam))]], dtype=complex)
Return a Numpy.array for the U3 gate.
qiskit/extensions/standard/rx_pi.py
to_matrix
shaimach/qiskit-terra-osq
0
python
def to_matrix(self): lam = self.params[0] lam = float(lam) return numpy.array([[1, 0], [0, numpy.exp((1j * lam))]], dtype=complex)
def to_matrix(self): lam = self.params[0] lam = float(lam) return numpy.array([[1, 0], [0, numpy.exp((1j * lam))]], dtype=complex)<|docstring|>Return a Numpy.array for the U3 gate.<|endoftext|>
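The rx_pi records form the usual custom-gate triple: a circuit helper, a _define decomposition into U3, and inverse/to_matrix. Note that to_matrix actually returns diag(1, exp(i*lam)), a phase-type matrix despite the Rx naming, so unitarity and the negated-parameter inverse can be checked with plain numpy:

import numpy as np

def gate_matrix(lam):
    # mirrors the to_matrix record: diag(1, exp(i*lam))
    return np.array([[1, 0], [0, np.exp(1j * lam)]], dtype=complex)

m = gate_matrix(0.7)
assert np.allclose(m @ m.conj().T, np.eye(2))            # unitary
assert np.allclose(m @ gate_matrix(-0.7), np.eye(2))     # inverse() negates the angle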
72e8f1ec81ec0063770eaa3ebfe0498622178236c2e1682ca199bc7e53e34d34
def get_MS1(time, rep, cond, species): 'Returns the desired MS1 intensity of given experiment and desired species\n\n Parameters\n ----------\n time : int/float\n Timepoint of the experiment\n\n rep : int\n Replicate of the experiment\n\n cond : str\n Condition/Treatment of the experiment\n\n species : str\n Species of which the MS1 data should be returned\n\n Returns\n -------\n float\n MS1 Intensity of given experiment\n ' return df_MS1[(((df_MS1['Time in h'] == time) & (df_MS1.Rep == rep)) & (df_MS1.Exp == cond))][species].values[0]
Returns the desired MS1 intensity of given experiment and desired species Parameters ---------- time : int/float Timepoint of the experiment rep : int Replicate of the experiment cond : str Condition/Treatment of the experiment species : str Species of which the MS1 data should be returned Returns ------- float MS1 Intensity of given experiment
Site-specific_intensities_H3.py
get_MS1
functional-proteo-metabolomics/CoMetChem
0
python
def get_MS1(time, rep, cond, species): 'Returns the desired MS1 intensity of given experiment and desired species\n\n Parameters\n ----------\n time : int/float\n Timepoint of the experiment\n\n rep : int\n Replicate of the experiment\n\n cond : str\n Condition/Treatment of the experiment\n\n species : str\n Species of which the MS1 data should be returned\n\n Returns\n -------\n float\n MS1 Intensity of given experiment\n ' return df_MS1[(((df_MS1['Time in h'] == time) & (df_MS1.Rep == rep)) & (df_MS1.Exp == cond))][species].values[0]
def get_MS1(time, rep, cond, species): 'Returns the desired MS1 intensity of given experiment and desired species\n\n Parameters\n ----------\n time : int/float\n Timepoint of the experiment\n\n rep : int\n Replicate of the experiment\n\n cond : str\n Condition/Treatment of the experiment\n\n species : str\n Species of which the MS1 data should be returned\n\n Returns\n -------\n float\n MS1 Intensity of given experiment\n ' return df_MS1[(((df_MS1['Time in h'] == time) & (df_MS1.Rep == rep)) & (df_MS1.Exp == cond))][species].values[0]<|docstring|>Returns the desired MS1 intensity of given experiment and desired species Parameters ---------- time : int/float Timepoint of the experiment rep : int Replicate of the experiment cond : str Condition/Treatment of the experiment species : str Species of which the MS1 data should be returned Returns ------- float MS1 Intensity of given experiment<|endoftext|>
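get_MS1 above is a three-way boolean mask over a module-level df_MS1 followed by a scalar lookup. The same pattern on a tiny self-contained frame, so the indexing is visible end to end; column names and values here are invented stand-ins.

import pandas as pd

df_MS1 = pd.DataFrame({'Time in h': [0, 4], 'Rep': [1, 1],
                       'Exp': ['ctrl', 'ctrl'], 'K9ac': [1.5e6, 2.3e6]})
mask = (df_MS1['Time in h'] == 4) & (df_MS1.Rep == 1) & (df_MS1.Exp == 'ctrl')
value = df_MS1[mask]['K9ac'].values[0]   # 2300000.0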
b514ba7fcd06a6a1a157fb3ce384ded18219994518c365200cf964a4aed8c4fb
def main(): '\n This program simulates a bouncing ball at (START_X, START_Y)\n that has VX as x velocity and 0 as y velocity. Each bounce reduces\n y velocity to REDUCE of itself.\n ' ball.filled = True ball.fill_color = 'black' window.add(ball) onmouseclicked(bounce)
This program simulates a bouncing ball at (START_X, START_Y) that has VX as x velocity and 0 as y velocity. Each bounce reduces y velocity to REDUCE of itself.
bouncing_ball/bouncing_ball.py
main
pe11te18r/MystanCodeProjects
0
python
def main(): '\n This program simulates a bouncing ball at (START_X, START_Y)\n that has VX as x velocity and 0 as y velocity. Each bounce reduces\n y velocity to REDUCE of itself.\n ' ball.filled = True ball.fill_color = 'black' window.add(ball) onmouseclicked(bounce)
def main(): '\n This program simulates a bouncing ball at (START_X, START_Y)\n that has VX as x velocity and 0 as y velocity. Each bounce reduces\n y velocity to REDUCE of itself.\n ' ball.filled = True ball.fill_color = 'black' window.add(ball) onmouseclicked(bounce)<|docstring|>This program simulates a bouncing ball at (START_X, START_Y) that has VX as x velocity and 0 as y velocity. Each bounce reduces y velocity to REDUCE of itself.<|endoftext|>
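The bouncing-ball main above only wires up the click handler; the physics lives in the bounce callback, which the record does not include. Going by the docstring (gravity acts on the y velocity, and each floor contact rescales it by REDUCE), one plausible update rule looks like this; all constants are invented.

GRAVITY, REDUCE, FLOOR_Y = 1.0, 0.9, 500.0   # invented values

def step(y, vy):
    vy += GRAVITY                  # gravity accelerates the ball downward
    y += vy
    if y >= FLOOR_Y and vy > 0:    # floor contact while moving down
        y = FLOOR_Y
        vy = -vy * REDUCE          # rebound with reduced speed
    return y, vy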
d0b3ba1ff287e426a58024aca576cd9187dfcbf26f29a8376b1d6d6f09ca1e00
@_dispatch.add_dispatch_list @tf_export('audio_microfrontend') def audio_microfrontend(audio, sample_rate=16000, window_size=25, window_step=10, num_channels=32, upper_band_limit=7500, lower_band_limit=125, smoothing_bits=10, even_smoothing=0.025, odd_smoothing=0.06, min_signal_remaining=0.05, enable_pcan=False, pcan_strength=0.95, pcan_offset=80, gain_bits=21, enable_log=True, scale_shift=6, left_context=0, right_context=0, frame_stride=1, zero_padding=False, out_scale=1, out_type=_dtypes.uint16, name=None): 'Audio Microfrontend Op.\n\n This Op converts a sequence of audio data into one or more\n feature vectors containing filterbanks of the input. The\n conversion process uses a lightweight library to perform:\n\n 1. A slicing window function\n 2. Short-time FFTs\n 3. Filterbank calculations\n 4. Noise reduction\n 5. PCAN Auto Gain Control\n 6. Logarithmic scaling\n\n Arguments\n audio: 1D Tensor, int16 audio data in temporal ordering.\n sample_rate: Integer, the sample rate of the audio in Hz.\n window_size: Integer, length of desired time frames in ms.\n window_step: Integer, length of step size for the next frame in ms.\n num_channels: Integer, the number of filterbank channels to use.\n upper_band_limit: Float, the highest frequency included in the filterbanks.\n lower_band_limit: Float, the lowest frequency included in the filterbanks.\n smoothing_bits: Int, scale up signal by 2^(smoothing_bits) before reduction.\n even_smoothing: Float, smoothing coefficient for even-numbered channels.\n odd_smoothing: Float, smoothing coefficient for odd-numbered channels.\n min_signal_remaining: Float, fraction of signal to preserve in smoothing.\n enable_pcan: Bool, enable PCAN auto gain control.\n pcan_strength: Float, gain normalization exponent.\n pcan_offset: Float, positive value added in the normalization denominator.\n gain_bits: Int, number of fractional bits in the gain.\n enable_log: Bool, enable logarithmic scaling of filterbanks.\n scale_shift: Integer, scale filterbanks by 2^(scale_shift).\n left_context: Integer, number of preceding frames to attach to each frame.\n right_context: Integer, number of preceding frames to attach to each frame.\n frame_stride: Integer, M frames to skip over, where output[n] = frame[n*M].\n zero_padding: Bool, if left/right context is out-of-bounds, attach frame of\n zeroes. Otherwise, frame[0] or frame[size-1] will be copied.\n out_scale: Integer, divide all filterbanks by this number.\n out_type: DType, type of the output Tensor, defaults to UINT16.\n\n Returns\n filterbanks: 2D Tensor, each row is a time frame, each column is a channel.\n\n Args:\n audio: A `Tensor` of type `int16`.\n sample_rate: An optional `int`. Defaults to `16000`.\n window_size: An optional `int`. Defaults to `25`.\n window_step: An optional `int`. Defaults to `10`.\n num_channels: An optional `int`. Defaults to `32`.\n upper_band_limit: An optional `float`. Defaults to `7500`.\n lower_band_limit: An optional `float`. Defaults to `125`.\n smoothing_bits: An optional `int`. Defaults to `10`.\n even_smoothing: An optional `float`. Defaults to `0.025`.\n odd_smoothing: An optional `float`. Defaults to `0.06`.\n min_signal_remaining: An optional `float`. Defaults to `0.05`.\n enable_pcan: An optional `bool`. Defaults to `False`.\n pcan_strength: An optional `float`. Defaults to `0.95`.\n pcan_offset: An optional `float`. Defaults to `80`.\n gain_bits: An optional `int`. Defaults to `21`.\n enable_log: An optional `bool`. Defaults to `True`.\n scale_shift: An optional `int`. Defaults to `6`.\n left_context: An optional `int`. Defaults to `0`.\n right_context: An optional `int`. Defaults to `0`.\n frame_stride: An optional `int`. Defaults to `1`.\n zero_padding: An optional `bool`. Defaults to `False`.\n out_scale: An optional `int`. Defaults to `1`.\n out_type: An optional `tf.DType` from: `tf.uint16, tf.float32`. Defaults to `tf.uint16`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `out_type`.\n ' _ctx = (_context._context or _context.context()) tld = _ctx._thread_local_data if tld.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(_ctx._context_handle, tld.device_name, 'AudioMicrofrontend', name, tld.op_callbacks, audio, 'sample_rate', sample_rate, 'window_size', window_size, 'window_step', window_step, 'num_channels', num_channels, 'upper_band_limit', upper_band_limit, 'lower_band_limit', lower_band_limit, 'smoothing_bits', smoothing_bits, 'even_smoothing', even_smoothing, 'odd_smoothing', odd_smoothing, 'min_signal_remaining', min_signal_remaining, 'enable_pcan', enable_pcan, 'pcan_strength', pcan_strength, 'pcan_offset', pcan_offset, 'gain_bits', gain_bits, 'enable_log', enable_log, 'scale_shift', scale_shift, 'left_context', left_context, 'right_context', right_context, 'frame_stride', frame_stride, 'zero_padding', zero_padding, 'out_scale', out_scale, 'out_type', out_type) return _result except _core._FallbackException: try: return audio_microfrontend_eager_fallback(audio, sample_rate=sample_rate, window_size=window_size, window_step=window_step, num_channels=num_channels, upper_band_limit=upper_band_limit, lower_band_limit=lower_band_limit, smoothing_bits=smoothing_bits, even_smoothing=even_smoothing, odd_smoothing=odd_smoothing, min_signal_remaining=min_signal_remaining, enable_pcan=enable_pcan, pcan_strength=pcan_strength, pcan_offset=pcan_offset, gain_bits=gain_bits, enable_log=enable_log, scale_shift=scale_shift, left_context=left_context, right_context=right_context, frame_stride=frame_stride, zero_padding=zero_padding, out_scale=out_scale, out_type=out_type, name=name, ctx=_ctx) except _core._SymbolicException: pass except (TypeError, ValueError): result = _dispatch.dispatch(audio_microfrontend, audio=audio, sample_rate=sample_rate, window_size=window_size, window_step=window_step, num_channels=num_channels, upper_band_limit=upper_band_limit, lower_band_limit=lower_band_limit, smoothing_bits=smoothing_bits, even_smoothing=even_smoothing, odd_smoothing=odd_smoothing, min_signal_remaining=min_signal_remaining, enable_pcan=enable_pcan, pcan_strength=pcan_strength, pcan_offset=pcan_offset, gain_bits=gain_bits, enable_log=enable_log, scale_shift=scale_shift, left_context=left_context, right_context=right_context, frame_stride=frame_stride, zero_padding=zero_padding, out_scale=out_scale, out_type=out_type, name=name) if (result is not _dispatch.OpDispatcher.NOT_SUPPORTED): return result raise except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) if (sample_rate is None): sample_rate = 16000 sample_rate = _execute.make_int(sample_rate, 'sample_rate') if (window_size is None): window_size = 25 window_size = _execute.make_int(window_size, 'window_size') if (window_step is None): window_step = 10 window_step = _execute.make_int(window_step, 'window_step') if (num_channels is None): num_channels = 32 num_channels = _execute.make_int(num_channels, 'num_channels') if (upper_band_limit is None): upper_band_limit = 7500 upper_band_limit = _execute.make_float(upper_band_limit, 'upper_band_limit') if (lower_band_limit is None): lower_band_limit = 125 lower_band_limit = _execute.make_float(lower_band_limit, 'lower_band_limit') if (smoothing_bits is None): smoothing_bits = 10 smoothing_bits = _execute.make_int(smoothing_bits, 'smoothing_bits') if (even_smoothing is None): even_smoothing = 0.025 even_smoothing = _execute.make_float(even_smoothing, 'even_smoothing') if (odd_smoothing is None): odd_smoothing = 0.06 odd_smoothing = _execute.make_float(odd_smoothing, 'odd_smoothing') if (min_signal_remaining is None): min_signal_remaining = 0.05 min_signal_remaining = _execute.make_float(min_signal_remaining, 'min_signal_remaining') if (enable_pcan is None): enable_pcan = False enable_pcan = _execute.make_bool(enable_pcan, 'enable_pcan') if (pcan_strength is None): pcan_strength = 0.95 pcan_strength = _execute.make_float(pcan_strength, 'pcan_strength') if (pcan_offset is None): pcan_offset = 80 pcan_offset = _execute.make_float(pcan_offset, 'pcan_offset') if (gain_bits is None): gain_bits = 21 gain_bits = _execute.make_int(gain_bits, 'gain_bits') if (enable_log is None): enable_log = True enable_log = _execute.make_bool(enable_log, 'enable_log') if (scale_shift is None): scale_shift = 6 scale_shift = _execute.make_int(scale_shift, 'scale_shift') if (left_context is None): left_context = 0 left_context = _execute.make_int(left_context, 'left_context') if (right_context is None): right_context = 0 right_context = _execute.make_int(right_context, 'right_context') if (frame_stride is None): frame_stride = 1 frame_stride = _execute.make_int(frame_stride, 'frame_stride') if (zero_padding is None): zero_padding = False zero_padding = _execute.make_bool(zero_padding, 'zero_padding') if (out_scale is None): out_scale = 1 out_scale = _execute.make_int(out_scale, 'out_scale') if (out_type is None): out_type = _dtypes.uint16 out_type = _execute.make_type(out_type, 'out_type') try: (_, _, _op, _outputs) = _op_def_library._apply_op_helper('AudioMicrofrontend', audio=audio, sample_rate=sample_rate, window_size=window_size, window_step=window_step, num_channels=num_channels, upper_band_limit=upper_band_limit, lower_band_limit=lower_band_limit, smoothing_bits=smoothing_bits, even_smoothing=even_smoothing, odd_smoothing=odd_smoothing, min_signal_remaining=min_signal_remaining, enable_pcan=enable_pcan, pcan_strength=pcan_strength, pcan_offset=pcan_offset, gain_bits=gain_bits, enable_log=enable_log, scale_shift=scale_shift, left_context=left_context, right_context=right_context, frame_stride=frame_stride, zero_padding=zero_padding, out_scale=out_scale, out_type=out_type, name=name) except (TypeError, ValueError): result = _dispatch.dispatch(audio_microfrontend, audio=audio, sample_rate=sample_rate, window_size=window_size, window_step=window_step, num_channels=num_channels, upper_band_limit=upper_band_limit, lower_band_limit=lower_band_limit, smoothing_bits=smoothing_bits, even_smoothing=even_smoothing, odd_smoothing=odd_smoothing, min_signal_remaining=min_signal_remaining, enable_pcan=enable_pcan, pcan_strength=pcan_strength, pcan_offset=pcan_offset, gain_bits=gain_bits, enable_log=enable_log, scale_shift=scale_shift, left_context=left_context, right_context=right_context, frame_stride=frame_stride, zero_padding=zero_padding, out_scale=out_scale, out_type=out_type, name=name) if (result is not _dispatch.OpDispatcher.NOT_SUPPORTED): return result raise _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ('sample_rate', _op._get_attr_int('sample_rate'), 'window_size', _op._get_attr_int('window_size'), 'window_step', _op._get_attr_int('window_step'), 'num_channels', _op._get_attr_int('num_channels'), 'upper_band_limit', _op.get_attr('upper_band_limit'), 'lower_band_limit', _op.get_attr('lower_band_limit'), 'smoothing_bits', _op._get_attr_int('smoothing_bits'), 'even_smoothing', _op.get_attr('even_smoothing'), 'odd_smoothing', _op.get_attr('odd_smoothing'), 'min_signal_remaining', _op.get_attr('min_signal_remaining'), 'enable_pcan', _op._get_attr_bool('enable_pcan'), 'pcan_strength', _op.get_attr('pcan_strength'), 'pcan_offset', _op.get_attr('pcan_offset'), 'gain_bits', _op._get_attr_int('gain_bits'), 'enable_log', _op._get_attr_bool('enable_log'), 'scale_shift', _op._get_attr_int('scale_shift'), 'left_context', _op._get_attr_int('left_context'), 'right_context', _op._get_attr_int('right_context'), 'frame_stride', _op._get_attr_int('frame_stride'), 'zero_padding', _op._get_attr_bool('zero_padding'), 'out_scale', _op._get_attr_int('out_scale'), 'out_type', _op._get_attr_type('out_type')) _inputs_flat = _op.inputs _execute.record_gradient('AudioMicrofrontend', _inputs_flat, _attrs, _result) (_result,) = _result return _result
Audio Microfrontend Op. This Op converts a sequence of audio data into one or more feature vectors containing filterbanks of the input. The conversion process uses a lightweight library to perform: 1. A slicing window function 2. Short-time FFTs 3. Filterbank calculations 4. Noise reduction 5. PCAN Auto Gain Control 6. Logarithmic scaling Arguments audio: 1D Tensor, int16 audio data in temporal ordering. sample_rate: Integer, the sample rate of the audio in Hz. window_size: Integer, length of desired time frames in ms. window_step: Integer, length of step size for the next frame in ms. num_channels: Integer, the number of filterbank channels to use. upper_band_limit: Float, the highest frequency included in the filterbanks. lower_band_limit: Float, the lowest frequency included in the filterbanks. smoothing_bits: Int, scale up signal by 2^(smoothing_bits) before reduction. even_smoothing: Float, smoothing coefficient for even-numbered channels. odd_smoothing: Float, smoothing coefficient for odd-numbered channels. min_signal_remaining: Float, fraction of signal to preserve in smoothing. enable_pcan: Bool, enable PCAN auto gain control. pcan_strength: Float, gain normalization exponent. pcan_offset: Float, positive value added in the normalization denominator. gain_bits: Int, number of fractional bits in the gain. enable_log: Bool, enable logarithmic scaling of filterbanks. scale_shift: Integer, scale filterbanks by 2^(scale_shift). left_context: Integer, number of preceding frames to attach to each frame. right_context: Integer, number of following frames to attach to each frame. frame_stride: Integer, M frames to skip over, where output[n] = frame[n*M]. zero_padding: Bool, if left/right context is out-of-bounds, attach frame of zeroes. Otherwise, frame[0] or frame[size-1] will be copied. out_scale: Integer, divide all filterbanks by this number. out_type: DType, type of the output Tensor, defaults to UINT16. Returns filterbanks: 2D Tensor, each row is a time frame, each column is a channel. Args: audio: A `Tensor` of type `int16`. sample_rate: An optional `int`. Defaults to `16000`. window_size: An optional `int`. Defaults to `25`. window_step: An optional `int`. Defaults to `10`. num_channels: An optional `int`. Defaults to `32`. upper_band_limit: An optional `float`. Defaults to `7500`. lower_band_limit: An optional `float`. Defaults to `125`. smoothing_bits: An optional `int`. Defaults to `10`. even_smoothing: An optional `float`. Defaults to `0.025`. odd_smoothing: An optional `float`. Defaults to `0.06`. min_signal_remaining: An optional `float`. Defaults to `0.05`. enable_pcan: An optional `bool`. Defaults to `False`. pcan_strength: An optional `float`. Defaults to `0.95`. pcan_offset: An optional `float`. Defaults to `80`. gain_bits: An optional `int`. Defaults to `21`. enable_log: An optional `bool`. Defaults to `True`. scale_shift: An optional `int`. Defaults to `6`. left_context: An optional `int`. Defaults to `0`. right_context: An optional `int`. Defaults to `0`. frame_stride: An optional `int`. Defaults to `1`. zero_padding: An optional `bool`. Defaults to `False`. out_scale: An optional `int`. Defaults to `1`. out_type: An optional `tf.DType` from: `tf.uint16, tf.float32`. Defaults to `tf.uint16`. name: A name for the operation (optional). Returns: A `Tensor` of type `out_type`.
venv/lib/python3.6/site-packages/tensorflow_core/lite/experimental/microfrontend/ops/gen_audio_microfrontend_op.py
audio_microfrontend
databill86/HyperFoods
2
python
@_dispatch.add_dispatch_list @tf_export('audio_microfrontend') def audio_microfrontend(audio, sample_rate=16000, window_size=25, window_step=10, num_channels=32, upper_band_limit=7500, lower_band_limit=125, smoothing_bits=10, even_smoothing=0.025, odd_smoothing=0.06, min_signal_remaining=0.05, enable_pcan=False, pcan_strength=0.95, pcan_offset=80, gain_bits=21, enable_log=True, scale_shift=6, left_context=0, right_context=0, frame_stride=1, zero_padding=False, out_scale=1, out_type=_dtypes.uint16, name=None): 'Audio Microfrontend Op.\n\n This Op converts a sequence of audio data into one or more\n feature vectors containing filterbanks of the input. The\n conversion process uses a lightweight library to perform:\n\n 1. A slicing window function\n 2. Short-time FFTs\n 3. Filterbank calculations\n 4. Noise reduction\n 5. PCAN Auto Gain Control\n 6. Logarithmic scaling\n\n Arguments\n audio: 1D Tensor, int16 audio data in temporal ordering.\n sample_rate: Integer, the sample rate of the audio in Hz.\n window_size: Integer, length of desired time frames in ms.\n window_step: Integer, length of step size for the next frame in ms.\n num_channels: Integer, the number of filterbank channels to use.\n upper_band_limit: Float, the highest frequency included in the filterbanks.\n lower_band_limit: Float, the lowest frequency included in the filterbanks.\n smoothing_bits: Int, scale up signal by 2^(smoothing_bits) before reduction.\n even_smoothing: Float, smoothing coefficient for even-numbered channels.\n odd_smoothing: Float, smoothing coefficient for odd-numbered channels.\n min_signal_remaining: Float, fraction of signal to preserve in smoothing.\n enable_pcan: Bool, enable PCAN auto gain control.\n pcan_strength: Float, gain normalization exponent.\n pcan_offset: Float, positive value added in the normalization denominator.\n gain_bits: Int, number of fractional bits in the gain.\n enable_log: Bool, enable logarithmic scaling of filterbanks.\n scale_shift: Integer, scale filterbanks by 2^(scale_shift).\n left_context: Integer, number of preceding frames to attach to each frame.\n right_context: Integer, number of preceding frames to attach to each frame.\n frame_stride: Integer, M frames to skip over, where output[n] = frame[n*M].\n zero_padding: Bool, if left/right context is out-of-bounds, attach frame of\n zeroes. Otherwise, frame[0] or frame[size-1] will be copied.\n out_scale: Integer, divide all filterbanks by this number.\n out_type: DType, type of the output Tensor, defaults to UINT16.\n\n Returns\n filterbanks: 2D Tensor, each row is a time frame, each column is a channel.\n\n Args:\n audio: A `Tensor` of type `int16`.\n sample_rate: An optional `int`. Defaults to `16000`.\n window_size: An optional `int`. Defaults to `25`.\n window_step: An optional `int`. Defaults to `10`.\n num_channels: An optional `int`. Defaults to `32`.\n upper_band_limit: An optional `float`. Defaults to `7500`.\n lower_band_limit: An optional `float`. Defaults to `125`.\n smoothing_bits: An optional `int`. Defaults to `10`.\n even_smoothing: An optional `float`. Defaults to `0.025`.\n odd_smoothing: An optional `float`. Defaults to `0.06`.\n min_signal_remaining: An optional `float`. Defaults to `0.05`.\n enable_pcan: An optional `bool`. Defaults to `False`.\n pcan_strength: An optional `float`. Defaults to `0.95`.\n pcan_offset: An optional `float`. Defaults to `80`.\n gain_bits: An optional `int`. Defaults to `21`.\n enable_log: An optional `bool`. Defaults to `True`.\n scale_shift: An optional `int`. 
Defaults to `6`.\n left_context: An optional `int`. Defaults to `0`.\n right_context: An optional `int`. Defaults to `0`.\n frame_stride: An optional `int`. Defaults to `1`.\n zero_padding: An optional `bool`. Defaults to `False`.\n out_scale: An optional `int`. Defaults to `1`.\n out_type: An optional `tf.DType` from: `tf.uint16, tf.float32`. Defaults to `tf.uint16`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `out_type`.\n ' _ctx = (_context._context or _context.context()) tld = _ctx._thread_local_data if tld.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(_ctx._context_handle, tld.device_name, 'AudioMicrofrontend', name, tld.op_callbacks, audio, 'sample_rate', sample_rate, 'window_size', window_size, 'window_step', window_step, 'num_channels', num_channels, 'upper_band_limit', upper_band_limit, 'lower_band_limit', lower_band_limit, 'smoothing_bits', smoothing_bits, 'even_smoothing', even_smoothing, 'odd_smoothing', odd_smoothing, 'min_signal_remaining', min_signal_remaining, 'enable_pcan', enable_pcan, 'pcan_strength', pcan_strength, 'pcan_offset', pcan_offset, 'gain_bits', gain_bits, 'enable_log', enable_log, 'scale_shift', scale_shift, 'left_context', left_context, 'right_context', right_context, 'frame_stride', frame_stride, 'zero_padding', zero_padding, 'out_scale', out_scale, 'out_type', out_type) return _result except _core._FallbackException: try: return audio_microfrontend_eager_fallback(audio, sample_rate=sample_rate, window_size=window_size, window_step=window_step, num_channels=num_channels, upper_band_limit=upper_band_limit, lower_band_limit=lower_band_limit, smoothing_bits=smoothing_bits, even_smoothing=even_smoothing, odd_smoothing=odd_smoothing, min_signal_remaining=min_signal_remaining, enable_pcan=enable_pcan, pcan_strength=pcan_strength, pcan_offset=pcan_offset, gain_bits=gain_bits, enable_log=enable_log, scale_shift=scale_shift, left_context=left_context, right_context=right_context, frame_stride=frame_stride, zero_padding=zero_padding, out_scale=out_scale, out_type=out_type, name=name, ctx=_ctx) except _core._SymbolicException: pass except (TypeError, ValueError): result = _dispatch.dispatch(audio_microfrontend, audio=audio, sample_rate=sample_rate, window_size=window_size, window_step=window_step, num_channels=num_channels, upper_band_limit=upper_band_limit, lower_band_limit=lower_band_limit, smoothing_bits=smoothing_bits, even_smoothing=even_smoothing, odd_smoothing=odd_smoothing, min_signal_remaining=min_signal_remaining, enable_pcan=enable_pcan, pcan_strength=pcan_strength, pcan_offset=pcan_offset, gain_bits=gain_bits, enable_log=enable_log, scale_shift=scale_shift, left_context=left_context, right_context=right_context, frame_stride=frame_stride, zero_padding=zero_padding, out_scale=out_scale, out_type=out_type, name=name) if (result is not _dispatch.OpDispatcher.NOT_SUPPORTED): return result raise except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) if (sample_rate is None): sample_rate = 16000 sample_rate = _execute.make_int(sample_rate, 'sample_rate') if (window_size is None): window_size = 25 window_size = _execute.make_int(window_size, 'window_size') if (window_step is None): window_step = 10 window_step = _execute.make_int(window_step, 'window_step') if (num_channels is None): num_channels = 32 num_channels = _execute.make_int(num_channels, 'num_channels') if (upper_band_limit is None): upper_band_limit = 7500 upper_band_limit = _execute.make_float(upper_band_limit, 
'upper_band_limit') if (lower_band_limit is None): lower_band_limit = 125 lower_band_limit = _execute.make_float(lower_band_limit, 'lower_band_limit') if (smoothing_bits is None): smoothing_bits = 10 smoothing_bits = _execute.make_int(smoothing_bits, 'smoothing_bits') if (even_smoothing is None): even_smoothing = 0.025 even_smoothing = _execute.make_float(even_smoothing, 'even_smoothing') if (odd_smoothing is None): odd_smoothing = 0.06 odd_smoothing = _execute.make_float(odd_smoothing, 'odd_smoothing') if (min_signal_remaining is None): min_signal_remaining = 0.05 min_signal_remaining = _execute.make_float(min_signal_remaining, 'min_signal_remaining') if (enable_pcan is None): enable_pcan = False enable_pcan = _execute.make_bool(enable_pcan, 'enable_pcan') if (pcan_strength is None): pcan_strength = 0.95 pcan_strength = _execute.make_float(pcan_strength, 'pcan_strength') if (pcan_offset is None): pcan_offset = 80 pcan_offset = _execute.make_float(pcan_offset, 'pcan_offset') if (gain_bits is None): gain_bits = 21 gain_bits = _execute.make_int(gain_bits, 'gain_bits') if (enable_log is None): enable_log = True enable_log = _execute.make_bool(enable_log, 'enable_log') if (scale_shift is None): scale_shift = 6 scale_shift = _execute.make_int(scale_shift, 'scale_shift') if (left_context is None): left_context = 0 left_context = _execute.make_int(left_context, 'left_context') if (right_context is None): right_context = 0 right_context = _execute.make_int(right_context, 'right_context') if (frame_stride is None): frame_stride = 1 frame_stride = _execute.make_int(frame_stride, 'frame_stride') if (zero_padding is None): zero_padding = False zero_padding = _execute.make_bool(zero_padding, 'zero_padding') if (out_scale is None): out_scale = 1 out_scale = _execute.make_int(out_scale, 'out_scale') if (out_type is None): out_type = _dtypes.uint16 out_type = _execute.make_type(out_type, 'out_type') try: (_, _, _op, _outputs) = _op_def_library._apply_op_helper('AudioMicrofrontend', audio=audio, sample_rate=sample_rate, window_size=window_size, window_step=window_step, num_channels=num_channels, upper_band_limit=upper_band_limit, lower_band_limit=lower_band_limit, smoothing_bits=smoothing_bits, even_smoothing=even_smoothing, odd_smoothing=odd_smoothing, min_signal_remaining=min_signal_remaining, enable_pcan=enable_pcan, pcan_strength=pcan_strength, pcan_offset=pcan_offset, gain_bits=gain_bits, enable_log=enable_log, scale_shift=scale_shift, left_context=left_context, right_context=right_context, frame_stride=frame_stride, zero_padding=zero_padding, out_scale=out_scale, out_type=out_type, name=name) except (TypeError, ValueError): result = _dispatch.dispatch(audio_microfrontend, audio=audio, sample_rate=sample_rate, window_size=window_size, window_step=window_step, num_channels=num_channels, upper_band_limit=upper_band_limit, lower_band_limit=lower_band_limit, smoothing_bits=smoothing_bits, even_smoothing=even_smoothing, odd_smoothing=odd_smoothing, min_signal_remaining=min_signal_remaining, enable_pcan=enable_pcan, pcan_strength=pcan_strength, pcan_offset=pcan_offset, gain_bits=gain_bits, enable_log=enable_log, scale_shift=scale_shift, left_context=left_context, right_context=right_context, frame_stride=frame_stride, zero_padding=zero_padding, out_scale=out_scale, out_type=out_type, name=name) if (result is not _dispatch.OpDispatcher.NOT_SUPPORTED): return result raise _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ('sample_rate', _op._get_attr_int('sample_rate'), 'window_size', 
_op._get_attr_int('window_size'), 'window_step', _op._get_attr_int('window_step'), 'num_channels', _op._get_attr_int('num_channels'), 'upper_band_limit', _op.get_attr('upper_band_limit'), 'lower_band_limit', _op.get_attr('lower_band_limit'), 'smoothing_bits', _op._get_attr_int('smoothing_bits'), 'even_smoothing', _op.get_attr('even_smoothing'), 'odd_smoothing', _op.get_attr('odd_smoothing'), 'min_signal_remaining', _op.get_attr('min_signal_remaining'), 'enable_pcan', _op._get_attr_bool('enable_pcan'), 'pcan_strength', _op.get_attr('pcan_strength'), 'pcan_offset', _op.get_attr('pcan_offset'), 'gain_bits', _op._get_attr_int('gain_bits'), 'enable_log', _op._get_attr_bool('enable_log'), 'scale_shift', _op._get_attr_int('scale_shift'), 'left_context', _op._get_attr_int('left_context'), 'right_context', _op._get_attr_int('right_context'), 'frame_stride', _op._get_attr_int('frame_stride'), 'zero_padding', _op._get_attr_bool('zero_padding'), 'out_scale', _op._get_attr_int('out_scale'), 'out_type', _op._get_attr_type('out_type')) _inputs_flat = _op.inputs _execute.record_gradient('AudioMicrofrontend', _inputs_flat, _attrs, _result) (_result,) = _result return _result
@_dispatch.add_dispatch_list @tf_export('audio_microfrontend') def audio_microfrontend(audio, sample_rate=16000, window_size=25, window_step=10, num_channels=32, upper_band_limit=7500, lower_band_limit=125, smoothing_bits=10, even_smoothing=0.025, odd_smoothing=0.06, min_signal_remaining=0.05, enable_pcan=False, pcan_strength=0.95, pcan_offset=80, gain_bits=21, enable_log=True, scale_shift=6, left_context=0, right_context=0, frame_stride=1, zero_padding=False, out_scale=1, out_type=_dtypes.uint16, name=None): 'Audio Microfrontend Op.\n\n This Op converts a sequence of audio data into one or more\n feature vectors containing filterbanks of the input. The\n conversion process uses a lightweight library to perform:\n\n 1. A slicing window function\n 2. Short-time FFTs\n 3. Filterbank calculations\n 4. Noise reduction\n 5. PCAN Auto Gain Control\n 6. Logarithmic scaling\n\n Arguments\n audio: 1D Tensor, int16 audio data in temporal ordering.\n sample_rate: Integer, the sample rate of the audio in Hz.\n window_size: Integer, length of desired time frames in ms.\n window_step: Integer, length of step size for the next frame in ms.\n num_channels: Integer, the number of filterbank channels to use.\n upper_band_limit: Float, the highest frequency included in the filterbanks.\n lower_band_limit: Float, the lowest frequency included in the filterbanks.\n smoothing_bits: Int, scale up signal by 2^(smoothing_bits) before reduction.\n even_smoothing: Float, smoothing coefficient for even-numbered channels.\n odd_smoothing: Float, smoothing coefficient for odd-numbered channels.\n min_signal_remaining: Float, fraction of signal to preserve in smoothing.\n enable_pcan: Bool, enable PCAN auto gain control.\n pcan_strength: Float, gain normalization exponent.\n pcan_offset: Float, positive value added in the normalization denominator.\n gain_bits: Int, number of fractional bits in the gain.\n enable_log: Bool, enable logarithmic scaling of filterbanks.\n scale_shift: Integer, scale filterbanks by 2^(scale_shift).\n left_context: Integer, number of preceding frames to attach to each frame.\n right_context: Integer, number of preceding frames to attach to each frame.\n frame_stride: Integer, M frames to skip over, where output[n] = frame[n*M].\n zero_padding: Bool, if left/right context is out-of-bounds, attach frame of\n zeroes. Otherwise, frame[0] or frame[size-1] will be copied.\n out_scale: Integer, divide all filterbanks by this number.\n out_type: DType, type of the output Tensor, defaults to UINT16.\n\n Returns\n filterbanks: 2D Tensor, each row is a time frame, each column is a channel.\n\n Args:\n audio: A `Tensor` of type `int16`.\n sample_rate: An optional `int`. Defaults to `16000`.\n window_size: An optional `int`. Defaults to `25`.\n window_step: An optional `int`. Defaults to `10`.\n num_channels: An optional `int`. Defaults to `32`.\n upper_band_limit: An optional `float`. Defaults to `7500`.\n lower_band_limit: An optional `float`. Defaults to `125`.\n smoothing_bits: An optional `int`. Defaults to `10`.\n even_smoothing: An optional `float`. Defaults to `0.025`.\n odd_smoothing: An optional `float`. Defaults to `0.06`.\n min_signal_remaining: An optional `float`. Defaults to `0.05`.\n enable_pcan: An optional `bool`. Defaults to `False`.\n pcan_strength: An optional `float`. Defaults to `0.95`.\n pcan_offset: An optional `float`. Defaults to `80`.\n gain_bits: An optional `int`. Defaults to `21`.\n enable_log: An optional `bool`. Defaults to `True`.\n scale_shift: An optional `int`. 
Defaults to `6`.\n left_context: An optional `int`. Defaults to `0`.\n right_context: An optional `int`. Defaults to `0`.\n frame_stride: An optional `int`. Defaults to `1`.\n zero_padding: An optional `bool`. Defaults to `False`.\n out_scale: An optional `int`. Defaults to `1`.\n out_type: An optional `tf.DType` from: `tf.uint16, tf.float32`. Defaults to `tf.uint16`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `out_type`.\n ' _ctx = (_context._context or _context.context()) tld = _ctx._thread_local_data if tld.is_eager: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(_ctx._context_handle, tld.device_name, 'AudioMicrofrontend', name, tld.op_callbacks, audio, 'sample_rate', sample_rate, 'window_size', window_size, 'window_step', window_step, 'num_channels', num_channels, 'upper_band_limit', upper_band_limit, 'lower_band_limit', lower_band_limit, 'smoothing_bits', smoothing_bits, 'even_smoothing', even_smoothing, 'odd_smoothing', odd_smoothing, 'min_signal_remaining', min_signal_remaining, 'enable_pcan', enable_pcan, 'pcan_strength', pcan_strength, 'pcan_offset', pcan_offset, 'gain_bits', gain_bits, 'enable_log', enable_log, 'scale_shift', scale_shift, 'left_context', left_context, 'right_context', right_context, 'frame_stride', frame_stride, 'zero_padding', zero_padding, 'out_scale', out_scale, 'out_type', out_type) return _result except _core._FallbackException: try: return audio_microfrontend_eager_fallback(audio, sample_rate=sample_rate, window_size=window_size, window_step=window_step, num_channels=num_channels, upper_band_limit=upper_band_limit, lower_band_limit=lower_band_limit, smoothing_bits=smoothing_bits, even_smoothing=even_smoothing, odd_smoothing=odd_smoothing, min_signal_remaining=min_signal_remaining, enable_pcan=enable_pcan, pcan_strength=pcan_strength, pcan_offset=pcan_offset, gain_bits=gain_bits, enable_log=enable_log, scale_shift=scale_shift, left_context=left_context, right_context=right_context, frame_stride=frame_stride, zero_padding=zero_padding, out_scale=out_scale, out_type=out_type, name=name, ctx=_ctx) except _core._SymbolicException: pass except (TypeError, ValueError): result = _dispatch.dispatch(audio_microfrontend, audio=audio, sample_rate=sample_rate, window_size=window_size, window_step=window_step, num_channels=num_channels, upper_band_limit=upper_band_limit, lower_band_limit=lower_band_limit, smoothing_bits=smoothing_bits, even_smoothing=even_smoothing, odd_smoothing=odd_smoothing, min_signal_remaining=min_signal_remaining, enable_pcan=enable_pcan, pcan_strength=pcan_strength, pcan_offset=pcan_offset, gain_bits=gain_bits, enable_log=enable_log, scale_shift=scale_shift, left_context=left_context, right_context=right_context, frame_stride=frame_stride, zero_padding=zero_padding, out_scale=out_scale, out_type=out_type, name=name) if (result is not _dispatch.OpDispatcher.NOT_SUPPORTED): return result raise except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) if (sample_rate is None): sample_rate = 16000 sample_rate = _execute.make_int(sample_rate, 'sample_rate') if (window_size is None): window_size = 25 window_size = _execute.make_int(window_size, 'window_size') if (window_step is None): window_step = 10 window_step = _execute.make_int(window_step, 'window_step') if (num_channels is None): num_channels = 32 num_channels = _execute.make_int(num_channels, 'num_channels') if (upper_band_limit is None): upper_band_limit = 7500 upper_band_limit = _execute.make_float(upper_band_limit, 
'upper_band_limit') if (lower_band_limit is None): lower_band_limit = 125 lower_band_limit = _execute.make_float(lower_band_limit, 'lower_band_limit') if (smoothing_bits is None): smoothing_bits = 10 smoothing_bits = _execute.make_int(smoothing_bits, 'smoothing_bits') if (even_smoothing is None): even_smoothing = 0.025 even_smoothing = _execute.make_float(even_smoothing, 'even_smoothing') if (odd_smoothing is None): odd_smoothing = 0.06 odd_smoothing = _execute.make_float(odd_smoothing, 'odd_smoothing') if (min_signal_remaining is None): min_signal_remaining = 0.05 min_signal_remaining = _execute.make_float(min_signal_remaining, 'min_signal_remaining') if (enable_pcan is None): enable_pcan = False enable_pcan = _execute.make_bool(enable_pcan, 'enable_pcan') if (pcan_strength is None): pcan_strength = 0.95 pcan_strength = _execute.make_float(pcan_strength, 'pcan_strength') if (pcan_offset is None): pcan_offset = 80 pcan_offset = _execute.make_float(pcan_offset, 'pcan_offset') if (gain_bits is None): gain_bits = 21 gain_bits = _execute.make_int(gain_bits, 'gain_bits') if (enable_log is None): enable_log = True enable_log = _execute.make_bool(enable_log, 'enable_log') if (scale_shift is None): scale_shift = 6 scale_shift = _execute.make_int(scale_shift, 'scale_shift') if (left_context is None): left_context = 0 left_context = _execute.make_int(left_context, 'left_context') if (right_context is None): right_context = 0 right_context = _execute.make_int(right_context, 'right_context') if (frame_stride is None): frame_stride = 1 frame_stride = _execute.make_int(frame_stride, 'frame_stride') if (zero_padding is None): zero_padding = False zero_padding = _execute.make_bool(zero_padding, 'zero_padding') if (out_scale is None): out_scale = 1 out_scale = _execute.make_int(out_scale, 'out_scale') if (out_type is None): out_type = _dtypes.uint16 out_type = _execute.make_type(out_type, 'out_type') try: (_, _, _op, _outputs) = _op_def_library._apply_op_helper('AudioMicrofrontend', audio=audio, sample_rate=sample_rate, window_size=window_size, window_step=window_step, num_channels=num_channels, upper_band_limit=upper_band_limit, lower_band_limit=lower_band_limit, smoothing_bits=smoothing_bits, even_smoothing=even_smoothing, odd_smoothing=odd_smoothing, min_signal_remaining=min_signal_remaining, enable_pcan=enable_pcan, pcan_strength=pcan_strength, pcan_offset=pcan_offset, gain_bits=gain_bits, enable_log=enable_log, scale_shift=scale_shift, left_context=left_context, right_context=right_context, frame_stride=frame_stride, zero_padding=zero_padding, out_scale=out_scale, out_type=out_type, name=name) except (TypeError, ValueError): result = _dispatch.dispatch(audio_microfrontend, audio=audio, sample_rate=sample_rate, window_size=window_size, window_step=window_step, num_channels=num_channels, upper_band_limit=upper_band_limit, lower_band_limit=lower_band_limit, smoothing_bits=smoothing_bits, even_smoothing=even_smoothing, odd_smoothing=odd_smoothing, min_signal_remaining=min_signal_remaining, enable_pcan=enable_pcan, pcan_strength=pcan_strength, pcan_offset=pcan_offset, gain_bits=gain_bits, enable_log=enable_log, scale_shift=scale_shift, left_context=left_context, right_context=right_context, frame_stride=frame_stride, zero_padding=zero_padding, out_scale=out_scale, out_type=out_type, name=name) if (result is not _dispatch.OpDispatcher.NOT_SUPPORTED): return result raise _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ('sample_rate', _op._get_attr_int('sample_rate'), 'window_size', 
_op._get_attr_int('window_size'), 'window_step', _op._get_attr_int('window_step'), 'num_channels', _op._get_attr_int('num_channels'), 'upper_band_limit', _op.get_attr('upper_band_limit'), 'lower_band_limit', _op.get_attr('lower_band_limit'), 'smoothing_bits', _op._get_attr_int('smoothing_bits'), 'even_smoothing', _op.get_attr('even_smoothing'), 'odd_smoothing', _op.get_attr('odd_smoothing'), 'min_signal_remaining', _op.get_attr('min_signal_remaining'), 'enable_pcan', _op._get_attr_bool('enable_pcan'), 'pcan_strength', _op.get_attr('pcan_strength'), 'pcan_offset', _op.get_attr('pcan_offset'), 'gain_bits', _op._get_attr_int('gain_bits'), 'enable_log', _op._get_attr_bool('enable_log'), 'scale_shift', _op._get_attr_int('scale_shift'), 'left_context', _op._get_attr_int('left_context'), 'right_context', _op._get_attr_int('right_context'), 'frame_stride', _op._get_attr_int('frame_stride'), 'zero_padding', _op._get_attr_bool('zero_padding'), 'out_scale', _op._get_attr_int('out_scale'), 'out_type', _op._get_attr_type('out_type')) _inputs_flat = _op.inputs _execute.record_gradient('AudioMicrofrontend', _inputs_flat, _attrs, _result) (_result,) = _result return _result<|docstring|>Audio Microfrontend Op. This Op converts a sequence of audio data into one or more feature vectors containing filterbanks of the input. The conversion process uses a lightweight library to perform: 1. A slicing window function 2. Short-time FFTs 3. Filterbank calculations 4. Noise reduction 5. PCAN Auto Gain Control 6. Logarithmic scaling Arguments audio: 1D Tensor, int16 audio data in temporal ordering. sample_rate: Integer, the sample rate of the audio in Hz. window_size: Integer, length of desired time frames in ms. window_step: Integer, length of step size for the next frame in ms. num_channels: Integer, the number of filterbank channels to use. upper_band_limit: Float, the highest frequency included in the filterbanks. lower_band_limit: Float, the lowest frequency included in the filterbanks. smoothing_bits: Int, scale up signal by 2^(smoothing_bits) before reduction. even_smoothing: Float, smoothing coefficient for even-numbered channels. odd_smoothing: Float, smoothing coefficient for odd-numbered channels. min_signal_remaining: Float, fraction of signal to preserve in smoothing. enable_pcan: Bool, enable PCAN auto gain control. pcan_strength: Float, gain normalization exponent. pcan_offset: Float, positive value added in the normalization denominator. gain_bits: Int, number of fractional bits in the gain. enable_log: Bool, enable logarithmic scaling of filterbanks. scale_shift: Integer, scale filterbanks by 2^(scale_shift). left_context: Integer, number of preceding frames to attach to each frame. right_context: Integer, number of preceding frames to attach to each frame. frame_stride: Integer, M frames to skip over, where output[n] = frame[n*M]. zero_padding: Bool, if left/right context is out-of-bounds, attach frame of zeroes. Otherwise, frame[0] or frame[size-1] will be copied. out_scale: Integer, divide all filterbanks by this number. out_type: DType, type of the output Tensor, defaults to UINT16. Returns filterbanks: 2D Tensor, each row is a time frame, each column is a channel. Args: audio: A `Tensor` of type `int16`. sample_rate: An optional `int`. Defaults to `16000`. window_size: An optional `int`. Defaults to `25`. window_step: An optional `int`. Defaults to `10`. num_channels: An optional `int`. Defaults to `32`. upper_band_limit: An optional `float`. Defaults to `7500`. 
lower_band_limit: An optional `float`. Defaults to `125`. smoothing_bits: An optional `int`. Defaults to `10`. even_smoothing: An optional `float`. Defaults to `0.025`. odd_smoothing: An optional `float`. Defaults to `0.06`. min_signal_remaining: An optional `float`. Defaults to `0.05`. enable_pcan: An optional `bool`. Defaults to `False`. pcan_strength: An optional `float`. Defaults to `0.95`. pcan_offset: An optional `float`. Defaults to `80`. gain_bits: An optional `int`. Defaults to `21`. enable_log: An optional `bool`. Defaults to `True`. scale_shift: An optional `int`. Defaults to `6`. left_context: An optional `int`. Defaults to `0`. right_context: An optional `int`. Defaults to `0`. frame_stride: An optional `int`. Defaults to `1`. zero_padding: An optional `bool`. Defaults to `False`. out_scale: An optional `int`. Defaults to `1`. out_type: An optional `tf.DType` from: `tf.uint16, tf.float32`. Defaults to `tf.uint16`. name: A name for the operation (optional). Returns: A `Tensor` of type `out_type`.<|endoftext|>
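A minimal usage sketch for the op documented above. The import path is an assumption based on this file's location in the TensorFlow 2.0-era package tree (the generated module is normally reached through its audio_microfrontend_op wrapper); nothing below comes from the record itself.

import numpy as np
import tensorflow as tf
# Assumed import path; adjust for your TensorFlow install.
from tensorflow.lite.experimental.microfrontend.python.ops import audio_microfrontend_op as frontend_op

audio = np.zeros(16000, dtype=np.int16)  # one second of int16 samples at 16 kHz
filterbanks = frontend_op.audio_microfrontend(
    audio, sample_rate=16000, window_size=25, window_step=10,
    num_channels=32, out_type=tf.uint16)
# 25 ms windows advancing 10 ms over 1 s of audio yield ~98 frames x 32 channels.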
05472cfce800e484442ffa387c8d9b51241b7f1f96f781f542a3918df3a19ac8
def check_modified_graph(self, new_stages): 'Generate graph including the new stage to check for errors' if (not getattr(self, '_skip_graph_checks', False)): self._collect_graph((self.stages + new_stages))
Generate a graph including the new stages to check for errors
dvc/repo/__init__.py
check_modified_graph
shizacat/dvc
0
python
def check_modified_graph(self, new_stages): if (not getattr(self, '_skip_graph_checks', False)): self._collect_graph((self.stages + new_stages))
def check_modified_graph(self, new_stages): if (not getattr(self, '_skip_graph_checks', False)): self._collect_graph((self.stages + new_stages))<|docstring|>Generate graph including the new stage to check for errors<|endoftext|>
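A hypothetical sketch of how this check is used before registering a new stage; `repo` and `new_stage` are assumed to exist, and conflicts surface as the graph errors raised by `_collect_graph` (see the record below).

# Raises e.g. OutputDuplicationError if new_stage's outputs collide with
# an existing stage's; returns nothing on success.
repo.check_modified_graph([new_stage])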
8ea64cdfcea42a0a06fef18963732e81a5fb68d60b8596c991beb0982656389c
def used_cache(self, targets=None, all_branches=False, with_deps=False, all_tags=False, all_commits=False, remote=None, force=False, jobs=None, recursive=False): "Get the stages related to the given target and collect\n the `info` of its outputs.\n\n This is useful to know what files from the cache are _in use_\n (namely, a file described as an output on a stage).\n\n The scope is, by default, the working directory, but you can use\n `all_branches`/`all_tags`/`all_commits` to expand the scope.\n\n Returns:\n A dictionary with Schemes (representing output's location) mapped\n to items containing the output's `dumpd` names and the output's\n children (if the given output is a directory).\n " from dvc.cache import NamedCache cache = NamedCache() for branch in self.brancher(all_branches=all_branches, all_tags=all_tags, all_commits=all_commits): targets = (targets or [None]) pairs = cat((self.collect_granular(target, recursive=recursive, with_deps=with_deps) for target in targets)) suffix = ('({})'.format(branch) if branch else '') for (stage, filter_info) in pairs: used_cache = stage.get_used_cache(remote=remote, force=force, jobs=jobs, filter_info=filter_info) cache.update(used_cache, suffix=suffix) return cache
Get the stages related to the given target and collect the `info` of its outputs. This is useful to know what files from the cache are _in use_ (namely, a file described as an output on a stage). The scope is, by default, the working directory, but you can use `all_branches`/`all_tags`/`all_commits` to expand the scope. Returns: A dictionary with Schemes (representing output's location) mapped to items containing the output's `dumpd` names and the output's children (if the given output is a directory).
dvc/repo/__init__.py
used_cache
shizacat/dvc
0
python
def used_cache(self, targets=None, all_branches=False, with_deps=False, all_tags=False, all_commits=False, remote=None, force=False, jobs=None, recursive=False): "Get the stages related to the given target and collect\n the `info` of its outputs.\n\n This is useful to know what files from the cache are _in use_\n (namely, a file described as an output on a stage).\n\n The scope is, by default, the working directory, but you can use\n `all_branches`/`all_tags`/`all_commits` to expand the scope.\n\n Returns:\n A dictionary with Schemes (representing output's location) mapped\n to items containing the output's `dumpd` names and the output's\n children (if the given output is a directory).\n " from dvc.cache import NamedCache cache = NamedCache() for branch in self.brancher(all_branches=all_branches, all_tags=all_tags, all_commits=all_commits): targets = (targets or [None]) pairs = cat((self.collect_granular(target, recursive=recursive, with_deps=with_deps) for target in targets)) suffix = ('({})'.format(branch) if branch else '') for (stage, filter_info) in pairs: used_cache = stage.get_used_cache(remote=remote, force=force, jobs=jobs, filter_info=filter_info) cache.update(used_cache, suffix=suffix) return cache
def used_cache(self, targets=None, all_branches=False, with_deps=False, all_tags=False, all_commits=False, remote=None, force=False, jobs=None, recursive=False): "Get the stages related to the given target and collect\n the `info` of its outputs.\n\n This is useful to know what files from the cache are _in use_\n (namely, a file described as an output on a stage).\n\n The scope is, by default, the working directory, but you can use\n `all_branches`/`all_tags`/`all_commits` to expand the scope.\n\n Returns:\n A dictionary with Schemes (representing output's location) mapped\n to items containing the output's `dumpd` names and the output's\n children (if the given output is a directory).\n " from dvc.cache import NamedCache cache = NamedCache() for branch in self.brancher(all_branches=all_branches, all_tags=all_tags, all_commits=all_commits): targets = (targets or [None]) pairs = cat((self.collect_granular(target, recursive=recursive, with_deps=with_deps) for target in targets)) suffix = ('({})'.format(branch) if branch else '') for (stage, filter_info) in pairs: used_cache = stage.get_used_cache(remote=remote, force=force, jobs=jobs, filter_info=filter_info) cache.update(used_cache, suffix=suffix) return cache<|docstring|>Get the stages related to the given target and collect the `info` of its outputs. This is useful to know what files from the cache are _in use_ (namely, a file described as an output on a stage). The scope is, by default, the working directory, but you can use `all_branches`/`all_tags`/`all_commits` to expand the scope. Returns: A dictionary with Schemes (representing output's location) mapped to items containing the output's `dumpd` names and the output's children (if the given output is a directory).<|endoftext|>
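A hypothetical usage sketch, assuming `repo` is a dvc.repo.Repo instance; the call and its keywords match the signature above, while the target path is made up.

# Collect cache entries still referenced by data.dvc on every branch.
used = repo.used_cache(targets=['data.dvc'], all_branches=True)
# `used` is a NamedCache mapping each output scheme (e.g. 'local') to the
# checksums of outputs that are still in use.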
dedaa7b6bbaf239d2301f8b7023548220daf1ed62dcfacea50bc80f07361fdd0
def _collect_graph(self, stages=None): 'Generate a graph by using the given stages on the given directory\n\n The nodes of the graph are the stage\'s path relative to the root.\n\n Edges are created when the output of one stage is used as a\n dependency in other stage.\n\n The direction of the edges goes from the stage to its dependency:\n\n For example, running the following:\n\n $ dvc run -o A "echo A > A"\n $ dvc run -d A -o B "echo B > B"\n $ dvc run -d B -o C "echo C > C"\n\n Will create the following graph:\n\n ancestors <--\n |\n C.dvc -> B.dvc -> A.dvc\n | |\n | --> descendants\n |\n ------- pipeline ------>\n |\n v\n (weakly connected components)\n\n Args:\n stages (list): used to build a graph, if None given, collect stages\n in the repository.\n\n Raises:\n OutputDuplicationError: two outputs with the same path\n StagePathAsOutputError: stage inside an output directory\n OverlappingOutputPathsError: output inside output directory\n CyclicGraphError: resulting graph has cycles\n ' import networkx as nx from pygtrie import Trie from dvc.exceptions import OutputDuplicationError, StagePathAsOutputError, OverlappingOutputPathsError G = nx.DiGraph() stages = (stages or self.stages) stages = [stage for stage in stages if stage] outs = Trie() for stage in stages: for out in stage.outs: out_key = out.path_info.parts if (out_key in outs): dup_stages = [stage, outs[out_key].stage] raise OutputDuplicationError(str(out), dup_stages) if outs.has_subtrie(out_key): parent = out overlapping = first(outs.values(prefix=out_key)) else: parent = outs.shortest_prefix(out_key).value overlapping = out if (parent and overlapping): msg = "Paths for outs:\n'{}'('{}')\n'{}'('{}')\noverlap. To avoid unpredictable behaviour, rerun command with non overlapping outs paths.".format(str(parent), parent.stage.relpath, str(overlapping), overlapping.stage.relpath) raise OverlappingOutputPathsError(parent, overlapping, msg) outs[out_key] = out for stage in stages: out = outs.shortest_prefix(PathInfo(stage.path).parts).value if out: raise StagePathAsOutputError(stage, str(out)) G.add_nodes_from(stages) for stage in stages: for dep in stage.deps: if (dep.path_info is None): continue dep_key = dep.path_info.parts overlapping = list((n.value for n in outs.prefixes(dep_key))) if outs.has_subtrie(dep_key): overlapping.extend(outs.values(prefix=dep_key)) G.add_edges_from(((stage, out.stage) for out in overlapping)) check_acyclic(G) return G
Generate a graph by using the given stages in the given directory. The nodes of the graph are the stage's path relative to the root. Edges are created when the output of one stage is used as a dependency in another stage. The direction of the edges goes from the stage to its dependency: For example, running the following: $ dvc run -o A "echo A > A" $ dvc run -d A -o B "echo B > B" $ dvc run -d B -o C "echo C > C" Will create the following graph: ancestors <-- | C.dvc -> B.dvc -> A.dvc | | | --> descendants | ------- pipeline ------> | v (weakly connected components) Args: stages (list): used to build a graph, if None given, collect stages in the repository. Raises: OutputDuplicationError: two outputs with the same path StagePathAsOutputError: stage inside an output directory OverlappingOutputPathsError: output inside output directory CyclicGraphError: resulting graph has cycles
dvc/repo/__init__.py
_collect_graph
shizacat/dvc
0
python
def _collect_graph(self, stages=None): 'Generate a graph by using the given stages on the given directory\n\n The nodes of the graph are the stage\'s path relative to the root.\n\n Edges are created when the output of one stage is used as a\n dependency in other stage.\n\n The direction of the edges goes from the stage to its dependency:\n\n For example, running the following:\n\n $ dvc run -o A "echo A > A"\n $ dvc run -d A -o B "echo B > B"\n $ dvc run -d B -o C "echo C > C"\n\n Will create the following graph:\n\n ancestors <--\n |\n C.dvc -> B.dvc -> A.dvc\n | |\n | --> descendants\n |\n ------- pipeline ------>\n |\n v\n (weakly connected components)\n\n Args:\n stages (list): used to build a graph, if None given, collect stages\n in the repository.\n\n Raises:\n OutputDuplicationError: two outputs with the same path\n StagePathAsOutputError: stage inside an output directory\n OverlappingOutputPathsError: output inside output directory\n CyclicGraphError: resulting graph has cycles\n ' import networkx as nx from pygtrie import Trie from dvc.exceptions import OutputDuplicationError, StagePathAsOutputError, OverlappingOutputPathsError G = nx.DiGraph() stages = (stages or self.stages) stages = [stage for stage in stages if stage] outs = Trie() for stage in stages: for out in stage.outs: out_key = out.path_info.parts if (out_key in outs): dup_stages = [stage, outs[out_key].stage] raise OutputDuplicationError(str(out), dup_stages) if outs.has_subtrie(out_key): parent = out overlapping = first(outs.values(prefix=out_key)) else: parent = outs.shortest_prefix(out_key).value overlapping = out if (parent and overlapping): msg = "Paths for outs:\n'{}'('{}')\n'{}'('{}')\noverlap. To avoid unpredictable behaviour, rerun command with non overlapping outs paths.".format(str(parent), parent.stage.relpath, str(overlapping), overlapping.stage.relpath) raise OverlappingOutputPathsError(parent, overlapping, msg) outs[out_key] = out for stage in stages: out = outs.shortest_prefix(PathInfo(stage.path).parts).value if out: raise StagePathAsOutputError(stage, str(out)) G.add_nodes_from(stages) for stage in stages: for dep in stage.deps: if (dep.path_info is None): continue dep_key = dep.path_info.parts overlapping = list((n.value for n in outs.prefixes(dep_key))) if outs.has_subtrie(dep_key): overlapping.extend(outs.values(prefix=dep_key)) G.add_edges_from(((stage, out.stage) for out in overlapping)) check_acyclic(G) return G
def _collect_graph(self, stages=None): 'Generate a graph by using the given stages on the given directory\n\n The nodes of the graph are the stage\'s path relative to the root.\n\n Edges are created when the output of one stage is used as a\n dependency in other stage.\n\n The direction of the edges goes from the stage to its dependency:\n\n For example, running the following:\n\n $ dvc run -o A "echo A > A"\n $ dvc run -d A -o B "echo B > B"\n $ dvc run -d B -o C "echo C > C"\n\n Will create the following graph:\n\n ancestors <--\n |\n C.dvc -> B.dvc -> A.dvc\n | |\n | --> descendants\n |\n ------- pipeline ------>\n |\n v\n (weakly connected components)\n\n Args:\n stages (list): used to build a graph, if None given, collect stages\n in the repository.\n\n Raises:\n OutputDuplicationError: two outputs with the same path\n StagePathAsOutputError: stage inside an output directory\n OverlappingOutputPathsError: output inside output directory\n CyclicGraphError: resulting graph has cycles\n ' import networkx as nx from pygtrie import Trie from dvc.exceptions import OutputDuplicationError, StagePathAsOutputError, OverlappingOutputPathsError G = nx.DiGraph() stages = (stages or self.stages) stages = [stage for stage in stages if stage] outs = Trie() for stage in stages: for out in stage.outs: out_key = out.path_info.parts if (out_key in outs): dup_stages = [stage, outs[out_key].stage] raise OutputDuplicationError(str(out), dup_stages) if outs.has_subtrie(out_key): parent = out overlapping = first(outs.values(prefix=out_key)) else: parent = outs.shortest_prefix(out_key).value overlapping = out if (parent and overlapping): msg = "Paths for outs:\n'{}'('{}')\n'{}'('{}')\noverlap. To avoid unpredictable behaviour, rerun command with non overlapping outs paths.".format(str(parent), parent.stage.relpath, str(overlapping), overlapping.stage.relpath) raise OverlappingOutputPathsError(parent, overlapping, msg) outs[out_key] = out for stage in stages: out = outs.shortest_prefix(PathInfo(stage.path).parts).value if out: raise StagePathAsOutputError(stage, str(out)) G.add_nodes_from(stages) for stage in stages: for dep in stage.deps: if (dep.path_info is None): continue dep_key = dep.path_info.parts overlapping = list((n.value for n in outs.prefixes(dep_key))) if outs.has_subtrie(dep_key): overlapping.extend(outs.values(prefix=dep_key)) G.add_edges_from(((stage, out.stage) for out in overlapping)) check_acyclic(G) return G<|docstring|>Generate a graph by using the given stages on the given directory The nodes of the graph are the stage's path relative to the root. Edges are created when the output of one stage is used as a dependency in other stage. The direction of the edges goes from the stage to its dependency: For example, running the following: $ dvc run -o A "echo A > A" $ dvc run -d A -o B "echo B > B" $ dvc run -d B -o C "echo C > C" Will create the following graph: ancestors <-- | C.dvc -> B.dvc -> A.dvc | | | --> descendants | ------- pipeline ------> | v (weakly connected components) Args: stages (list): used to build a graph, if None given, collect stages in the repository. Raises: OutputDuplicationError: two outputs with the same path StagePathAsOutputError: stage inside an output directory OverlappingOutputPathsError: output inside output directory CyclicGraphError: resulting graph has cycles<|endoftext|>
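A self-contained sketch of the pygtrie overlap check used above: output paths are stored as tuples of parts, so one key being a prefix of another means one output lives inside the other.

from pygtrie import Trie

outs = Trie()
outs[('data',)] = 'stage-A'               # stage A outputs the directory data/
key_b = ('data', 'raw', 'x.csv')          # stage B wants to output data/raw/x.csv

# B's path falls under A's output -- this is the overlap _collect_graph rejects.
print(outs.shortest_prefix(key_b).value)  # -> 'stage-A'

outs[key_b] = 'stage-B'
# Conversely, keys now nest below data/, so a new output there would overlap B's.
print(outs.has_subtrie(('data',)))        # -> True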
3704efe6e2b1b56e743e8fd194bbeeba9ffa92bdf52961c3d9411e5f11ddea22
@cached_property def stages(self): '\n Walks down the root directory looking for Dvcfiles,\n skipping the directories that are related with\n any SCM (e.g. `.git`), DVC itself (`.dvc`), or directories\n tracked by DVC (e.g. `dvc add data` would skip `data/`)\n\n NOTE: For large repos, this could be an expensive\n operation. Consider using some memoization.\n ' from ..dvcfile import Dvcfile stages = [] outs = set() for (root, dirs, files) in self.tree.walk(self.root_dir): for fname in files: path = os.path.join(root, fname) if (not Dvcfile.is_valid_filename(path)): continue stage = Dvcfile(self, path).load() stages.append(stage) for out in stage.outs: if (out.scheme == 'local'): outs.add(out.fspath) dirs[:] = [d for d in dirs if (os.path.join(root, d) not in outs)] return stages
Walks down the root directory looking for Dvcfiles, skipping directories associated with any SCM (e.g. `.git`), with DVC itself (`.dvc`), or tracked by DVC (e.g. `dvc add data` would skip `data/`). NOTE: For large repos, this could be an expensive operation. Consider using some memoization.
dvc/repo/__init__.py
stages
shizacat/dvc
0
python
@cached_property def stages(self): '\n Walks down the root directory looking for Dvcfiles,\n skipping the directories that are related with\n any SCM (e.g. `.git`), DVC itself (`.dvc`), or directories\n tracked by DVC (e.g. `dvc add data` would skip `data/`)\n\n NOTE: For large repos, this could be an expensive\n operation. Consider using some memoization.\n ' from ..dvcfile import Dvcfile stages = [] outs = set() for (root, dirs, files) in self.tree.walk(self.root_dir): for fname in files: path = os.path.join(root, fname) if (not Dvcfile.is_valid_filename(path)): continue stage = Dvcfile(self, path).load() stages.append(stage) for out in stage.outs: if (out.scheme == 'local'): outs.add(out.fspath) dirs[:] = [d for d in dirs if (os.path.join(root, d) not in outs)] return stages
@cached_property def stages(self): '\n Walks down the root directory looking for Dvcfiles,\n skipping the directories that are related with\n any SCM (e.g. `.git`), DVC itself (`.dvc`), or directories\n tracked by DVC (e.g. `dvc add data` would skip `data/`)\n\n NOTE: For large repos, this could be an expensive\n operation. Consider using some memoization.\n ' from ..dvcfile import Dvcfile stages = [] outs = set() for (root, dirs, files) in self.tree.walk(self.root_dir): for fname in files: path = os.path.join(root, fname) if (not Dvcfile.is_valid_filename(path)): continue stage = Dvcfile(self, path).load() stages.append(stage) for out in stage.outs: if (out.scheme == 'local'): outs.add(out.fspath) dirs[:] = [d for d in dirs if (os.path.join(root, d) not in outs)] return stages<|docstring|>Walks down the root directory looking for Dvcfiles, skipping the directories that are related with any SCM (e.g. `.git`), DVC itself (`.dvc`), or directories tracked by DVC (e.g. `dvc add data` would skip `data/`) NOTE: For large repos, this could be an expensive operation. Consider using some memoization.<|endoftext|>
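A standalone illustration of the in-place `dirs[:]` pruning idiom used above: assigning to the slice mutates the very list os.walk iterates over, so pruned directories are never descended into. The skip set here is illustrative.

import os

skip = {'.git', '.dvc'}
for root, dirs, files in os.walk('.'):
    dirs[:] = [d for d in dirs if d not in skip]  # prune before descending
    for fname in files:
        if fname.endswith('.dvc'):
            print(os.path.join(root, fname))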
475d918ec49b3eac3cedb9356315df44d1aff4ec99fcafae40c9456f1ae2feed
@contextmanager def open_by_relpath(self, path, remote=None, mode='r', encoding=None): 'Opens a specified resource as a file descriptor' cause = None try: out = self.find_out_by_relpath(path) except OutputNotFoundError as exc: out = None cause = exc if (out and out.use_cache): try: with self._open_cached(out, remote, mode, encoding) as fd: (yield fd) return except FileNotFoundError as exc: raise FileMissingError(path) from exc abs_path = os.path.join(self.root_dir, path) if os.path.exists(abs_path): with open(abs_path, mode=mode, encoding=encoding) as fd: (yield fd) return raise FileMissingError(path) from cause
Opens a specified resource as a file descriptor
dvc/repo/__init__.py
open_by_relpath
shizacat/dvc
0
python
@contextmanager def open_by_relpath(self, path, remote=None, mode='r', encoding=None): cause = None try: out = self.find_out_by_relpath(path) except OutputNotFoundError as exc: out = None cause = exc if (out and out.use_cache): try: with self._open_cached(out, remote, mode, encoding) as fd: (yield fd) return except FileNotFoundError as exc: raise FileMissingError(path) from exc abs_path = os.path.join(self.root_dir, path) if os.path.exists(abs_path): with open(abs_path, mode=mode, encoding=encoding) as fd: (yield fd) return raise FileMissingError(path) from cause
@contextmanager def open_by_relpath(self, path, remote=None, mode='r', encoding=None): cause = None try: out = self.find_out_by_relpath(path) except OutputNotFoundError as exc: out = None cause = exc if (out and out.use_cache): try: with self._open_cached(out, remote, mode, encoding) as fd: (yield fd) return except FileNotFoundError as exc: raise FileMissingError(path) from exc abs_path = os.path.join(self.root_dir, path) if os.path.exists(abs_path): with open(abs_path, mode=mode, encoding=encoding) as fd: (yield fd) return raise FileMissingError(path) from cause<|docstring|>Opens a specified resource as a file descriptor<|endoftext|>
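A hypothetical usage sketch, assuming `repo` is a dvc.repo.Repo instance; the path and remote name are placeholders, and the context-manager form follows from the @contextmanager decorator above.

with repo.open_by_relpath('data/model.pkl', remote='myremote', mode='rb') as fobj:
    blob = fobj.read()
# FileMissingError is raised if the path is neither on disk nor in any cache.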
7d0671f88c8a73205efaed00e7b530210387203b272c66333035d534b1086887
@decorate def defer(coro, delay=1): '\n Returns a coroutine function wrapper that will defer the given coroutine\n execution for a certain amount of seconds in a non-blocking way.\n\n This function can be used as decorator.\n\n Arguments:\n coro (coroutinefunction): coroutine function to defer.\n delay (int/float): number of seconds to defer execution.\n\n Raises:\n TypeError: if coro argument is not a coroutine function.\n\n Returns:\n filtered values (list): ordered list of resultant values.\n\n Usage::\n\n # Usage as function\n await paco.defer(coro, delay=1)\n await paco.defer(coro, delay=0.5)\n\n # Usage as decorator\n @paco.defer(delay=1)\n async def mul_2(num):\n return num * 2\n\n await mul_2(2)\n # => 4\n\n ' assert_corofunction(coro=coro) @asyncio.coroutine def wrapper(*args, **kw): (yield from asyncio.sleep(delay)) return (yield from coro(*args, **kw)) return wrapper
Returns a coroutine function wrapper that will defer the given coroutine's execution for a certain number of seconds in a non-blocking way. This function can be used as a decorator. Arguments: coro (coroutinefunction): coroutine function to defer. delay (int/float): number of seconds to defer execution. Raises: TypeError: if coro argument is not a coroutine function. Returns: coroutinefunction: wrapped coroutine function that defers execution. Usage:: # Usage as function await paco.defer(coro, delay=1) await paco.defer(coro, delay=0.5) # Usage as decorator @paco.defer(delay=1) async def mul_2(num): return num * 2 await mul_2(2) # => 4
paco/defer.py
defer
thatmattbone/paco
208
python
@decorate def defer(coro, delay=1): '\n Returns a coroutine function wrapper that will defer the given coroutine\n execution for a certain amount of seconds in a non-blocking way.\n\n This function can be used as decorator.\n\n Arguments:\n coro (coroutinefunction): coroutine function to defer.\n delay (int/float): number of seconds to defer execution.\n\n Raises:\n TypeError: if coro argument is not a coroutine function.\n\n Returns:\n filtered values (list): ordered list of resultant values.\n\n Usage::\n\n # Usage as function\n await paco.defer(coro, delay=1)\n await paco.defer(coro, delay=0.5)\n\n # Usage as decorator\n @paco.defer(delay=1)\n async def mul_2(num):\n return num * 2\n\n await mul_2(2)\n # => 4\n\n ' assert_corofunction(coro=coro) @asyncio.coroutine def wrapper(*args, **kw): (yield from asyncio.sleep(delay)) return (yield from coro(*args, **kw)) return wrapper
@decorate def defer(coro, delay=1): '\n Returns a coroutine function wrapper that will defer the given coroutine\n execution for a certain amount of seconds in a non-blocking way.\n\n This function can be used as decorator.\n\n Arguments:\n coro (coroutinefunction): coroutine function to defer.\n delay (int/float): number of seconds to defer execution.\n\n Raises:\n TypeError: if coro argument is not a coroutine function.\n\n Returns:\n filtered values (list): ordered list of resultant values.\n\n Usage::\n\n # Usage as function\n await paco.defer(coro, delay=1)\n await paco.defer(coro, delay=0.5)\n\n # Usage as decorator\n @paco.defer(delay=1)\n async def mul_2(num):\n return num * 2\n\n await mul_2(2)\n # => 4\n\n ' assert_corofunction(coro=coro) @asyncio.coroutine def wrapper(*args, **kw): (yield from asyncio.sleep(delay)) return (yield from coro(*args, **kw)) return wrapper<|docstring|>Returns a coroutine function wrapper that will defer the given coroutine execution for a certain amount of seconds in a non-blocking way. This function can be used as decorator. Arguments: coro (coroutinefunction): coroutine function to defer. delay (int/float): number of seconds to defer execution. Raises: TypeError: if coro argument is not a coroutine function. Returns: filtered values (list): ordered list of resultant values. Usage:: # Usage as function await paco.defer(coro, delay=1) await paco.defer(coro, delay=0.5) # Usage as decorator @paco.defer(delay=1) async def mul_2(num): return num * 2 await mul_2(2) # => 4<|endoftext|>
772cabecc66d6038ceff84e709d71ecb1ad207899fe439b82c1bcea78a81633e
def main(session): '\n This example uses the declarePathForTags method.\n ' animation_player_service = session.service('ALAnimationPlayer') animation_player_service.declarePathForTags('myanimlib/[robot]/[posture]/')
This example uses the declarePathForTags method.
naoqi-sdk-2.5.5.5-linux64/doc/_downloads/alanimationplayer_tutorial_declarePathForTags.py
main
applejenny66/docker_pepper
0
python
def main(session): '\n \n ' animation_player_service = session.service('ALAnimationPlayer') animation_player_service.declarePathForTags('myanimlib/[robot]/[posture]/')
def main(session): '\n \n ' animation_player_service = session.service('ALAnimationPlayer') animation_player_service.declarePathForTags('myanimlib/[robot]/[posture]/')<|docstring|>This example uses the declarePathForTags method.<|endoftext|>
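A hedged sketch of how main above is typically driven, assuming the standard qi Session API; the robot address is a placeholder:

import qi

session = qi.Session()
session.connect('tcp://127.0.0.1:9559')  # placeholder robot address
# Registers 'myanimlib/[robot]/[posture]/' as an extra animation search path.
main(session)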
465e09ad8b1bcfc4b949e497c1e6c76fee69392793733b2afb992cadb78dfa35
def _get_input_fn(x, y, input_fn, feed_fn, batch_size, shuffle=False, epochs=1): 'Make inputs into input and feed functions.\n\n Args:\n x: Numpy, Pandas or Dask matrix or iterable.\n y: Numpy, Pandas or Dask matrix or iterable.\n input_fn: Pre-defined input function for training data.\n feed_fn: Pre-defined data feeder function.\n batch_size: Size to split data into parts. Must be >= 1.\n shuffle: Whether to shuffle the inputs.\n epochs: Number of epochs to run.\n\n Returns:\n Data input and feeder function based on training data.\n\n Raises:\n ValueError: Only one of `(x & y)` or `input_fn` must be provided.\n ' if (input_fn is None): if (x is None): raise ValueError('Either x or input_fn must be provided.') if (contrib_framework.is_tensor(x) or ((y is not None) and contrib_framework.is_tensor(y))): raise ValueError('Inputs cannot be tensors. Please provide input_fn.') if (feed_fn is not None): raise ValueError('Can not provide both feed_fn and x or y.') df = data_feeder.setup_train_data_feeder(x, y, n_classes=None, batch_size=batch_size, shuffle=shuffle, epochs=epochs) return (df.input_builder, df.get_feed_dict_fn()) if ((x is not None) or (y is not None)): raise ValueError('Can not provide both input_fn and x or y.') if (batch_size is not None): raise ValueError('Can not provide both input_fn and batch_size.') return (input_fn, feed_fn)
Make inputs into input and feed functions. Args: x: Numpy, Pandas or Dask matrix or iterable. y: Numpy, Pandas or Dask matrix or iterable. input_fn: Pre-defined input function for training data. feed_fn: Pre-defined data feeder function. batch_size: Size to split data into parts. Must be >= 1. shuffle: Whether to shuffle the inputs. epochs: Number of epochs to run. Returns: Data input and feeder function based on training data. Raises: ValueError: Only one of `(x & y)` or `input_fn` must be provided.
tensorflow/contrib/learn/python/learn/estimators/estimator.py
_get_input_fn
PedroLelis/tensorflow
1
python
def _get_input_fn(x, y, input_fn, feed_fn, batch_size, shuffle=False, epochs=1): 'Make inputs into input and feed functions.\n\n Args:\n x: Numpy, Pandas or Dask matrix or iterable.\n y: Numpy, Pandas or Dask matrix or iterable.\n input_fn: Pre-defined input function for training data.\n feed_fn: Pre-defined data feeder function.\n batch_size: Size to split data into parts. Must be >= 1.\n shuffle: Whether to shuffle the inputs.\n epochs: Number of epochs to run.\n\n Returns:\n Data input and feeder function based on training data.\n\n Raises:\n ValueError: Only one of `(x & y)` or `input_fn` must be provided.\n ' if (input_fn is None): if (x is None): raise ValueError('Either x or input_fn must be provided.') if (contrib_framework.is_tensor(x) or ((y is not None) and contrib_framework.is_tensor(y))): raise ValueError('Inputs cannot be tensors. Please provide input_fn.') if (feed_fn is not None): raise ValueError('Can not provide both feed_fn and x or y.') df = data_feeder.setup_train_data_feeder(x, y, n_classes=None, batch_size=batch_size, shuffle=shuffle, epochs=epochs) return (df.input_builder, df.get_feed_dict_fn()) if ((x is not None) or (y is not None)): raise ValueError('Can not provide both input_fn and x or y.') if (batch_size is not None): raise ValueError('Can not provide both input_fn and batch_size.') return (input_fn, feed_fn)
def _get_input_fn(x, y, input_fn, feed_fn, batch_size, shuffle=False, epochs=1): 'Make inputs into input and feed functions.\n\n Args:\n x: Numpy, Pandas or Dask matrix or iterable.\n y: Numpy, Pandas or Dask matrix or iterable.\n input_fn: Pre-defined input function for training data.\n feed_fn: Pre-defined data feeder function.\n batch_size: Size to split data into parts. Must be >= 1.\n shuffle: Whether to shuffle the inputs.\n epochs: Number of epochs to run.\n\n Returns:\n Data input and feeder function based on training data.\n\n Raises:\n ValueError: Only one of `(x & y)` or `input_fn` must be provided.\n ' if (input_fn is None): if (x is None): raise ValueError('Either x or input_fn must be provided.') if (contrib_framework.is_tensor(x) or ((y is not None) and contrib_framework.is_tensor(y))): raise ValueError('Inputs cannot be tensors. Please provide input_fn.') if (feed_fn is not None): raise ValueError('Can not provide both feed_fn and x or y.') df = data_feeder.setup_train_data_feeder(x, y, n_classes=None, batch_size=batch_size, shuffle=shuffle, epochs=epochs) return (df.input_builder, df.get_feed_dict_fn()) if ((x is not None) or (y is not None)): raise ValueError('Can not provide both input_fn and x or y.') if (batch_size is not None): raise ValueError('Can not provide both input_fn and batch_size.') return (input_fn, feed_fn)<|docstring|>Make inputs into input and feed functions. Args: x: Numpy, Pandas or Dask matrix or iterable. y: Numpy, Pandas or Dask matrix or iterable. input_fn: Pre-defined input function for training data. feed_fn: Pre-defined data feeder function. batch_size: Size to split data into parts. Must be >= 1. shuffle: Whether to shuffle the inputs. epochs: Number of epochs to run. Returns: Data input and feeder function based on training data. Raises: ValueError: Only one of `(x & y)` or `input_fn` must be provided.<|endoftext|>
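The two call patterns the helper above accepts, sketched with placeholder data; mixing the x/y path with input_fn raises ValueError per the checks in the body:

import numpy as np

x_train = np.random.rand(128, 4).astype(np.float32)  # placeholder features
y_train = np.random.randint(0, 2, size=128)          # placeholder labels

def my_input_fn():
    pass  # placeholder: would return (features, labels) tensors

# Path 1: raw arrays become a DataFeeder-backed input builder plus feed function.
input_builder, feed_fn = _get_input_fn(x_train, y_train, input_fn=None,
                                       feed_fn=None, batch_size=32, shuffle=True)

# Path 2: a pre-built input_fn is passed through; x, y and batch_size must stay None.
input_builder, feed_fn = _get_input_fn(None, None, input_fn=my_input_fn,
                                       feed_fn=None, batch_size=None)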
5ab3f0f479f17d6e4c0d27b30c32fa56c8027b9f0e0a787abddeec1b6afa3dcd
def infer_real_valued_columns_from_input_fn(input_fn): 'Creates `FeatureColumn` objects for inputs defined by `input_fn`.\n\n This interprets all inputs as dense, fixed-length float values. This creates\n a local graph in which it calls `input_fn` to build the tensors, then discards\n it.\n\n Args:\n input_fn: Input function returning a tuple of:\n features - Dictionary of string feature name to `Tensor` or `Tensor`.\n labels - `Tensor` of label values.\n\n Returns:\n List of `FeatureColumn` objects.\n ' with ops.Graph().as_default(): (features, _) = input_fn() return layers.infer_real_valued_columns(features)
Creates `FeatureColumn` objects for inputs defined by `input_fn`. This interprets all inputs as dense, fixed-length float values. This creates a local graph in which it calls `input_fn` to build the tensors, then discards it. Args: input_fn: Input function returning a tuple of: features - Dictionary of string feature name to `Tensor` or `Tensor`. labels - `Tensor` of label values. Returns: List of `FeatureColumn` objects.
tensorflow/contrib/learn/python/learn/estimators/estimator.py
infer_real_valued_columns_from_input_fn
PedroLelis/tensorflow
1
python
def infer_real_valued_columns_from_input_fn(input_fn): 'Creates `FeatureColumn` objects for inputs defined by `input_fn`.\n\n This interprets all inputs as dense, fixed-length float values. This creates\n a local graph in which it calls `input_fn` to build the tensors, then discards\n it.\n\n Args:\n input_fn: Input function returning a tuple of:\n features - Dictionary of string feature name to `Tensor` or `Tensor`.\n labels - `Tensor` of label values.\n\n Returns:\n List of `FeatureColumn` objects.\n ' with ops.Graph().as_default(): (features, _) = input_fn() return layers.infer_real_valued_columns(features)
def infer_real_valued_columns_from_input_fn(input_fn): 'Creates `FeatureColumn` objects for inputs defined by `input_fn`.\n\n This interprets all inputs as dense, fixed-length float values. This creates\n a local graph in which it calls `input_fn` to build the tensors, then discards\n it.\n\n Args:\n input_fn: Input function returning a tuple of:\n features - Dictionary of string feature name to `Tensor` or `Tensor`.\n labels - `Tensor` of label values.\n\n Returns:\n List of `FeatureColumn` objects.\n ' with ops.Graph().as_default(): (features, _) = input_fn() return layers.infer_real_valued_columns(features)<|docstring|>Creates `FeatureColumn` objects for inputs defined by `input_fn`. This interprets all inputs as dense, fixed-length float values. This creates a local graph in which it calls `input_fn` to build the tensors, then discards it. Args: input_fn: Input function returning a tuple of: features - Dictionary of string feature name to `Tensor` or `Tensor`. labels - `Tensor` of label values. Returns: List of `FeatureColumn` objects.<|endoftext|>
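A sketch of an input_fn the function above can introspect; tensor values are placeholders:

import tensorflow as tf

def my_input_fn():
    features = {'age': tf.constant([[25.0], [32.0]]),
                'height': tf.constant([[1.70], [1.82]])}
    labels = tf.constant([[0], [1]])  # ignored by the column inference
    return features, labels

# Builds the tensors in a throwaway graph and infers one real-valued
# column per feature key.
columns = infer_real_valued_columns_from_input_fn(my_input_fn)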
0393098720a4149df74956f3eabf657b2e9176a50c7351d5fe44087adc0cff23
def infer_real_valued_columns_from_input(x): 'Creates `FeatureColumn` objects for inputs defined by input `x`.\n\n This interprets all inputs as dense, fixed-length float values.\n\n Args:\n x: Real-valued matrix of shape [n_samples, n_features...]. Can be\n iterator that returns arrays of features.\n\n Returns:\n List of `FeatureColumn` objects.\n ' (input_fn, _) = _get_input_fn(x=x, y=None, input_fn=None, feed_fn=None, batch_size=None) return infer_real_valued_columns_from_input_fn(input_fn)
Creates `FeatureColumn` objects for inputs defined by input `x`. This interprets all inputs as dense, fixed-length float values. Args: x: Real-valued matrix of shape [n_samples, n_features...]. Can be iterator that returns arrays of features. Returns: List of `FeatureColumn` objects.
tensorflow/contrib/learn/python/learn/estimators/estimator.py
infer_real_valued_columns_from_input
PedroLelis/tensorflow
1
python
def infer_real_valued_columns_from_input(x): 'Creates `FeatureColumn` objects for inputs defined by input `x`.\n\n This interprets all inputs as dense, fixed-length float values.\n\n Args:\n x: Real-valued matrix of shape [n_samples, n_features...]. Can be\n iterator that returns arrays of features.\n\n Returns:\n List of `FeatureColumn` objects.\n ' (input_fn, _) = _get_input_fn(x=x, y=None, input_fn=None, feed_fn=None, batch_size=None) return infer_real_valued_columns_from_input_fn(input_fn)
def infer_real_valued_columns_from_input(x): 'Creates `FeatureColumn` objects for inputs defined by input `x`.\n\n This interprets all inputs as dense, fixed-length float values.\n\n Args:\n x: Real-valued matrix of shape [n_samples, n_features...]. Can be\n iterator that returns arrays of features.\n\n Returns:\n List of `FeatureColumn` objects.\n ' (input_fn, _) = _get_input_fn(x=x, y=None, input_fn=None, feed_fn=None, batch_size=None) return infer_real_valued_columns_from_input_fn(input_fn)<|docstring|>Creates `FeatureColumn` objects for inputs defined by input `x`. This interprets all inputs as dense, fixed-length float values. Args: x: Real-valued matrix of shape [n_samples, n_features...]. Can be iterator that returns arrays of features. Returns: List of `FeatureColumn` objects.<|endoftext|>
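The matrix variant, sketched with a placeholder NumPy array of dense floats:

import numpy as np

x = np.random.rand(100, 3).astype(np.float32)  # placeholder feature matrix
# Wraps x in a data feeder, then defers to the input_fn variant above,
# yielding a single real-valued column of dimension 3.
columns = infer_real_valued_columns_from_input(x)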
0ed3468c1cfd0f6b51d3883e2615bd0ed1ed90eeec48c6e715d1e88ce2b3d288
def _get_arguments(func): 'Returns list of arguments this function has.' if hasattr(func, '__code__'): return inspect.getargspec(func).args elif hasattr(func, '__call__'): return _get_arguments(func.__call__) elif hasattr(func, 'func'): return _get_arguments(func.func)
Returns list of arguments this function has.
tensorflow/contrib/learn/python/learn/estimators/estimator.py
_get_arguments
PedroLelis/tensorflow
1
python
def _get_arguments(func): if hasattr(func, '__code__'): return inspect.getargspec(func).args elif hasattr(func, '__call__'): return _get_arguments(func.__call__) elif hasattr(func, 'func'): return _get_arguments(func.func)
def _get_arguments(func): if hasattr(func, '__code__'): return inspect.getargspec(func).args elif hasattr(func, '__call__'): return _get_arguments(func.__call__) elif hasattr(func, 'func'): return _get_arguments(func.func)<|docstring|>Returns list of arguments this function has.<|endoftext|>
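Two shapes the introspection helper above handles, a plain function read through __code__ and a callable object read through __call__; note it relies on the legacy inspect.getargspec API:

def scale(x, factor=2.0):
    return x * factor

class Scaler(object):
    def __call__(self, x, factor=2.0):
        return x * factor

print(_get_arguments(scale))     # ['x', 'factor'], from scale.__code__
print(_get_arguments(Scaler()))  # ['self', 'x', 'factor'], via the bound __call__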
5c40b24f01fc8505a07a7ebdc52e763434b752d4c96fae1ff9a4944b4cab3e67
def _get_replica_device_setter(config): 'Creates a replica device setter if required.\n\n Args:\n config: A RunConfig instance.\n\n Returns:\n A replica device setter, or None.\n ' ps_ops = ['Variable', 'AutoReloadVariable', 'MutableHashTable', 'MutableHashTableOfTensors', 'MutableDenseHashTable'] if config.job_name: worker_device = ('/job:%s/task:%d' % (config.job_name, config.task)) else: worker_device = '/job:worker' if (config.num_ps_replicas > 0): return device_setter.replica_device_setter(ps_tasks=config.num_ps_replicas, worker_device=worker_device, merge_devices=False, ps_ops=ps_ops, cluster=config.cluster_spec) else: return None
Creates a replica device setter if required. Args: config: A RunConfig instance. Returns: A replica device setter, or None.
tensorflow/contrib/learn/python/learn/estimators/estimator.py
_get_replica_device_setter
PedroLelis/tensorflow
1
python
def _get_replica_device_setter(config): 'Creates a replica device setter if required.\n\n Args:\n config: A RunConfig instance.\n\n Returns:\n A replica device setter, or None.\n ' ps_ops = ['Variable', 'AutoReloadVariable', 'MutableHashTable', 'MutableHashTableOfTensors', 'MutableDenseHashTable'] if config.job_name: worker_device = ('/job:%s/task:%d' % (config.job_name, config.task)) else: worker_device = '/job:worker' if (config.num_ps_replicas > 0): return device_setter.replica_device_setter(ps_tasks=config.num_ps_replicas, worker_device=worker_device, merge_devices=False, ps_ops=ps_ops, cluster=config.cluster_spec) else: return None
def _get_replica_device_setter(config): 'Creates a replica device setter if required.\n\n Args:\n config: A RunConfig instance.\n\n Returns:\n A replica device setter, or None.\n ' ps_ops = ['Variable', 'AutoReloadVariable', 'MutableHashTable', 'MutableHashTableOfTensors', 'MutableDenseHashTable'] if config.job_name: worker_device = ('/job:%s/task:%d' % (config.job_name, config.task)) else: worker_device = '/job:worker' if (config.num_ps_replicas > 0): return device_setter.replica_device_setter(ps_tasks=config.num_ps_replicas, worker_device=worker_device, merge_devices=False, ps_ops=ps_ops, cluster=config.cluster_spec) else: return None<|docstring|>Creates a replica device setter if required. Args: config: A RunConfig instance. Returns: A replica device setter, or None.<|endoftext|>
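A hedged sketch of the helper's effect, using a hypothetical stand-in config that carries only the attributes the function reads:

import tensorflow as tf

class FakeConfig(object):  # hypothetical stand-in for a RunConfig
    job_name = 'worker'
    task = 0
    num_ps_replicas = 2
    cluster_spec = None

setter = _get_replica_device_setter(FakeConfig())
with tf.Graph().as_default(), tf.device(setter):
    # Variable ops get pinned round-robin to /job:ps tasks at graph build time.
    v = tf.Variable(tf.zeros([10]))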
805f65f0f4789ab028c57d27894dca7dc64b1789a9a789e776291f2dec39540f
def _make_metrics_ops(metrics, features, labels, predictions): 'Add metrics based on `features`, `labels`, and `predictions`.\n\n `metrics` contains a specification for how to run metrics. It is a dict\n mapping friendly names to either `MetricSpec` objects, or directly to a metric\n function (assuming that `predictions` and `labels` are single tensors), or to\n `(pred_name, metric)` `tuple`, which passes `predictions[pred_name]` and\n `labels` to `metric` (assuming `labels` is a single tensor).\n\n Users are encouraged to use `MetricSpec` objects, which are more flexible and\n cleaner. They also lead to clearer errors.\n\n Args:\n metrics: A dict mapping names to metrics specification, for example\n `MetricSpec` objects.\n features: A dict of tensors returned from an input_fn as features/inputs.\n labels: A single tensor or a dict of tensors returned from an input_fn as\n labels.\n predictions: A single tensor or a dict of tensors output from a model as\n predictions.\n\n Returns:\n A dict mapping the friendly given in `metrics` to the result of calling the\n given metric function.\n\n Raises:\n ValueError: If metrics specifications do not work with the type of\n `features`, `labels`, or `predictions` provided. Mostly, a dict is given\n but no pred_name specified.\n ' metrics = (metrics or {}) labels_tensor_or_dict = labels if (isinstance(labels, dict) and (len(labels) == 1)): labels_tensor_or_dict = labels[list(labels.keys())[0]] result = {} for (name, metric) in sorted(six.iteritems(metrics)): if isinstance(metric, metric_spec.MetricSpec): result[name] = metric.create_metric_ops(features, labels, predictions) continue logging.warning('Please specify metrics using MetricSpec. Using bare functions or (key, fn) tuples is deprecated and support for it will be removed on Oct 1, 2016.') if isinstance(name, tuple): if (len(name) != 2): raise ValueError('Invalid metric for {}. It returned a tuple with len {}, expected 2.'.format(name, len(name))) if (not isinstance(predictions, dict)): raise ValueError(('Metrics passed provide (name, prediction), but predictions are not dict. Metrics: %s, Predictions: %s.' % (metrics, predictions))) if (isinstance(labels, dict) and (name[1] in labels)): result[name[0]] = metric(predictions[name[1]], labels[name[1]]) else: result[name[0]] = metric(predictions[name[1]], labels_tensor_or_dict) else: if isinstance(predictions, dict): raise ValueError(('Metrics passed provide only name, no prediction, but predictions are dict. Metrics: %s, Labels: %s.' % (metrics, labels_tensor_or_dict))) result[name] = metric(predictions, labels_tensor_or_dict) return result
Add metrics based on `features`, `labels`, and `predictions`. `metrics` contains a specification for how to run metrics. It is a dict mapping friendly names to either `MetricSpec` objects, or directly to a metric function (assuming that `predictions` and `labels` are single tensors), or to `(pred_name, metric)` `tuple`, which passes `predictions[pred_name]` and `labels` to `metric` (assuming `labels` is a single tensor). Users are encouraged to use `MetricSpec` objects, which are more flexible and cleaner. They also lead to clearer errors. Args: metrics: A dict mapping names to metrics specification, for example `MetricSpec` objects. features: A dict of tensors returned from an input_fn as features/inputs. labels: A single tensor or a dict of tensors returned from an input_fn as labels. predictions: A single tensor or a dict of tensors output from a model as predictions. Returns: A dict mapping the friendly given in `metrics` to the result of calling the given metric function. Raises: ValueError: If metrics specifications do not work with the type of `features`, `labels`, or `predictions` provided. Mostly, a dict is given but no pred_name specified.
tensorflow/contrib/learn/python/learn/estimators/estimator.py
_make_metrics_ops
PedroLelis/tensorflow
1
python
def _make_metrics_ops(metrics, features, labels, predictions): 'Add metrics based on `features`, `labels`, and `predictions`.\n\n `metrics` contains a specification for how to run metrics. It is a dict\n mapping friendly names to either `MetricSpec` objects, or directly to a metric\n function (assuming that `predictions` and `labels` are single tensors), or to\n `(pred_name, metric)` `tuple`, which passes `predictions[pred_name]` and\n `labels` to `metric` (assuming `labels` is a single tensor).\n\n Users are encouraged to use `MetricSpec` objects, which are more flexible and\n cleaner. They also lead to clearer errors.\n\n Args:\n metrics: A dict mapping names to metrics specification, for example\n `MetricSpec` objects.\n features: A dict of tensors returned from an input_fn as features/inputs.\n labels: A single tensor or a dict of tensors returned from an input_fn as\n labels.\n predictions: A single tensor or a dict of tensors output from a model as\n predictions.\n\n Returns:\n A dict mapping the friendly given in `metrics` to the result of calling the\n given metric function.\n\n Raises:\n ValueError: If metrics specifications do not work with the type of\n `features`, `labels`, or `predictions` provided. Mostly, a dict is given\n but no pred_name specified.\n ' metrics = (metrics or {}) labels_tensor_or_dict = labels if (isinstance(labels, dict) and (len(labels) == 1)): labels_tensor_or_dict = labels[list(labels.keys())[0]] result = {} for (name, metric) in sorted(six.iteritems(metrics)): if isinstance(metric, metric_spec.MetricSpec): result[name] = metric.create_metric_ops(features, labels, predictions) continue logging.warning('Please specify metrics using MetricSpec. Using bare functions or (key, fn) tuples is deprecated and support for it will be removed on Oct 1, 2016.') if isinstance(name, tuple): if (len(name) != 2): raise ValueError('Invalid metric for {}. It returned a tuple with len {}, expected 2.'.format(name, len(name))) if (not isinstance(predictions, dict)): raise ValueError(('Metrics passed provide (name, prediction), but predictions are not dict. Metrics: %s, Predictions: %s.' % (metrics, predictions))) if (isinstance(labels, dict) and (name[1] in labels)): result[name[0]] = metric(predictions[name[1]], labels[name[1]]) else: result[name[0]] = metric(predictions[name[1]], labels_tensor_or_dict) else: if isinstance(predictions, dict): raise ValueError(('Metrics passed provide only name, no prediction, but predictions are dict. Metrics: %s, Labels: %s.' % (metrics, labels_tensor_or_dict))) result[name] = metric(predictions, labels_tensor_or_dict) return result
def _make_metrics_ops(metrics, features, labels, predictions): 'Add metrics based on `features`, `labels`, and `predictions`.\n\n `metrics` contains a specification for how to run metrics. It is a dict\n mapping friendly names to either `MetricSpec` objects, or directly to a metric\n function (assuming that `predictions` and `labels` are single tensors), or to\n `(pred_name, metric)` `tuple`, which passes `predictions[pred_name]` and\n `labels` to `metric` (assuming `labels` is a single tensor).\n\n Users are encouraged to use `MetricSpec` objects, which are more flexible and\n cleaner. They also lead to clearer errors.\n\n Args:\n metrics: A dict mapping names to metrics specification, for example\n `MetricSpec` objects.\n features: A dict of tensors returned from an input_fn as features/inputs.\n labels: A single tensor or a dict of tensors returned from an input_fn as\n labels.\n predictions: A single tensor or a dict of tensors output from a model as\n predictions.\n\n Returns:\n A dict mapping the friendly given in `metrics` to the result of calling the\n given metric function.\n\n Raises:\n ValueError: If metrics specifications do not work with the type of\n `features`, `labels`, or `predictions` provided. Mostly, a dict is given\n but no pred_name specified.\n ' metrics = (metrics or {}) labels_tensor_or_dict = labels if (isinstance(labels, dict) and (len(labels) == 1)): labels_tensor_or_dict = labels[list(labels.keys())[0]] result = {} for (name, metric) in sorted(six.iteritems(metrics)): if isinstance(metric, metric_spec.MetricSpec): result[name] = metric.create_metric_ops(features, labels, predictions) continue logging.warning('Please specify metrics using MetricSpec. Using bare functions or (key, fn) tuples is deprecated and support for it will be removed on Oct 1, 2016.') if isinstance(name, tuple): if (len(name) != 2): raise ValueError('Invalid metric for {}. It returned a tuple with len {}, expected 2.'.format(name, len(name))) if (not isinstance(predictions, dict)): raise ValueError(('Metrics passed provide (name, prediction), but predictions are not dict. Metrics: %s, Predictions: %s.' % (metrics, predictions))) if (isinstance(labels, dict) and (name[1] in labels)): result[name[0]] = metric(predictions[name[1]], labels[name[1]]) else: result[name[0]] = metric(predictions[name[1]], labels_tensor_or_dict) else: if isinstance(predictions, dict): raise ValueError(('Metrics passed provide only name, no prediction, but predictions are dict. Metrics: %s, Labels: %s.' % (metrics, labels_tensor_or_dict))) result[name] = metric(predictions, labels_tensor_or_dict) return result<|docstring|>Add metrics based on `features`, `labels`, and `predictions`. `metrics` contains a specification for how to run metrics. It is a dict mapping friendly names to either `MetricSpec` objects, or directly to a metric function (assuming that `predictions` and `labels` are single tensors), or to `(pred_name, metric)` `tuple`, which passes `predictions[pred_name]` and `labels` to `metric` (assuming `labels` is a single tensor). Users are encouraged to use `MetricSpec` objects, which are more flexible and cleaner. They also lead to clearer errors. Args: metrics: A dict mapping names to metrics specification, for example `MetricSpec` objects. features: A dict of tensors returned from an input_fn as features/inputs. labels: A single tensor or a dict of tensors returned from an input_fn as labels. predictions: A single tensor or a dict of tensors output from a model as predictions. Returns: A dict mapping the friendly given in `metrics` to the result of calling the given metric function. Raises: ValueError: If metrics specifications do not work with the type of `features`, `labels`, or `predictions` provided. Mostly, a dict is given but no pred_name specified.<|endoftext|>
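A sketch of the metric specification styles the function above accepts, with placeholder tensors; the MetricSpec import path is an assumption based on the metric_spec module referenced in the body:

import tensorflow as tf
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec

features = {'x': tf.constant([[1.0], [2.0]])}   # placeholder
labels = tf.constant([1, 0])                    # placeholder
predictions = {'classes': tf.constant([1, 0])}  # placeholder

eval_metrics = {
    'accuracy': MetricSpec(metric_fn=metrics_lib.streaming_accuracy,
                           prediction_key='classes'),
}
# Legacy alternative that triggers the deprecation warning above:
# eval_metrics = {('accuracy', 'classes'): metrics_lib.streaming_accuracy}
metric_ops = _make_metrics_ops(eval_metrics, features, labels, predictions)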
bd997856ec65f405c090e796e13a6dd63f21a8157985fab639abe10a722e356b
def __init__(self, model_dir=None, config=None): 'Initializes a BaseEstimator instance.\n\n Args:\n model_dir: Directory to save model parameters, graph and etc. This can\n also be used to load checkpoints from the directory into a estimator to\n continue training a previously saved model.\n config: A RunConfig instance.\n ' self._model_dir = model_dir if (self._model_dir is None): self._model_dir = tempfile.mkdtemp() logging.warning('Using temporary folder as model directory: %s', self._model_dir) if (config is None): self._config = BaseEstimator._Config() logging.info('Using default config.') else: self._config = config logging.info('Using config: %s', str(vars(self._config))) self._device_fn = _get_replica_device_setter(self._config) self._features_info = None self._labels_info = None self._graph = None
Initializes a BaseEstimator instance. Args: model_dir: Directory to save model parameters, graph and etc. This can also be used to load checkpoints from the directory into a estimator to continue training a previously saved model. config: A RunConfig instance.
tensorflow/contrib/learn/python/learn/estimators/estimator.py
__init__
PedroLelis/tensorflow
1
python
def __init__(self, model_dir=None, config=None): 'Initializes a BaseEstimator instance.\n\n Args:\n model_dir: Directory to save model parameters, graph and etc. This can\n also be used to load checkpoints from the directory into a estimator to\n continue training a previously saved model.\n config: A RunConfig instance.\n ' self._model_dir = model_dir if (self._model_dir is None): self._model_dir = tempfile.mkdtemp() logging.warning('Using temporary folder as model directory: %s', self._model_dir) if (config is None): self._config = BaseEstimator._Config() logging.info('Using default config.') else: self._config = config logging.info('Using config: %s', str(vars(self._config))) self._device_fn = _get_replica_device_setter(self._config) self._features_info = None self._labels_info = None self._graph = None
def __init__(self, model_dir=None, config=None): 'Initializes a BaseEstimator instance.\n\n Args:\n model_dir: Directory to save model parameters, graph and etc. This can\n also be used to load checkpoints from the directory into a estimator to\n continue training a previously saved model.\n config: A RunConfig instance.\n ' self._model_dir = model_dir if (self._model_dir is None): self._model_dir = tempfile.mkdtemp() logging.warning('Using temporary folder as model directory: %s', self._model_dir) if (config is None): self._config = BaseEstimator._Config() logging.info('Using default config.') else: self._config = config logging.info('Using config: %s', str(vars(self._config))) self._device_fn = _get_replica_device_setter(self._config) self._features_info = None self._labels_info = None self._graph = None<|docstring|>Initializes a BaseEstimator instance. Args: model_dir: Directory to save model parameters, graph and etc. This can also be used to load checkpoints from the directory into a estimator to continue training a previously saved model. config: A RunConfig instance.<|endoftext|>
2651b9418c8cdd2efb43328b33411e28d35aa923417e1acc08309374db1f4a04
@deprecated_args(SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, 'x', 'y', 'batch_size') def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None, monitors=None, max_steps=None): 'See `Trainable`.\n\n Raises:\n ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`.\n ValueError: If both `steps` and `max_steps` are not `None`.\n ' if ((steps is not None) and (max_steps is not None)): raise ValueError('Can not provide both steps and max_steps.') (input_fn, feed_fn) = _get_input_fn(x, y, input_fn, feed_fn=None, batch_size=batch_size, shuffle=True, epochs=None) loss = self._train_model(input_fn=input_fn, feed_fn=feed_fn, steps=steps, monitors=monitors, max_steps=max_steps) logging.info('Loss for final step: %s.', loss) return self
See `Trainable`. Raises: ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`. ValueError: If both `steps` and `max_steps` are not `None`.
tensorflow/contrib/learn/python/learn/estimators/estimator.py
fit
PedroLelis/tensorflow
1
python
@deprecated_args(SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, 'x', 'y', 'batch_size') def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None, monitors=None, max_steps=None): 'See `Trainable`.\n\n Raises:\n ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`.\n ValueError: If both `steps` and `max_steps` are not `None`.\n ' if ((steps is not None) and (max_steps is not None)): raise ValueError('Can not provide both steps and max_steps.') (input_fn, feed_fn) = _get_input_fn(x, y, input_fn, feed_fn=None, batch_size=batch_size, shuffle=True, epochs=None) loss = self._train_model(input_fn=input_fn, feed_fn=feed_fn, steps=steps, monitors=monitors, max_steps=max_steps) logging.info('Loss for final step: %s.', loss) return self
@deprecated_args(SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, 'x', 'y', 'batch_size') def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None, monitors=None, max_steps=None): 'See `Trainable`.\n\n Raises:\n ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`.\n ValueError: If both `steps` and `max_steps` are not `None`.\n ' if ((steps is not None) and (max_steps is not None)): raise ValueError('Can not provide both steps and max_steps.') (input_fn, feed_fn) = _get_input_fn(x, y, input_fn, feed_fn=None, batch_size=batch_size, shuffle=True, epochs=None) loss = self._train_model(input_fn=input_fn, feed_fn=feed_fn, steps=steps, monitors=monitors, max_steps=max_steps) logging.info('Loss for final step: %s.', loss) return self<|docstring|>See `Trainable`. Raises: ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`. ValueError: If both `steps` and `max_steps` are not `None`.<|endoftext|>
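A typical training call against the signature above, assuming the Estimator subclass defined later in the same module; my_model_fn and train_input_fn are placeholders:

est = Estimator(model_fn=my_model_fn, model_dir='/tmp/my_model')
est.fit(input_fn=train_input_fn, steps=1000)      # bounded incremental run
est.fit(input_fn=train_input_fn, max_steps=5000)  # resumes, stops at global step 5000
# est.fit(input_fn=train_input_fn, steps=10, max_steps=20)  # ValueError: both set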
5f2e302b3f959b2f01cf0e409b2c5ab10f43fd2f77b9fce25f559cee6df971cc
@deprecated_args(SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, 'x', 'y', 'batch_size') def partial_fit(self, x=None, y=None, input_fn=None, steps=1, batch_size=None, monitors=None): 'Incremental fit on a batch of samples.\n\n This method is expected to be called several times consecutively\n on different or the same chunks of the dataset. This either can\n implement iterative training or out-of-core/online training.\n\n This is especially useful when the whole dataset is too big to\n fit in memory at the same time. Or when model is taking long time\n to converge, and you want to split up training into subparts.\n\n Args:\n x: Matrix of shape [n_samples, n_features...]. Can be iterator that\n returns arrays of features. The training input samples for fitting the\n model. If set, `input_fn` must be `None`.\n y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be\n iterator that returns array of labels. The training label values\n (class labels in classification, real numbers in regression). If set,\n `input_fn` must be `None`.\n input_fn: Input function. If set, `x`, `y`, and `batch_size` must be\n `None`.\n steps: Number of steps for which to train model. If `None`, train forever.\n batch_size: minibatch size to use on the input, defaults to first\n dimension of `x`. Must be `None` if `input_fn` is provided.\n monitors: List of `BaseMonitor` subclass instances. Used for callbacks\n inside the training loop.\n\n Returns:\n `self`, for chaining.\n\n Raises:\n ValueError: If at least one of `x` and `y` is provided, and `input_fn` is\n provided.\n ' logging.warning('The current implementation of partial_fit is not optimized for use in a loop. Consider using fit() instead.') return self.fit(x=x, y=y, input_fn=input_fn, steps=steps, batch_size=batch_size, monitors=monitors)
Incremental fit on a batch of samples. This method is expected to be called several times consecutively on different or the same chunks of the dataset. This either can implement iterative training or out-of-core/online training. This is especially useful when the whole dataset is too big to fit in memory at the same time. Or when model is taking long time to converge, and you want to split up training into subparts. Args: x: Matrix of shape [n_samples, n_features...]. Can be iterator that returns arrays of features. The training input samples for fitting the model. If set, `input_fn` must be `None`. y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be iterator that returns array of labels. The training label values (class labels in classification, real numbers in regression). If set, `input_fn` must be `None`. input_fn: Input function. If set, `x`, `y`, and `batch_size` must be `None`. steps: Number of steps for which to train model. If `None`, train forever. batch_size: minibatch size to use on the input, defaults to first dimension of `x`. Must be `None` if `input_fn` is provided. monitors: List of `BaseMonitor` subclass instances. Used for callbacks inside the training loop. Returns: `self`, for chaining. Raises: ValueError: If at least one of `x` and `y` is provided, and `input_fn` is provided.
tensorflow/contrib/learn/python/learn/estimators/estimator.py
partial_fit
PedroLelis/tensorflow
1
python
@deprecated_args(SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, 'x', 'y', 'batch_size') def partial_fit(self, x=None, y=None, input_fn=None, steps=1, batch_size=None, monitors=None): 'Incremental fit on a batch of samples.\n\n This method is expected to be called several times consecutively\n on different or the same chunks of the dataset. This either can\n implement iterative training or out-of-core/online training.\n\n This is especially useful when the whole dataset is too big to\n fit in memory at the same time. Or when model is taking long time\n to converge, and you want to split up training into subparts.\n\n Args:\n x: Matrix of shape [n_samples, n_features...]. Can be iterator that\n returns arrays of features. The training input samples for fitting the\n model. If set, `input_fn` must be `None`.\n y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be\n iterator that returns array of labels. The training label values\n (class labels in classification, real numbers in regression). If set,\n `input_fn` must be `None`.\n input_fn: Input function. If set, `x`, `y`, and `batch_size` must be\n `None`.\n steps: Number of steps for which to train model. If `None`, train forever.\n batch_size: minibatch size to use on the input, defaults to first\n dimension of `x`. Must be `None` if `input_fn` is provided.\n monitors: List of `BaseMonitor` subclass instances. Used for callbacks\n inside the training loop.\n\n Returns:\n `self`, for chaining.\n\n Raises:\n ValueError: If at least one of `x` and `y` is provided, and `input_fn` is\n provided.\n ' logging.warning('The current implementation of partial_fit is not optimized for use in a loop. Consider using fit() instead.') return self.fit(x=x, y=y, input_fn=input_fn, steps=steps, batch_size=batch_size, monitors=monitors)
@deprecated_args(SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, 'x', 'y', 'batch_size') def partial_fit(self, x=None, y=None, input_fn=None, steps=1, batch_size=None, monitors=None): 'Incremental fit on a batch of samples.\n\n This method is expected to be called several times consecutively\n on different or the same chunks of the dataset. This either can\n implement iterative training or out-of-core/online training.\n\n This is especially useful when the whole dataset is too big to\n fit in memory at the same time. Or when model is taking long time\n to converge, and you want to split up training into subparts.\n\n Args:\n x: Matrix of shape [n_samples, n_features...]. Can be iterator that\n returns arrays of features. The training input samples for fitting the\n model. If set, `input_fn` must be `None`.\n y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be\n iterator that returns array of labels. The training label values\n (class labels in classification, real numbers in regression). If set,\n `input_fn` must be `None`.\n input_fn: Input function. If set, `x`, `y`, and `batch_size` must be\n `None`.\n steps: Number of steps for which to train model. If `None`, train forever.\n batch_size: minibatch size to use on the input, defaults to first\n dimension of `x`. Must be `None` if `input_fn` is provided.\n monitors: List of `BaseMonitor` subclass instances. Used for callbacks\n inside the training loop.\n\n Returns:\n `self`, for chaining.\n\n Raises:\n ValueError: If at least one of `x` and `y` is provided, and `input_fn` is\n provided.\n ' logging.warning('The current implementation of partial_fit is not optimized for use in a loop. Consider using fit() instead.') return self.fit(x=x, y=y, input_fn=input_fn, steps=steps, batch_size=batch_size, monitors=monitors)<|docstring|>Incremental fit on a batch of samples. This method is expected to be called several times consecutively on different or the same chunks of the dataset. This either can implement iterative training or out-of-core/online training. This is especially useful when the whole dataset is too big to fit in memory at the same time. Or when model is taking long time to converge, and you want to split up training into subparts. Args: x: Matrix of shape [n_samples, n_features...]. Can be iterator that returns arrays of features. The training input samples for fitting the model. If set, `input_fn` must be `None`. y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be iterator that returns array of labels. The training label values (class labels in classification, real numbers in regression). If set, `input_fn` must be `None`. input_fn: Input function. If set, `x`, `y`, and `batch_size` must be `None`. steps: Number of steps for which to train model. If `None`, train forever. batch_size: minibatch size to use on the input, defaults to first dimension of `x`. Must be `None` if `input_fn` is provided. monitors: List of `BaseMonitor` subclass instances. Used for callbacks inside the training loop. Returns: `self`, for chaining. Raises: ValueError: If at least one of `x` and `y` is provided, and `input_fn` is provided.<|endoftext|>
daa6965ed4df6d9cf292711ca3ea8ded7d41282092ba58713dc3f7d326833de3
@deprecated_args(SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, 'x', 'y', 'batch_size') def evaluate(self, x=None, y=None, input_fn=None, feed_fn=None, batch_size=None, steps=None, metrics=None, name=None): 'See `Evaluable`.\n\n Raises:\n ValueError: If at least one of `x` or `y` is provided, and at least one of\n `input_fn` or `feed_fn` is provided.\n Or if `metrics` is not `None` or `dict`.\n ' (input_fn, feed_fn) = _get_input_fn(x, y, input_fn=input_fn, feed_fn=feed_fn, batch_size=batch_size, shuffle=False, epochs=1) if ((metrics is not None) and (not isinstance(metrics, dict))): raise ValueError(('Metrics argument should be None or dict. Got %s.' % metrics)) (eval_results, global_step) = self._evaluate_model(input_fn=input_fn, feed_fn=feed_fn, steps=steps, metrics=metrics, name=name) if (eval_results is not None): eval_results.update({'global_step': global_step}) return eval_results
See `Evaluable`. Raises: ValueError: If at least one of `x` or `y` is provided, and at least one of `input_fn` or `feed_fn` is provided. Or if `metrics` is not `None` or `dict`.
tensorflow/contrib/learn/python/learn/estimators/estimator.py
evaluate
PedroLelis/tensorflow
1
python
@deprecated_args(SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, 'x', 'y', 'batch_size') def evaluate(self, x=None, y=None, input_fn=None, feed_fn=None, batch_size=None, steps=None, metrics=None, name=None): 'See `Evaluable`.\n\n Raises:\n ValueError: If at least one of `x` or `y` is provided, and at least one of\n `input_fn` or `feed_fn` is provided.\n Or if `metrics` is not `None` or `dict`.\n ' (input_fn, feed_fn) = _get_input_fn(x, y, input_fn=input_fn, feed_fn=feed_fn, batch_size=batch_size, shuffle=False, epochs=1) if ((metrics is not None) and (not isinstance(metrics, dict))): raise ValueError(('Metrics argument should be None or dict. Got %s.' % metrics)) (eval_results, global_step) = self._evaluate_model(input_fn=input_fn, feed_fn=feed_fn, steps=steps, metrics=metrics, name=name) if (eval_results is not None): eval_results.update({'global_step': global_step}) return eval_results
@deprecated_args(SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, 'x', 'y', 'batch_size') def evaluate(self, x=None, y=None, input_fn=None, feed_fn=None, batch_size=None, steps=None, metrics=None, name=None): 'See `Evaluable`.\n\n Raises:\n ValueError: If at least one of `x` or `y` is provided, and at least one of\n `input_fn` or `feed_fn` is provided.\n Or if `metrics` is not `None` or `dict`.\n ' (input_fn, feed_fn) = _get_input_fn(x, y, input_fn=input_fn, feed_fn=feed_fn, batch_size=batch_size, shuffle=False, epochs=1) if ((metrics is not None) and (not isinstance(metrics, dict))): raise ValueError(('Metrics argument should be None or dict. Got %s.' % metrics)) (eval_results, global_step) = self._evaluate_model(input_fn=input_fn, feed_fn=feed_fn, steps=steps, metrics=metrics, name=name) if (eval_results is not None): eval_results.update({'global_step': global_step}) return eval_results<|docstring|>See `Evaluable`. Raises: ValueError: If at least one of `x` or `y` is provided, and at least one of `input_fn` or `feed_fn` is provided. Or if `metrics` is not `None` or `dict`.<|endoftext|>
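Continuing the placeholder estimator from the fit sketch; the returned dict always gains the 'global_step' key per the body above, and eval_input_fn is a placeholder:

results = est.evaluate(input_fn=eval_input_fn, steps=100, name='validation')
print(results['global_step'])  # merged into the eval results above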
e1d61e5553e126ef6a68828128263726dfd4a53927d7683b9664ed05cf8016cd
@deprecated_args(SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, 'x', 'batch_size', 'as_iterable') def predict(self, x=None, input_fn=None, batch_size=None, outputs=None, as_iterable=True): "Returns predictions for given features.\n\n Args:\n x: Matrix of shape [n_samples, n_features...]. Can be iterator that\n returns arrays of features. The training input samples for fitting the\n model. If set, `input_fn` must be `None`.\n input_fn: Input function. If set, `x` and 'batch_size' must be `None`.\n batch_size: Override default batch size. If set, 'input_fn' must be\n 'None'.\n outputs: list of `str`, name of the output to predict.\n If `None`, returns all.\n as_iterable: If True, return an iterable which keeps yielding predictions\n for each example until inputs are exhausted. Note: The inputs must\n terminate if you want the iterable to terminate (e.g. be sure to pass\n num_epochs=1 if you are using something like read_batch_features).\n\n Returns:\n A numpy array of predicted classes or regression values if the\n constructor's `model_fn` returns a `Tensor` for `predictions` or a `dict`\n of numpy arrays if `model_fn` returns a `dict`. Returns an iterable of\n predictions if as_iterable is True.\n\n Raises:\n ValueError: If x and input_fn are both provided or both `None`.\n " (input_fn, feed_fn) = _get_input_fn(x, None, input_fn=input_fn, feed_fn=None, batch_size=batch_size, shuffle=False, epochs=1) return self._infer_model(input_fn=input_fn, feed_fn=feed_fn, outputs=outputs, as_iterable=as_iterable)
Returns predictions for given features. Args: x: Matrix of shape [n_samples, n_features...]. Can be iterator that returns arrays of features. The training input samples for fitting the model. If set, `input_fn` must be `None`. input_fn: Input function. If set, `x` and 'batch_size' must be `None`. batch_size: Override default batch size. If set, 'input_fn' must be 'None'. outputs: list of `str`, name of the output to predict. If `None`, returns all. as_iterable: If True, return an iterable which keeps yielding predictions for each example until inputs are exhausted. Note: The inputs must terminate if you want the iterable to terminate (e.g. be sure to pass num_epochs=1 if you are using something like read_batch_features). Returns: A numpy array of predicted classes or regression values if the constructor's `model_fn` returns a `Tensor` for `predictions` or a `dict` of numpy arrays if `model_fn` returns a `dict`. Returns an iterable of predictions if as_iterable is True. Raises: ValueError: If x and input_fn are both provided or both `None`.
tensorflow/contrib/learn/python/learn/estimators/estimator.py
predict
PedroLelis/tensorflow
1
python
@deprecated_args(SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, 'x', 'batch_size', 'as_iterable') def predict(self, x=None, input_fn=None, batch_size=None, outputs=None, as_iterable=True): "Returns predictions for given features.\n\n Args:\n x: Matrix of shape [n_samples, n_features...]. Can be iterator that\n returns arrays of features. The training input samples for fitting the\n model. If set, `input_fn` must be `None`.\n input_fn: Input function. If set, `x` and 'batch_size' must be `None`.\n batch_size: Override default batch size. If set, 'input_fn' must be\n 'None'.\n outputs: list of `str`, name of the output to predict.\n If `None`, returns all.\n as_iterable: If True, return an iterable which keeps yielding predictions\n for each example until inputs are exhausted. Note: The inputs must\n terminate if you want the iterable to terminate (e.g. be sure to pass\n num_epochs=1 if you are using something like read_batch_features).\n\n Returns:\n A numpy array of predicted classes or regression values if the\n constructor's `model_fn` returns a `Tensor` for `predictions` or a `dict`\n of numpy arrays if `model_fn` returns a `dict`. Returns an iterable of\n predictions if as_iterable is True.\n\n Raises:\n ValueError: If x and input_fn are both provided or both `None`.\n " (input_fn, feed_fn) = _get_input_fn(x, None, input_fn=input_fn, feed_fn=None, batch_size=batch_size, shuffle=False, epochs=1) return self._infer_model(input_fn=input_fn, feed_fn=feed_fn, outputs=outputs, as_iterable=as_iterable)
@deprecated_args(SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, 'x', 'batch_size', 'as_iterable') def predict(self, x=None, input_fn=None, batch_size=None, outputs=None, as_iterable=True): "Returns predictions for given features.\n\n Args:\n x: Matrix of shape [n_samples, n_features...]. Can be iterator that\n returns arrays of features. The training input samples for fitting the\n model. If set, `input_fn` must be `None`.\n input_fn: Input function. If set, `x` and 'batch_size' must be `None`.\n batch_size: Override default batch size. If set, 'input_fn' must be\n 'None'.\n outputs: list of `str`, name of the output to predict.\n If `None`, returns all.\n as_iterable: If True, return an iterable which keeps yielding predictions\n for each example until inputs are exhausted. Note: The inputs must\n terminate if you want the iterable to terminate (e.g. be sure to pass\n num_epochs=1 if you are using something like read_batch_features).\n\n Returns:\n A numpy array of predicted classes or regression values if the\n constructor's `model_fn` returns a `Tensor` for `predictions` or a `dict`\n of numpy arrays if `model_fn` returns a `dict`. Returns an iterable of\n predictions if as_iterable is True.\n\n Raises:\n ValueError: If x and input_fn are both provided or both `None`.\n " (input_fn, feed_fn) = _get_input_fn(x, None, input_fn=input_fn, feed_fn=None, batch_size=batch_size, shuffle=False, epochs=1) return self._infer_model(input_fn=input_fn, feed_fn=feed_fn, outputs=outputs, as_iterable=as_iterable)<|docstring|>Returns predictions for given features. Args: x: Matrix of shape [n_samples, n_features...]. Can be iterator that returns arrays of features. The training input samples for fitting the model. If set, `input_fn` must be `None`. input_fn: Input function. If set, `x` and 'batch_size' must be `None`. batch_size: Override default batch size. If set, 'input_fn' must be 'None'. outputs: list of `str`, name of the output to predict. If `None`, returns all. as_iterable: If True, return an iterable which keeps yielding predictions for each example until inputs are exhausted. Note: The inputs must terminate if you want the iterable to terminate (e.g. be sure to pass num_epochs=1 if you are using something like read_batch_features). Returns: A numpy array of predicted classes or regression values if the constructor's `model_fn` returns a `Tensor` for `predictions` or a `dict` of numpy arrays if `model_fn` returns a `dict`. Returns an iterable of predictions if as_iterable is True. Raises: ValueError: If x and input_fn are both provided or both `None`.<|endoftext|>
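A streaming prediction sketch with the same placeholder estimator; as_iterable defaults to True, so the loop ends only when the input pipeline is exhausted:

# Be sure predict_input_fn terminates, e.g. num_epochs=1 in the input pipeline.
for pred in est.predict(input_fn=predict_input_fn):
    print(pred)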
c132ed43875eb0c39f2eba744bb443fd571275a5e637c7894cfb7a38efea55bd
def get_variable_value(self, name): 'Returns value of the variable given by name.\n\n Args:\n name: string, name of the tensor.\n\n Returns:\n Numpy array - value of the tensor.\n ' return load_variable(self.model_dir, name)
Returns value of the variable given by name. Args: name: string, name of the tensor. Returns: Numpy array - value of the tensor.
tensorflow/contrib/learn/python/learn/estimators/estimator.py
get_variable_value
PedroLelis/tensorflow
1
python
def get_variable_value(self, name): 'Returns value of the variable given by name.\n\n Args:\n name: string, name of the tensor.\n\n Returns:\n Numpy array - value of the tensor.\n ' return load_variable(self.model_dir, name)
def get_variable_value(self, name): 'Returns value of the variable given by name.\n\n Args:\n name: string, name of the tensor.\n\n Returns:\n Numpy array - value of the tensor.\n ' return load_variable(self.model_dir, name)<|docstring|>Returns value of the variable given by name. Args: name: string, name of the tensor. Returns: Numpy array - value of the tensor.<|endoftext|>
b1ce67ad8027b76bb524785a41101241efcb673571931b5f51751a5435dac623
def get_variable_names(self): 'Returns list of all variable names in this model.\n\n Returns:\n List of names.\n ' return [name for (name, _) in list_variables(self.model_dir)]
Returns list of all variable names in this model. Returns: List of names.
tensorflow/contrib/learn/python/learn/estimators/estimator.py
get_variable_names
PedroLelis/tensorflow
1
python
def get_variable_names(self): 'Returns list of all variable names in this model.\n\n Returns:\n List of names.\n ' return [name for (name, _) in list_variables(self.model_dir)]
def get_variable_names(self): 'Returns list of all variable names in this model.\n\n Returns:\n List of names.\n ' return [name for (name, _) in list_variables(self.model_dir)]<|docstring|>Returns list of all variable names in this model. Returns: List of names.<|endoftext|>
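A sketch combining the two variable accessors above against the same placeholder estimator:

for name in est.get_variable_names():
    # Each value is loaded from the checkpoint in model_dir as a numpy array.
    print(name, est.get_variable_value(name).shape)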
25799618fbc88979703f376ac4aceeb821ee8d3f41542bbc6cc031d443ef28dd
@deprecated_arg_values('2016-09-23', "The signature of the input_fn accepted by export is changing to be consistent with what's used by tf.Learn Estimator's train/evaluate. input_fn (and in most cases, input_feature_key) will become required args, and use_deprecated_input_fn will default to False and be removed altogether.", use_deprecated_input_fn=True, input_fn=None) def export(self, export_dir, input_fn=export._default_input_fn, input_feature_key=None, use_deprecated_input_fn=True, signature_fn=None, prediction_key=None, default_batch_size=1, exports_to_keep=None): "Exports inference graph into given dir.\n\n Args:\n export_dir: A string containing a directory to write the exported graph\n and checkpoints.\n input_fn: If `use_deprecated_input_fn` is true, then a function that given\n `Tensor` of `Example` strings, parses it into features that are then\n passed to the model. Otherwise, a function that takes no argument and\n returns a tuple of (features, labels), where features is a dict of\n string key to `Tensor` and labels is a `Tensor` that's currently not\n used (and so can be `None`).\n input_feature_key: Only used if `use_deprecated_input_fn` is false. String\n key into the features dict returned by `input_fn` that corresponds to a\n the raw `Example` strings `Tensor` that the exported model will take as\n input. Can only be `None` if you're using a custom `signature_fn` that\n does not use the first arg (examples).\n use_deprecated_input_fn: Determines the signature format of `input_fn`.\n signature_fn: Function that returns a default signature and a named\n signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s\n for features and `Tensor` or `dict` of `Tensor`s for predictions.\n prediction_key: The key for a tensor in the `predictions` dict (output\n from the `model_fn`) to use as the `predictions` input to the\n `signature_fn`. Optional. If `None`, predictions will pass to\n `signature_fn` without filtering.\n default_batch_size: Default batch size of the `Example` placeholder.\n exports_to_keep: Number of exports to keep.\n\n Returns:\n The string path to the exported directory. NB: this functionality was\n added ca. 2016/09/25; clients that depend on the return value may need\n to handle the case where this function returns None because subclasses\n are not returning a value.\n " return export._export_estimator(estimator=self, export_dir=export_dir, signature_fn=signature_fn, prediction_key=prediction_key, input_fn=input_fn, input_feature_key=input_feature_key, use_deprecated_input_fn=use_deprecated_input_fn, default_batch_size=default_batch_size, exports_to_keep=exports_to_keep)
Exports inference graph into given dir. Args: export_dir: A string containing a directory to write the exported graph and checkpoints. input_fn: If `use_deprecated_input_fn` is true, then a function that given `Tensor` of `Example` strings, parses it into features that are then passed to the model. Otherwise, a function that takes no argument and returns a tuple of (features, labels), where features is a dict of string key to `Tensor` and labels is a `Tensor` that's currently not used (and so can be `None`). input_feature_key: Only used if `use_deprecated_input_fn` is false. String key into the features dict returned by `input_fn` that corresponds to a the raw `Example` strings `Tensor` that the exported model will take as input. Can only be `None` if you're using a custom `signature_fn` that does not use the first arg (examples). use_deprecated_input_fn: Determines the signature format of `input_fn`. signature_fn: Function that returns a default signature and a named signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s for features and `Tensor` or `dict` of `Tensor`s for predictions. prediction_key: The key for a tensor in the `predictions` dict (output from the `model_fn`) to use as the `predictions` input to the `signature_fn`. Optional. If `None`, predictions will pass to `signature_fn` without filtering. default_batch_size: Default batch size of the `Example` placeholder. exports_to_keep: Number of exports to keep. Returns: The string path to the exported directory. NB: this functionality was added ca. 2016/09/25; clients that depend on the return value may need to handle the case where this function returns None because subclasses are not returning a value.
tensorflow/contrib/learn/python/learn/estimators/estimator.py
export
PedroLelis/tensorflow
1
python
@deprecated_arg_values('2016-09-23', "The signature of the input_fn accepted by export is changing to be consistent with what's used by tf.Learn Estimator's train/evaluate. input_fn (and in most cases, input_feature_key) will become required args, and use_deprecated_input_fn will default to False and be removed altogether.", use_deprecated_input_fn=True, input_fn=None) def export(self, export_dir, input_fn=export._default_input_fn, input_feature_key=None, use_deprecated_input_fn=True, signature_fn=None, prediction_key=None, default_batch_size=1, exports_to_keep=None): "Exports inference graph into given dir.\n\n Args:\n export_dir: A string containing a directory to write the exported graph\n and checkpoints.\n input_fn: If `use_deprecated_input_fn` is true, then a function that, given\n a `Tensor` of `Example` strings, parses it into features that are then\n passed to the model. Otherwise, a function that takes no argument and\n returns a tuple of (features, labels), where features is a dict of\n string key to `Tensor` and labels is a `Tensor` that's currently not\n used (and so can be `None`).\n input_feature_key: Only used if `use_deprecated_input_fn` is false. String\n key into the features dict returned by `input_fn` that corresponds to\n the raw `Example` strings `Tensor` that the exported model will take as\n input. Can only be `None` if you're using a custom `signature_fn` that\n does not use the first arg (examples).\n use_deprecated_input_fn: Determines the signature format of `input_fn`.\n signature_fn: Function that returns a default signature and a named\n signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s\n for features and `Tensor` or `dict` of `Tensor`s for predictions.\n prediction_key: The key for a tensor in the `predictions` dict (output\n from the `model_fn`) to use as the `predictions` input to the\n `signature_fn`. Optional. If `None`, predictions will pass to\n `signature_fn` without filtering.\n default_batch_size: Default batch size of the `Example` placeholder.\n exports_to_keep: Number of exports to keep.\n\n Returns:\n The string path to the exported directory. NB: this functionality was\n added ca. 2016/09/25; clients that depend on the return value may need\n to handle the case where this function returns None because subclasses\n are not returning a value.\n " return export._export_estimator(estimator=self, export_dir=export_dir, signature_fn=signature_fn, prediction_key=prediction_key, input_fn=input_fn, input_feature_key=input_feature_key, use_deprecated_input_fn=use_deprecated_input_fn, default_batch_size=default_batch_size, exports_to_keep=exports_to_keep)
@deprecated_arg_values('2016-09-23', "The signature of the input_fn accepted by export is changing to be consistent with what's used by tf.Learn Estimator's train/evaluate. input_fn (and in most cases, input_feature_key) will become required args, and use_deprecated_input_fn will default to False and be removed altogether.", use_deprecated_input_fn=True, input_fn=None) def export(self, export_dir, input_fn=export._default_input_fn, input_feature_key=None, use_deprecated_input_fn=True, signature_fn=None, prediction_key=None, default_batch_size=1, exports_to_keep=None): "Exports inference graph into given dir.\n\n Args:\n export_dir: A string containing a directory to write the exported graph\n and checkpoints.\n input_fn: If `use_deprecated_input_fn` is true, then a function that, given\n a `Tensor` of `Example` strings, parses it into features that are then\n passed to the model. Otherwise, a function that takes no argument and\n returns a tuple of (features, labels), where features is a dict of\n string key to `Tensor` and labels is a `Tensor` that's currently not\n used (and so can be `None`).\n input_feature_key: Only used if `use_deprecated_input_fn` is false. String\n key into the features dict returned by `input_fn` that corresponds to\n the raw `Example` strings `Tensor` that the exported model will take as\n input. Can only be `None` if you're using a custom `signature_fn` that\n does not use the first arg (examples).\n use_deprecated_input_fn: Determines the signature format of `input_fn`.\n signature_fn: Function that returns a default signature and a named\n signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s\n for features and `Tensor` or `dict` of `Tensor`s for predictions.\n prediction_key: The key for a tensor in the `predictions` dict (output\n from the `model_fn`) to use as the `predictions` input to the\n `signature_fn`. Optional. If `None`, predictions will pass to\n `signature_fn` without filtering.\n default_batch_size: Default batch size of the `Example` placeholder.\n exports_to_keep: Number of exports to keep.\n\n Returns:\n The string path to the exported directory. NB: this functionality was\n added ca. 2016/09/25; clients that depend on the return value may need\n to handle the case where this function returns None because subclasses\n are not returning a value.\n " return export._export_estimator(estimator=self, export_dir=export_dir, signature_fn=signature_fn, prediction_key=prediction_key, input_fn=input_fn, input_feature_key=input_feature_key, use_deprecated_input_fn=use_deprecated_input_fn, default_batch_size=default_batch_size, exports_to_keep=exports_to_keep)<|docstring|>Exports inference graph into given dir. Args: export_dir: A string containing a directory to write the exported graph and checkpoints. input_fn: If `use_deprecated_input_fn` is true, then a function that, given a `Tensor` of `Example` strings, parses it into features that are then passed to the model. Otherwise, a function that takes no argument and returns a tuple of (features, labels), where features is a dict of string key to `Tensor` and labels is a `Tensor` that's currently not used (and so can be `None`). input_feature_key: Only used if `use_deprecated_input_fn` is false. String key into the features dict returned by `input_fn` that corresponds to the raw `Example` strings `Tensor` that the exported model will take as input. Can only be `None` if you're using a custom `signature_fn` that does not use the first arg (examples). use_deprecated_input_fn: Determines the signature format of `input_fn`. signature_fn: Function that returns a default signature and a named signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s for features and `Tensor` or `dict` of `Tensor`s for predictions. prediction_key: The key for a tensor in the `predictions` dict (output from the `model_fn`) to use as the `predictions` input to the `signature_fn`. Optional. If `None`, predictions will pass to `signature_fn` without filtering. default_batch_size: Default batch size of the `Example` placeholder. exports_to_keep: Number of exports to keep. Returns: The string path to the exported directory. NB: this functionality was added ca. 2016/09/25; clients that depend on the return value may need to handle the case where this function returns None because subclasses are not returning a value.<|endoftext|>
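A minimal usage sketch for the export API documented above, using only the parameters named in the docstring; `my_estimator` (a fitted estimator) and `my_input_fn` (a function returning a (features, labels) tuple) are assumed names, not part of the record.

# Sketch: export with the non-deprecated input_fn signature described above.
# `my_estimator` and `my_input_fn` are hypothetical.
export_path = my_estimator.export(
    export_dir='/tmp/my_model_export',   # where graph and checkpoints go
    input_fn=my_input_fn,                # () -> (features, labels)
    input_feature_key='examples',        # key of the raw Example strings Tensor
    use_deprecated_input_fn=False,
    default_batch_size=1,
    exports_to_keep=5)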
c575f6ef0fd06f45562725bdbd1ba50cfd961ba0bc7ee4be5b2044f171823a1b
@abc.abstractmethod def _get_train_ops(self, features, labels): 'Method that builds model graph and returns trainer ops.\n\n Expected to be overridden by sub-classes that require custom support.\n\n Args:\n features: `Tensor` or `dict` of `Tensor` objects.\n labels: `Tensor` or `dict` of `Tensor` objects.\n\n Returns:\n A `ModelFnOps` object.\n ' pass
Method that builds model graph and returns trainer ops. Expected to be overridden by sub-classes that require custom support. Args: features: `Tensor` or `dict` of `Tensor` objects. labels: `Tensor` or `dict` of `Tensor` objects. Returns: A `ModelFnOps` object.
tensorflow/contrib/learn/python/learn/estimators/estimator.py
_get_train_ops
PedroLelis/tensorflow
1
python
@abc.abstractmethod def _get_train_ops(self, features, labels): 'Method that builds model graph and returns trainer ops.\n\n Expected to be overridden by sub-classes that require custom support.\n\n Args:\n features: `Tensor` or `dict` of `Tensor` objects.\n labels: `Tensor` or `dict` of `Tensor` objects.\n\n Returns:\n A `ModelFnOps` object.\n ' pass
@abc.abstractmethod def _get_train_ops(self, features, labels): 'Method that builds model graph and returns trainer ops.\n\n Expected to be overridden by sub-classes that require custom support.\n\n Args:\n features: `Tensor` or `dict` of `Tensor` objects.\n labels: `Tensor` or `dict` of `Tensor` objects.\n\n Returns:\n A `ModelFnOps` object.\n ' pass<|docstring|>Method that builds model graph and returns trainer ops. Expected to be overridden by sub-classes that require custom support. Args: features: `Tensor` or `dict` of `Tensor` objects. labels: `Tensor` or `dict` of `Tensor` objects. Returns: A `ModelFnOps` object.<|endoftext|>
024d38f0b799da6c91c09adb6f6973c46212ffe97d0434ec49a784c0ac308cc6
@abc.abstractmethod def _get_predict_ops(self, features): 'Method that builds model graph and returns prediction ops.\n\n Args:\n features: `Tensor` or `dict` of `Tensor` objects.\n\n Returns:\n A `ModelFnOps` object.\n ' pass
Method that builds model graph and returns prediction ops. Args: features: `Tensor` or `dict` of `Tensor` objects. Returns: A `ModelFnOps` object.
tensorflow/contrib/learn/python/learn/estimators/estimator.py
_get_predict_ops
PedroLelis/tensorflow
1
python
@abc.abstractmethod def _get_predict_ops(self, features): 'Method that builds model graph and returns prediction ops.\n\n Args:\n features: `Tensor` or `dict` of `Tensor` objects.\n\n Returns:\n A `ModelFnOps` object.\n ' pass
@abc.abstractmethod def _get_predict_ops(self, features): 'Method that builds model graph and returns prediction ops.\n\n Args:\n features: `Tensor` or `dict` of `Tensor` objects.\n\n Returns:\n A `ModelFnOps` object.\n ' pass<|docstring|>Method that builds model graph and returns prediction ops. Args: features: `Tensor` or `dict` of `Tensor` objects. Returns: A `ModelFnOps` object.<|endoftext|>
384fd1b2a662676db7ac6596c010c2b2bfb436e91a165ec56f23848587f93c6a
def _get_eval_ops(self, features, labels, metrics): 'Method that builds model graph and returns evaluation ops.\n\n Expected to be overridden by sub-classes that require custom support.\n\n Args:\n features: `Tensor` or `dict` of `Tensor` objects.\n labels: `Tensor` or `dict` of `Tensor` objects.\n metrics: Dict of metrics to run. If None, the default metric functions\n are used; if {}, no metrics are used. Otherwise, `metrics` should map\n friendly names for the metric to a `MetricSpec` object defining which\n model outputs to evaluate against which labels with which metric\n function. Metric ops should support streaming, e.g., returning\n update_op and value tensors. See more details in\n `../../../../metrics/python/metrics/ops/streaming_metrics.py` and\n `../metric_spec.py`.\n\n Returns:\n A `ModelFnOps` object.\n ' raise NotImplementedError('_get_eval_ops not implemented in BaseEstimator')
Method that builds model graph and returns evaluation ops. Expected to be overridden by sub-classes that require custom support. Args: features: `Tensor` or `dict` of `Tensor` objects. labels: `Tensor` or `dict` of `Tensor` objects. metrics: Dict of metrics to run. If None, the default metric functions are used; if {}, no metrics are used. Otherwise, `metrics` should map friendly names for the metric to a `MetricSpec` object defining which model outputs to evaluate against which labels with which metric function. Metric ops should support streaming, e.g., returning update_op and value tensors. See more details in `../../../../metrics/python/metrics/ops/streaming_metrics.py` and `../metric_spec.py`. Returns: A `ModelFnOps` object.
tensorflow/contrib/learn/python/learn/estimators/estimator.py
_get_eval_ops
PedroLelis/tensorflow
1
python
def _get_eval_ops(self, features, labels, metrics): 'Method that builds model graph and returns evaluation ops.\n\n Expected to be overridden by sub-classes that require custom support.\n\n Args:\n features: `Tensor` or `dict` of `Tensor` objects.\n labels: `Tensor` or `dict` of `Tensor` objects.\n metrics: Dict of metrics to run. If None, the default metric functions\n are used; if {}, no metrics are used. Otherwise, `metrics` should map\n friendly names for the metric to a `MetricSpec` object defining which\n model outputs to evaluate against which labels with which metric\n function. Metric ops should support streaming, e.g., returning\n update_op and value tensors. See more details in\n `../../../../metrics/python/metrics/ops/streaming_metrics.py` and\n `../metric_spec.py`.\n\n Returns:\n A `ModelFnOps` object.\n ' raise NotImplementedError('_get_eval_ops not implemented in BaseEstimator')
def _get_eval_ops(self, features, labels, metrics): 'Method that builds model graph and returns evaluation ops.\n\n Expected to be overridden by sub-classes that require custom support.\n\n Args:\n features: `Tensor` or `dict` of `Tensor` objects.\n labels: `Tensor` or `dict` of `Tensor` objects.\n metrics: Dict of metrics to run. If None, the default metric functions\n are used; if {}, no metrics are used. Otherwise, `metrics` should map\n friendly names for the metric to a `MetricSpec` object defining which\n model outputs to evaluate against which labels with which metric\n function. Metric ops should support streaming, e.g., returning\n update_op and value tensors. See more details in\n `../../../../metrics/python/metrics/ops/streaming_metrics.py` and\n `../metric_spec.py`.\n\n Returns:\n A `ModelFnOps` object.\n ' raise NotImplementedError('_get_eval_ops not implemented in BaseEstimator')<|docstring|>Method that builds model graph and returns evaluation ops. Expected to be overridden by sub-classes that require custom support. Args: features: `Tensor` or `dict` of `Tensor` objects. labels: `Tensor` or `dict` of `Tensor` objects. metrics: Dict of metrics to run. If None, the default metric functions are used; if {}, no metrics are used. Otherwise, `metrics` should map friendly names for the metric to a `MetricSpec` object defining which model outputs to evaluate against which labels with which metric function. Metric ops should support streaming, e.g., returning update_op and value tensors. See more details in `../../../../metrics/python/metrics/ops/streaming_metrics.py` and `../metric_spec.py`. Returns: A `ModelFnOps` object.<|endoftext|>
66faa95459ede715efb3a352ae0936e63834eb3a731fb3bf6130709775c79c52
@deprecated('2016-09-23', "The signature of the input_fn accepted by export is changing to be consistent with what's used by tf.Learn Estimator's train/evaluate, which makes this function useless. This will be removed after the deprecation date.") def _get_feature_ops_from_example(self, examples_batch): 'Returns feature parser for given example batch using features info.\n\n This function requires `fit()` has been called.\n\n Args:\n examples_batch: batch of tf.Example\n\n Returns:\n features: `Tensor` or `dict` of `Tensor` objects.\n\n Raises:\n ValueError: If `_features_info` attribute is not available (usually\n because `fit()` has not been called).\n ' if (self._features_info is None): raise ValueError('Features information missing, was fit() ever called?') return tensor_signature.create_example_parser_from_signatures(self._features_info, examples_batch)
Returns feature parser for given example batch using features info. This function requires `fit()` has been called. Args: examples_batch: batch of tf.Example Returns: features: `Tensor` or `dict` of `Tensor` objects. Raises: ValueError: If `_features_info` attribute is not available (usually because `fit()` has not been called).
tensorflow/contrib/learn/python/learn/estimators/estimator.py
_get_feature_ops_from_example
PedroLelis/tensorflow
1
python
@deprecated('2016-09-23', "The signature of the input_fn accepted by export is changing to be consistent with what's used by tf.Learn Estimator's train/evaluate, which makes this function useless. This will be removed after the deprecation date.") def _get_feature_ops_from_example(self, examples_batch): 'Returns feature parser for given example batch using features info.\n\n This function requires `fit()` has been called.\n\n Args:\n examples_batch: batch of tf.Example\n\n Returns:\n features: `Tensor` or `dict` of `Tensor` objects.\n\n Raises:\n ValueError: If `_features_info` attribute is not available (usually\n because `fit()` has not been called).\n ' if (self._features_info is None): raise ValueError('Features information missing, was fit() ever called?') return tensor_signature.create_example_parser_from_signatures(self._features_info, examples_batch)
@deprecated('2016-09-23', "The signature of the input_fn accepted by export is changing to be consistent with what's used by tf.Learn Estimator's train/evaluate, which makes this function useless. This will be removed after the deprecation date.") def _get_feature_ops_from_example(self, examples_batch): 'Returns feature parser for given example batch using features info.\n\n This function requires `fit()` has been called.\n\n Args:\n examples_batch: batch of tf.Example\n\n Returns:\n features: `Tensor` or `dict` of `Tensor` objects.\n\n Raises:\n ValueError: If `_features_info` attribute is not available (usually\n because `fit()` has not been called).\n ' if (self._features_info is None): raise ValueError('Features information missing, was fit() ever called?') return tensor_signature.create_example_parser_from_signatures(self._features_info, examples_batch)<|docstring|>Returns feature parser for given example batch using features info. This function requires `fit()` has been called. Args: examples_batch: batch of tf.Example Returns: features: `Tensor` or `dict` of `Tensor` objects. Raises: ValueError: If `_features_info` attribute is not available (usually because `fit()` has not been called).<|endoftext|>
52820747918dd33bc481372aeb7581aba210bb852c32eb66d92c86b137a1c938
def _extract_metric_update_ops(self, eval_dict): 'Separate update operations from metric value operations.' update_ops = [] value_ops = {} for (name, metric_ops) in six.iteritems(eval_dict): if isinstance(metric_ops, (list, tuple)): if (len(metric_ops) == 2): value_ops[name] = metric_ops[0] update_ops.append(metric_ops[1]) else: logging.warning('Ignoring metric {}. It returned a list|tuple with len {}, expected 2'.format(name, len(metric_ops))) value_ops[name] = metric_ops else: value_ops[name] = metric_ops if update_ops: update_ops = control_flow_ops.group(*update_ops) else: update_ops = None return (update_ops, value_ops)
Separate update operations from metric value operations.
tensorflow/contrib/learn/python/learn/estimators/estimator.py
_extract_metric_update_ops
PedroLelis/tensorflow
1
python
def _extract_metric_update_ops(self, eval_dict): update_ops = [] value_ops = {} for (name, metric_ops) in six.iteritems(eval_dict): if isinstance(metric_ops, (list, tuple)): if (len(metric_ops) == 2): value_ops[name] = metric_ops[0] update_ops.append(metric_ops[1]) else: logging.warning('Ignoring metric {}. It returned a list|tuple with len {}, expected 2'.format(name, len(metric_ops))) value_ops[name] = metric_ops else: value_ops[name] = metric_ops if update_ops: update_ops = control_flow_ops.group(*update_ops) else: update_ops = None return (update_ops, value_ops)
def _extract_metric_update_ops(self, eval_dict): update_ops = [] value_ops = {} for (name, metric_ops) in six.iteritems(eval_dict): if isinstance(metric_ops, (list, tuple)): if (len(metric_ops) == 2): value_ops[name] = metric_ops[0] update_ops.append(metric_ops[1]) else: logging.warning('Ignoring metric {}. It returned a list|tuple with len {}, expected 2'.format(name, len(metric_ops))) value_ops[name] = metric_ops else: value_ops[name] = metric_ops if update_ops: update_ops = control_flow_ops.group(*update_ops) else: update_ops = None return (update_ops, value_ops)<|docstring|>Separate update operations from metric value operations.<|endoftext|>
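A short sketch of the eval_dict shape this helper splits apart, assuming TF 1.x-era streaming metrics that return (value_op, update_op) pairs; the tensors and the `estimator` instance are assumptions, not part of the record.

import tensorflow as tf

predictions = tf.constant([1.0, 2.0, 3.0])
# streaming_mean returns the (value_op, update_op) 2-tuple the helper expects.
eval_dict = {'mean_prediction': tf.contrib.metrics.streaming_mean(predictions)}
update_op, value_ops = estimator._extract_metric_update_ops(eval_dict)  # `estimator` assumed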
5fba301cf0d3adcd8bc9f27d223e8d70c8569dc1e5aacbd8f80467cf5c6df658
def __init__(self, model_fn=None, model_dir=None, config=None, params=None, feature_engineering_fn=None): "Constructs an `Estimator` instance.\n\n Args:\n model_fn: Model function. Follows the signature:\n * Args:\n * `features` are single `Tensor` or `dict` of `Tensor`s\n (depending on data passed to `fit`),\n * `labels` are `Tensor` or `dict` of `Tensor`s (for multi-head\n models). If mode is `ModeKeys.INFER`, `labels=None` will be\n passed. If the `model_fn`'s signature does not accept\n `mode`, the `model_fn` must still be able to handle\n `labels=None`.\n * `mode` specifies if this is training, evaluation or\n prediction. See `ModeKeys`.\n * `params` is a `dict` of hyperparameters. Will receive what\n is passed to Estimator in the `params` parameter. This allows\n configuring Estimators from hyperparameter tuning.\n\n * Returns:\n `ModelFnOps`\n\n Also supports a legacy signature which returns a tuple of:\n\n * predictions: `Tensor`, `SparseTensor` or dictionary of same.\n Can also be any type that is convertible to a `Tensor` or\n `SparseTensor`, or dictionary of same.\n * loss: Scalar loss `Tensor`.\n * train_op: Training update `Tensor` or `Operation`.\n\n Supports the following three signatures for the function:\n\n * `(features, labels) -> (predictions, loss, train_op)`\n * `(features, labels, mode) -> (predictions, loss, train_op)`\n * `(features, labels, mode, params) -> (predictions, loss, train_op)`\n\n model_dir: Directory to save model parameters, graph, etc. This can\n also be used to load checkpoints from the directory into an estimator to\n continue training a previously saved model.\n config: Configuration object.\n params: `dict` of hyperparameters that will be passed into `model_fn`.\n Keys are names of parameters, values are basic python types.\n feature_engineering_fn: Feature engineering function. Takes features and\n labels which are the output of `input_fn` and\n returns features and labels which will be fed\n into `model_fn`. Please check `model_fn` for\n a definition of features and labels.\n\n Raises:\n ValueError: parameters of `model_fn` don't match `params`.\n " super(Estimator, self).__init__(model_dir=model_dir, config=config) if (model_fn is not None): model_fn_args = _get_arguments(model_fn) if ((params is not None) and ('params' not in model_fn_args)): raise ValueError(("Estimator's model_fn (%s) has less than 4 arguments, but not None params (%s) are passed." % (model_fn, params))) if ((params is None) and ('params' in model_fn_args)): logging.warning("Estimator's model_fn (%s) includes params argument, but params are not passed to Estimator.", model_fn) self._model_fn = model_fn self.params = params self._feature_engineering_fn = (feature_engineering_fn or _identity_feature_engineering_fn)
Constructs an `Estimator` instance. Args: model_fn: Model function. Follows the signature: * Args: * `features` are single `Tensor` or `dict` of `Tensor`s (depending on data passed to `fit`), * `labels` are `Tensor` or `dict` of `Tensor`s (for multi-head models). If mode is `ModeKeys.INFER`, `labels=None` will be passed. If the `model_fn`'s signature does not accept `mode`, the `model_fn` must still be able to handle `labels=None`. * `mode` specifies if this is training, evaluation or prediction. See `ModeKeys`. * `params` is a `dict` of hyperparameters. Will receive what is passed to Estimator in the `params` parameter. This allows configuring Estimators from hyperparameter tuning. * Returns: `ModelFnOps` Also supports a legacy signature which returns a tuple of: * predictions: `Tensor`, `SparseTensor` or dictionary of same. Can also be any type that is convertible to a `Tensor` or `SparseTensor`, or dictionary of same. * loss: Scalar loss `Tensor`. * train_op: Training update `Tensor` or `Operation`. Supports the following three signatures for the function: * `(features, labels) -> (predictions, loss, train_op)` * `(features, labels, mode) -> (predictions, loss, train_op)` * `(features, labels, mode, params) -> (predictions, loss, train_op)` model_dir: Directory to save model parameters, graph, etc. This can also be used to load checkpoints from the directory into an estimator to continue training a previously saved model. config: Configuration object. params: `dict` of hyperparameters that will be passed into `model_fn`. Keys are names of parameters, values are basic python types. feature_engineering_fn: Feature engineering function. Takes features and labels which are the output of `input_fn` and returns features and labels which will be fed into `model_fn`. Please check `model_fn` for a definition of features and labels. Raises: ValueError: parameters of `model_fn` don't match `params`.
tensorflow/contrib/learn/python/learn/estimators/estimator.py
__init__
PedroLelis/tensorflow
1
python
def __init__(self, model_fn=None, model_dir=None, config=None, params=None, feature_engineering_fn=None): "Constructs an `Estimator` instance.\n\n Args:\n model_fn: Model function. Follows the signature:\n * Args:\n * `features` are single `Tensor` or `dict` of `Tensor`s\n (depending on data passed to `fit`),\n * `labels` are `Tensor` or `dict` of `Tensor`s (for multi-head\n models). If mode is `ModeKeys.INFER`, `labels=None` will be\n passed. If the `model_fn`'s signature does not accept\n `mode`, the `model_fn` must still be able to handle\n `labels=None`.\n * `mode` specifies if this is training, evaluation or\n prediction. See `ModeKeys`.\n * `params` is a `dict` of hyperparameters. Will receive what\n is passed to Estimator in the `params` parameter. This allows\n configuring Estimators from hyperparameter tuning.\n\n * Returns:\n `ModelFnOps`\n\n Also supports a legacy signature which returns a tuple of:\n\n * predictions: `Tensor`, `SparseTensor` or dictionary of same.\n Can also be any type that is convertible to a `Tensor` or\n `SparseTensor`, or dictionary of same.\n * loss: Scalar loss `Tensor`.\n * train_op: Training update `Tensor` or `Operation`.\n\n Supports the following three signatures for the function:\n\n * `(features, labels) -> (predictions, loss, train_op)`\n * `(features, labels, mode) -> (predictions, loss, train_op)`\n * `(features, labels, mode, params) -> (predictions, loss, train_op)`\n\n model_dir: Directory to save model parameters, graph, etc. This can\n also be used to load checkpoints from the directory into an estimator to\n continue training a previously saved model.\n config: Configuration object.\n params: `dict` of hyperparameters that will be passed into `model_fn`.\n Keys are names of parameters, values are basic python types.\n feature_engineering_fn: Feature engineering function. Takes features and\n labels which are the output of `input_fn` and\n returns features and labels which will be fed\n into `model_fn`. Please check `model_fn` for\n a definition of features and labels.\n\n Raises:\n ValueError: parameters of `model_fn` don't match `params`.\n " super(Estimator, self).__init__(model_dir=model_dir, config=config) if (model_fn is not None): model_fn_args = _get_arguments(model_fn) if ((params is not None) and ('params' not in model_fn_args)): raise ValueError(("Estimator's model_fn (%s) has less than 4 arguments, but not None params (%s) are passed." % (model_fn, params))) if ((params is None) and ('params' in model_fn_args)): logging.warning("Estimator's model_fn (%s) includes params argument, but params are not passed to Estimator.", model_fn) self._model_fn = model_fn self.params = params self._feature_engineering_fn = (feature_engineering_fn or _identity_feature_engineering_fn)
def __init__(self, model_fn=None, model_dir=None, config=None, params=None, feature_engineering_fn=None): "Constructs an `Estimator` instance.\n\n Args:\n model_fn: Model function. Follows the signature:\n * Args:\n * `features` are single `Tensor` or `dict` of `Tensor`s\n (depending on data passed to `fit`),\n * `labels` are `Tensor` or `dict` of `Tensor`s (for multi-head\n models). If mode is `ModeKeys.INFER`, `labels=None` will be\n passed. If the `model_fn`'s signature does not accept\n `mode`, the `model_fn` must still be able to handle\n `labels=None`.\n * `mode` specifies if this is training, evaluation or\n prediction. See `ModeKeys`.\n * `params` is a `dict` of hyperparameters. Will receive what\n is passed to Estimator in the `params` parameter. This allows\n configuring Estimators from hyperparameter tuning.\n\n * Returns:\n `ModelFnOps`\n\n Also supports a legacy signature which returns a tuple of:\n\n * predictions: `Tensor`, `SparseTensor` or dictionary of same.\n Can also be any type that is convertible to a `Tensor` or\n `SparseTensor`, or dictionary of same.\n * loss: Scalar loss `Tensor`.\n * train_op: Training update `Tensor` or `Operation`.\n\n Supports the following three signatures for the function:\n\n * `(features, labels) -> (predictions, loss, train_op)`\n * `(features, labels, mode) -> (predictions, loss, train_op)`\n * `(features, labels, mode, params) -> (predictions, loss, train_op)`\n\n model_dir: Directory to save model parameters, graph, etc. This can\n also be used to load checkpoints from the directory into an estimator to\n continue training a previously saved model.\n config: Configuration object.\n params: `dict` of hyperparameters that will be passed into `model_fn`.\n Keys are names of parameters, values are basic python types.\n feature_engineering_fn: Feature engineering function. Takes features and\n labels which are the output of `input_fn` and\n returns features and labels which will be fed\n into `model_fn`. Please check `model_fn` for\n a definition of features and labels.\n\n Raises:\n ValueError: parameters of `model_fn` don't match `params`.\n " super(Estimator, self).__init__(model_dir=model_dir, config=config) if (model_fn is not None): model_fn_args = _get_arguments(model_fn) if ((params is not None) and ('params' not in model_fn_args)): raise ValueError(("Estimator's model_fn (%s) has less than 4 arguments, but not None params (%s) are passed." % (model_fn, params))) if ((params is None) and ('params' in model_fn_args)): logging.warning("Estimator's model_fn (%s) includes params argument, but params are not passed to Estimator.", model_fn) self._model_fn = model_fn self.params = params self._feature_engineering_fn = (feature_engineering_fn or _identity_feature_engineering_fn)<|docstring|>Constructs an `Estimator` instance. Args: model_fn: Model function. Follows the signature: * Args: * `features` are single `Tensor` or `dict` of `Tensor`s (depending on data passed to `fit`), * `labels` are `Tensor` or `dict` of `Tensor`s (for multi-head models). If mode is `ModeKeys.INFER`, `labels=None` will be passed. If the `model_fn`'s signature does not accept `mode`, the `model_fn` must still be able to handle `labels=None`. * `mode` specifies if this is training, evaluation or prediction. See `ModeKeys`. * `params` is a `dict` of hyperparameters. Will receive what is passed to Estimator in the `params` parameter. This allows configuring Estimators from hyperparameter tuning. * Returns: `ModelFnOps` Also supports a legacy signature which returns a tuple of: * predictions: `Tensor`, `SparseTensor` or dictionary of same. Can also be any type that is convertible to a `Tensor` or `SparseTensor`, or dictionary of same. * loss: Scalar loss `Tensor`. * train_op: Training update `Tensor` or `Operation`. Supports the following three signatures for the function: * `(features, labels) -> (predictions, loss, train_op)` * `(features, labels, mode) -> (predictions, loss, train_op)` * `(features, labels, mode, params) -> (predictions, loss, train_op)` model_dir: Directory to save model parameters, graph, etc. This can also be used to load checkpoints from the directory into an estimator to continue training a previously saved model. config: Configuration object. params: `dict` of hyperparameters that will be passed into `model_fn`. Keys are names of parameters, values are basic python types. feature_engineering_fn: Feature engineering function. Takes features and labels which are the output of `input_fn` and returns features and labels which will be fed into `model_fn`. Please check `model_fn` for a definition of features and labels. Raises: ValueError: parameters of `model_fn` don't match `params`.<|endoftext|>
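A minimal sketch of a model_fn matching the documented legacy signature (features, labels, mode, params) -> (predictions, loss, train_op); the 'x' feature key, tensor shapes, and learning rate are assumptions, not part of the record.

import tensorflow as tf

def my_model_fn(features, labels, mode, params):
    # Single linear unit: predictions = features['x'] * w + b.
    w = tf.Variable(tf.zeros([1]), name='weight')
    b = tf.Variable(tf.zeros([1]), name='bias')
    predictions = features['x'] * w + b
    loss = tf.reduce_mean(tf.square(predictions - labels))
    train_op = tf.train.GradientDescentOptimizer(params['learning_rate']).minimize(
        loss, global_step=tf.contrib.framework.get_global_step())
    return predictions, loss, train_op

estimator = Estimator(model_fn=my_model_fn, params={'learning_rate': 0.01})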
76e9ac00d6e695f00e53470ececca4a51de3306e73a6ce9f6c8ef6ee1c52f4b6
def _call_model_fn(self, features, labels, mode): 'Calls model function with support of 2, 3 or 4 arguments.\n\n Args:\n features: features dict.\n labels: labels dict.\n mode: ModeKeys\n\n Returns:\n A `ModelFnOps` object. If model_fn returns a tuple, wraps them up in a\n `ModelFnOps` object.\n\n Raises:\n ValueError: if model_fn returns invalid objects.\n ' (features, labels) = self._feature_engineering_fn(features, labels) model_fn_args = _get_arguments(self._model_fn) if ('mode' in model_fn_args): if ('params' in model_fn_args): model_fn_results = self._model_fn(features, labels, mode=mode, params=self.params) else: model_fn_results = self._model_fn(features, labels, mode=mode) else: model_fn_results = self._model_fn(features, labels) if isinstance(model_fn_results, ModelFnOps): return model_fn_results if (len(model_fn_results) != 3): raise ValueError('Unrecognized value returned by model_fn, please return ModelFnOps.') return ModelFnOps(mode=mode, predictions=model_fn_results[0], loss=model_fn_results[1], train_op=model_fn_results[2])
Calls model function with support of 2, 3 or 4 arguments. Args: features: features dict. labels: labels dict. mode: ModeKeys Returns: A `ModelFnOps` object. If model_fn returns a tuple, wraps them up in a `ModelFnOps` object. Raises: ValueError: if model_fn returns invalid objects.
tensorflow/contrib/learn/python/learn/estimators/estimator.py
_call_model_fn
PedroLelis/tensorflow
1
python
def _call_model_fn(self, features, labels, mode): 'Calls model function with support of 2, 3 or 4 arguments.\n\n Args:\n features: features dict.\n labels: labels dict.\n mode: ModeKeys\n\n Returns:\n A `ModelFnOps` object. If model_fn returns a tuple, wraps them up in a\n `ModelFnOps` object.\n\n Raises:\n ValueError: if model_fn returns invalid objects.\n ' (features, labels) = self._feature_engineering_fn(features, labels) model_fn_args = _get_arguments(self._model_fn) if ('mode' in model_fn_args): if ('params' in model_fn_args): model_fn_results = self._model_fn(features, labels, mode=mode, params=self.params) else: model_fn_results = self._model_fn(features, labels, mode=mode) else: model_fn_results = self._model_fn(features, labels) if isinstance(model_fn_results, ModelFnOps): return model_fn_results if (len(model_fn_results) != 3): raise ValueError('Unrecognized value returned by model_fn, please return ModelFnOps.') return ModelFnOps(mode=mode, predictions=model_fn_results[0], loss=model_fn_results[1], train_op=model_fn_results[2])
def _call_model_fn(self, features, labels, mode): 'Calls model function with support of 2, 3 or 4 arguments.\n\n Args:\n features: features dict.\n labels: labels dict.\n mode: ModeKeys\n\n Returns:\n A `ModelFnOps` object. If model_fn returns a tuple, wraps them up in a\n `ModelFnOps` object.\n\n Raises:\n ValueError: if model_fn returns invalid objects.\n ' (features, labels) = self._feature_engineering_fn(features, labels) model_fn_args = _get_arguments(self._model_fn) if ('mode' in model_fn_args): if ('params' in model_fn_args): model_fn_results = self._model_fn(features, labels, mode=mode, params=self.params) else: model_fn_results = self._model_fn(features, labels, mode=mode) else: model_fn_results = self._model_fn(features, labels) if isinstance(model_fn_results, ModelFnOps): return model_fn_results if (len(model_fn_results) != 3): raise ValueError('Unrecognized value returned by model_fn, please return ModelFnOps.') return ModelFnOps(mode=mode, predictions=model_fn_results[0], loss=model_fn_results[1], train_op=model_fn_results[2])<|docstring|>Calls model function with support of 2, 3 or 4 arguments. Args: features: features dict. labels: labels dict. mode: ModeKeys Returns: A `ModelFnOps` object. If model_fn returns a tuple, wraps them up in a `ModelFnOps` object. Raises: ValueError: if model_fn returns invalid objects.<|endoftext|>
64226145ac77bda4c279742eb2fefd2ac471d72659ab368d38196bfbaf9ef6be
def _get_train_ops(self, features, labels): 'Method that builds model graph and returns trainer ops.\n\n Expected to be overridden by sub-classes that require custom support.\n This implementation uses the `model_fn` passed as a parameter to the constructor to\n build the model.\n\n Args:\n features: `Tensor` or `dict` of `Tensor` objects.\n labels: `Tensor` or `dict` of `Tensor` objects.\n\n Returns:\n `ModelFnOps` object.\n ' return self._call_model_fn(features, labels, ModeKeys.TRAIN)
Method that builds model graph and returns trainer ops. Expected to be overridden by sub-classes that require custom support. This implementation uses the `model_fn` passed as a parameter to the constructor to build the model. Args: features: `Tensor` or `dict` of `Tensor` objects. labels: `Tensor` or `dict` of `Tensor` objects. Returns: `ModelFnOps` object.
tensorflow/contrib/learn/python/learn/estimators/estimator.py
_get_train_ops
PedroLelis/tensorflow
1
python
def _get_train_ops(self, features, labels): 'Method that builds model graph and returns trainer ops.\n\n Expected to be overridden by sub-classes that require custom support.\n This implementation uses the `model_fn` passed as a parameter to the constructor to\n build the model.\n\n Args:\n features: `Tensor` or `dict` of `Tensor` objects.\n labels: `Tensor` or `dict` of `Tensor` objects.\n\n Returns:\n `ModelFnOps` object.\n ' return self._call_model_fn(features, labels, ModeKeys.TRAIN)
def _get_train_ops(self, features, labels): 'Method that builds model graph and returns trainer ops.\n\n Expected to be overridden by sub-classes that require custom support.\n This implementation uses the `model_fn` passed as a parameter to the constructor to\n build the model.\n\n Args:\n features: `Tensor` or `dict` of `Tensor` objects.\n labels: `Tensor` or `dict` of `Tensor` objects.\n\n Returns:\n `ModelFnOps` object.\n ' return self._call_model_fn(features, labels, ModeKeys.TRAIN)<|docstring|>Method that builds model graph and returns trainer ops. Expected to be overridden by sub-classes that require custom support. This implementation uses the `model_fn` passed as a parameter to the constructor to build the model. Args: features: `Tensor` or `dict` of `Tensor` objects. labels: `Tensor` or `dict` of `Tensor` objects. Returns: `ModelFnOps` object.<|endoftext|>
25815dd813d1eef73cae50461f4d41e14f6081b4408e0be28c168e12676a59fb
def _get_eval_ops(self, features, labels, metrics): "Method that builds model graph and returns evaluation ops.\n\n Expected to be overridden by sub-classes that require custom support.\n This implementation uses the `model_fn` passed as a parameter to the constructor to\n build the model.\n\n Args:\n features: `Tensor` or `dict` of `Tensor` objects.\n labels: `Tensor` or `dict` of `Tensor` objects.\n metrics: Dict of metrics to run. If None, the default metric functions\n are used; if {}, no metrics are used. Otherwise, `metrics` should map\n friendly names for the metric to a `MetricSpec` object defining which\n model outputs to evaluate against which labels with which metric\n function. Metric ops should support streaming, e.g., returning\n update_op and value tensors. See more details in\n `../../../../metrics/python/metrics/ops/streaming_metrics.py` and\n `../metric_spec.py`.\n\n Returns:\n `ModelFnOps` object.\n\n Raises:\n ValueError: if `metrics` don't match `labels`.\n " model_fn_ops = self._call_model_fn(features, labels, ModeKeys.EVAL) if metrics: model_fn_ops.eval_metric_ops.update(_make_metrics_ops(metrics, features, labels, model_fn_ops.predictions)) if (metric_key.MetricKey.LOSS not in model_fn_ops.eval_metric_ops): model_fn_ops.eval_metric_ops[metric_key.MetricKey.LOSS] = metrics_lib.streaming_mean(model_fn_ops.loss) return model_fn_ops
Method that builds model graph and returns evaluation ops. Expected to be overridden by sub-classes that require custom support. This implementation uses the `model_fn` passed as a parameter to the constructor to build the model. Args: features: `Tensor` or `dict` of `Tensor` objects. labels: `Tensor` or `dict` of `Tensor` objects. metrics: Dict of metrics to run. If None, the default metric functions are used; if {}, no metrics are used. Otherwise, `metrics` should map friendly names for the metric to a `MetricSpec` object defining which model outputs to evaluate against which labels with which metric function. Metric ops should support streaming, e.g., returning update_op and value tensors. See more details in `../../../../metrics/python/metrics/ops/streaming_metrics.py` and `../metric_spec.py`. Returns: `ModelFnOps` object. Raises: ValueError: if `metrics` don't match `labels`.
tensorflow/contrib/learn/python/learn/estimators/estimator.py
_get_eval_ops
PedroLelis/tensorflow
1
python
def _get_eval_ops(self, features, labels, metrics): "Method that builds model graph and returns evaluation ops.\n\n Expected to be overridden by sub-classes that require custom support.\n This implementation uses the `model_fn` passed as a parameter to the constructor to\n build the model.\n\n Args:\n features: `Tensor` or `dict` of `Tensor` objects.\n labels: `Tensor` or `dict` of `Tensor` objects.\n metrics: Dict of metrics to run. If None, the default metric functions\n are used; if {}, no metrics are used. Otherwise, `metrics` should map\n friendly names for the metric to a `MetricSpec` object defining which\n model outputs to evaluate against which labels with which metric\n function. Metric ops should support streaming, e.g., returning\n update_op and value tensors. See more details in\n `../../../../metrics/python/metrics/ops/streaming_metrics.py` and\n `../metric_spec.py`.\n\n Returns:\n `ModelFnOps` object.\n\n Raises:\n ValueError: if `metrics` don't match `labels`.\n " model_fn_ops = self._call_model_fn(features, labels, ModeKeys.EVAL) if metrics: model_fn_ops.eval_metric_ops.update(_make_metrics_ops(metrics, features, labels, model_fn_ops.predictions)) if (metric_key.MetricKey.LOSS not in model_fn_ops.eval_metric_ops): model_fn_ops.eval_metric_ops[metric_key.MetricKey.LOSS] = metrics_lib.streaming_mean(model_fn_ops.loss) return model_fn_ops
def _get_eval_ops(self, features, labels, metrics): "Method that builds model graph and returns evaluation ops.\n\n Expected to be overridden by sub-classes that require custom support.\n This implementation uses the `model_fn` passed as a parameter to the constructor to\n build the model.\n\n Args:\n features: `Tensor` or `dict` of `Tensor` objects.\n labels: `Tensor` or `dict` of `Tensor` objects.\n metrics: Dict of metrics to run. If None, the default metric functions\n are used; if {}, no metrics are used. Otherwise, `metrics` should map\n friendly names for the metric to a `MetricSpec` object defining which\n model outputs to evaluate against which labels with which metric\n function. Metric ops should support streaming, e.g., returning\n update_op and value tensors. See more details in\n `../../../../metrics/python/metrics/ops/streaming_metrics.py` and\n `../metric_spec.py`.\n\n Returns:\n `ModelFnOps` object.\n\n Raises:\n ValueError: if `metrics` don't match `labels`.\n " model_fn_ops = self._call_model_fn(features, labels, ModeKeys.EVAL) if metrics: model_fn_ops.eval_metric_ops.update(_make_metrics_ops(metrics, features, labels, model_fn_ops.predictions)) if (metric_key.MetricKey.LOSS not in model_fn_ops.eval_metric_ops): model_fn_ops.eval_metric_ops[metric_key.MetricKey.LOSS] = metrics_lib.streaming_mean(model_fn_ops.loss) return model_fn_ops<|docstring|>Method that builds model graph and returns evaluation ops. Expected to be overridden by sub-classes that require custom support. This implementation uses the `model_fn` passed as a parameter to the constructor to build the model. Args: features: `Tensor` or `dict` of `Tensor` objects. labels: `Tensor` or `dict` of `Tensor` objects. metrics: Dict of metrics to run. If None, the default metric functions are used; if {}, no metrics are used. Otherwise, `metrics` should map friendly names for the metric to a `MetricSpec` object defining which model outputs to evaluate against which labels with which metric function. Metric ops should support streaming, e.g., returning update_op and value tensors. See more details in `../../../../metrics/python/metrics/ops/streaming_metrics.py` and `../metric_spec.py`. Returns: `ModelFnOps` object. Raises: ValueError: if `metrics` don't match `labels`.<|endoftext|>
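A sketch of a `metrics` dict in the documented shape, mapping a friendly name to a MetricSpec; the 'classes' prediction key is an assumption about the model's predictions dict.

from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.learn import MetricSpec

metrics = {
    # streaming_accuracy returns (value_op, update_op), as required above.
    'accuracy': MetricSpec(metric_fn=metrics_lib.streaming_accuracy,
                           prediction_key='classes'),
}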
8d13f19a35f3de85f14451421e48102a6481d9ec56df119acdaddc8dc2dfcddb
def _get_predict_ops(self, features): 'Method that builds model graph and returns prediction ops.\n\n Expected to be overridden by sub-classes that require custom support.\n This implementation uses the `model_fn` passed as a parameter to the constructor to\n build the model.\n\n Args:\n features: `Tensor` or `dict` of `Tensor` objects.\n\n Returns:\n `ModelFnOps` object.\n ' labels = tensor_signature.create_placeholders_from_signatures(self._labels_info) return self._call_model_fn(features, labels, ModeKeys.INFER)
Method that builds model graph and returns prediction ops. Expected to be overridden by sub-classes that require custom support. This implementation uses the `model_fn` passed as a parameter to the constructor to build the model. Args: features: `Tensor` or `dict` of `Tensor` objects. Returns: `ModelFnOps` object.
tensorflow/contrib/learn/python/learn/estimators/estimator.py
_get_predict_ops
PedroLelis/tensorflow
1
python
def _get_predict_ops(self, features): 'Method that builds model graph and returns prediction ops.\n\n Expected to be overridden by sub-classes that require custom support.\n This implementation uses the `model_fn` passed as a parameter to the constructor to\n build the model.\n\n Args:\n features: `Tensor` or `dict` of `Tensor` objects.\n\n Returns:\n `ModelFnOps` object.\n ' labels = tensor_signature.create_placeholders_from_signatures(self._labels_info) return self._call_model_fn(features, labels, ModeKeys.INFER)
def _get_predict_ops(self, features): 'Method that builds model graph and returns prediction ops.\n\n Expected to be overridden by sub-classes that require custom support.\n This implementation uses the `model_fn` passed as a parameter to the constructor to\n build the model.\n\n Args:\n features: `Tensor` or `dict` of `Tensor` objects.\n\n Returns:\n `ModelFnOps` object.\n ' labels = tensor_signature.create_placeholders_from_signatures(self._labels_info) return self._call_model_fn(features, labels, ModeKeys.INFER)<|docstring|>Method that builds model graph and returns prediction ops. Expected to be overridden by sub-classes that require custom support. This implementation uses the `model_fn` passed as a parameter to the constructor to build the model. Args: features: `Tensor` or `dict` of `Tensor` objects. Returns: `ModelFnOps` object.<|endoftext|>
54713279f7b567d4b275ce70fcfb3b9fc1be225c85d554b1b54545b97f64f0ee
def setUp(self): '\n Set up the app for the following tests\n ' settings.DEBUG = True call_command('init_proj_config') self.factory = RequestFactory() data = {'access_token': 'myaccesstoken', 'refresh_token': 'bar', 'expires_in': 36000} self.oh_member = OpenHumansMember.create(oh_id='1234', data=data) self.oh_member.save() self.user = self.oh_member.user self.user.save()
Set up the app for the following tests
main/tests/tests_management.py
setUp
jhdulaney/oh-ubiome-source
0
python
def setUp(self): '\n \n ' settings.DEBUG = True call_command('init_proj_config') self.factory = RequestFactory() data = {'access_token': 'myaccesstoken', 'refresh_token': 'bar', 'expires_in': 36000} self.oh_member = OpenHumansMember.create(oh_id='1234', data=data) self.oh_member.save() self.user = self.oh_member.user self.user.save()
def setUp(self): '\n \n ' settings.DEBUG = True call_command('init_proj_config') self.factory = RequestFactory() data = {'access_token': 'myaccesstoken', 'refresh_token': 'bar', 'expires_in': 36000} self.oh_member = OpenHumansMember.create(oh_id='1234', data=data) self.oh_member.save() self.user = self.oh_member.user self.user.save()<|docstring|>Set up the app for the following tests<|endoftext|>
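A hypothetical companion test exercising the fixture above; the method name is an assumption, and only the attributes created in setUp are used.

def test_member_created(self):
    # The OpenHumansMember built in setUp carries the fake oh_id and token data.
    self.assertEqual(self.oh_member.oh_id, '1234')
    self.assertEqual(self.oh_member.user, self.user)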
b3889f8804e41b8469f84c1e9201fd6e6f510786cf7a8c532fb26b8bbfd94a61
def __init__(self, class_size, pretrained_name='bert-base-chinese'): '\n Args: \n class_size: the final number of classes of the classification model, which determines the output dimension of the linear classifier\n pretrained_name: the pretrained BERT model to use\n ' super(BertSST2Model, self).__init__() self.bert = BertModel.from_pretrained(pretrained_name, return_dict=True) self.classifier = nn.Linear(768, class_size)
Args: class_size: the final number of classes of the classification model, which determines the output dimension of the linear classifier pretrained_name: the pretrained BERT model to use
bert-sst2/bert_sst2.py
__init__
yyxx1997/pytorch
1
python
def __init__(self, class_size, pretrained_name='bert-base-chinese'): '\n Args: \n class_size: the final number of classes of the classification model, which determines the output dimension of the linear classifier\n pretrained_name: the pretrained BERT model to use\n ' super(BertSST2Model, self).__init__() self.bert = BertModel.from_pretrained(pretrained_name, return_dict=True) self.classifier = nn.Linear(768, class_size)
def __init__(self, class_size, pretrained_name='bert-base-chinese'): '\n Args: \n class_size: the final number of classes of the classification model, which determines the output dimension of the linear classifier\n pretrained_name: the pretrained BERT model to use\n ' super(BertSST2Model, self).__init__() self.bert = BertModel.from_pretrained(pretrained_name, return_dict=True) self.classifier = nn.Linear(768, class_size)<|docstring|>Args: class_size: the final number of classes of the classification model, which determines the output dimension of the linear classifier pretrained_name: the pretrained BERT model to use<|endoftext|>
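A minimal forward-pass sketch for the model above; class_size=2 (positive/negative) and the sample sentence are assumptions, and the weights are fetched from the Hugging Face hub.

import torch
from transformers import BertTokenizer

model = BertSST2Model(class_size=2, pretrained_name='bert-base-chinese')
tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
inputs = tokenizer('这部电影很好看', return_tensors='pt')
with torch.no_grad():
    outputs = model.bert(**inputs)                    # return_dict=True output
    logits = model.classifier(outputs.pooler_output)  # shape (1, 2)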
bf8900b3df1a80eb91154514da71eae3fe8190390dbb2405bf80238ec1ba2cf3
def zipdir(dirpath, zipfileobj): 'does the work of writing data into our zipfile' for (root, dirs, files) in os.walk(dirpath): for file in files: print(os.path.join(root, file)) zipfileobj.write(os.path.join(root, file)) return None
does the work of writing data into our zipfile
ZipFileLearn/zip.file.py
zipdir
subash-kc/2022-01-04-Python
1
python
def zipdir(dirpath, zipfileobj): for (root, dirs, files) in os.walk(dirpath): for file in files: print(os.path.join(root, file)) zipfileobj.write(os.path.join(root, file)) return None
def zipdir(dirpath, zipfileobj): for (root, dirs, files) in os.walk(dirpath): for file in files: print(os.path.join(root, file)) zipfileobj.write(os.path.join(root, file)) return None<|docstring|>does the work of writing data into our zipfile<|endoftext|>
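A usage sketch for zipdir; the 'logs' directory and archive name are placeholders.

import os
import zipfile

with zipfile.ZipFile('logs.zip', 'w', zipfile.ZIP_DEFLATED) as zf:
    zipdir('logs', zf)  # walks 'logs' and writes every file into the archive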
6a59f3a2211627bbb077fcbcd3dcecefe9d017b78ab3aa0a4532bcaafa0e1470
def main(): 'called at runtime' dirpath = input('What directory are we archiving today? ') if os.path.isdir(dirpath): zippedfn = input('What should we call the finished archive? ') with zipfile.ZipFile(zippedfn, 'w', zipfile.ZIP_DEFLATED) as zipfileobj: zipdir(dirpath, zipfileobj) else: print('Run the script again when you have a valid directory to zip.')
called at runtime
ZipFileLearn/zip.file.py
main
subash-kc/2022-01-04-Python
1
python
def main(): dirpath = input('What directory are we archiving today? ') if os.path.isdir(dirpath): zippedfn = input('What should we call the finished archive? ') with zipfile.ZipFile(zippedfn, 'w', zipfile.ZIP_DEFLATED) as zipfileobj: zipdir(dirpath, zipfileobj) else: print('Run the script again when you have a valid directory to zip.')
def main(): dirpath = input('What directory are we archiving today? ') if os.path.isdir(dirpath): zippedfn = input('What should we call the finished archive? ') with zipfile.ZipFile(zippedfn, 'w', zipfile.ZIP_DEFLATED) as zipfileobj: zipdir(dirpath, zipfileobj) else: print('Run the script again when you have a valid directory to zip.')<|docstring|>called at runtime<|endoftext|>
74023bb7f4f9ff6f5dfc7c79f0ba28845f0240641efd3e218ab2ea62619c9856
@grok.adapter(icemac.addressbook.interfaces.IAddressBook) @grok.implementer(icemac.ab.calendar.interfaces.ICalendar) def calendar(address_book): 'Adapt the address book to its calendar.' return address_book.calendar
Adapt the address book to its calendar.
src/icemac/ab/calendar/calendar.py
calendar
icemac/icemac.ab.calendar
1
python
@grok.adapter(icemac.addressbook.interfaces.IAddressBook) @grok.implementer(icemac.ab.calendar.interfaces.ICalendar) def calendar(address_book): return address_book.calendar
@grok.adapter(icemac.addressbook.interfaces.IAddressBook) @grok.implementer(icemac.ab.calendar.interfaces.ICalendar) def calendar(address_book): return address_book.calendar<|docstring|>Adapt the address book to its calendar.<|endoftext|>
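With the adapter registered by grok, a calendar is obtained by calling the interface on an address book; `address_book` is an assumed IAddressBook instance.

calendar = icemac.ab.calendar.interfaces.ICalendar(address_book)
assert calendar is address_book.calendar  # the adapter simply returns this attribute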
41b8dc33141fd7e14e0e4d7e2a866806515c314a9243437ca507a7e27d9da300
def get_events_for_month(self, month, timezone=None): 'Get all events which belong to `month`.' timezone = self._timezone_name_to_timezone(timezone) midnight = time(0, 0, 0) start = timezone.localize(datetime.combine(month.firstOfMonth(), midnight)) end = timezone.localize(datetime.combine((month + 1).firstOfMonth(), midnight)) return self._get_events(start, end, timezone, categories=[])
Get all events which belong to `month`.
src/icemac/ab/calendar/calendar.py
get_events_for_month
icemac/icemac.ab.calendar
1
python
def get_events_for_month(self, month, timezone=None): timezone = self._timezone_name_to_timezone(timezone) midnight = time(0, 0, 0) start = timezone.localize(datetime.combine(month.firstOfMonth(), midnight)) end = timezone.localize(datetime.combine((month + 1).firstOfMonth(), midnight)) return self._get_events(start, end, timezone, categories=[])
def get_events_for_month(self, month, timezone=None): timezone = self._timezone_name_to_timezone(timezone) midnight = time(0, 0, 0) start = timezone.localize(datetime.combine(month.firstOfMonth(), midnight)) end = timezone.localize(datetime.combine((month + 1).firstOfMonth(), midnight)) return self._get_events(start, end, timezone, categories=[])<|docstring|>Get all events which belong to `month`.<|endoftext|>
acfbe1d21507f3583b2204d5a4a8bb6f6ffc292dc00916a6601653aab672d2ce
def get_events(self, start, end, timezone=None, categories=[]): 'Get all events between `start` and `end` with one of `categories`.\n\n `start` and `end` have to be datetime objects.\n `categories` is a list of category titles.\n `start` is part of the interval, but `end` is not.\n ' timezone = self._timezone_name_to_timezone(timezone) return self._get_events(start, end, timezone, categories)
Get all events between `start` and `end` with one of `categories`. `start` and `end` have to be datetime objects. `categories` is a list of category titles. `start` is part of the interval, but `end` is not.
src/icemac/ab/calendar/calendar.py
get_events
icemac/icemac.ab.calendar
1
python
def get_events(self, start, end, timezone=None, categories=[]): 'Get all events between `start` and `end` with one of `categories`.\n\n `start` and `end` have to be datetime objects.\n `categories` is a list of category titles.\n `start` is part of the interval, but `end` is not.\n ' timezone = self._timezone_name_to_timezone(timezone) return self._get_events(start, end, timezone, categories)
def get_events(self, start, end, timezone=None, categories=[]): 'Get all events between `start` and `end` with one of `categories`.\n\n `start` and `end` have to be datetime objects.\n `categories` is a list of category titles.\n `start` is part of the interval, but `end` is not.\n ' timezone = self._timezone_name_to_timezone(timezone) return self._get_events(start, end, timezone, categories)<|docstring|>Get all events between `start` and `end` with one of `categories`. `start` and `end` have to be datetime objects. `categories` is a list of category titles. `start` is part of the interval, but `end` is not.<|endoftext|>
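A hypothetical call matching the documented contract — timezone-aware datetimes, a timezone name, and a list of category titles; `calendar` and the 'birthday' category are assumed names.

from datetime import datetime
import pytz

tz = pytz.timezone('Europe/Berlin')
start = tz.localize(datetime(2024, 1, 1))
end = tz.localize(datetime(2024, 2, 1))  # end is not part of the interval
events = calendar.get_events(start, end, timezone='Europe/Berlin',
                             categories=['birthday'])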
52c63c51a30ea6dbe382d5d96096a1466698d295ee10c3dbb7725dff9d6f6ce6
def _get_events(self, start, end, timezone, categories): 'Get all events between `start` and `end`.\n\n `start` is part of the interval, but `end` is not.\n `categories` is a list of category titles.\n Only return events of the given `categories`.\n If `categories` is an empty list, do not restrict by category.\n ' recurring_events = zope.component.getUtility(icemac.ab.calendar.interfaces.IRecurringEvents).get_events(categories) recurred_events = [x.get_events(start, end, timezone) for x in recurring_events] events_map = {(x.category, x.in_timezone(timezone)): x for x in itertools.chain(*recurred_events)} single_events = self.query_single_events(start, end, categories=categories) sorted_single_events = sorted(single_events, key=(lambda x: int(x.deleted)), reverse=True) single_events_map = {(x.category, x.in_timezone(timezone)): x for x in sorted_single_events} events_map.update(single_events_map) return sorted((x for x in events_map.values() if (not x.deleted)), key=(lambda x: (x.in_timezone(timezone), icemac.addressbook.interfaces.ITitle(x.category, None))))
Get all events between `start` and `end`. `start` is part of the interval, but `end` is not. `categories` is a list of category titles. Only return events of the given `categories`. If `categories` is an empty list, do not restrict by category.
src/icemac/ab/calendar/calendar.py
_get_events
icemac/icemac.ab.calendar
1
python
def _get_events(self, start, end, timezone, categories): 'Get all events between `start` and `end`.\n\n `start` is part of the interval, but `end` is not.\n `categories` is a list of category titles.\n Only return events of the given `categories`.\n If `categories` is an empty list, do not restrict by category.\n ' recurring_events = zope.component.getUtility(icemac.ab.calendar.interfaces.IRecurringEvents).get_events(categories) recurred_events = [x.get_events(start, end, timezone) for x in recurring_events] events_map = {(x.category, x.in_timezone(timezone)): x for x in itertools.chain(*recurred_events)} single_events = self.query_single_events(start, end, categories=categories) sorted_single_events = sorted(single_events, key=(lambda x: int(x.deleted)), reverse=True) single_events_map = {(x.category, x.in_timezone(timezone)): x for x in sorted_single_events} events_map.update(single_events_map) return sorted((x for x in events_map.values() if (not x.deleted)), key=(lambda x: (x.in_timezone(timezone), icemac.addressbook.interfaces.ITitle(x.category, None))))
def _get_events(self, start, end, timezone, categories): 'Get all events between `start` and `end`.\n\n `start` is part of the interval, but `end` is not.\n `categories` is a list of category titles.\n Only return events of the given `categories`.\n If `categories` is an empty list, do not restrict by category.\n ' recurring_events = zope.component.getUtility(icemac.ab.calendar.interfaces.IRecurringEvents).get_events(categories) recurred_events = [x.get_events(start, end, timezone) for x in recurring_events] events_map = {(x.category, x.in_timezone(timezone)): x for x in itertools.chain(*recurred_events)} single_events = self.query_single_events(start, end, categories=categories) sorted_single_events = sorted(single_events, key=(lambda x: int(x.deleted)), reverse=True) single_events_map = {(x.category, x.in_timezone(timezone)): x for x in sorted_single_events} events_map.update(single_events_map) return sorted((x for x in events_map.values() if (not x.deleted)), key=(lambda x: (x.in_timezone(timezone), icemac.addressbook.interfaces.ITitle(x.category, None))))<|docstring|>Get all events between `start` and `end`. `start` is part of the interval, but `end` is not. `categories` is a list of category titles. Only return events of the given `categories`. If `categories` is an empty list, do not restrict by category.<|endoftext|>
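The deduplication strategy in _get_events deserves a distilled view: occurrences of recurring events and single events are keyed by (category, local datetime), single events overwrite recurring ones at the same slot, and sorting single events with deleted ones first lets a live duplicate overwrite its deleted twin before deleted events are filtered out. A simplified, self-contained illustration with hypothetical event objects:

import itertools

def merge_events(recurred_event_lists, single_events, key):
    # Recurring occurrences first; chain flattens the per-recurring-event lists.
    events = {key(e): e for e in itertools.chain(*recurred_event_lists)}
    # Singles second: deleted ones sort first, so live duplicates overwrite them.
    for e in sorted(single_events, key=lambda e: int(e.deleted), reverse=True):
        events[key(e)] = e
    return [e for e in events.values() if not e.deleted]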
88d47993df8166fb81148b98c9791cf937cfc548951053034a3b3b6334c04751
def _timezone_name_to_timezone(self, name): 'Return a timezone object. If `name` is None, return UTC.' if (name is None): timezone = pytz.utc else: timezone = pytz.timezone(name) return timezone
Return a timezone object. If `name` is None, return UTC.
src/icemac/ab/calendar/calendar.py
_timezone_name_to_timezone
icemac/icemac.ab.calendar
1
python
def _timezone_name_to_timezone(self, name): if (name is None): timezone = pytz.utc else: timezone = pytz.timezone(name) return timezone
def _timezone_name_to_timezone(self, name): if (name is None): timezone = pytz.utc else: timezone = pytz.timezone(name) return timezone<|docstring|>Return a timezone object. If `name` is None, return UTC.<|endoftext|>
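The helper's contract is small but strict: None maps to UTC, while an invalid name is not silently mapped anywhere, because pytz.timezone() raises pytz.UnknownTimeZoneError. A quick sketch of both paths (calendar is a placeholder instance):

import pytz

assert calendar._timezone_name_to_timezone(None) is pytz.utc
berlin = calendar._timezone_name_to_timezone('Europe/Berlin')
try:
    calendar._timezone_name_to_timezone('No/Such_Zone')
except pytz.UnknownTimeZoneError:
    pass  # bad names raise instead of falling back to UTC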
58f4030ca60fd5b994ae499595638151946d726f4ed71f2703a3229c94794698
def cancel_scheduled_docker_run_state_by_id(self, dataset_id, scheduled_id, **kwargs): 'cancel_scheduled_docker_run_state_by_id # noqa: E501\n\n Cancel a scheduled run. This will fail if the state of the scheduled run is no longer OPEN (e.g. when it is LOCKED) # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.cancel_scheduled_docker_run_state_by_id(dataset_id, scheduled_id, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param MongoObjectID dataset_id: ObjectId of the dataset (required)\n :param MongoObjectID scheduled_id: ObjectId of the docker worker run config (required)\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n ' kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.cancel_scheduled_docker_run_state_by_id_with_http_info(dataset_id, scheduled_id, **kwargs) else: data = self.cancel_scheduled_docker_run_state_by_id_with_http_info(dataset_id, scheduled_id, **kwargs) return data
cancel_scheduled_docker_run_state_by_id # noqa: E501 Cancel a scheduled run. This will fail if the state of the scheduled run is no longer OPEN (e.g. when it is LOCKED) # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.cancel_scheduled_docker_run_state_by_id(dataset_id, scheduled_id, async_req=True) >>> result = thread.get() :param async_req bool :param MongoObjectID dataset_id: ObjectId of the dataset (required) :param MongoObjectID scheduled_id: ObjectId of the docker worker run config (required) :return: None If the method is called asynchronously, returns the request thread.
lightly/openapi_generated/swagger_client/api/docker_api.py
cancel_scheduled_docker_run_state_by_id
dczifra/lightly
1
python
def cancel_scheduled_docker_run_state_by_id(self, dataset_id, scheduled_id, **kwargs): 'cancel_scheduled_docker_run_state_by_id # noqa: E501\n\n Cancel a scheduled run. This will fail if the state of the scheduled run is no longer OPEN (e.g. when it is LOCKED) # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.cancel_scheduled_docker_run_state_by_id(dataset_id, scheduled_id, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param MongoObjectID dataset_id: ObjectId of the dataset (required)\n :param MongoObjectID scheduled_id: ObjectId of the docker worker run config (required)\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n ' kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.cancel_scheduled_docker_run_state_by_id_with_http_info(dataset_id, scheduled_id, **kwargs) else: data = self.cancel_scheduled_docker_run_state_by_id_with_http_info(dataset_id, scheduled_id, **kwargs) return data
def cancel_scheduled_docker_run_state_by_id(self, dataset_id, scheduled_id, **kwargs): 'cancel_scheduled_docker_run_state_by_id # noqa: E501\n\n Cancel a scheduled run. This will fail if the state of the scheduled run is no longer OPEN (e.g. when it is LOCKED) # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.cancel_scheduled_docker_run_state_by_id(dataset_id, scheduled_id, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param MongoObjectID dataset_id: ObjectId of the dataset (required)\n :param MongoObjectID scheduled_id: ObjectId of the docker worker run config (required)\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n ' kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.cancel_scheduled_docker_run_state_by_id_with_http_info(dataset_id, scheduled_id, **kwargs) else: data = self.cancel_scheduled_docker_run_state_by_id_with_http_info(dataset_id, scheduled_id, **kwargs) return data<|docstring|>cancel_scheduled_docker_run_state_by_id # noqa: E501 Cancel a scheduled run. This will fail if the state of the scheduled run is no longer OPEN (e.g. when it is LOCKED) # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.cancel_scheduled_docker_run_state_by_id(dataset_id, scheduled_id, async_req=True) >>> result = thread.get() :param async_req bool :param MongoObjectID dataset_id: ObjectId of the dataset (required) :param MongoObjectID scheduled_id: ObjectId of the docker worker run config (required) :return: None If the method is called asynchronously, returns the request thread.<|endoftext|>
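The calling convention shown in this docstring recurs in every generated method below: without async_req the wrapper blocks and returns the deserialized result; with async_req=True it returns a thread-like handle whose get() blocks until the response arrives. A hedged sketch, with api, dataset_id, and scheduled_id as placeholders:

# Synchronous: blocks until the server responds (returns None for this endpoint).
api.cancel_scheduled_docker_run_state_by_id(dataset_id, scheduled_id)

# Asynchronous: returns immediately; .get() blocks until completion.
thread = api.cancel_scheduled_docker_run_state_by_id(dataset_id, scheduled_id, async_req=True)
result = thread.get()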
63f06886db2cae355b9ac9ebe59127fa260ea0bdc3845e3822c4e25b1a543fdb
def cancel_scheduled_docker_run_state_by_id_with_http_info(self, dataset_id, scheduled_id, **kwargs): 'cancel_scheduled_docker_run_state_by_id # noqa: E501\n\n Cancel a scheduled run. This will fail if the state of the scheduled run is no longer OPEN (e.g. when it is LOCKED) # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.cancel_scheduled_docker_run_state_by_id_with_http_info(dataset_id, scheduled_id, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param MongoObjectID dataset_id: ObjectId of the dataset (required)\n :param MongoObjectID scheduled_id: ObjectId of the docker worker run config (required)\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n ' all_params = ['dataset_id', 'scheduled_id'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for (key, val) in six.iteritems(params['kwargs']): if (key not in all_params): raise TypeError(("Got an unexpected keyword argument '%s' to method cancel_scheduled_docker_run_state_by_id" % key)) params[key] = val del params['kwargs'] if (self.api_client.client_side_validation and (('dataset_id' not in params) or (params['dataset_id'] is None))): raise ValueError('Missing the required parameter `dataset_id` when calling `cancel_scheduled_docker_run_state_by_id`') if (self.api_client.client_side_validation and (('scheduled_id' not in params) or (params['scheduled_id'] is None))): raise ValueError('Missing the required parameter `scheduled_id` when calling `cancel_scheduled_docker_run_state_by_id`') collection_formats = {} path_params = {} if ('dataset_id' in params): path_params['datasetId'] = params['dataset_id'] if ('scheduled_id' in params): path_params['scheduledId'] = params['scheduled_id'] query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept(['application/json']) auth_settings = ['ApiKeyAuth', 'auth0Bearer'] return self.api_client.call_api('/v1/datasets/{datasetId}/docker/worker/schedule/{scheduledId}', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
cancel_scheduled_docker_run_state_by_id # noqa: E501 Cancel a scheduled run. This will fail if the state of the scheduled run is no longer OPEN (e.g. when it is LOCKED) # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.cancel_scheduled_docker_run_state_by_id_with_http_info(dataset_id, scheduled_id, async_req=True) >>> result = thread.get() :param async_req bool :param MongoObjectID dataset_id: ObjectId of the dataset (required) :param MongoObjectID scheduled_id: ObjectId of the docker worker run config (required) :return: None If the method is called asynchronously, returns the request thread.
lightly/openapi_generated/swagger_client/api/docker_api.py
cancel_scheduled_docker_run_state_by_id_with_http_info
dczifra/lightly
1
python
def cancel_scheduled_docker_run_state_by_id_with_http_info(self, dataset_id, scheduled_id, **kwargs): 'cancel_scheduled_docker_run_state_by_id # noqa: E501\n\n Cancel a scheduled run. This will fail if the state of the scheduled run is no longer OPEN (e.g. when it is LOCKED) # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.cancel_scheduled_docker_run_state_by_id_with_http_info(dataset_id, scheduled_id, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param MongoObjectID dataset_id: ObjectId of the dataset (required)\n :param MongoObjectID scheduled_id: ObjectId of the docker worker run config (required)\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n ' all_params = ['dataset_id', 'scheduled_id'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for (key, val) in six.iteritems(params['kwargs']): if (key not in all_params): raise TypeError(("Got an unexpected keyword argument '%s' to method cancel_scheduled_docker_run_state_by_id" % key)) params[key] = val del params['kwargs'] if (self.api_client.client_side_validation and (('dataset_id' not in params) or (params['dataset_id'] is None))): raise ValueError('Missing the required parameter `dataset_id` when calling `cancel_scheduled_docker_run_state_by_id`') if (self.api_client.client_side_validation and (('scheduled_id' not in params) or (params['scheduled_id'] is None))): raise ValueError('Missing the required parameter `scheduled_id` when calling `cancel_scheduled_docker_run_state_by_id`') collection_formats = {} path_params = {} if ('dataset_id' in params): path_params['datasetId'] = params['dataset_id'] if ('scheduled_id' in params): path_params['scheduledId'] = params['scheduled_id'] query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept(['application/json']) auth_settings = ['ApiKeyAuth', 'auth0Bearer'] return self.api_client.call_api('/v1/datasets/{datasetId}/docker/worker/schedule/{scheduledId}', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
def cancel_scheduled_docker_run_state_by_id_with_http_info(self, dataset_id, scheduled_id, **kwargs): 'cancel_scheduled_docker_run_state_by_id # noqa: E501\n\n Cancel a scheduled run. This will fail if the state of the scheduled run is no longer OPEN (e.g. when it is LOCKED) # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.cancel_scheduled_docker_run_state_by_id_with_http_info(dataset_id, scheduled_id, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param MongoObjectID dataset_id: ObjectId of the dataset (required)\n :param MongoObjectID scheduled_id: ObjectId of the docker worker run config (required)\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n ' all_params = ['dataset_id', 'scheduled_id'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for (key, val) in six.iteritems(params['kwargs']): if (key not in all_params): raise TypeError(("Got an unexpected keyword argument '%s' to method cancel_scheduled_docker_run_state_by_id" % key)) params[key] = val del params['kwargs'] if (self.api_client.client_side_validation and (('dataset_id' not in params) or (params['dataset_id'] is None))): raise ValueError('Missing the required parameter `dataset_id` when calling `cancel_scheduled_docker_run_state_by_id`') if (self.api_client.client_side_validation and (('scheduled_id' not in params) or (params['scheduled_id'] is None))): raise ValueError('Missing the required parameter `scheduled_id` when calling `cancel_scheduled_docker_run_state_by_id`') collection_formats = {} path_params = {} if ('dataset_id' in params): path_params['datasetId'] = params['dataset_id'] if ('scheduled_id' in params): path_params['scheduledId'] = params['scheduled_id'] query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept(['application/json']) auth_settings = ['ApiKeyAuth', 'auth0Bearer'] return self.api_client.call_api('/v1/datasets/{datasetId}/docker/worker/schedule/{scheduledId}', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)<|docstring|>cancel_scheduled_docker_run_state_by_id # noqa: E501 Cancel a scheduled run. This will fail if the state of the scheduled run is no longer OPEN (e.g. when it is LOCKED) # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.cancel_scheduled_docker_run_state_by_id_with_http_info(dataset_id, scheduled_id, async_req=True) >>> result = thread.get() :param async_req bool :param MongoObjectID dataset_id: ObjectId of the dataset (required) :param MongoObjectID scheduled_id: ObjectId of the docker worker run config (required) :return: None If the method is called asynchronously, returns the request thread.<|endoftext|>
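The _with_http_info variant also honors the transport keywords the plain wrapper forwards, such as _request_timeout. In swagger-codegen Python clients generally, passing _return_http_data_only=False makes the call return a (data, status, headers) tuple; treat that tuple shape as an assumption here. Since this endpoint's response_type is None, data is None:

data, status, headers = api.cancel_scheduled_docker_run_state_by_id_with_http_info(
    dataset_id, scheduled_id,
    _return_http_data_only=False,  # assumed to add HTTP status and headers
    _request_timeout=30,           # seconds before the request is abandoned
)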
0dfec1952a75ff80e284e0469d03da308ea9a9ec0291f785f7ec7a93034d3419
def create_docker_run(self, body, **kwargs): 'create_docker_run # noqa: E501\n\n Creates a new docker run database entry. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.create_docker_run(body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param DockerRunCreateRequest body: (required)\n :return: CreateEntityResponse\n If the method is called asynchronously,\n returns the request thread.\n ' kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.create_docker_run_with_http_info(body, **kwargs) else: data = self.create_docker_run_with_http_info(body, **kwargs) return data
create_docker_run # noqa: E501 Creates a new docker run database entry. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_docker_run(body, async_req=True) >>> result = thread.get() :param async_req bool :param DockerRunCreateRequest body: (required) :return: CreateEntityResponse If the method is called asynchronously, returns the request thread.
lightly/openapi_generated/swagger_client/api/docker_api.py
create_docker_run
dczifra/lightly
1
python
def create_docker_run(self, body, **kwargs): 'create_docker_run # noqa: E501\n\n Creates a new docker run database entry. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.create_docker_run(body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param DockerRunCreateRequest body: (required)\n :return: CreateEntityResponse\n If the method is called asynchronously,\n returns the request thread.\n ' kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.create_docker_run_with_http_info(body, **kwargs) else: data = self.create_docker_run_with_http_info(body, **kwargs) return data
def create_docker_run(self, body, **kwargs): 'create_docker_run # noqa: E501\n\n Creates a new docker run database entry. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.create_docker_run(body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param DockerRunCreateRequest body: (required)\n :return: CreateEntityResponse\n If the method is called asynchronously,\n returns the request thread.\n ' kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.create_docker_run_with_http_info(body, **kwargs) else: data = self.create_docker_run_with_http_info(body, **kwargs) return data<|docstring|>create_docker_run # noqa: E501 Creates a new docker run database entry. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_docker_run(body, async_req=True) >>> result = thread.get() :param async_req bool :param DockerRunCreateRequest body: (required) :return: CreateEntityResponse If the method is called asynchronously, returns the request thread.<|endoftext|>
222489d641c656861048e0a89864bda8012df7981ef1d8d0f7c11612a92329a6
def create_docker_run_with_http_info(self, body, **kwargs): 'create_docker_run # noqa: E501\n\n Creates a new docker run database entry. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.create_docker_run_with_http_info(body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param DockerRunCreateRequest body: (required)\n :return: CreateEntityResponse\n If the method is called asynchronously,\n returns the request thread.\n ' all_params = ['body'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for (key, val) in six.iteritems(params['kwargs']): if (key not in all_params): raise TypeError(("Got an unexpected keyword argument '%s' to method create_docker_run" % key)) params[key] = val del params['kwargs'] if (self.api_client.client_side_validation and (('body' not in params) or (params['body'] is None))): raise ValueError('Missing the required parameter `body` when calling `create_docker_run`') collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if ('body' in params): body_params = params['body'] header_params['Accept'] = self.api_client.select_header_accept(['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json']) auth_settings = ['ApiKeyAuth', 'auth0Bearer'] return self.api_client.call_api('/v1/docker/runs', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='CreateEntityResponse', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
create_docker_run # noqa: E501 Creates a new docker run database entry. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_docker_run_with_http_info(body, async_req=True) >>> result = thread.get() :param async_req bool :param DockerRunCreateRequest body: (required) :return: CreateEntityResponse If the method is called asynchronously, returns the request thread.
lightly/openapi_generated/swagger_client/api/docker_api.py
create_docker_run_with_http_info
dczifra/lightly
1
python
def create_docker_run_with_http_info(self, body, **kwargs): 'create_docker_run # noqa: E501\n\n Creates a new docker run database entry. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.create_docker_run_with_http_info(body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param DockerRunCreateRequest body: (required)\n :return: CreateEntityResponse\n If the method is called asynchronously,\n returns the request thread.\n ' all_params = ['body'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for (key, val) in six.iteritems(params['kwargs']): if (key not in all_params): raise TypeError(("Got an unexpected keyword argument '%s' to method create_docker_run" % key)) params[key] = val del params['kwargs'] if (self.api_client.client_side_validation and (('body' not in params) or (params['body'] is None))): raise ValueError('Missing the required parameter `body` when calling `create_docker_run`') collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if ('body' in params): body_params = params['body'] header_params['Accept'] = self.api_client.select_header_accept(['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json']) auth_settings = ['ApiKeyAuth', 'auth0Bearer'] return self.api_client.call_api('/v1/docker/runs', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='CreateEntityResponse', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
def create_docker_run_with_http_info(self, body, **kwargs): 'create_docker_run # noqa: E501\n\n Creates a new docker run database entry. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.create_docker_run_with_http_info(body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param DockerRunCreateRequest body: (required)\n :return: CreateEntityResponse\n If the method is called asynchronously,\n returns the request thread.\n ' all_params = ['body'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for (key, val) in six.iteritems(params['kwargs']): if (key not in all_params): raise TypeError(("Got an unexpected keyword argument '%s' to method create_docker_run" % key)) params[key] = val del params['kwargs'] if (self.api_client.client_side_validation and (('body' not in params) or (params['body'] is None))): raise ValueError('Missing the required parameter `body` when calling `create_docker_run`') collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if ('body' in params): body_params = params['body'] header_params['Accept'] = self.api_client.select_header_accept(['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json']) auth_settings = ['ApiKeyAuth', 'auth0Bearer'] return self.api_client.call_api('/v1/docker/runs', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='CreateEntityResponse', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)<|docstring|>create_docker_run # noqa: E501 Creates a new docker run database entry. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_docker_run_with_http_info(body, async_req=True) >>> result = thread.get() :param async_req bool :param DockerRunCreateRequest body: (required) :return: CreateEntityResponse If the method is called asynchronously, returns the request thread.<|endoftext|>
ab5bed2ce416023a8d07d7ffdc8c6809ecf0331897fb096757aa563733192bef
def create_docker_run_scheduled_by_dataset_id(self, body, dataset_id, **kwargs): "create_docker_run_scheduled_by_dataset_id # noqa: E501\n\n Schedule a docker run by dataset id. With docker runs it's possible to process unlabeled images from a datasource and use active learning to select the most relevant samples for further processing and visualization in the web app # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.create_docker_run_scheduled_by_dataset_id(body, dataset_id, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param DockerRunScheduledCreateRequest body: (required)\n :param MongoObjectID dataset_id: ObjectId of the dataset (required)\n :return: CreateEntityResponse\n If the method is called asynchronously,\n returns the request thread.\n " kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.create_docker_run_scheduled_by_dataset_id_with_http_info(body, dataset_id, **kwargs) else: data = self.create_docker_run_scheduled_by_dataset_id_with_http_info(body, dataset_id, **kwargs) return data
create_docker_run_scheduled_by_dataset_id # noqa: E501 Schedule a docker run by dataset id. With docker runs it's possible to process unlabeled images from a datasource and use active learning to select the most relevant samples for further processing and visualization in the web app # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_docker_run_scheduled_by_dataset_id(body, dataset_id, async_req=True) >>> result = thread.get() :param async_req bool :param DockerRunScheduledCreateRequest body: (required) :param MongoObjectID dataset_id: ObjectId of the dataset (required) :return: CreateEntityResponse If the method is called asynchronously, returns the request thread.
lightly/openapi_generated/swagger_client/api/docker_api.py
create_docker_run_scheduled_by_dataset_id
dczifra/lightly
1
python
def create_docker_run_scheduled_by_dataset_id(self, body, dataset_id, **kwargs): "create_docker_run_scheduled_by_dataset_id # noqa: E501\n\n Schedule a docker run by dataset id. With docker runs it's possible to process unlabeled images from a datasource and use active learning to select the most relevant samples for further processing and visualization in the web app # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.create_docker_run_scheduled_by_dataset_id(body, dataset_id, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param DockerRunScheduledCreateRequest body: (required)\n :param MongoObjectID dataset_id: ObjectId of the dataset (required)\n :return: CreateEntityResponse\n If the method is called asynchronously,\n returns the request thread.\n " kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.create_docker_run_scheduled_by_dataset_id_with_http_info(body, dataset_id, **kwargs) else: data = self.create_docker_run_scheduled_by_dataset_id_with_http_info(body, dataset_id, **kwargs) return data
def create_docker_run_scheduled_by_dataset_id(self, body, dataset_id, **kwargs): "create_docker_run_scheduled_by_dataset_id # noqa: E501\n\n Schedule a docker run by dataset id. With docker runs it's possible to process unlabeled images from a datasource and use active learning to select the most relevant samples for further processing and visualization in the web app # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.create_docker_run_scheduled_by_dataset_id(body, dataset_id, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param DockerRunScheduledCreateRequest body: (required)\n :param MongoObjectID dataset_id: ObjectId of the dataset (required)\n :return: CreateEntityResponse\n If the method is called asynchronously,\n returns the request thread.\n " kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.create_docker_run_scheduled_by_dataset_id_with_http_info(body, dataset_id, **kwargs) else: data = self.create_docker_run_scheduled_by_dataset_id_with_http_info(body, dataset_id, **kwargs) return data<|docstring|>create_docker_run_scheduled_by_dataset_id # noqa: E501 Schedule a docker run by dataset id. With docker runs it's possible to process unlabeled images from a datasource and use active learning to select the most relevant samples for further processing and visualization in the web app # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_docker_run_scheduled_by_dataset_id(body, dataset_id, async_req=True) >>> result = thread.get() :param async_req bool :param DockerRunScheduledCreateRequest body: (required) :param MongoObjectID dataset_id: ObjectId of the dataset (required) :return: CreateEntityResponse If the method is called asynchronously, returns the request thread.<|endoftext|>
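A hedged end-to-end sketch of scheduling a run for a dataset. The import path follows the record's module layout, but the DockerRunScheduledCreateRequest constructor field (config_id) is an assumption, not something the record confirms:

from lightly.openapi_generated.swagger_client.models import DockerRunScheduledCreateRequest

# config_id below is an assumed field name; consult the generated model.
body = DockerRunScheduledCreateRequest(config_id=config_id)
response = api.create_docker_run_scheduled_by_dataset_id(body, dataset_id)
scheduled_id = response.id  # CreateEntityResponse; the `id` attribute is assumed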
23edf72cc01bc8915aeb6a9f757ec022c772285a65faf7a151ffddb4d02ed633
def create_docker_run_scheduled_by_dataset_id_with_http_info(self, body, dataset_id, **kwargs): "create_docker_run_scheduled_by_dataset_id # noqa: E501\n\n Schedule a docker run by dataset id. With docker runs it's possible to process unlabeled images from a datasource and use active learning to select the most relevant samples for further processing and visualization in the web app # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.create_docker_run_scheduled_by_dataset_id_with_http_info(body, dataset_id, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param DockerRunScheduledCreateRequest body: (required)\n :param MongoObjectID dataset_id: ObjectId of the dataset (required)\n :return: CreateEntityResponse\n If the method is called asynchronously,\n returns the request thread.\n " all_params = ['body', 'dataset_id'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for (key, val) in six.iteritems(params['kwargs']): if (key not in all_params): raise TypeError(("Got an unexpected keyword argument '%s' to method create_docker_run_scheduled_by_dataset_id" % key)) params[key] = val del params['kwargs'] if (self.api_client.client_side_validation and (('body' not in params) or (params['body'] is None))): raise ValueError('Missing the required parameter `body` when calling `create_docker_run_scheduled_by_dataset_id`') if (self.api_client.client_side_validation and (('dataset_id' not in params) or (params['dataset_id'] is None))): raise ValueError('Missing the required parameter `dataset_id` when calling `create_docker_run_scheduled_by_dataset_id`') collection_formats = {} path_params = {} if ('dataset_id' in params): path_params['datasetId'] = params['dataset_id'] query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if ('body' in params): body_params = params['body'] header_params['Accept'] = self.api_client.select_header_accept(['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json']) auth_settings = ['ApiKeyAuth', 'auth0Bearer'] return self.api_client.call_api('/v1/datasets/{datasetId}/docker/worker/schedule', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='CreateEntityResponse', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
create_docker_run_scheduled_by_dataset_id # noqa: E501 Schedule a docker run by dataset id. With docker runs it's possible to process unlabeled images from a datasource and use active learning to select the most relevant samples for further processing and visualization in the web app # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_docker_run_scheduled_by_dataset_id_with_http_info(body, dataset_id, async_req=True) >>> result = thread.get() :param async_req bool :param DockerRunScheduledCreateRequest body: (required) :param MongoObjectID dataset_id: ObjectId of the dataset (required) :return: CreateEntityResponse If the method is called asynchronously, returns the request thread.
lightly/openapi_generated/swagger_client/api/docker_api.py
create_docker_run_scheduled_by_dataset_id_with_http_info
dczifra/lightly
1
python
def create_docker_run_scheduled_by_dataset_id_with_http_info(self, body, dataset_id, **kwargs): "create_docker_run_scheduled_by_dataset_id # noqa: E501\n\n Schedule a docker run by dataset id. With docker runs it's possible to process unlabeled images from a datasource and use active learning to select the most relevant samples for further processing and visualization in the web app # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.create_docker_run_scheduled_by_dataset_id_with_http_info(body, dataset_id, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param DockerRunScheduledCreateRequest body: (required)\n :param MongoObjectID dataset_id: ObjectId of the dataset (required)\n :return: CreateEntityResponse\n If the method is called asynchronously,\n returns the request thread.\n " all_params = ['body', 'dataset_id'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for (key, val) in six.iteritems(params['kwargs']): if (key not in all_params): raise TypeError(("Got an unexpected keyword argument '%s' to method create_docker_run_scheduled_by_dataset_id" % key)) params[key] = val del params['kwargs'] if (self.api_client.client_side_validation and (('body' not in params) or (params['body'] is None))): raise ValueError('Missing the required parameter `body` when calling `create_docker_run_scheduled_by_dataset_id`') if (self.api_client.client_side_validation and (('dataset_id' not in params) or (params['dataset_id'] is None))): raise ValueError('Missing the required parameter `dataset_id` when calling `create_docker_run_scheduled_by_dataset_id`') collection_formats = {} path_params = {} if ('dataset_id' in params): path_params['datasetId'] = params['dataset_id'] query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if ('body' in params): body_params = params['body'] header_params['Accept'] = self.api_client.select_header_accept(['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json']) auth_settings = ['ApiKeyAuth', 'auth0Bearer'] return self.api_client.call_api('/v1/datasets/{datasetId}/docker/worker/schedule', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='CreateEntityResponse', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
def create_docker_run_scheduled_by_dataset_id_with_http_info(self, body, dataset_id, **kwargs): "create_docker_run_scheduled_by_dataset_id # noqa: E501\n\n Schedule a docker run by dataset id. With docker runs it's possible to process unlabeled images from a datasource and use active learning to select the most relevant samples for further processing and visualization in the web app # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.create_docker_run_scheduled_by_dataset_id_with_http_info(body, dataset_id, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param DockerRunScheduledCreateRequest body: (required)\n :param MongoObjectID dataset_id: ObjectId of the dataset (required)\n :return: CreateEntityResponse\n If the method is called asynchronously,\n returns the request thread.\n " all_params = ['body', 'dataset_id'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for (key, val) in six.iteritems(params['kwargs']): if (key not in all_params): raise TypeError(("Got an unexpected keyword argument '%s' to method create_docker_run_scheduled_by_dataset_id" % key)) params[key] = val del params['kwargs'] if (self.api_client.client_side_validation and (('body' not in params) or (params['body'] is None))): raise ValueError('Missing the required parameter `body` when calling `create_docker_run_scheduled_by_dataset_id`') if (self.api_client.client_side_validation and (('dataset_id' not in params) or (params['dataset_id'] is None))): raise ValueError('Missing the required parameter `dataset_id` when calling `create_docker_run_scheduled_by_dataset_id`') collection_formats = {} path_params = {} if ('dataset_id' in params): path_params['datasetId'] = params['dataset_id'] query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if ('body' in params): body_params = params['body'] header_params['Accept'] = self.api_client.select_header_accept(['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json']) auth_settings = ['ApiKeyAuth', 'auth0Bearer'] return self.api_client.call_api('/v1/datasets/{datasetId}/docker/worker/schedule', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='CreateEntityResponse', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)<|docstring|>create_docker_run_scheduled_by_dataset_id # noqa: E501 Schedule a docker run by dataset id. With docker runs it's possible to process unlabeled images from a datasource and use active learning to select the most relevant samples for further processing and visualization in the web app # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_docker_run_scheduled_by_dataset_id_with_http_info(body, dataset_id, async_req=True) >>> result = thread.get() :param async_req bool :param DockerRunScheduledCreateRequest body: (required) :param MongoObjectID dataset_id: ObjectId of the dataset (required) :return: CreateEntityResponse If the method is called asynchronously, returns the request thread.<|endoftext|>
bade6b664060137cb451bdd312c98e712fa298154b4660a223d8f4d6fdde41cf
def create_docker_worker_config(self, body, **kwargs): 'create_docker_worker_config # noqa: E501\n\n Creates a docker worker configuration. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.create_docker_worker_config(body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param DockerWorkerConfigCreateRequest body: (required)\n :return: CreateEntityResponse\n If the method is called asynchronously,\n returns the request thread.\n ' kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.create_docker_worker_config_with_http_info(body, **kwargs) else: data = self.create_docker_worker_config_with_http_info(body, **kwargs) return data
create_docker_worker_config # noqa: E501 Creates a docker worker configuration. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_docker_worker_config(body, async_req=True) >>> result = thread.get() :param async_req bool :param DockerWorkerConfigCreateRequest body: (required) :return: CreateEntityResponse If the method is called asynchronously, returns the request thread.
lightly/openapi_generated/swagger_client/api/docker_api.py
create_docker_worker_config
dczifra/lightly
1
python
def create_docker_worker_config(self, body, **kwargs): 'create_docker_worker_config # noqa: E501\n\n Creates a docker worker configuration. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.create_docker_worker_config(body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param DockerWorkerConfigCreateRequest body: (required)\n :return: CreateEntityResponse\n If the method is called asynchronously,\n returns the request thread.\n ' kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.create_docker_worker_config_with_http_info(body, **kwargs) else: data = self.create_docker_worker_config_with_http_info(body, **kwargs) return data
def create_docker_worker_config(self, body, **kwargs): 'create_docker_worker_config # noqa: E501\n\n Creates a docker worker configuration. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.create_docker_worker_config(body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param DockerWorkerConfigCreateRequest body: (required)\n :return: CreateEntityResponse\n If the method is called asynchronously,\n returns the request thread.\n ' kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.create_docker_worker_config_with_http_info(body, **kwargs) else: data = self.create_docker_worker_config_with_http_info(body, **kwargs) return data<|docstring|>create_docker_worker_config # noqa: E501 Creates a docker worker configuration. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_docker_worker_config(body, async_req=True) >>> result = thread.get() :param async_req bool :param DockerWorkerConfigCreateRequest body: (required) :return: CreateEntityResponse If the method is called asynchronously, returns the request thread.<|endoftext|>
846afe5b9ea502b855be42e89dce05b54dc015f7370d2ed89ebb40e9d605851e
def create_docker_worker_config_with_http_info(self, body, **kwargs): 'create_docker_worker_config # noqa: E501\n\n Creates a docker worker configuration. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.create_docker_worker_config_with_http_info(body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param DockerWorkerConfigCreateRequest body: (required)\n :return: CreateEntityResponse\n If the method is called asynchronously,\n returns the request thread.\n ' all_params = ['body'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for (key, val) in six.iteritems(params['kwargs']): if (key not in all_params): raise TypeError(("Got an unexpected keyword argument '%s' to method create_docker_worker_config" % key)) params[key] = val del params['kwargs'] if (self.api_client.client_side_validation and (('body' not in params) or (params['body'] is None))): raise ValueError('Missing the required parameter `body` when calling `create_docker_worker_config`') collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if ('body' in params): body_params = params['body'] header_params['Accept'] = self.api_client.select_header_accept(['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json']) auth_settings = ['ApiKeyAuth', 'auth0Bearer'] return self.api_client.call_api('/v1/docker/worker/config', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='CreateEntityResponse', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
create_docker_worker_config # noqa: E501 Creates a docker worker configuration. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_docker_worker_config_with_http_info(body, async_req=True) >>> result = thread.get() :param async_req bool :param DockerWorkerConfigCreateRequest body: (required) :return: CreateEntityResponse If the method is called asynchronously, returns the request thread.
lightly/openapi_generated/swagger_client/api/docker_api.py
create_docker_worker_config_with_http_info
dczifra/lightly
1
python
def create_docker_worker_config_with_http_info(self, body, **kwargs): 'create_docker_worker_config # noqa: E501\n\n Creates a docker worker configuration. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.create_docker_worker_config_with_http_info(body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param DockerWorkerConfigCreateRequest body: (required)\n :return: CreateEntityResponse\n If the method is called asynchronously,\n returns the request thread.\n ' all_params = ['body'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for (key, val) in six.iteritems(params['kwargs']): if (key not in all_params): raise TypeError(("Got an unexpected keyword argument '%s' to method create_docker_worker_config" % key)) params[key] = val del params['kwargs'] if (self.api_client.client_side_validation and (('body' not in params) or (params['body'] is None))): raise ValueError('Missing the required parameter `body` when calling `create_docker_worker_config`') collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if ('body' in params): body_params = params['body'] header_params['Accept'] = self.api_client.select_header_accept(['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json']) auth_settings = ['ApiKeyAuth', 'auth0Bearer'] return self.api_client.call_api('/v1/docker/worker/config', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='CreateEntityResponse', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
def create_docker_worker_config_with_http_info(self, body, **kwargs): 'create_docker_worker_config # noqa: E501\n\n Creates a docker worker configuration. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.create_docker_worker_config_with_http_info(body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param DockerWorkerConfigCreateRequest body: (required)\n :return: CreateEntityResponse\n If the method is called asynchronously,\n returns the request thread.\n ' all_params = ['body'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for (key, val) in six.iteritems(params['kwargs']): if (key not in all_params): raise TypeError(("Got an unexpected keyword argument '%s' to method create_docker_worker_config" % key)) params[key] = val del params['kwargs'] if (self.api_client.client_side_validation and (('body' not in params) or (params['body'] is None))): raise ValueError('Missing the required parameter `body` when calling `create_docker_worker_config`') collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if ('body' in params): body_params = params['body'] header_params['Accept'] = self.api_client.select_header_accept(['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json']) auth_settings = ['ApiKeyAuth', 'auth0Bearer'] return self.api_client.call_api('/v1/docker/worker/config', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='CreateEntityResponse', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)<|docstring|>create_docker_worker_config # noqa: E501 Creates a docker worker configuration. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_docker_worker_config_with_http_info(body, async_req=True) >>> result = thread.get() :param async_req bool :param DockerWorkerConfigCreateRequest body: (required) :return: CreateEntityResponse If the method is called asynchronously, returns the request thread.<|endoftext|>
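Finally, a sketch tying the two config records to the scheduling flow: a worker configuration is created once and its id can then be referenced when scheduling runs. The DockerWorkerConfigCreateRequest constructor argument is an assumption here, as is the `id` attribute on the response:

from lightly.openapi_generated.swagger_client.models import DockerWorkerConfigCreateRequest

# The `config` field name is an illustrative assumption.
config_body = DockerWorkerConfigCreateRequest(config=worker_config)
config_id = api.create_docker_worker_config(config_body).id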