body_hash (string, 64 chars) | body (string, 23-109k chars) | path (string, 4-198) | name (string, 1-115) | repository_name (string, 7-111) | repository_stars (float, 0-191k) | lang (1 class: python)
---|---|---|---|---|---|---
92d6350e992c38d4fd651c897653aae987a12c872857fd246ca91a723b522db1 | import os
import shutil

# calc_vals and hardness_plot are defined elsewhere in Hardness_Ratio.py.
def hardness_ratio(base_dir, name, obsid_0, stn_target, num_bins, output_dir, color_map, wcs_fits, bin_file):
    """Calculate hardness ratio and plot.

    params:
    base_dir -- path to base directory
    name -- name of cluster
    obsid_0 -- first obsid (doesn't actually matter which)
    stn_target -- target signal-to-noise ratio
    num_bins -- number of bins from WVT
    output_dir -- name of folder which contains region files for bins
    """
    hr_dir = base_dir + '/HR'
    if not os.path.exists(hr_dir):
        os.mkdir(hr_dir)
    os.chdir(hr_dir)
    hr_file = open('HR_' + str(stn_target) + '.txt', 'w+')
    hr_file.write('Bin,Hard_Counts,Soft_Counts,HR\n')
    for bin_i in range(int(num_bins)):
        # Copy this bin's region file from the repro directory into base_dir/HR.
        reg_file = base_dir + '/' + obsid_0 + '/repro/' + output_dir + str(bin_i) + '.reg'
        shutil.copy(reg_file, str(bin_i) + '.reg')
        reg_file = str(bin_i) + '.reg'
        hard, soft = calc_vals(wcs_fits, reg_file, str(bin_i))
        # Hardness ratio: (hard - soft) / (hard + soft).
        HR = (hard - soft) / (hard + soft)
        hr_file.write('%s,%s,%s,%s\n' % (bin_i, hard, soft, HR))
    hr_file.close()
    hardness_plot(bin_file, 'HR_' + str(stn_target) + '.txt', base_dir, name, color_map, stn_target, wcs_fits)
    return None | TemperatureMapPipeline/Hardness_Ratio.py | hardness_ratio | crhea93/AstronomyTools | 8 | python
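For reference, the per-bin value written to the output file is the standard band ratio HR = (hard - soft) / (hard + soft). A minimal sketch with made-up counts (the numbers are illustrative only):

hard, soft = 420.0, 180.0  # hypothetical hard- and soft-band counts for one bin
HR = (hard - soft) / (hard + soft)
print(HR)  # 0.4 -- positive values mean the bin is hard-band dominated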
c8fb87f77ec3f3d97a067964ecc0e69a0fba3b39c01289ddc00a145a0874a090 | def __init__(self, predict_fn, feature_names=None, feature_types=None, **kwargs):
    """Initializes class.

    Args:
        predict_fn: Function of blackbox that takes input, and returns prediction.
        feature_names: List of feature names.
        feature_types: List of feature types.
        **kwargs: Currently unused. Due for deprecation.
    """
    self.predict_fn = predict_fn
    self.feature_names = feature_names
    self.feature_types = feature_types
    self.kwargs = kwargs | python/interpret-core/interpret/perf/curve.py | __init__ | eddy-geek/interpret | 2,674 | python
1b0e1414e51ea27150705ae86553f31c1683f8a59ecee79a01ff516eae200dd1 | # Relies on curve.py module imports: numpy as np, sklearn.metrics, and
# interpret's gen_name_from_class / unify_data / unify_predict_fn helpers.
def explain_perf(self, X, y, name=None):
    """Produce precision-recall curves.

    Args:
        X: Numpy array for X to compare predict function against.
        y: Numpy vector for y to compare predict function against.
        name: User-defined explanation name.

    Returns:
        An explanation object.
    """
    if name is None:
        name = gen_name_from_class(self)
    X, y, self.feature_names, self.feature_types = unify_data(X, y, self.feature_names, self.feature_types, missing_data_allowed=True)
    predict_fn = unify_predict_fn(self.predict_fn, X)
    scores = predict_fn(X)
    precision, recall, thresh = precision_recall_curve(y, scores)
    ap = average_precision_score(y, scores)
    # Density of absolute residuals, binned with Doane's rule.
    abs_residuals = np.abs(y - scores)
    counts, values = np.histogram(abs_residuals, bins='doane')
    overall_dict = {'type': 'perf_curve', 'density': {'names': values, 'scores': counts}, 'scores': scores, 'x_values': recall, 'y_values': precision, 'threshold': thresh, 'auc': ap}
    internal_obj = {'overall': overall_dict, 'specific': None}
    return PRExplanation('perf', internal_obj, feature_names=self.feature_names, feature_types=self.feature_types, name=name) | python/interpret-core/interpret/perf/curve.py | explain_perf | eddy-geek/interpret | 2,674 | python
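The method above delegates the curve math to scikit-learn. A self-contained sketch of the same computation on toy data (the arrays are illustrative):

import numpy as np
from sklearn.metrics import precision_recall_curve, average_precision_score

y = np.array([0, 0, 1, 1])
scores = np.array([0.1, 0.4, 0.35, 0.8])
precision, recall, thresh = precision_recall_curve(y, scores)
ap = average_precision_score(y, scores)  # ~0.83 for this toy data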
c7b67264e1e950cf5913008052ff3de618d145986c3a9775501a1e49cde3776a | # Relies on curve.py module imports: numpy as np, sklearn.metrics, and
# interpret's gen_name_from_class / unify_data / unify_predict_fn helpers.
def explain_perf(self, X, y, name=None):
    """Produce ROC curves.

    Args:
        X: Numpy array for X to compare predict function against.
        y: Numpy vector for y to compare predict function against.
        name: User-defined explanation name.

    Returns:
        An explanation object.
    """
    if name is None:
        name = gen_name_from_class(self)
    X, y, self.feature_names, self.feature_types = unify_data(X, y, self.feature_names, self.feature_types, missing_data_allowed=True)
    predict_fn = unify_predict_fn(self.predict_fn, X)
    scores = predict_fn(X)
    fpr, tpr, thresh = roc_curve(y, scores)
    roc_auc = auc(fpr, tpr)
    # Density of absolute residuals, binned with Doane's rule.
    abs_residuals = np.abs(y - scores)
    counts, values = np.histogram(abs_residuals, bins='doane')
    overall_dict = {'type': 'perf_curve', 'density': {'names': values, 'scores': counts}, 'scores': scores, 'x_values': fpr, 'y_values': tpr, 'threshold': thresh, 'auc': roc_auc}
    internal_obj = {'overall': overall_dict, 'specific': None}
    return ROCExplanation('perf', internal_obj, feature_names=self.feature_names, feature_types=self.feature_types, name=name) | python/interpret-core/interpret/perf/curve.py | explain_perf | eddy-geek/interpret | 2,674 | python
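The ROC counterpart of the earlier sketch, again using only scikit-learn on toy data:

import numpy as np
from sklearn.metrics import roc_curve, auc

y = np.array([0, 0, 1, 1])
scores = np.array([0.1, 0.4, 0.35, 0.8])
fpr, tpr, thresh = roc_curve(y, scores)
roc_auc = auc(fpr, tpr)  # 0.75 for this toy data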
93bf0189516767d84dd81079ea99818b0b1093254deb76734819811bb82e8224 | def __init__(self, explanation_type, internal_obj, feature_names=None, feature_types=None, name=None, selector=None):
    """Initializes class.

    Args:
        explanation_type: Type of explanation.
        internal_obj: A jsonable object that backs the explanation.
        feature_names: List of feature names.
        feature_types: List of feature types.
        name: User-defined name of explanation.
        selector: A dataframe whose indices correspond to explanation entries.
    """
    self.explanation_type = explanation_type
    self._internal_obj = internal_obj
    self.feature_names = feature_names
    self.feature_types = feature_types
    self.name = name
    self.selector = selector | python/interpret-core/interpret/perf/curve.py | __init__ | eddy-geek/interpret | 2,674 | python
11c58d30219a8eac82141b1a8cdaaa4a18ad52a15f7aaa23bbe0df6e8c7ba5ac | def data(self, key=None):
    """Provides specific explanation data.

    Args:
        key: A number/string that references a specific data item.

    Returns:
        A serializable dictionary.
    """
    if key is None:
        return self._internal_obj['overall']
    return None | python/interpret-core/interpret/perf/curve.py | data | eddy-geek/interpret | 2,674 | python
2ae5b04764de64f389753e5b4cd98ac11d8783f9087e5f809f9d6776ee66d75f | def visualize(self, key=None):
    """Provides interactive visualizations.

    Args:
        key: Either a scalar or list that indexes the internal object for
            sub-plotting. If an overall visualization is requested, pass None.

    Returns:
        A Plotly figure.
    """
    from ..visual.plot import plot_performance_curve
    data_dict = self.data(key)
    if data_dict is None:
        return None
    return plot_performance_curve(data_dict, xtitle='FPR', ytitle='TPR', baseline=True, title='ROC Curve: ' + self.name, auc_prefix='AUC') | python/interpret-core/interpret/perf/curve.py | visualize | eddy-geek/interpret | 2,674 | python
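Putting the pieces together: assuming these methods belong to the ROC class exported from interpret.perf and that interpret's top-level show() renders explanations (both import paths are assumptions; the dump only shows the methods above), typical usage would look like:

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from interpret.perf import ROC   # assumed import path
from interpret import show       # assumed rendering helper

X, y = make_classification(random_state=0)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)
model = LogisticRegression().fit(X_tr, y_tr)
show(ROC(model.predict_proba).explain_perf(X_te, y_te, name='Holdout'))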
3a8924419b6f2cfb54f9f1dc2c5762156e77f491131f8801ad81dfccfda9c7f1 | def visualize(self, key=None):
    """Provides interactive visualizations.

    Args:
        key: Either a scalar or list that indexes the internal object for
            sub-plotting. If an overall visualization is requested, pass None.

    Returns:
        A Plotly figure.
    """
    from ..visual.plot import plot_performance_curve
    data_dict = self.data(key)
    if data_dict is None:
        return None
    return plot_performance_curve(data_dict, xtitle='Recall', ytitle='Precision', baseline=False, title='PR Curve: ' + self.name, auc_prefix='Average Precision') | python/interpret-core/interpret/perf/curve.py | visualize | eddy-geek/interpret | 2,674 | python
1fe1785af4cc40b61a27d4c03137c9125961df6822aa85ff92b3038e1239a26e | def get_state(self, creds):
    """
    Get the state that belongs to a particular account.

    @param creds: The credentials which identify a particular account.
    @type creds: L{AWSCredentials}

    @return: The state for the account, creating it if necessary. The
        state will be whatever C{state_factory} returns.
    """
    key = (creds.access_key, creds.secret_key)
    return self._state.setdefault(key, self.state_factory()) | txaws/testing/base.py | get_state | gmorell/txaws | 24 | python
8533e71696129af0ee8156b9919985c907e8726a15e799a62a855821eb46e4fd | def client(self, creds, *a, **kw):
    """
    Get an in-memory verified fake client for this service.

    @param creds: The credentials to associate with the account. No
        authentication is performed but this identifies the state the
        client will find.
    @type creds: L{AWSCredentials}

    @return: A new client for this service along with the state object for
        the client.
    @rtype: L{tuple}
    """
    client = self.client_factory(self, creds, *a, **kw)
    return (client, self.get_state(creds)) | txaws/testing/base.py | client | gmorell/txaws | 24 | python
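get_state() keys per-account state on the (access_key, secret_key) pair, so every client built with equal credentials shares one state object. The pattern in isolation (all names here are illustrative, not txaws API):

from collections import namedtuple

Creds = namedtuple('Creds', 'access_key secret_key')  # stand-in for AWSCredentials

class FakeService(object):
    def __init__(self, state_factory, client_factory):
        self.state_factory = state_factory
        self.client_factory = client_factory
        self._state = {}

    def get_state(self, creds):
        key = (creds.access_key, creds.secret_key)
        return self._state.setdefault(key, self.state_factory())

    def client(self, creds):
        return self.client_factory(self, creds), self.get_state(creds)

service = FakeService(dict, lambda svc, creds: object())
_, s1 = service.client(Creds('ak', 'sk'))
_, s2 = service.client(Creds('ak', 'sk'))
assert s1 is s2  # same credentials observe the same state object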
8b2dc35055cf381e7bbfb1771d14c31006b8e21a1815850c74a9362f8c8e3130 | from skimage import img_as_float

def rgChromaticity(rgb):
    """
    Converting an RGB image into normalized RGB removes the effect
    of any intensity variations.

    rg Chromaticity
    http://en.wikipedia.org/wiki/Rg_chromaticity

    Also known as normalised RGB, as per the paper:
    Color-based object recognition, Theo Gevers and Arnold W.M. Smeulders,
    Pattern Recognition, number 3, pages 453-464, volume 32, 1999.
    """
    rgChrom = img_as_float(rgb)
    # A small epsilon avoids division by zero on black pixels.
    r = rgb[:, :, 0] + 1e-11
    g = rgb[:, :, 1] + 1e-11
    b = rgb[:, :, 2] + 1e-11
    divisor = r + g + b
    rgChrom[:, :, 0] = r / divisor
    rgChrom[:, :, 1] = g / divisor
    rgChrom[:, :, 2] = b / divisor
    return rgChrom | ipfe/colour.py | rgChromaticity | michaelborck/ipfe | 3 | python
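The intensity invariance the docstring claims is easy to check on a single pixel: scaling the pixel leaves its chromaticity coordinates unchanged (values illustrative):

import numpy as np

px = np.array([0.2, 0.5, 0.3])
bright = 1.8 * px               # same colour, higher intensity
print(px / px.sum())            # [0.2 0.5 0.3]
print(bright / bright.sum())    # identical chromaticity coordinates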
17cc7fc41267ca118b6d5fb61e779e278cee95599378edcb354faec5d634f296 | import numpy as np
from skimage import img_as_float

def normalisedRGB(rgb):
    """
    Converting an RGB image into normalized RGB removes the effect
    of any intensity variations.

    L2 Norm (Euclidean norm)
    """
    norm = img_as_float(rgb)
    # A small epsilon avoids division by zero on black pixels.
    r = rgb[:, :, 0] + 1e-11
    g = rgb[:, :, 1] + 1e-11
    b = rgb[:, :, 2] + 1e-11
    divisor = np.sqrt(np.square(r) + np.square(g) + np.square(b))
    norm[:, :, 0] = r / divisor
    norm[:, :, 1] = g / divisor
    norm[:, :, 2] = b / divisor
    return norm | ipfe/colour.py | normalisedRGB | michaelborck/ipfe | 3 | python
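The divisor here is the per-pixel Euclidean norm; for a single pixel the computation reduces to this sketch:

import numpy as np

v = np.array([3.0, 4.0, 0.0])           # one RGB pixel
unit = v / np.sqrt((v ** 2).sum())      # equivalent to v / np.linalg.norm(v)
print(unit)                             # [0.6 0.8 0. ]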
d816128e94cb3be15565941202f129c7fc6f56fc26e887cb870e15234143b560 | def linear_normalization(arr):
    """
    Converting an RGB image into normalized RGB removes the effect
    of any intensity variations.

    Linear normalization
    http://en.wikipedia.org/wiki/Normalization_%28image_processing%29
    """
    arr = arr.astype('float')
    for i in range(3):
        minval = arr[..., i].min()
        maxval = arr[..., i].max()
        # Stretch each channel to [0, 255]; skip constant channels.
        if minval != maxval:
            arr[..., i] -= minval
            arr[..., i] *= 255.0 / (maxval - minval)
    return arr | ipfe/colour.py | linear_normalization | michaelborck/ipfe | 3 | python
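Using the function above on a tiny uint8 image: each channel is stretched to [0, 255] independently, and a constant channel (minval == maxval) is left untouched:

import numpy as np

arr = np.array([[[10, 100, 50]], [[20, 200, 50]]], dtype='uint8')
out = linear_normalization(arr)
print(out[..., 0].min(), out[..., 0].max())  # 0.0 255.0
print(out[..., 2].min(), out[..., 2].max())  # 50.0 50.0 (constant channel unchanged)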
8d58395b0f936f57fe427f58d7b9362c0b79123fbc11bece84b36af7dbabea65 | def ilevenshtein(seq1, seqs, max_dist=-1):
    """Compute the Levenshtein distance between the sequence `seq1` and the series
    of sequences `seqs`.

        `seq1`: the reference sequence
        `seqs`: a series of sequences (can be a generator)
        `max_dist`: if provided and > 0, only the sequences whose distance from
        the reference sequence is lower or equal to this value will be returned.

    The return value is a series of pairs (distance, sequence).

    The sequence objects in `seqs` are expected to be of the same kind as
    the reference sequence in the C implementation; the same holds true for
    `ifast_comp`.
    """
    # `levenshtein` is the package's core distance function; it returns -1
    # when the distance exceeds max_dist, and such sequences are skipped.
    for seq2 in seqs:
        dist = levenshtein(seq1, seq2, max_dist=max_dist)
        if dist != -1:
            yield dist, seq2 | Backend/venv/lib/python3.6/site-packages/distance/_iterators.py | ilevenshtein | Pencroff/ai-hackathon | 82 | python
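Usage mirrors the doctest in ifast_comp below, but with an explicit distance cutoff (assuming the package re-exports ilevenshtein at the top level, as the distance package on PyPI does):

from distance import ilevenshtein

candidates = ["fo", "bar", "foob", "foo", "foobaz"]
print(sorted(ilevenshtein("foo", candidates, max_dist=1)))
# [(0, 'foo'), (1, 'fo'), (1, 'foob')]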
e78c93a29bee77167a966eb590b0ca6f436e0b1800749a169d4110b93a2ff85a | def ifast_comp(seq1, seqs, transpositions=False):
    """Return an iterator over all the sequences in `seqs` whose distance from
    `seq1` is lower or equal to 2. The sequences whose distance from the
    reference sequence is higher than that are dropped.

        `seq1`: the reference sequence.
        `seqs`: a series of sequences (can be a generator)
        `transpositions` has the same sense as in `fast_comp`.

    The return value is a series of pairs (distance, sequence).

    You might want to call `sorted()` on the iterator to get the results in a
    significant order:

        >>> g = ifast_comp("foo", ["fo", "bar", "foob", "foo", "foobaz"])
        >>> sorted(g)
        [(0, 'foo'), (1, 'fo'), (1, 'foob')]
    """
    # `fast_comp` is the package's bounded distance function; it returns -1
    # for distances above 2, and such sequences are skipped.
    for seq2 in seqs:
        dist = fast_comp(seq1, seq2, transpositions)
        if dist != -1:
            yield dist, seq2 | Backend/venv/lib/python3.6/site-packages/distance/_iterators.py | ifast_comp | Pencroff/ai-hackathon | 82 | python
86e0ea376c2dc3cc84b4060d225b595bd9fe0a74892a930df03101a81811b807 | import sqlite3

def addRecord(temp, press, humd, mois):
    """Add a new record to the database"""
    conn = sqlite3.connect('/home/pi/sensors.db')
    cur = conn.cursor()
    # Parameterized insert; the sensors table holds one row per reading.
    cur.execute('insert into sensors (temperature, pressure, humidity, moisture) values (?, ?, ?, ?)',
                (temp, press, humd, mois))
    conn.commit()
    conn.close() | scripts/record-sensors.py | addRecord | hairyspider/tomcam | 0 | python
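Reading the records back needs nothing beyond the sqlite3 standard library (same database path as above):

import sqlite3

conn = sqlite3.connect('/home/pi/sensors.db')
for row in conn.execute('select temperature, pressure, humidity, moisture from sensors'):
    print(row)
conn.close()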
0c23a45926f2a20744d14b5080526a5ef1e2bf9fe5af76c515e84e04acdc6ebc | # Relies on nep/helpers.py module imports: os, nep and subprocess.Popen.
def nepmaster(version='2', param=''):
    """
    Launch master in NEP_WS

    Parameters
    ----------
    version : string
        Python version, 0 for default, 2 for Python 2 and 3 for Python 3

    param : string
        Can be "local", or "network"
    """
    if os.environ.get('OS', '') == 'Windows_NT':
        from subprocess import CREATE_NEW_CONSOLE
    nep_ws = nep.getNEPpath()
    script = 'master'
    # Default command; replaced below when a specific Python version is requested.
    command = 'python ' + nep_ws + '/' + script + '.py ' + param
    if os.environ.get('OS', '') == 'Windows_NT':
        if version == '2':
            print('Running in Python 2')
            command = 'py -2 ' + nep_ws + '/' + script + '.py ' + param
        elif version == '3':
            print('Running in Python 3')
            command = 'py -3 ' + nep_ws + '/' + script + '.py ' + param
        print('Windows launcher in new console .......')
        Popen(command, creationflags=CREATE_NEW_CONSOLE)
    else:
        print('OSX launcher .......')
        if version == '2':
            print('Running in Python 2')
            command = 'python2 ' + nep_ws + '/' + script + '.py ' + param
        elif version == '3':
            print('Running in Python 3')
            command = 'python3 ' + nep_ws + '/' + script + '.py ' + param
        # On macOS, open a new Terminal window via AppleScript.
        import applescript
        tell = 'tell application "Terminal" to do script '
        complete = tell + '"' + command + '"'
        applescript.AppleScript(complete).run() | nep/helpers.py | nepmaster | enriquecoronadozu/NEP | 5 | python
7ebbd1edfd3b78684243e93d3df5d2106acc6cd9548a76432b648e4055bf555f | def neprun(module, script, parameters, version='2'):
'\n Launch a python script in NEP_WS\n\n Parameters\n ----------\n module : string\n Module name\n\n script : string\n Script name\n\n script : parameters\n Additional command line parameters\n\n version : string\n Python version, 0 for default, 2 for Python 2 and 3 for Python 3\n\n '
try:
if (os.environ.get('OS', '') == 'Windows_NT'):
from subprocess import CREATE_NEW_CONSOLE
nep_ws = nep.getNEPpath()
command = (((((((('python ' + nep_ws) + '/') + module) + '/') + script) + '.py') + ' ') + parameters)
print(('To run: ' + command))
if (os.environ.get('OS', '') == 'Windows_NT'):
if (version == '2'):
print('Running in Python 2')
command = (((((((('py -2 ' + nep_ws) + '/') + module) + '/') + script) + '.py') + ' ') + parameters)
elif (version == '3'):
print('Running in Python 3')
command = (((((((('py -3 ' + nep_ws) + '/') + module) + '/') + script) + '.py') + ' ') + parameters)
print('Windows launcher in new console .......')
Popen(command, creationflags=CREATE_NEW_CONSOLE)
else:
print('OSX launcher .......')
if (version == '2'):
print('Running in Python 2')
command = (((((((('python2 ' + nep_ws) + '/') + module) + '/') + script) + '.py') + ' ') + parameters)
elif (version == '3'):
print('Running in Python 3')
command = (((((((('python3 ' + nep_ws) + '/') + module) + '/') + script) + '.py') + ' ') + parameters)
import applescript
tell = 'tell application "Terminal" to do script '
complete = (((tell + '"') + command) + '"')
applescript.AppleScript(complete).run()
except Exception as e:
(exc_type, exc_obj, exc_tb) = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
time.sleep(3)
return False | Launch a python script in NEP_WS
Parameters
----------
module : string
Module name
script : string
Script name
script : parameters
Additional command line parameters
version : string
Python version, 0 for default, 2 for Python 2 and 3 for Python 3 | nep/helpers.py | neprun | enriquecoronadozu/NEP | 5 | python | def neprun(module, script, parameters, version='2'):
'\n Launch a python script in NEP_WS\n\n Parameters\n ----------\n module : string\n Module name\n\n script : string\n Script name\n\n script : parameters\n Additional command line parameters\n\n version : string\n Python version, 0 for default, 2 for Python 2 and 3 for Python 3\n\n '
try:
if (os.environ.get('OS', ) == 'Windows_NT'):
from subprocess import CREATE_NEW_CONSOLE
nep_ws = nep.getNEPpath()
command = (((((((('python ' + nep_ws) + '/') + module) + '/') + script) + '.py') + ' ') + parameters)
print(('To run: ' + command))
if (os.environ.get('OS', ) == 'Windows_NT'):
if (version == '2'):
print('Running in Python 2')
command = (((((((('py -2 ' + nep_ws) + '/') + module) + '/') + script) + '.py') + ' ') + parameters)
elif (version == '3'):
print('Running in Python 3')
command = (((((((('py -3 ' + nep_ws) + '/') + module) + '/') + script) + '.py') + ' ') + parameters)
print('Windows launcher in new console .......')
Popen(command, creationflags=CREATE_NEW_CONSOLE)
else:
print('OSX launcher .......')
if (version == '2'):
print('Running in Python 2')
command = (((((((('python2 ' + nep_ws) + '/') + module) + '/') + script) + '.py') + ' ') + parameters)
elif (version == '3'):
print('Running in Python 3')
command = (((((((('python3 ' + nep_ws) + '/') + module) + '/') + script) + '.py') + ' ') + parameters)
import applescript
tell = 'tell application "Terminal" to do script '
complete = (((tell + '"') + command) + '"')
applescript.AppleScript(complete).run()
except Exception as e:
(exc_type, exc_obj, exc_tb) = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
time.sleep(3)
return False | def neprun(module, script, parameters, version='2'):
'\n Launch a python script in NEP_WS\n\n Parameters\n ----------\n module : string\n Module name\n\n script : string\n Script name\n\n script : parameters\n Additional command line parameters\n\n version : string\n Python version, 0 for default, 2 for Python 2 and 3 for Python 3\n\n '
try:
if (os.environ.get('OS', ) == 'Windows_NT'):
from subprocess import CREATE_NEW_CONSOLE
nep_ws = nep.getNEPpath()
command = (((((((('python ' + nep_ws) + '/') + module) + '/') + script) + '.py') + ' ') + parameters)
print(('To run: ' + command))
if (os.environ.get('OS', ) == 'Windows_NT'):
if (version == '2'):
print('Running in Python 2')
command = (((((((('py -2 ' + nep_ws) + '/') + module) + '/') + script) + '.py') + ' ') + parameters)
elif (version == '3'):
print('Running in Python 3')
command = (((((((('py -3 ' + nep_ws) + '/') + module) + '/') + script) + '.py') + ' ') + parameters)
print('Windows launcher in new console .......')
Popen(command, creationflags=CREATE_NEW_CONSOLE)
else:
print('OSX launcher .......')
if (version == '2'):
print('Running in Python 2')
command = (((((((('python2 ' + nep_ws) + '/') + module) + '/') + script) + '.py') + ' ') + parameters)
elif (version == '3'):
print('Running in Python 3')
command = (((((((('python3 ' + nep_ws) + '/') + module) + '/') + script) + '.py') + ' ') + parameters)
import applescript
tell = 'tell application "Terminal" to do script '
complete = (((tell + '"') + command) + '"')
applescript.AppleScript(complete).run()
except Exception as e:
(exc_type, exc_obj, exc_tb) = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
time.sleep(3)
return False<|docstring|>Launch a python script in NEP_WS
Parameters
----------
module : string
Module name
script : string
Script name
parameters : string
Additional command line parameters
version : string
Python version, 0 for default, 2 for Python 2 and 3 for Python 3<|endoftext|> |
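A minimal usage sketch for the record above; the module and script names are hypothetical, and it assumes the nep package re-exports helpers.py at package level (as the nep.getNEPpath and nep.client calls in this file suggest) and that NEP_WS points at a workspace containing my_module/my_script.py.

import nep

# Launch NEP_WS/my_module/my_script.py in a new terminal with Python 3.
# '--port 9000' is an illustrative extra command-line parameter.
nep.neprun('my_module', 'my_script', '--port 9000', version='3')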
1178116a44f11ab4de7a6063292df7f230c4e2019bdced639601d9d8d5534bd0 | def masterRegister(node, topic, master_ip='127.0.0.1', master_port=7000, socket='subscriber', mode='many2many', pid='none', data_type='json'):
' Register topic in master node\n \n Parameters\n ----------\n\n node: string\n Node name\n\n topic : string\n Topic to register\n\n master_ip : string \n IP of the master node service\n\n master_port : int\n Port of the master node service\n\n socket: string\n Socket type. Example "surveyor", "publisher", "subscriber", "respondent", "client", "server"\n\n mode: string\n Parameter only for Publish/Subscriber pattern. Options are "one2many", "many2one" and "many2many".\n \n pid : string\n PID identifier\n\n data_type : string\n message type\n\n Returns\n ----------\n\n result : bool\n Only if True socket can be connected\n\n port : string\n Port used to connect the socket\n\n ip : string\n IP used to connect the socket\n \n '
topic = topic
client = nep.client(master_ip, master_port, transport='ZMQ', debug=False)
time.sleep(0.01)
message = {'node': node, 'topic': topic, 'mode': mode, 'socket': socket, 'pid': pid, 'msg_type': data_type}
client.send_info(message)
response = client.listen_info()
try:
topic_id = response['topic']
if (topic_id == topic):
port = response['port']
if ('ip' in response):
ip = response['ip']
else:
ip = '127.0.0.1'
state = response['state']
if (state == 'success'):
return (True, port, ip)
else:
print('NEP ERROR: wrong socket configuration')
return (False, port, ip)
except Exception as e:
(exc_type, exc_obj, exc_tb) = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
print('NEP ERROR: wrong response from master')
return (False, 'none', 'none')
print('NEP ERROR: wrong topic from master')
return (False, 'none', 'none') | Register topic in master node
Parameters
----------
node: string
Node name
topic : string
Topic to register
master_ip : string
IP of the master node service
master_port : int
Port of the master node service
socket: string
Socket type. Example "surveyor", "publisher", "subscriber", "respondent", "client", "server"
mode: string
Parameter only for Publish/Subscriber pattern. Options are "one2many", "many2one" and "many2many".
pid : string
PID identifier
data_type : string
message type
Returns
----------
result : bool
Only if True socket can be connected
port : string
Port used to connect the socket
ip : string
IP used to connect the socket | nep/helpers.py | masterRegister | enriquecoronadozu/NEP | 5 | python | def masterRegister(node, topic, master_ip='127.0.0.1', master_port=7000, socket='subscriber', mode='many2many', pid='none', data_type='json'):
' Register topic in master node\n \n Parameters\n ----------\n\n node: string\n Node name\n\n topic : string\n Topic to register\n\n master_ip : string \n IP of the master node service\n\n master_port : int\n Port of the master node service\n\n socket: string\n Socket type. Example "surveyor", "publisher", "subscriber", "respondent", "client", "server"\n\n mode: string\n Parameter only for Publish/Subscriber pattern. Options are "one2many", "many2one" and "many2many".\n \n pid : string\n PID identifier\n\n data_type : string\n message type\n\n Returns\n ----------\n\n result : bool\n Only if True socket can be connected\n\n port : string\n Port used to connect the socket\n\n ip : string\n IP used to connect the socket\n \n '
topic = topic
client = nep.client(master_ip, master_port, transport='ZMQ', debug=False)
time.sleep(0.01)
message = {'node': node, 'topic': topic, 'mode': mode, 'socket': socket, 'pid': pid, 'msg_type': data_type}
client.send_info(message)
response = client.listen_info()
try:
topic_id = response['topic']
if (topic_id == topic):
port = response['port']
if ('ip' in response):
ip = response['ip']
else:
ip = '127.0.0.1'
state = response['state']
if (state == 'success'):
return (True, port, ip)
else:
print('NEP ERROR: wrong socket configuration')
return (False, port, ip)
except Exception as e:
(exc_type, exc_obj, exc_tb) = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
print('NEP ERROR: wrong response from master')
return (False, 'none', 'none')
print('NEP ERROR: wrong topic from master')
return (False, 'none', 'none') | def masterRegister(node, topic, master_ip='127.0.0.1', master_port=7000, socket='subscriber', mode='many2many', pid='none', data_type='json'):
' Register topic in master node\n \n Parameters\n ----------\n\n node: string\n Node name\n\n topic : string\n Topic to register\n\n master_ip : string \n IP of the master node service\n\n master_port : int\n Port of the master node service\n\n socket: string\n Socket type. Example "surveyor", "publisher", "subscriber", "respondent", "client", "server"\n\n mode: string\n Parameter only for Publish/Subscriber pattern. Options are "one2many", "many2one" and "many2many".\n \n pid : string\n PID identifier\n\n data_type : string\n message type\n\n Returns\n ----------\n\n result : bool\n Only if True socket can be connected\n\n port : string\n Port used to connect the socket\n\n ip : string\n IP used to connect the socket\n \n '
topic = topic
client = nep.client(master_ip, master_port, transport='ZMQ', debug=False)
time.sleep(0.01)
message = {'node': node, 'topic': topic, 'mode': mode, 'socket': socket, 'pid': pid, 'msg_type': data_type}
client.send_info(message)
response = client.listen_info()
try:
topic_id = response['topic']
if (topic_id == topic):
port = response['port']
if ('ip' in response):
ip = response['ip']
else:
ip = '127.0.0.1'
state = response['state']
if (state == 'success'):
return (True, port, ip)
else:
print('NEP ERROR: wrong socket configuration')
return (False, port, ip)
except Exception as e:
(exc_type, exc_obj, exc_tb) = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
print('NEP ERROR: wrong response from master')
return (False, 'none', 'none')
print('NEP ERROR: wrong topic from master')
return (False, 'none', 'none')<|docstring|>Register topic in master node
Parameters
----------
node: string
Node name
topic : string
Topic to register
master_ip : string
IP of the master node service
master_port : int
Port of the master node service
socket: string
Socket type. Example "surveyor", "publisher", "subscriber", "respondent", "client", "server"
mode: string
Parameter only for Publish/Subscriber pattern. Options are "one2many", "many2one" and "many2many".
pid : string
PID identifier
data_type : string
message type
Returns
----------
result : bool
Only if True socket can be connected
port : string
Port used to connect the socket
ip : string
IP used to connect the socket<|endoftext|> |
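A sketch of the registration handshake described above, assuming a master node service is already listening on 127.0.0.1:7000; the node and topic names are placeholders.

import nep

ok, port, ip = nep.masterRegister('sensor_node', '/temperature',
                                  master_ip='127.0.0.1', master_port=7000,
                                  socket='publisher', mode='one2many')
if ok:
    # The caller would connect its publisher socket to this endpoint.
    print('publisher endpoint: tcp://%s:%s' % (ip, port))
else:
    print('registration failed')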
0c6798bcea0af1445e5ce6c162ae4cfac4818813da7543fbdb837542d0eacb73 | def getNEPpath():
' Get path to NEP Workspace\n\n Returns\n ----------\n\n path : string\n Current workspace path\n\n '
import os
return os.environ['NEP_WS'] | Get path to NEP Workspace
Returns
----------
path : string
Current workspace path | nep/helpers.py | getNEPpath | enriquecoronadozu/NEP | 5 | python | def getNEPpath():
' Get path to NEP Workspace\n\n Returns\n ----------\n\n path : string\n Current workspace path\n\n '
import os
return os.environ['NEP_WS'] | def getNEPpath():
' Get path to NEP Workspace\n\n Returns\n ----------\n\n path : string\n Current workspace path\n\n '
import os
return os.environ['NEP_WS']<|docstring|>Get path to NEP Workspace
Returns
----------
path : string
Current workspace path<|endoftext|> |
1de59a12b78b136ecafb0246d9a14b8c809bcd1409f23171e76f159f8bb3825e | def setNEPpath(new_path):
' Set path to NEP Workspace\n\n Parameters\n ----------\n\n new_path: string\n New path for NEP workspace\n\n '
import os
if (os.environ.get('OS', '') == 'Windows_NT'):
from subprocess import CREATE_NEW_CONSOLE
command = (('setx NEP_WS "' + new_path) + '"')
Popen(command, creationflags=CREATE_NEW_CONSOLE)
os.environ['NEP_WS'] = new_path | Set path to NEP Workspace
Parameters
----------
new_path: string
New path for NEP workspace | nep/helpers.py | setNEPpath | enriquecoronadozu/NEP | 5 | python | def setNEPpath(new_path):
' Set path to NEP Workspace\n\n Parameters\n ----------\n\n new_path: string\n New path for NEP workspace\n\n '
import os
if (os.environ.get('OS', '') == 'Windows_NT'):
from subprocess import CREATE_NEW_CONSOLE
command = (('setx NEP_WS "' + new_path) + '"')
Popen(command, creationflags=CREATE_NEW_CONSOLE)
os.environ['NEP_WS'] = new_path | def setNEPpath(new_path):
' Set path to NEP Workspace\n\n Parameters\n ----------\n\n new_path: string\n New path for NEP workspace\n\n '
import os
if (os.environ.get('OS', '') == 'Windows_NT'):
from subprocess import CREATE_NEW_CONSOLE
command = (('setx NEP_WS "' + new_path) + '"')
Popen(command, creationflags=CREATE_NEW_CONSOLE)
os.environ['NEP_WS'] = new_path<|docstring|>Set path to NEP Workspace
Parameters
----------
new_path: string
New path for NEP workspace<|endoftext|> |
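A round-trip sketch combining this setter with getNEPpath from the previous record; the path is a placeholder.

import nep

nep.setNEPpath('/home/user/nep_ws')   # on Windows this also spawns `setx` to persist it
print(nep.getNEPpath())               # reads back the NEP_WS environment variable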
66f74da86cb4da1b15e5634c6e48d6070c87e9f9cbd9e70b5b0e0d71fc5d1a9c | def getMyIP():
' Get current IP address of the PC\n\n Returns\n ----------\n\n ip : string\n Current IP\n\n '
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('8.8.8.8', 80))
cw_ip = s.getsockname()[0]
s.close()
return str(cw_ip) | Get current IP address of the PC
Returns
----------
ip : string
Current IP | nep/helpers.py | getMyIP | enriquecoronadozu/NEP | 5 | python | def getMyIP():
' Get current IP address of the PC\n\n Returns\n ----------\n\n ip : string\n Current IP\n\n '
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('8.8.8.8', 80))
cw_ip = s.getsockname()[0]
s.close()
return str(cw_ip) | def getMyIP():
' Get current IP address of the PC\n\n Returns\n ----------\n\n ip : string\n Current IP\n\n '
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('8.8.8.8', 80))
cw_ip = s.getsockname()[0]
s.close()
return str(cw_ip)<|docstring|>Get current IP address of the PC
Returns
----------
ip : string
Current IP<|endoftext|> |
5b05e8dbb59c5cd9359bf54100c82cf60a677ef3d077e744a32337c8536b50fd | def json2dict(s, **kwargs):
'Convert JSON to python dictionary. See jsonapi.jsonmod.loads for details on kwargs.\n \n Parameters\n ----------\n s: string\n string with the content of the json data\n\n Returns:\n ----------\n dict: dictionary\n dictionary with the content of the json data\n '
if (sys.version_info[0] == 3):
return simplejson.loads(s, **kwargs)
elif ((str is unicode) and isinstance(s, bytes)):
s = s.decode('utf8')
return simplejson.loads(s, **kwargs) | Convert JSON to python dictionary. See jsonapi.jsonmod.loads for details on kwargs.
Parameters
----------
s: string
string with the content of the json data
Returns:
----------
dict: dictionary
dictionary with the content of the json data | nep/helpers.py | json2dict | enriquecoronadozu/NEP | 5 | python | def json2dict(s, **kwargs):
'Convert JSON to python dictionary. See jsonapi.jsonmod.loads for details on kwargs.\n \n Parameters\n ----------\n s: string\n string with the content of the json data\n\n Returns:\n ----------\n dict: dictionary\n dictionary with the content of the json data\n '
if (sys.version_info[0] == 3):
return simplejson.loads(s, **kwargs)
elif ((str is unicode) and isinstance(s, bytes)):
s = s.decode('utf8')
return simplejson.loads(s, **kwargs) | def json2dict(s, **kwargs):
'Convert JSON to python dictionary. See jsonapi.jsonmod.loads for details on kwargs.\n \n Parameters\n ----------\n s: string\n string with the content of the json data\n\n Returns:\n ----------\n dict: dictionary\n dictionary with the content of the json data\n '
if (sys.version_info[0] == 3):
return simplejson.loads(s, **kwargs)
elif ((str is unicode) and isinstance(s, bytes)):
s = s.decode('utf8')
return simplejson.loads(s, **kwargs)<|docstring|>Convert JSON to python dictionary. See jsonapi.jsonmod.loads for details on kwargs.
Parameters
----------
s: string
string with the content of the json data
Returns:
----------
dict: dictionary
dictionary with the content of the json data<|endoftext|> |
4bf656201f500e26c5a39359cd8a2fc35f8a658543a93866e35e87ebc27cbd2e | def dict2json(o, **kwargs):
' Dump object to JSON bytes (utf-8). See jsonapi.jsonmod.dumps for details on kwargs.\n \n Parameters\n ----------\n o: dictionary\n dictionary to convert\n \n\n Returns:\n ----------\n s: string\n string in json format\n\n '
if ('separators' not in kwargs):
kwargs['separators'] = (',', ':')
s = simplejson.dumps(o, **kwargs)
import sys
if (sys.version_info[0] == 3):
if isinstance(s, str):
s = s.encode('utf8')
elif isinstance(s, unicode):
s = s.encode('utf8')
return s | Dump object to JSON bytes (utf-8). See jsonapi.jsonmod.dumps for details on kwargs.
Parameters
----------
o: dictionary
dictionary to convert
Returns:
----------
s: string
string in json format | nep/helpers.py | dict2json | enriquecoronadozu/NEP | 5 | python | def dict2json(o, **kwargs):
' Dump object to JSON bytes (utf-8). See jsonapi.jsonmod.dumps for details on kwargs.\n \n Parameters\n ----------\n o: dictionary\n dictionary to convert\n \n\n Returns:\n ----------\n s: string\n string in json format\n\n '
if ('separators' not in kwargs):
kwargs['separators'] = (',', ':')
s = simplejson.dumps(o, **kwargs)
import sys
if (sys.version_info[0] == 3):
if isinstance(s, str):
s = s.encode('utf8')
elif isinstance(s, unicode):
s = s.encode('utf8')
return s | def dict2json(o, **kwargs):
' Dump object to JSON bytes (utf-8). See jsonapi.jsonmod.dumps for details on kwargs.\n \n Parameters\n ----------\n o: dictionary\n dictionary to convert\n \n\n Returns:\n ----------\n s: string\n string in json format\n\n '
if ('separators' not in kwargs):
kwargs['separators'] = (',', ':')
s = simplejson.dumps(o, **kwargs)
import sys
if (sys.version_info[0] == 3):
if isinstance(s, str):
s = s.encode('utf8')
elif isinstance(s, unicode):
s = s.encode('utf8')
return s<|docstring|>Dump object to JSON bytes (utf-8). See jsonapi.jsonmod.dumps for details on kwargs.
Parameters
----------
o: dictionary
dictionary to convert
Returns:
----------
s: string
string in json format<|endoftext|> |
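A round-trip sketch pairing dict2json with json2dict from the earlier record, assuming the nep package re-exports these helpers; on Python 3 the serialized form is utf-8 bytes.

import nep

msg = {'node': 'demo', 'value': 3.14}
raw = nep.dict2json(msg)    # compact JSON, encoded to utf-8 bytes
back = nep.json2dict(raw)   # decoded back into a dictionary
assert back == msg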
b34144a7bad97228cff0548ec109f87978b2affb69eff2ccdbff58d101db3628 | def read_json(json_file):
' Read a json file and return a string \n \n Parameters\n ----------\n json_file : string\n Path + name + extension of the json file\n\n Returns:\n ----------\n json_data: string\n string with the content of the json data\n\n '
json_data = open(json_file).read()
return json_data | Read a json file and return a string
Parameters
----------
json_file : string
Path + name + extension of the json file
Returns:
----------
json_data: string
string with the content of the json data | nep/helpers.py | read_json | enriquecoronadozu/NEP | 5 | python | def read_json(json_file):
' Read a json file and return a string \n \n Parameters\n ----------\n json_file : string\n Path + name + extension of the json file\n\n Returns:\n ----------\n json_data: string\n string with the content of the json data\n\n '
json_data = open(json_file).read()
return json_data | def read_json(json_file):
' Read a json file and return a string \n \n Parameters\n ----------\n json_file : string\n Path + name + extension of the json file\n\n Returns:\n ----------\n json_data: string\n string with the content of the json data\n\n '
json_data = open(json_file).read()
return json_data<|docstring|>Read a json file and return a string
Parameters
----------
json_file : string
Path + name + extension of the json file
Returns:
----------
json_data: string
string with the content of the json data<|endoftext|> |
452b666f3d04b9822df40795b62a9544f73ed517beae520aa1131e3f43256e1a | def getFiles(path):
' Get a list of files that are inside a folder\n \n Parameters\n ----------\n path: string\n path of the folder\n\n Returns:\n ----------\n onlyfiles: list \n list of strings with the name of the files in the folder\n\n '
onlyfiles = [f for f in listdir(path) if isfile(join(path, f))]
return onlyfiles | Get a list of files that are inside a folder
Parameters
----------
path: string
path of the folder
Returns:
----------
onlyfiles: list
list of strings with the name of the files in the folder | nep/helpers.py | getFiles | enriquecoronadozu/NEP | 5 | python | def getFiles(path):
' Get a list of files that are inside a folder\n \n Parameters\n ----------\n path: string\n path of the folder\n\n Returns:\n ----------\n onlyfiles: list \n list of strings with the name of the files in the folder\n\n '
onlyfiles = [f for f in listdir(path) if isfile(join(path, f))]
return onlyfiles | def getFiles(path):
' Get a list of files that are inside a folder\n \n Parameters\n ----------\n path: string\n path of the folder\n\n Returns:\n ----------\n onlyfiles: list \n list of strings with the name of the files in the folder\n\n '
onlyfiles = [f for f in listdir(path) if isfile(join(path, f))]
return onlyfiles<|docstring|>Get a list of files that are inside a folder
Parameters
----------
path: string
path of the folder
Returns:
----------
onlyfiles: list
list of strings with the name of the files in the folder<|endoftext|> |
db54f8d0c78dd4d8964bfa42bb27ba0199185c1046fee3b0654cf17557002b39 | def fetch_url(url, fname):
' save a url to a local file '
fin = req.urlopen(url)
data = fin.read()
with open(fname, mode='wb') as fout:
fout.write(data) | save a url to a local file | matt_file.py | fetch_url | decareano/boto3 | 0 | python | def fetch_url(url, fname):
' '
fin = req.urlopen(url)
data = fin.read()
with open(fname, mode='wb') as fout:
fout.write(data) | def fetch_url(url, fname):
' '
fin = req.urlopen(url)
data = fin.read()
with open(fname, mode='wb') as fout:
fout.write(data)<|docstring|>save a url to a local file<|endoftext|> |
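A one-line usage sketch, assuming fetch_url and its `urllib.request as req` import are in scope; the URL and file name are placeholders.

fetch_url('https://example.com/', 'example.html')   # writes the response body to disk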
24df0b9410f8a28f9ec41e6c38555a62d753962923cfe89ab29bb8a16bf0fc75 | def __init__(self, blob_data_payment_rate, db_dir=None, lbryid=None, peer_manager=None, dht_node_port=None, known_dht_nodes=None, peer_finder=None, hash_announcer=None, blob_dir=None, blob_manager=None, peer_port=None, use_upnp=True, rate_limiter=None, wallet=None, dht_node_class=node.Node, blob_tracker_class=None, payment_rate_manager_class=None, is_generous=True):
"@param blob_data_payment_rate: The default payment rate for blob data\n\n @param db_dir: The directory in which levelDB files should be stored\n\n @param lbryid: The unique ID of this node\n\n @param peer_manager: An object which keeps track of all known\n peers. If None, a PeerManager will be created\n\n @param dht_node_port: The port on which the dht node should\n listen for incoming connections\n\n @param known_dht_nodes: A list of nodes which the dht node\n should use to bootstrap into the dht\n\n @param peer_finder: An object which is used to look up peers\n that are associated with some hash. If None, a\n DHTPeerFinder will be used, which looks for peers in the\n distributed hash table.\n\n @param hash_announcer: An object which announces to other\n peers that this peer is associated with some hash. If\n None, and peer_port is not None, a DHTHashAnnouncer will\n be used. If None and peer_port is None, a\n DummyHashAnnouncer will be used, which will not actually\n announce anything.\n\n @param blob_dir: The directory in which blobs will be\n stored. If None and blob_manager is None, blobs will be\n stored in memory only.\n\n @param blob_manager: An object which keeps track of downloaded\n blobs and provides access to them. If None, and blob_dir\n is not None, a DiskBlobManager will be used, with the\n given blob_dir. If None and blob_dir is None, a\n TempBlobManager will be used, which stores blobs in memory\n only.\n\n @param peer_port: The port on which other peers should connect\n to this peer\n\n @param use_upnp: Whether or not to try to open a hole in the\n firewall so that outside peers can connect to this peer's\n peer_port and dht_node_port\n\n @param rate_limiter: An object which keeps track of the amount\n of data transferred to and from this peer, and can limit\n that rate if desired\n\n @param wallet: An object which will be used to keep track of\n expected payments and which will pay peers. If None, a\n wallet which uses the Point Trader system will be used,\n which is meant for testing only\n\n "
self.db_dir = db_dir
self.lbryid = lbryid
self.peer_manager = peer_manager
self.dht_node_port = dht_node_port
self.known_dht_nodes = known_dht_nodes
if (self.known_dht_nodes is None):
self.known_dht_nodes = []
self.peer_finder = peer_finder
self.hash_announcer = hash_announcer
self.blob_dir = blob_dir
self.blob_manager = blob_manager
self.blob_tracker = None
self.blob_tracker_class = (blob_tracker_class or BlobAvailabilityTracker)
self.peer_port = peer_port
self.use_upnp = use_upnp
self.rate_limiter = rate_limiter
self.external_ip = '127.0.0.1'
self.upnp_redirects = []
self.wallet = wallet
self.dht_node_class = dht_node_class
self.dht_node = None
self.base_payment_rate_manager = BasePaymentRateManager(blob_data_payment_rate)
self.payment_rate_manager = None
self.payment_rate_manager_class = (payment_rate_manager_class or NegotiatedPaymentRateManager)
self.is_generous = is_generous | @param blob_data_payment_rate: The default payment rate for blob data
@param db_dir: The directory in which levelDB files should be stored
@param lbryid: The unique ID of this node
@param peer_manager: An object which keeps track of all known
peers. If None, a PeerManager will be created
@param dht_node_port: The port on which the dht node should
listen for incoming connections
@param known_dht_nodes: A list of nodes which the dht node
should use to bootstrap into the dht
@param peer_finder: An object which is used to look up peers
that are associated with some hash. If None, a
DHTPeerFinder will be used, which looks for peers in the
distributed hash table.
@param hash_announcer: An object which announces to other
peers that this peer is associated with some hash. If
None, and peer_port is not None, a DHTHashAnnouncer will
be used. If None and peer_port is None, a
DummyHashAnnouncer will be used, which will not actually
announce anything.
@param blob_dir: The directory in which blobs will be
stored. If None and blob_manager is None, blobs will be
stored in memory only.
@param blob_manager: An object which keeps track of downloaded
blobs and provides access to them. If None, and blob_dir
is not None, a DiskBlobManager will be used, with the
given blob_dir. If None and blob_dir is None, a
TempBlobManager will be used, which stores blobs in memory
only.
@param peer_port: The port on which other peers should connect
to this peer
@param use_upnp: Whether or not to try to open a hole in the
firewall so that outside peers can connect to this peer's
peer_port and dht_node_port
@param rate_limiter: An object which keeps track of the amount
of data transferred to and from this peer, and can limit
that rate if desired
@param wallet: An object which will be used to keep track of
expected payments and which will pay peers. If None, a
wallet which uses the Point Trader system will be used,
which is meant for testing only | lbrynet/core/Session.py | __init__ | shyba/lbry | 1 | python | def __init__(self, blob_data_payment_rate, db_dir=None, lbryid=None, peer_manager=None, dht_node_port=None, known_dht_nodes=None, peer_finder=None, hash_announcer=None, blob_dir=None, blob_manager=None, peer_port=None, use_upnp=True, rate_limiter=None, wallet=None, dht_node_class=node.Node, blob_tracker_class=None, payment_rate_manager_class=None, is_generous=True):
"@param blob_data_payment_rate: The default payment rate for blob data\n\n @param db_dir: The directory in which levelDB files should be stored\n\n @param lbryid: The unique ID of this node\n\n @param peer_manager: An object which keeps track of all known\n peers. If None, a PeerManager will be created\n\n @param dht_node_port: The port on which the dht node should\n listen for incoming connections\n\n @param known_dht_nodes: A list of nodes which the dht node\n should use to bootstrap into the dht\n\n @param peer_finder: An object which is used to look up peers\n that are associated with some hash. If None, a\n DHTPeerFinder will be used, which looks for peers in the\n distributed hash table.\n\n @param hash_announcer: An object which announces to other\n peers that this peer is associated with some hash. If\n None, and peer_port is not None, a DHTHashAnnouncer will\n be used. If None and peer_port is None, a\n DummyHashAnnouncer will be used, which will not actually\n announce anything.\n\n @param blob_dir: The directory in which blobs will be\n stored. If None and blob_manager is None, blobs will be\n stored in memory only.\n\n @param blob_manager: An object which keeps track of downloaded\n blobs and provides access to them. If None, and blob_dir\n is not None, a DiskBlobManager will be used, with the\n given blob_dir. If None and blob_dir is None, a\n TempBlobManager will be used, which stores blobs in memory\n only.\n\n @param peer_port: The port on which other peers should connect\n to this peer\n\n @param use_upnp: Whether or not to try to open a hole in the\n firewall so that outside peers can connect to this peer's\n peer_port and dht_node_port\n\n @param rate_limiter: An object which keeps track of the amount\n of data transferred to and from this peer, and can limit\n that rate if desired\n\n @param wallet: An object which will be used to keep track of\n expected payments and which will pay peers. If None, a\n wallet which uses the Point Trader system will be used,\n which is meant for testing only\n\n "
self.db_dir = db_dir
self.lbryid = lbryid
self.peer_manager = peer_manager
self.dht_node_port = dht_node_port
self.known_dht_nodes = known_dht_nodes
if (self.known_dht_nodes is None):
self.known_dht_nodes = []
self.peer_finder = peer_finder
self.hash_announcer = hash_announcer
self.blob_dir = blob_dir
self.blob_manager = blob_manager
self.blob_tracker = None
self.blob_tracker_class = (blob_tracker_class or BlobAvailabilityTracker)
self.peer_port = peer_port
self.use_upnp = use_upnp
self.rate_limiter = rate_limiter
self.external_ip = '127.0.0.1'
self.upnp_redirects = []
self.wallet = wallet
self.dht_node_class = dht_node_class
self.dht_node = None
self.base_payment_rate_manager = BasePaymentRateManager(blob_data_payment_rate)
self.payment_rate_manager = None
self.payment_rate_manager_class = (payment_rate_manager_class or NegotiatedPaymentRateManager)
self.is_generous = is_generous | def __init__(self, blob_data_payment_rate, db_dir=None, lbryid=None, peer_manager=None, dht_node_port=None, known_dht_nodes=None, peer_finder=None, hash_announcer=None, blob_dir=None, blob_manager=None, peer_port=None, use_upnp=True, rate_limiter=None, wallet=None, dht_node_class=node.Node, blob_tracker_class=None, payment_rate_manager_class=None, is_generous=True):
"@param blob_data_payment_rate: The default payment rate for blob data\n\n @param db_dir: The directory in which levelDB files should be stored\n\n @param lbryid: The unique ID of this node\n\n @param peer_manager: An object which keeps track of all known\n peers. If None, a PeerManager will be created\n\n @param dht_node_port: The port on which the dht node should\n listen for incoming connections\n\n @param known_dht_nodes: A list of nodes which the dht node\n should use to bootstrap into the dht\n\n @param peer_finder: An object which is used to look up peers\n that are associated with some hash. If None, a\n DHTPeerFinder will be used, which looks for peers in the\n distributed hash table.\n\n @param hash_announcer: An object which announces to other\n peers that this peer is associated with some hash. If\n None, and peer_port is not None, a DHTHashAnnouncer will\n be used. If None and peer_port is None, a\n DummyHashAnnouncer will be used, which will not actually\n announce anything.\n\n @param blob_dir: The directory in which blobs will be\n stored. If None and blob_manager is None, blobs will be\n stored in memory only.\n\n @param blob_manager: An object which keeps track of downloaded\n blobs and provides access to them. If None, and blob_dir\n is not None, a DiskBlobManager will be used, with the\n given blob_dir. If None and blob_dir is None, a\n TempBlobManager will be used, which stores blobs in memory\n only.\n\n @param peer_port: The port on which other peers should connect\n to this peer\n\n @param use_upnp: Whether or not to try to open a hole in the\n firewall so that outside peers can connect to this peer's\n peer_port and dht_node_port\n\n @param rate_limiter: An object which keeps track of the amount\n of data transferred to and from this peer, and can limit\n that rate if desired\n\n @param wallet: An object which will be used to keep track of\n expected payments and which will pay peers. If None, a\n wallet which uses the Point Trader system will be used,\n which is meant for testing only\n\n "
self.db_dir = db_dir
self.lbryid = lbryid
self.peer_manager = peer_manager
self.dht_node_port = dht_node_port
self.known_dht_nodes = known_dht_nodes
if (self.known_dht_nodes is None):
self.known_dht_nodes = []
self.peer_finder = peer_finder
self.hash_announcer = hash_announcer
self.blob_dir = blob_dir
self.blob_manager = blob_manager
self.blob_tracker = None
self.blob_tracker_class = (blob_tracker_class or BlobAvailabilityTracker)
self.peer_port = peer_port
self.use_upnp = use_upnp
self.rate_limiter = rate_limiter
self.external_ip = '127.0.0.1'
self.upnp_redirects = []
self.wallet = wallet
self.dht_node_class = dht_node_class
self.dht_node = None
self.base_payment_rate_manager = BasePaymentRateManager(blob_data_payment_rate)
self.payment_rate_manager = None
self.payment_rate_manager_class = (payment_rate_manager_class or NegotiatedPaymentRateManager)
self.is_generous = is_generous<|docstring|>@param blob_data_payment_rate: The default payment rate for blob data
@param db_dir: The directory in which levelDB files should be stored
@param lbryid: The unique ID of this node
@param peer_manager: An object which keeps track of all known
peers. If None, a PeerManager will be created
@param dht_node_port: The port on which the dht node should
listen for incoming connections
@param known_dht_nodes: A list of nodes which the dht node
should use to bootstrap into the dht
@param peer_finder: An object which is used to look up peers
that are associated with some hash. If None, a
DHTPeerFinder will be used, which looks for peers in the
distributed hash table.
@param hash_announcer: An object which announces to other
peers that this peer is associated with some hash. If
None, and peer_port is not None, a DHTHashAnnouncer will
be used. If None and peer_port is None, a
DummyHashAnnouncer will be used, which will not actually
announce anything.
@param blob_dir: The directory in which blobs will be
stored. If None and blob_manager is None, blobs will be
stored in memory only.
@param blob_manager: An object which keeps track of downloaded
blobs and provides access to them. If None, and blob_dir
is not None, a DiskBlobManager will be used, with the
given blob_dir. If None and blob_dir is None, a
TempBlobManager will be used, which stores blobs in memory
only.
@param peer_port: The port on which other peers should connect
to this peer
@param use_upnp: Whether or not to try to open a hole in the
firewall so that outside peers can connect to this peer's
peer_port and dht_node_port
@param rate_limiter: An object which keeps track of the amount
of data transferred to and from this peer, and can limit
that rate if desired
@param wallet: An object which will be used to keep track of
expected payments and which will pay peers. If None, a
wallet which uses the Point Trader system will be used,
which is meant for testing only<|endoftext|> |
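A construction sketch under the defaults described above; the payment rate and directories are illustrative, and it assumes lbrynet and its Twisted stack are importable.

from lbrynet.core.Session import Session   # module path taken from this record

session = Session(
    blob_data_payment_rate=0.005,   # illustrative default rate for blob data
    db_dir='/tmp/lbry_db',          # hypothetical directories
    blob_dir='/tmp/lbry_blobs',
    peer_port=3333,
    dht_node_port=4444,
    use_upnp=False)                 # skip the firewall hole-punching step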
80b0394c7811fadb7687b7e72477b90d990788434e70f924b8f7daad48ae2d68 | def setup(self):
'Create the blob directory and database if necessary, start all desired services'
log.debug('Setting up the lbry session')
if (self.lbryid is None):
self.lbryid = generate_id()
if (self.wallet is None):
from lbrynet.core.PTCWallet import PTCWallet
self.wallet = PTCWallet(self.db_dir)
if (self.peer_manager is None):
self.peer_manager = PeerManager()
if (self.use_upnp is True):
d = self._try_upnp()
else:
d = defer.succeed(True)
if (self.peer_finder is None):
d.addCallback((lambda _: self._setup_dht()))
elif ((self.hash_announcer is None) and (self.peer_port is not None)):
log.warning('The server has no way to advertise its available blobs.')
self.hash_announcer = DummyHashAnnouncer()
d.addCallback((lambda _: self._setup_other_components()))
return d | Create the blob directory and database if necessary, start all desired services | lbrynet/core/Session.py | setup | shyba/lbry | 1 | python | def setup(self):
log.debug('Setting up the lbry session')
if (self.lbryid is None):
self.lbryid = generate_id()
if (self.wallet is None):
from lbrynet.core.PTCWallet import PTCWallet
self.wallet = PTCWallet(self.db_dir)
if (self.peer_manager is None):
self.peer_manager = PeerManager()
if (self.use_upnp is True):
d = self._try_upnp()
else:
d = defer.succeed(True)
if (self.peer_finder is None):
d.addCallback((lambda _: self._setup_dht()))
elif ((self.hash_announcer is None) and (self.peer_port is not None)):
log.warning('The server has no way to advertise its available blobs.')
self.hash_announcer = DummyHashAnnouncer()
d.addCallback((lambda _: self._setup_other_components()))
return d | def setup(self):
log.debug('Setting up the lbry session')
if (self.lbryid is None):
self.lbryid = generate_id()
if (self.wallet is None):
from lbrynet.core.PTCWallet import PTCWallet
self.wallet = PTCWallet(self.db_dir)
if (self.peer_manager is None):
self.peer_manager = PeerManager()
if (self.use_upnp is True):
d = self._try_upnp()
else:
d = defer.succeed(True)
if (self.peer_finder is None):
d.addCallback((lambda _: self._setup_dht()))
elif ((self.hash_announcer is None) and (self.peer_port is not None)):
log.warning('The server has no way to advertise its available blobs.')
self.hash_announcer = DummyHashAnnouncer()
d.addCallback((lambda _: self._setup_other_components()))
return d<|docstring|>Create the blob directory and database if necessary, start all desired services<|endoftext|> |
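setup() returns a Twisted Deferred, so callers chain callbacks rather than block; a sketch, reusing the hypothetical session constructed above.

from twisted.internet import reactor

def on_ready(_):
    print('session services are up')
    reactor.stop()

d = session.setup()    # creates dirs/DB, starts DHT, wallet, blob manager
d.addCallback(on_ready)
reactor.run()          # drive the Twisted event loop until stopped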
00475c5ff1721a839e12bcbcfc98f1f9103e52b8a40453959a2eea94403a021c | def shut_down(self):
'Stop all services'
log.info('Shutting down %s', self)
ds = []
if (self.blob_tracker is not None):
ds.append(defer.maybeDeferred(self.blob_tracker.stop))
if (self.dht_node is not None):
ds.append(defer.maybeDeferred(self.dht_node.stop))
if (self.rate_limiter is not None):
ds.append(defer.maybeDeferred(self.rate_limiter.stop))
if (self.peer_finder is not None):
ds.append(defer.maybeDeferred(self.peer_finder.stop))
if (self.hash_announcer is not None):
ds.append(defer.maybeDeferred(self.hash_announcer.stop))
if (self.wallet is not None):
ds.append(defer.maybeDeferred(self.wallet.stop))
if (self.blob_manager is not None):
ds.append(defer.maybeDeferred(self.blob_manager.stop))
if (self.use_upnp is True):
ds.append(defer.maybeDeferred(self._unset_upnp))
return defer.DeferredList(ds) | Stop all services | lbrynet/core/Session.py | shut_down | shyba/lbry | 1 | python | def shut_down(self):
log.info('Shutting down %s', self)
ds = []
if (self.blob_tracker is not None):
ds.append(defer.maybeDeferred(self.blob_tracker.stop))
if (self.dht_node is not None):
ds.append(defer.maybeDeferred(self.dht_node.stop))
if (self.rate_limiter is not None):
ds.append(defer.maybeDeferred(self.rate_limiter.stop))
if (self.peer_finder is not None):
ds.append(defer.maybeDeferred(self.peer_finder.stop))
if (self.hash_announcer is not None):
ds.append(defer.maybeDeferred(self.hash_announcer.stop))
if (self.wallet is not None):
ds.append(defer.maybeDeferred(self.wallet.stop))
if (self.blob_manager is not None):
ds.append(defer.maybeDeferred(self.blob_manager.stop))
if (self.use_upnp is True):
ds.append(defer.maybeDeferred(self._unset_upnp))
return defer.DeferredList(ds) | def shut_down(self):
log.info('Shutting down %s', self)
ds = []
if (self.blob_tracker is not None):
ds.append(defer.maybeDeferred(self.blob_tracker.stop))
if (self.dht_node is not None):
ds.append(defer.maybeDeferred(self.dht_node.stop))
if (self.rate_limiter is not None):
ds.append(defer.maybeDeferred(self.rate_limiter.stop))
if (self.peer_finder is not None):
ds.append(defer.maybeDeferred(self.peer_finder.stop))
if (self.hash_announcer is not None):
ds.append(defer.maybeDeferred(self.hash_announcer.stop))
if (self.wallet is not None):
ds.append(defer.maybeDeferred(self.wallet.stop))
if (self.blob_manager is not None):
ds.append(defer.maybeDeferred(self.blob_manager.stop))
if (self.use_upnp is True):
ds.append(defer.maybeDeferred(self._unset_upnp))
return defer.DeferredList(ds)<|docstring|>Stop all services<|endoftext|> |
3821980932ffbd3d0d37b8590daf69f8c6c9b0ea5424683ff2e10d6ce3561c42 | def markdown(text, **kwargs):
'Convert a markdown string to HTML and return HTML as a unicode string.\n\n This is a shortcut function for `Markdown` class to cover the most\n basic use case. It initializes an instance of Markdown, loads the\n necessary extensions and runs the parser on the given text.\n\n Keyword arguments:\n\n * text: Markdown formatted text as Unicode or ASCII string.\n * Any arguments accepted by the Markdown class.\n\n Returns: An HTML document as a string.\n\n '
md = Markdown(**kwargs)
return md.convert(text) | Convert a markdown string to HTML and return HTML as a unicode string.
This is a shortcut function for `Markdown` class to cover the most
basic use case. It initializes an instance of Markdown, loads the
necessary extensions and runs the parser on the given text.
Keyword arguments:
* text: Markdown formatted text as Unicode or ASCII string.
* Any arguments accepted by the Markdown class.
Returns: An HTML document as a string. | pasta-django/venv/lib/python3.8/site-packages/markdown/core.py | markdown | rabeloalcantaraigor/Curso-API-DRF | 14,668 | python | def markdown(text, **kwargs):
'Convert a markdown string to HTML and return HTML as a unicode string.\n\n This is a shortcut function for `Markdown` class to cover the most\n basic use case. It initializes an instance of Markdown, loads the\n necessary extensions and runs the parser on the given text.\n\n Keyword arguments:\n\n * text: Markdown formatted text as Unicode or ASCII string.\n * Any arguments accepted by the Markdown class.\n\n Returns: An HTML document as a string.\n\n '
md = Markdown(**kwargs)
return md.convert(text) | def markdown(text, **kwargs):
'Convert a markdown string to HTML and return HTML as a unicode string.\n\n This is a shortcut function for `Markdown` class to cover the most\n basic use case. It initializes an instance of Markdown, loads the\n necessary extensions and runs the parser on the given text.\n\n Keyword arguments:\n\n * text: Markdown formatted text as Unicode or ASCII string.\n * Any arguments accepted by the Markdown class.\n\n Returns: An HTML document as a string.\n\n '
md = Markdown(**kwargs)
return md.convert(text)<|docstring|>Convert a markdown string to HTML and return HTML as a unicode string.
This is a shortcut function for `Markdown` class to cover the most
basic use case. It initializes an instance of Markdown, loads the
necessary extensions and runs the parser on the given text.
Keyword arguments:
* text: Markdown formatted text as Unicode or ASCII string.
* Any arguments accepted by the Markdown class.
Returns: An HTML document as a string.<|endoftext|> |
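The documented shortcut in action; a minimal example of the standard Python-Markdown call.

import markdown

html = markdown.markdown('# Title\n\nSome *emphasis*.')
print(html)   # '<h1>Title</h1>\n<p>Some <em>emphasis</em>.</p>'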
267d3a987965fe080fbcf6fae30813ad2453685078037c0c0195dbee100f157d | def markdownFromFile(**kwargs):
'Read markdown code from a file and write it to a file or a stream.\n\n This is a shortcut function which initializes an instance of Markdown,\n and calls the convertFile method rather than convert.\n\n Keyword arguments:\n\n * input: a file name or readable object.\n * output: a file name or writable object.\n * encoding: Encoding of input and output.\n * Any arguments accepted by the Markdown class.\n\n '
md = Markdown(**kwargs)
md.convertFile(kwargs.get('input', None), kwargs.get('output', None), kwargs.get('encoding', None)) | Read markdown code from a file and write it to a file or a stream.
This is a shortcut function which initializes an instance of Markdown,
and calls the convertFile method rather than convert.
Keyword arguments:
* input: a file name or readable object.
* output: a file name or writable object.
* encoding: Encoding of input and output.
* Any arguments accepted by the Markdown class. | pasta-django/venv/lib/python3.8/site-packages/markdown/core.py | markdownFromFile | rabeloalcantaraigor/Curso-API-DRF | 14,668 | python | def markdownFromFile(**kwargs):
'Read markdown code from a file and write it to a file or a stream.\n\n This is a shortcut function which initializes an instance of Markdown,\n and calls the convertFile method rather than convert.\n\n Keyword arguments:\n\n * input: a file name or readable object.\n * output: a file name or writable object.\n * encoding: Encoding of input and output.\n * Any arguments accepted by the Markdown class.\n\n '
md = Markdown(**kwargs)
md.convertFile(kwargs.get('input', None), kwargs.get('output', None), kwargs.get('encoding', None)) | def markdownFromFile(**kwargs):
'Read markdown code from a file and write it to a file or a stream.\n\n This is a shortcut function which initializes an instance of Markdown,\n and calls the convertFile method rather than convert.\n\n Keyword arguments:\n\n * input: a file name or readable object.\n * output: a file name or writable object.\n * encoding: Encoding of input and output.\n * Any arguments accepted by the Markdown class.\n\n '
md = Markdown(**kwargs)
md.convertFile(kwargs.get('input', None), kwargs.get('output', None), kwargs.get('encoding', None))<|docstring|>Read markdown code from a file and write it to a file or a stream.
This is a shortcut function which initializes an instance of Markdown,
and calls the convertFile method rather than convert.
Keyword arguments:
* input: a file name or readable object.
* output: a file name or writable object.
* encoding: Encoding of input and output.
* Any arguments accepted by the Markdown class.<|endoftext|> |
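A file-to-file sketch of the shortcut above; the file names are placeholders.

import markdown

# Reads in.md and writes the converted HTML to out.html.
markdown.markdownFromFile(input='in.md', output='out.html', encoding='utf-8')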
db3070d5298143f6f22d0fc6d322878a1b4f4da95b6bcdf270534fab3f64c16e | def __init__(self, **kwargs):
'\n Creates a new Markdown instance.\n\n Keyword arguments:\n\n * extensions: A list of extensions.\n If an item is an instance of a subclass of `markdown.extension.Extension`, the instance will be used\n as-is. If an item is of type string, first an entry point will be loaded. If that fails, the string is\n assumed to use Python dot notation (`path.to.module:ClassName`) to load a markdown.Extension subclass. If\n no class is specified, then a `makeExtension` function is called within the specified module.\n * extension_configs: Configuration settings for extensions.\n * output_format: Format of output. Supported formats are:\n * "xhtml": Outputs XHTML style tags. Default.\n * "html": Outputs HTML style tags.\n * tab_length: Length of tabs in the source. Default: 4\n\n '
self.tab_length = kwargs.get('tab_length', 4)
self.ESCAPED_CHARS = ['\\', '`', '*', '_', '{', '}', '[', ']', '(', ')', '>', '#', '+', '-', '.', '!']
self.block_level_elements = ['address', 'article', 'aside', 'blockquote', 'details', 'div', 'dl', 'fieldset', 'figcaption', 'figure', 'footer', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'header', 'hgroup', 'hr', 'main', 'menu', 'nav', 'ol', 'p', 'pre', 'section', 'table', 'ul', 'canvas', 'colgroup', 'dd', 'body', 'dt', 'group', 'iframe', 'li', 'legend', 'math', 'map', 'noscript', 'output', 'object', 'option', 'progress', 'script', 'style', 'tbody', 'td', 'textarea', 'tfoot', 'th', 'thead', 'tr', 'video']
self.registeredExtensions = []
self.docType = ''
self.stripTopLevelTags = True
self.build_parser()
self.references = {}
self.htmlStash = util.HtmlStash()
self.registerExtensions(extensions=kwargs.get('extensions', []), configs=kwargs.get('extension_configs', {}))
self.set_output_format(kwargs.get('output_format', 'xhtml'))
self.reset() | Creates a new Markdown instance.
Keyword arguments:
* extensions: A list of extensions.
If an item is an instance of a subclass of `markdown.extension.Extension`, the instance will be used
as-is. If an item is of type string, first an entry point will be loaded. If that fails, the string is
assumed to use Python dot notation (`path.to.module:ClassName`) to load a markdown.Extension subclass. If
no class is specified, then a `makeExtension` function is called within the specified module.
* extension_configs: Configuration settings for extensions.
* output_format: Format of output. Supported formats are:
* "xhtml": Outputs XHTML style tags. Default.
* "html": Outputs HTML style tags.
* tab_length: Length of tabs in the source. Default: 4 | pasta-django/venv/lib/python3.8/site-packages/markdown/core.py | __init__ | rabeloalcantaraigor/Curso-API-DRF | 14,668 | python | def __init__(self, **kwargs):
'\n Creates a new Markdown instance.\n\n Keyword arguments:\n\n * extensions: A list of extensions.\n If an item is an instance of a subclass of `markdown.extension.Extension`, the instance will be used\n as-is. If an item is of type string, first an entry point will be loaded. If that fails, the string is\n assumed to use Python dot notation (`path.to.module:ClassName`) to load a markdown.Extension subclass. If\n no class is specified, then a `makeExtension` function is called within the specified module.\n * extension_configs: Configuration settings for extensions.\n * output_format: Format of output. Supported formats are:\n * "xhtml": Outputs XHTML style tags. Default.\n * "html": Outputs HTML style tags.\n * tab_length: Length of tabs in the source. Default: 4\n\n '
self.tab_length = kwargs.get('tab_length', 4)
self.ESCAPED_CHARS = ['\\', '`', '*', '_', '{', '}', '[', ']', '(', ')', '>', '#', '+', '-', '.', '!']
self.block_level_elements = ['address', 'article', 'aside', 'blockquote', 'details', 'div', 'dl', 'fieldset', 'figcaption', 'figure', 'footer', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'header', 'hgroup', 'hr', 'main', 'menu', 'nav', 'ol', 'p', 'pre', 'section', 'table', 'ul', 'canvas', 'colgroup', 'dd', 'body', 'dt', 'group', 'iframe', 'li', 'legend', 'math', 'map', 'noscript', 'output', 'object', 'option', 'progress', 'script', 'style', 'tbody', 'td', 'textarea', 'tfoot', 'th', 'thead', 'tr', 'video']
self.registeredExtensions = []
self.docType = ''
self.stripTopLevelTags = True
self.build_parser()
self.references = {}
self.htmlStash = util.HtmlStash()
self.registerExtensions(extensions=kwargs.get('extensions', []), configs=kwargs.get('extension_configs', {}))
self.set_output_format(kwargs.get('output_format', 'xhtml'))
self.reset() | def __init__(self, **kwargs):
'\n Creates a new Markdown instance.\n\n Keyword arguments:\n\n * extensions: A list of extensions.\n If an item is an instance of a subclass of `markdown.extension.Extension`, the instance will be used\n as-is. If an item is of type string, first an entry point will be loaded. If that fails, the string is\n assumed to use Python dot notation (`path.to.module:ClassName`) to load a markdown.Extension subclass. If\n no class is specified, then a `makeExtension` function is called within the specified module.\n * extension_configs: Configuration settings for extensions.\n * output_format: Format of output. Supported formats are:\n * "xhtml": Outputs XHTML style tags. Default.\n * "html": Outputs HTML style tags.\n * tab_length: Length of tabs in the source. Default: 4\n\n '
self.tab_length = kwargs.get('tab_length', 4)
self.ESCAPED_CHARS = ['\\', '`', '*', '_', '{', '}', '[', ']', '(', ')', '>', '#', '+', '-', '.', '!']
self.block_level_elements = ['address', 'article', 'aside', 'blockquote', 'details', 'div', 'dl', 'fieldset', 'figcaption', 'figure', 'footer', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'header', 'hgroup', 'hr', 'main', 'menu', 'nav', 'ol', 'p', 'pre', 'section', 'table', 'ul', 'canvas', 'colgroup', 'dd', 'body', 'dt', 'group', 'iframe', 'li', 'legend', 'math', 'map', 'noscript', 'output', 'object', 'option', 'progress', 'script', 'style', 'tbody', 'td', 'textarea', 'tfoot', 'th', 'thead', 'tr', 'video']
self.registeredExtensions = []
self.docType = ''
self.stripTopLevelTags = True
self.build_parser()
self.references = {}
self.htmlStash = util.HtmlStash()
self.registerExtensions(extensions=kwargs.get('extensions', []), configs=kwargs.get('extension_configs', {}))
self.set_output_format(kwargs.get('output_format', 'xhtml'))
self.reset()<|docstring|>Creates a new Markdown instance.
Keyword arguments:
* extensions: A list of extensions.
If an item is an instance of a subclass of `markdown.extension.Extension`, the instance will be used
as-is. If an item is of type string, first an entry point will be loaded. If that fails, the string is
assumed to use Python dot notation (`path.to.module:ClassName`) to load a markdown.Extension subclass. If
no class is specified, then a `makeExtension` function is called within the specified module.
* extension_configs: Configuration settings for extensions.
* output_format: Format of output. Supported formats are:
* "xhtml": Outputs XHTML style tags. Default.
* "html": Outputs HTML style tags.
* tab_length: Length of tabs in the source. Default: 4<|endoftext|> |
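A sketch showing instance reuse with a bundled extension; 'tables' ships with Python-Markdown and is resolved through the entry-point lookup described further below.

import markdown

md = markdown.Markdown(output_format='html', extensions=['tables'])
print(md.convert('A | B\n--- | ---\n1 | 2'))
md.reset()   # clear per-document state (references, HTML stash) before the next convert()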
2e31637ebb02999913e80f48a727fc926d0a603e39ea8ad932de81ad4f89d66f | def build_parser(self):
' Build the parser from the various parts. '
self.preprocessors = build_preprocessors(self)
self.parser = build_block_parser(self)
self.inlinePatterns = build_inlinepatterns(self)
self.treeprocessors = build_treeprocessors(self)
self.postprocessors = build_postprocessors(self)
return self | Build the parser from the various parts. | pasta-django/venv/lib/python3.8/site-packages/markdown/core.py | build_parser | rabeloalcantaraigor/Curso-API-DRF | 14,668 | python | def build_parser(self):
' '
self.preprocessors = build_preprocessors(self)
self.parser = build_block_parser(self)
self.inlinePatterns = build_inlinepatterns(self)
self.treeprocessors = build_treeprocessors(self)
self.postprocessors = build_postprocessors(self)
return self | def build_parser(self):
' '
self.preprocessors = build_preprocessors(self)
self.parser = build_block_parser(self)
self.inlinePatterns = build_inlinepatterns(self)
self.treeprocessors = build_treeprocessors(self)
self.postprocessors = build_postprocessors(self)
return self<|docstring|>Build the parser from the various parts.<|endoftext|> |
a040e840e8dadfff417f5029d104f215856d70bff712a3456d9ef832f1215e8f | def registerExtensions(self, extensions, configs):
'\n Register extensions with this instance of Markdown.\n\n Keyword arguments:\n\n * extensions: A list of extensions, which can either\n be strings or objects.\n * configs: A dictionary mapping extension names to config options.\n\n '
for ext in extensions:
if isinstance(ext, str):
ext = self.build_extension(ext, configs.get(ext, {}))
if isinstance(ext, Extension):
ext._extendMarkdown(self)
logger.debug(('Successfully loaded extension "%s.%s".' % (ext.__class__.__module__, ext.__class__.__name__)))
elif (ext is not None):
raise TypeError('Extension "{}.{}" must be of type: "{}.{}"'.format(ext.__class__.__module__, ext.__class__.__name__, Extension.__module__, Extension.__name__))
return self | Register extensions with this instance of Markdown.
Keyword arguments:
* extensions: A list of extensions, which can either
be strings or objects.
* configs: A dictionary mapping extension names to config options. | pasta-django/venv/lib/python3.8/site-packages/markdown/core.py | registerExtensions | rabeloalcantaraigor/Curso-API-DRF | 14,668 | python | def registerExtensions(self, extensions, configs):
'\n Register extensions with this instance of Markdown.\n\n Keyword arguments:\n\n * extensions: A list of extensions, which can either\n be strings or objects.\n * configs: A dictionary mapping extension names to config options.\n\n '
for ext in extensions:
if isinstance(ext, str):
ext = self.build_extension(ext, configs.get(ext, {}))
if isinstance(ext, Extension):
ext._extendMarkdown(self)
logger.debug(('Successfully loaded extension "%s.%s".' % (ext.__class__.__module__, ext.__class__.__name__)))
elif (ext is not None):
raise TypeError('Extension "{}.{}" must be of type: "{}.{}"'.format(ext.__class__.__module__, ext.__class__.__name__, Extension.__module__, Extension.__name__))
return self | def registerExtensions(self, extensions, configs):
'\n Register extensions with this instance of Markdown.\n\n Keyword arguments:\n\n * extensions: A list of extensions, which can either\n be strings or objects.\n * configs: A dictionary mapping extension names to config options.\n\n '
for ext in extensions:
if isinstance(ext, str):
ext = self.build_extension(ext, configs.get(ext, {}))
if isinstance(ext, Extension):
ext._extendMarkdown(self)
logger.debug(('Successfully loaded extension "%s.%s".' % (ext.__class__.__module__, ext.__class__.__name__)))
elif (ext is not None):
raise TypeError('Extension "{}.{}" must be of type: "{}.{}"'.format(ext.__class__.__module__, ext.__class__.__name__, Extension.__module__, Extension.__name__))
return self<|docstring|>Register extensions with this instance of Markdown.
Keyword arguments:
* extensions: A list of extensions, which can either
be strings or objects.
* configs: A dictionary mapping extension names to config options.<|endoftext|> |
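A sketch of the two accepted forms: an Extension instance is used as-is, while a string goes through build_extension (next record); the no-op subclass is purely illustrative.

import markdown
from markdown.extensions import Extension

class NoopExtension(Extension):
    def extendMarkdown(self, md):
        pass   # a real extension would register processors on md here

md = markdown.Markdown()
md.registerExtensions([NoopExtension()], configs={})   # instance, used as-is
md.registerExtensions(['toc'], configs={})             # name, resolved via build_extension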
01f8f9667625dfdfafcc8188d599e1c51a3bc762b4abd9aae523473bdc24a2bd | def build_extension(self, ext_name, configs):
'\n Build extension from a string name, then return an instance.\n\n First attempt to load an entry point. The string name must be registered as an entry point in the\n `markdown.extensions` group which points to a subclass of the `markdown.extensions.Extension` class.\n If multiple distributions have registered the same name, the first one found is returned.\n\n If no entry point is found, assume dot notation (`path.to.module:ClassName`). Load the specified class and\n return an instance. If no class is specified, import the module and call a `makeExtension` function and return\n the Extension instance returned by that function.\n '
configs = dict(configs)
entry_points = [ep for ep in util.INSTALLED_EXTENSIONS if (ep.name == ext_name)]
if entry_points:
ext = entry_points[0].load()
return ext(**configs)
(ext_name, class_name) = (ext_name.split(':', 1) if (':' in ext_name) else (ext_name, ''))
try:
module = importlib.import_module(ext_name)
logger.debug(('Successfully imported extension module "%s".' % ext_name))
except ImportError as e:
message = ('Failed loading extension "%s".' % ext_name)
e.args = ((message,) + e.args[1:])
raise
if class_name:
return getattr(module, class_name)(**configs)
else:
try:
return module.makeExtension(**configs)
except AttributeError as e:
message = e.args[0]
message = ("Failed to initiate extension '%s': %s" % (ext_name, message))
e.args = ((message,) + e.args[1:])
raise | Build extension from a string name, then return an instance.
First attempt to load an entry point. The string name must be registered as an entry point in the
`markdown.extensions` group which points to a subclass of the `markdown.extensions.Extension` class.
If multiple distributions have registered the same name, the first one found is returned.
If no entry point is found, assume dot notation (`path.to.module:ClassName`). Load the specified class and
return an instance. If no class is specified, import the module and call a `makeExtension` function and return
the Extension instance returned by that function. | pasta-django/venv/lib/python3.8/site-packages/markdown/core.py | build_extension | rabeloalcantaraigor/Curso-API-DRF | 14,668 | python | def build_extension(self, ext_name, configs):
'\n Build extension from a string name, then return an instance.\n\n First attempt to load an entry point. The string name must be registered as an entry point in the\n `markdown.extensions` group which points to a subclass of the `markdown.extensions.Extension` class.\n If multiple distributions have registered the same name, the first one found is returned.\n\n If no entry point is found, assume dot notation (`path.to.module:ClassName`). Load the specified class and\n return an instance. If no class is specified, import the module and call a `makeExtension` function and return\n the Extension instance returned by that function.\n '
configs = dict(configs)
entry_points = [ep for ep in util.INSTALLED_EXTENSIONS if (ep.name == ext_name)]
if entry_points:
ext = entry_points[0].load()
return ext(**configs)
    (ext_name, class_name) = (ext_name.split(':', 1) if (':' in ext_name) else (ext_name, ''))
try:
module = importlib.import_module(ext_name)
logger.debug(('Successfully imported extension module "%s".' % ext_name))
except ImportError as e:
message = ('Failed loading extension "%s".' % ext_name)
e.args = ((message,) + e.args[1:])
raise
if class_name:
return getattr(module, class_name)(**configs)
else:
try:
return module.makeExtension(**configs)
except AttributeError as e:
message = e.args[0]
message = ("Failed to initiate extension '%s': %s" % (ext_name, message))
e.args = ((message,) + e.args[1:])
raise | def build_extension(self, ext_name, configs):
'\n Build extension from a string name, then return an instance.\n\n First attempt to load an entry point. The string name must be registered as an entry point in the\n `markdown.extensions` group which points to a subclass of the `markdown.extensions.Extension` class.\n If multiple distributions have registered the same name, the first one found is returned.\n\n If no entry point is found, assume dot notation (`path.to.module:ClassName`). Load the specified class and\n return an instance. If no class is specified, import the module and call a `makeExtension` function and return\n the Extension instance returned by that function.\n '
configs = dict(configs)
entry_points = [ep for ep in util.INSTALLED_EXTENSIONS if (ep.name == ext_name)]
if entry_points:
ext = entry_points[0].load()
return ext(**configs)
    (ext_name, class_name) = (ext_name.split(':', 1) if (':' in ext_name) else (ext_name, ''))
try:
module = importlib.import_module(ext_name)
logger.debug(('Successfully imported extension module "%s".' % ext_name))
except ImportError as e:
message = ('Failed loading extension "%s".' % ext_name)
e.args = ((message,) + e.args[1:])
raise
if class_name:
return getattr(module, class_name)(**configs)
else:
try:
return module.makeExtension(**configs)
except AttributeError as e:
message = e.args[0]
message = ("Failed to initiate extension '%s': %s" % (ext_name, message))
e.args = ((message,) + e.args[1:])
raise<|docstring|>Build extension from a string name, then return an instance.
First attempt to load an entry point. The string name must be registered as an entry point in the
`markdown.extensions` group which points to a subclass of the `markdown.extensions.Extension` class.
If multiple distributions have registered the same name, the first one found is returned.
If no entry point is found, assume dot notation (`path.to.module:ClassName`). Load the specified class and
return an instance. If no class is specified, import the module and call a `makeExtension` function and return
the Extension instance returned by that function.<|endoftext|> |
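
build_extension tries entry points first and falls back to dot notation, as the docstring above describes. A sketch of both spellings, assuming the stock extensions shipped with Python-Markdown:

import markdown

md = markdown.Markdown()
# Module path only: imports the module and calls its makeExtension(**configs).
toc_a = md.build_extension('markdown.extensions.toc', {'permalink': True})
# module:Class notation: instantiates the named Extension subclass directly.
toc_b = md.build_extension('markdown.extensions.toc:TocExtension', {'permalink': True})
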
dbd1934600db4df9416490968535393c37ba4706f2e70c7796fb75ad3dd35144 | def registerExtension(self, extension):
' This gets called by the extension '
self.registeredExtensions.append(extension)
return self | This gets called by the extension | pasta-django/venv/lib/python3.8/site-packages/markdown/core.py | registerExtension | rabeloalcantaraigor/Curso-API-DRF | 14,668 | python | def registerExtension(self, extension):
' '
self.registeredExtensions.append(extension)
return self | def registerExtension(self, extension):
' '
self.registeredExtensions.append(extension)
return self<|docstring|>This gets called by the extension<|endoftext|> |
53e720c5d1965c68f0577ded91e56a98556503005692ef583b9218e57c45f9e4 | def reset(self):
'\n Resets all state variables so that we can start with a new text.\n '
self.htmlStash.reset()
self.references.clear()
for extension in self.registeredExtensions:
if hasattr(extension, 'reset'):
extension.reset()
return self | Resets all state variables so that we can start with a new text. | pasta-django/venv/lib/python3.8/site-packages/markdown/core.py | reset | rabeloalcantaraigor/Curso-API-DRF | 14,668 | python | def reset(self):
'\n \n '
self.htmlStash.reset()
self.references.clear()
for extension in self.registeredExtensions:
if hasattr(extension, 'reset'):
extension.reset()
return self | def reset(self):
'\n \n '
self.htmlStash.reset()
self.references.clear()
for extension in self.registeredExtensions:
if hasattr(extension, 'reset'):
extension.reset()
return self<|docstring|>Resets all state variables so that we can start with a new text.<|endoftext|> |
feda5539b78e20517a104f3dd58d24b47635f6db0f208b427d0d9d6986329448 | def set_output_format(self, format):
' Set the output format for the class instance. '
self.output_format = format.lower().rstrip('145')
try:
self.serializer = self.output_formats[self.output_format]
except KeyError as e:
valid_formats = list(self.output_formats.keys())
valid_formats.sort()
message = ('Invalid Output Format: "%s". Use one of %s.' % (self.output_format, (('"' + '", "'.join(valid_formats)) + '"')))
e.args = ((message,) + e.args[1:])
raise
return self | Set the output format for the class instance. | pasta-django/venv/lib/python3.8/site-packages/markdown/core.py | set_output_format | rabeloalcantaraigor/Curso-API-DRF | 14,668 | python | def set_output_format(self, format):
' '
self.output_format = format.lower().rstrip('145')
try:
self.serializer = self.output_formats[self.output_format]
except KeyError as e:
valid_formats = list(self.output_formats.keys())
valid_formats.sort()
message = ('Invalid Output Format: "%s". Use one of %s.' % (self.output_format, (('"' + '", "'.join(valid_formats)) + '"')))
e.args = ((message,) + e.args[1:])
raise
return self | def set_output_format(self, format):
' '
self.output_format = format.lower().rstrip('145')
try:
self.serializer = self.output_formats[self.output_format]
except KeyError as e:
valid_formats = list(self.output_formats.keys())
valid_formats.sort()
message = ('Invalid Output Format: "%s". Use one of %s.' % (self.output_format, (('"' + '", "'.join(valid_formats)) + '"')))
e.args = ((message,) + e.args[1:])
raise
return self<|docstring|>Set the output format for the class instance.<|endoftext|> |
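
Because set_output_format strips any trailing '1', '4' or '5', legacy names such as 'xhtml1' or 'html5' resolve to the registered serializers. A sketch:

import markdown

md = markdown.Markdown()
md.set_output_format('xhtml1')   # trailing digit stripped, stored as 'xhtml'
print(md.output_format)          # -> 'xhtml'
try:
    md.set_output_format('pdf')  # not a registered serializer
except KeyError as err:
    print(err)                   # the message lists the valid formats
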
9a6f59a369bda7ff2986389b067be3847b044868076ef4c47d1a46aa4bcae801 | def is_block_level(self, tag):
'Check if the tag is a block level HTML tag.'
if isinstance(tag, str):
return (tag.lower().rstrip('/') in self.block_level_elements)
return False | Check if the tag is a block level HTML tag. | pasta-django/venv/lib/python3.8/site-packages/markdown/core.py | is_block_level | rabeloalcantaraigor/Curso-API-DRF | 14,668 | python | def is_block_level(self, tag):
if isinstance(tag, str):
return (tag.lower().rstrip('/') in self.block_level_elements)
return False | def is_block_level(self, tag):
if isinstance(tag, str):
return (tag.lower().rstrip('/') in self.block_level_elements)
return False<|docstring|>Check if the tag is a block level HTML tag.<|endoftext|> |
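
A quick check of is_block_level, which consults the instance's block_level_elements set and rejects non-strings:

import markdown

md = markdown.Markdown()
print(md.is_block_level('div'))    # True -- block-level element
print(md.is_block_level('span'))   # False -- inline element
print(md.is_block_level(None))     # False -- only strings are considered
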
ad829f479243c7a863f28809720344ca2c66188f15f8b0268bdb4ca519c21772 | def convert(self, source):
'\n Convert markdown to serialized XHTML or HTML.\n\n Keyword arguments:\n\n * source: Source text as a Unicode string.\n\n Markdown processing takes place in five steps:\n\n 1. A bunch of "preprocessors" munge the input text.\n 2. BlockParser() parses the high-level structural elements of the\n pre-processed text into an ElementTree.\n 3. A bunch of "treeprocessors" are run against the ElementTree. One\n such treeprocessor runs InlinePatterns against the ElementTree,\n detecting inline markup.\n 4. Some post-processors are run against the text after the ElementTree\n has been serialized into text.\n 5. The output is written to a string.\n\n '
if (not source.strip()):
return ''
try:
source = str(source)
except UnicodeDecodeError as e:
e.reason += '. -- Note: Markdown only accepts unicode input!'
raise
self.lines = source.split('\n')
for prep in self.preprocessors:
self.lines = prep.run(self.lines)
root = self.parser.parseDocument(self.lines).getroot()
for treeprocessor in self.treeprocessors:
newRoot = treeprocessor.run(root)
if (newRoot is not None):
root = newRoot
output = self.serializer(root)
if self.stripTopLevelTags:
try:
start = ((output.index(('<%s>' % self.doc_tag)) + len(self.doc_tag)) + 2)
end = output.rindex(('</%s>' % self.doc_tag))
output = output[start:end].strip()
except ValueError as e:
if output.strip().endswith(('<%s />' % self.doc_tag)):
output = ''
else:
raise ValueError(('Markdown failed to strip top-level tags. Document=%r' % output.strip())) from e
for pp in self.postprocessors:
output = pp.run(output)
return output.strip() | Convert markdown to serialized XHTML or HTML.
Keyword arguments:
* source: Source text as a Unicode string.
Markdown processing takes place in five steps:
1. A bunch of "preprocessors" munge the input text.
2. BlockParser() parses the high-level structural elements of the
pre-processed text into an ElementTree.
3. A bunch of "treeprocessors" are run against the ElementTree. One
such treeprocessor runs InlinePatterns against the ElementTree,
detecting inline markup.
4. Some post-processors are run against the text after the ElementTree
has been serialized into text.
5. The output is written to a string. | pasta-django/venv/lib/python3.8/site-packages/markdown/core.py | convert | rabeloalcantaraigor/Curso-API-DRF | 14,668 | python | def convert(self, source):
'\n Convert markdown to serialized XHTML or HTML.\n\n Keyword arguments:\n\n * source: Source text as a Unicode string.\n\n Markdown processing takes place in five steps:\n\n 1. A bunch of "preprocessors" munge the input text.\n 2. BlockParser() parses the high-level structural elements of the\n pre-processed text into an ElementTree.\n 3. A bunch of "treeprocessors" are run against the ElementTree. One\n such treeprocessor runs InlinePatterns against the ElementTree,\n detecting inline markup.\n 4. Some post-processors are run against the text after the ElementTree\n has been serialized into text.\n 5. The output is written to a string.\n\n '
if (not source.strip()):
        return ''
try:
source = str(source)
except UnicodeDecodeError as e:
e.reason += '. -- Note: Markdown only accepts unicode input!'
raise
self.lines = source.split('\n')
for prep in self.preprocessors:
self.lines = prep.run(self.lines)
root = self.parser.parseDocument(self.lines).getroot()
for treeprocessor in self.treeprocessors:
newRoot = treeprocessor.run(root)
if (newRoot is not None):
root = newRoot
output = self.serializer(root)
if self.stripTopLevelTags:
try:
start = ((output.index(('<%s>' % self.doc_tag)) + len(self.doc_tag)) + 2)
end = output.rindex(('</%s>' % self.doc_tag))
output = output[start:end].strip()
except ValueError as e:
if output.strip().endswith(('<%s />' % self.doc_tag)):
                output = ''
else:
raise ValueError(('Markdown failed to strip top-level tags. Document=%r' % output.strip())) from e
for pp in self.postprocessors:
output = pp.run(output)
return output.strip() | def convert(self, source):
'\n Convert markdown to serialized XHTML or HTML.\n\n Keyword arguments:\n\n * source: Source text as a Unicode string.\n\n Markdown processing takes place in five steps:\n\n 1. A bunch of "preprocessors" munge the input text.\n 2. BlockParser() parses the high-level structural elements of the\n pre-processed text into an ElementTree.\n 3. A bunch of "treeprocessors" are run against the ElementTree. One\n such treeprocessor runs InlinePatterns against the ElementTree,\n detecting inline markup.\n 4. Some post-processors are run against the text after the ElementTree\n has been serialized into text.\n 5. The output is written to a string.\n\n '
if (not source.strip()):
        return ''
try:
source = str(source)
except UnicodeDecodeError as e:
e.reason += '. -- Note: Markdown only accepts unicode input!'
raise
self.lines = source.split('\n')
for prep in self.preprocessors:
self.lines = prep.run(self.lines)
root = self.parser.parseDocument(self.lines).getroot()
for treeprocessor in self.treeprocessors:
newRoot = treeprocessor.run(root)
if (newRoot is not None):
root = newRoot
output = self.serializer(root)
if self.stripTopLevelTags:
try:
start = ((output.index(('<%s>' % self.doc_tag)) + len(self.doc_tag)) + 2)
end = output.rindex(('</%s>' % self.doc_tag))
output = output[start:end].strip()
except ValueError as e:
if output.strip().endswith(('<%s />' % self.doc_tag)):
                output = ''
else:
raise ValueError(('Markdown failed to strip top-level tags. Document=%r' % output.strip())) from e
for pp in self.postprocessors:
output = pp.run(output)
return output.strip()<|docstring|>Convert markdown to serialized XHTML or HTML.
Keyword arguments:
* source: Source text as a Unicode string.
Markdown processing takes place in five steps:
1. A bunch of "preprocessors" munge the input text.
2. BlockParser() parses the high-level structural elements of the
pre-processed text into an ElementTree.
3. A bunch of "treeprocessors" are run against the ElementTree. One
such treeprocessor runs InlinePatterns against the ElementTree,
detecting inline markup.
4. Some post-processors are run against the text after the ElementTree
has been serialized into text.
5. The output is written to a string.<|endoftext|> |
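
A Markdown instance keeps state between calls (the HTML stash and the reference map), so reusing one instance across documents normally goes through reset(). A sketch that chains the calls, since reset() returns self:

import markdown

md = markdown.Markdown()
docs = ['First *doc*.', 'Second [doc][1].\n\n[1]: https://example.com']
for source in docs:
    print(md.reset().convert(source))
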
d469e6da67c8e31bfe700cff011140db21cea3f7d96fc8e95ce76a77902ae46f | def convertFile(self, input=None, output=None, encoding=None):
"Converts a markdown file and returns the HTML as a unicode string.\n\n Decodes the file using the provided encoding (defaults to utf-8),\n passes the file content to markdown, and outputs the html to either\n the provided stream or the file with provided name, using the same\n encoding as the source file. The 'xmlcharrefreplace' error handler is\n used when encoding the output.\n\n **Note:** This is the only place that decoding and encoding of unicode\n takes place in Python-Markdown. (All other code is unicode-in /\n unicode-out.)\n\n Keyword arguments:\n\n * input: File object or path. Reads from stdin if `None`.\n * output: File object or path. Writes to stdout if `None`.\n * encoding: Encoding of input and output files. Defaults to utf-8.\n\n "
encoding = (encoding or 'utf-8')
if input:
if isinstance(input, str):
input_file = codecs.open(input, mode='r', encoding=encoding)
else:
input_file = codecs.getreader(encoding)(input)
text = input_file.read()
input_file.close()
else:
text = sys.stdin.read()
if (not isinstance(text, str)):
text = text.decode(encoding)
text = text.lstrip('\ufeff')
html = self.convert(text)
if output:
if isinstance(output, str):
output_file = codecs.open(output, 'w', encoding=encoding, errors='xmlcharrefreplace')
output_file.write(html)
output_file.close()
else:
writer = codecs.getwriter(encoding)
output_file = writer(output, errors='xmlcharrefreplace')
output_file.write(html)
else:
html = html.encode(encoding, 'xmlcharrefreplace')
try:
sys.stdout.buffer.write(html)
except AttributeError:
sys.stdout.write(html)
return self | Converts a markdown file and returns the HTML as a unicode string.
Decodes the file using the provided encoding (defaults to utf-8),
passes the file content to markdown, and outputs the html to either
the provided stream or the file with provided name, using the same
encoding as the source file. The 'xmlcharrefreplace' error handler is
used when encoding the output.
**Note:** This is the only place that decoding and encoding of unicode
takes place in Python-Markdown. (All other code is unicode-in /
unicode-out.)
Keyword arguments:
* input: File object or path. Reads from stdin if `None`.
* output: File object or path. Writes to stdout if `None`.
* encoding: Encoding of input and output files. Defaults to utf-8. | pasta-django/venv/lib/python3.8/site-packages/markdown/core.py | convertFile | rabeloalcantaraigor/Curso-API-DRF | 14,668 | python | def convertFile(self, input=None, output=None, encoding=None):
"Converts a markdown file and returns the HTML as a unicode string.\n\n Decodes the file using the provided encoding (defaults to utf-8),\n passes the file content to markdown, and outputs the html to either\n the provided stream or the file with provided name, using the same\n encoding as the source file. The 'xmlcharrefreplace' error handler is\n used when encoding the output.\n\n **Note:** This is the only place that decoding and encoding of unicode\n takes place in Python-Markdown. (All other code is unicode-in /\n unicode-out.)\n\n Keyword arguments:\n\n * input: File object or path. Reads from stdin if `None`.\n * output: File object or path. Writes to stdout if `None`.\n * encoding: Encoding of input and output files. Defaults to utf-8.\n\n "
encoding = (encoding or 'utf-8')
if input:
if isinstance(input, str):
input_file = codecs.open(input, mode='r', encoding=encoding)
else:
input_file = codecs.getreader(encoding)(input)
text = input_file.read()
input_file.close()
else:
text = sys.stdin.read()
if (not isinstance(text, str)):
text = text.decode(encoding)
text = text.lstrip('\ufeff')
html = self.convert(text)
if output:
if isinstance(output, str):
output_file = codecs.open(output, 'w', encoding=encoding, errors='xmlcharrefreplace')
output_file.write(html)
output_file.close()
else:
writer = codecs.getwriter(encoding)
output_file = writer(output, errors='xmlcharrefreplace')
output_file.write(html)
else:
html = html.encode(encoding, 'xmlcharrefreplace')
try:
sys.stdout.buffer.write(html)
except AttributeError:
sys.stdout.write(html)
return self | def convertFile(self, input=None, output=None, encoding=None):
"Converts a markdown file and returns the HTML as a unicode string.\n\n Decodes the file using the provided encoding (defaults to utf-8),\n passes the file content to markdown, and outputs the html to either\n the provided stream or the file with provided name, using the same\n encoding as the source file. The 'xmlcharrefreplace' error handler is\n used when encoding the output.\n\n **Note:** This is the only place that decoding and encoding of unicode\n takes place in Python-Markdown. (All other code is unicode-in /\n unicode-out.)\n\n Keyword arguments:\n\n * input: File object or path. Reads from stdin if `None`.\n * output: File object or path. Writes to stdout if `None`.\n * encoding: Encoding of input and output files. Defaults to utf-8.\n\n "
encoding = (encoding or 'utf-8')
if input:
if isinstance(input, str):
input_file = codecs.open(input, mode='r', encoding=encoding)
else:
input_file = codecs.getreader(encoding)(input)
text = input_file.read()
input_file.close()
else:
text = sys.stdin.read()
if (not isinstance(text, str)):
text = text.decode(encoding)
text = text.lstrip('\ufeff')
html = self.convert(text)
if output:
if isinstance(output, str):
output_file = codecs.open(output, 'w', encoding=encoding, errors='xmlcharrefreplace')
output_file.write(html)
output_file.close()
else:
writer = codecs.getwriter(encoding)
output_file = writer(output, errors='xmlcharrefreplace')
output_file.write(html)
else:
html = html.encode(encoding, 'xmlcharrefreplace')
try:
sys.stdout.buffer.write(html)
except AttributeError:
sys.stdout.write(html)
return self<|docstring|>Converts a markdown file and returns the HTML as a unicode string.
Decodes the file using the provided encoding (defaults to utf-8),
passes the file content to markdown, and outputs the html to either
the provided stream or the file with provided name, using the same
encoding as the source file. The 'xmlcharrefreplace' error handler is
used when encoding the output.
**Note:** This is the only place that decoding and encoding of unicode
takes place in Python-Markdown. (All other code is unicode-in /
unicode-out.)
Keyword arguments:
* input: File object or path. Reads from stdin if `None`.
* output: File object or path. Writes to stdout if `None`.
* encoding: Encoding of input and output files. Defaults to utf-8.<|endoftext|> |
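
A file-to-file sketch of convertFile; the paths are illustrative:

import markdown

md = markdown.Markdown(output_format='html5')
# Reads README.md as UTF-8 and writes README.html in the same encoding,
# replacing unencodable characters with XML character references.
md.convertFile(input='README.md', output='README.html', encoding='utf-8')
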
c223d00b38f52040c69a15db75d553eb08b79493d25381c09b6690cbb6ef2497 | def sample(self, sample_file):
' Samples to a file. Useful for visualizing the learning process.\n\n Use with:\n\n ffmpeg -i samples/grid-%06d.png -vcodec libx264 -crf 22 -threads 0 grid1-7.mp4\n\n to create a video of the learning process.\n '
sample_list = self.sampler.sample(sample_file, self.args.save_samples)
return sample_list | Samples to a file. Useful for visualizing the learning process.
Use with:
ffmpeg -i samples/grid-%06d.png -vcodec libx264 -crf 22 -threads 0 grid1-7.mp4
to create a video of the learning process. | hypergan/cli.py | sample | SlipknotTN/HyperGAN | 0 | python | def sample(self, sample_file):
' Samples to a file. Useful for visualizing the learning process.\n\n Use with:\n\n ffmpeg -i samples/grid-%06d.png -vcodec libx264 -crf 22 -threads 0 grid1-7.mp4\n\n to create a video of the learning process.\n '
sample_list = self.sampler.sample(sample_file, self.args.save_samples)
return sample_list | def sample(self, sample_file):
' Samples to a file. Useful for visualizing the learning process.\n\n Use with:\n\n ffmpeg -i samples/grid-%06d.png -vcodec libx264 -crf 22 -threads 0 grid1-7.mp4\n\n to create a video of the learning process.\n '
sample_list = self.sampler.sample(sample_file, self.args.save_samples)
return sample_list<|docstring|>Samples to a file. Useful for visualizing the learning process.
Use with:
ffmpeg -i samples/grid-%06d.png -vcodec libx264 -crf 22 -threads 0 grid1-7.mp4
to create a video of the learning process.<|endoftext|> |
7bd4e6fbb29c66ca383c21d1e5fd892a37fc5b5f3e5861fce22fdf8964a9c1df | def setup_platform(hass, config, add_devices, discovery_info=None):
    'Set up the smart mi IR switch platform.'
import miio
host = config.get(CONF_HOST)
name = config.get(CONF_NAME)
token = config.get(CONF_TOKEN)
devices = config.get(CONF_SWITCHES, {})
persistent_notification = loader.get_component('persistent_notification')
@asyncio.coroutine
def _learn_command(call):
ir_remote = miio.device(host, token)
if (not ir_remote):
_LOGGER.error('Failed to connect to device.')
return
ir_remote.send('start_ir_learn', [30])
_LOGGER.info('Press the key you want HASS to learn')
start_time = utcnow()
while ((utcnow() - start_time) < timedelta(seconds=DEFAULT_TIMEOUT)):
code = ir_remote.send('get_ir_learn_result', [])
if (code[0] != '(null)'):
                log_msg = ('Received packet is: %s' % code[0])
_LOGGER.info(log_msg)
persistent_notification.async_create(hass, log_msg, title='Mi_ACpartner switch')
ir_remote.send('end_ir_learn', [30])
return
(yield from asyncio.sleep(1, loop=hass.loop))
        _LOGGER.error('Did not receive any signal.')
        persistent_notification.async_create(hass, 'Did not receive any signal', title='Mi_ACpartner switch')
@asyncio.coroutine
def _send_packet(call):
ir_remote = miio.device(host, token)
if (not ir_remote):
_LOGGER.error('Failed to connect to device.')
return
packets = call.data.get('packet', [])
for packet in packets:
for retry in range(DEFAULT_RETRY):
try:
ir_remote.send('send_ir_code', [str(packet)])
break
except ValueError:
_LOGGER.error('Failed to send packet to device.')
ir_remote = miio.device(host, token)
if (not ir_remote):
_LOGGER.error('Failed to connect to device.')
hass.services.register(DOMAIN, ((SERVICE_LEARN + '_') + host.replace('.', '_')), _learn_command)
hass.services.register(DOMAIN, ((SERVICE_SEND + '_') + host.replace('.', '_')), _send_packet)
    ir_remote = miio.device(host, token)
    switches = []
for (object_id, device_config) in devices.items():
switches.append(ChuangmiIRSwitch(ir_remote, device_config.get(CONF_NAME, object_id), device_config.get(CONF_COMMAND_ON), device_config.get(CONF_COMMAND_OFF), 'mdi:volume-high'))
    add_devices(switches) | Set up the smart mi IR switch platform. | custom_components/switch/mi_acpartner_ir.py | setup_platform | mac-zhou/homeassistant-mi-acpartner | 135 | python | def setup_platform(hass, config, add_devices, discovery_info=None):
import miio
host = config.get(CONF_HOST)
name = config.get(CONF_NAME)
token = config.get(CONF_TOKEN)
devices = config.get(CONF_SWITCHES, {})
persistent_notification = loader.get_component('persistent_notification')
@asyncio.coroutine
def _learn_command(call):
ir_remote = miio.device(host, token)
if (not ir_remote):
_LOGGER.error('Failed to connect to device.')
return
ir_remote.send('start_ir_learn', [30])
_LOGGER.info('Press the key you want HASS to learn')
start_time = utcnow()
while ((utcnow() - start_time) < timedelta(seconds=DEFAULT_TIMEOUT)):
code = ir_remote.send('get_ir_learn_result', [])
if (code[0] != '(null)'):
                log_msg = ('Received packet is: %s' % code[0])
_LOGGER.info(log_msg)
persistent_notification.async_create(hass, log_msg, title='Mi_ACpartner switch')
ir_remote.send('end_ir_learn', [30])
return
(yield from asyncio.sleep(1, loop=hass.loop))
        _LOGGER.error('Did not receive any signal.')
        persistent_notification.async_create(hass, 'Did not receive any signal', title='Mi_ACpartner switch')
@asyncio.coroutine
def _send_packet(call):
ir_remote = miio.device(host, token)
if (not ir_remote):
_LOGGER.error('Failed to connect to device.')
return
packets = call.data.get('packet', [])
for packet in packets:
for retry in range(DEFAULT_RETRY):
try:
ir_remote.send('send_ir_code', [str(packet)])
break
except ValueError:
_LOGGER.error('Failed to send packet to device.')
ir_remote = miio.device(host, token)
if (not ir_remote):
_LOGGER.error('Failed to connect to device.')
hass.services.register(DOMAIN, ((SERVICE_LEARN + '_') + host.replace('.', '_')), _learn_command)
hass.services.register(DOMAIN, ((SERVICE_SEND + '_') + host.replace('.', '_')), _send_packet)
    ir_remote = miio.device(host, token)
    switches = []
for (object_id, device_config) in devices.items():
switches.append(ChuangmiIRSwitch(ir_remote, device_config.get(CONF_NAME, object_id), device_config.get(CONF_COMMAND_ON), device_config.get(CONF_COMMAND_OFF), 'mdi:volume-high'))
add_devices(switches) | def setup_platform(hass, config, add_devices, discovery_info=None):
import miio
host = config.get(CONF_HOST)
name = config.get(CONF_NAME)
token = config.get(CONF_TOKEN)
devices = config.get(CONF_SWITCHES, {})
persistent_notification = loader.get_component('persistent_notification')
@asyncio.coroutine
def _learn_command(call):
ir_remote = miio.device(host, token)
if (not ir_remote):
_LOGGER.error('Failed to connect to device.')
return
ir_remote.send('start_ir_learn', [30])
_LOGGER.info('Press the key you want HASS to learn')
start_time = utcnow()
while ((utcnow() - start_time) < timedelta(seconds=DEFAULT_TIMEOUT)):
code = ir_remote.send('get_ir_learn_result', [])
if (code[0] != '(null)'):
log_msg = ('Recieved packet is: %s' % code[0])
_LOGGER.info(log_msg)
persistent_notification.async_create(hass, log_msg, title='Mi_ACpartner switch')
ir_remote.send('end_ir_learn', [30])
return
(yield from asyncio.sleep(1, loop=hass.loop))
        _LOGGER.error('Did not receive any signal.')
        persistent_notification.async_create(hass, 'Did not receive any signal', title='Mi_ACpartner switch')
@asyncio.coroutine
def _send_packet(call):
ir_remote = miio.device(host, token)
if (not ir_remote):
_LOGGER.error('Failed to connect to device.')
return
packets = call.data.get('packet', [])
for packet in packets:
for retry in range(DEFAULT_RETRY):
try:
ir_remote.send('send_ir_code', [str(packet)])
break
except ValueError:
_LOGGER.error('Failed to send packet to device.')
ir_remote = miio.device(host, token)
if (not ir_remote):
_LOGGER.error('Failed to connect to device.')
hass.services.register(DOMAIN, ((SERVICE_LEARN + '_') + host.replace('.', '_')), _learn_command)
hass.services.register(DOMAIN, ((SERVICE_SEND + '_') + host.replace('.', '_')), _send_packet)
    ir_remote = miio.device(host, token)
    switches = []
for (object_id, device_config) in devices.items():
switches.append(ChuangmiIRSwitch(ir_remote, device_config.get(CONF_NAME, object_id), device_config.get(CONF_COMMAND_ON), device_config.get(CONF_COMMAND_OFF), 'mdi:volume-high'))
    add_devices(switches)<|docstring|>Set up the smart mi IR switch platform.<|endoftext|>
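
The setup above registers the learn and send services under per-host names, so two AC partners on one network get distinct services. A sketch of the derivation; the constant's value is an assumption, since its definition is not part of this record:

SERVICE_LEARN = 'learn_command'   # assumed value; defined elsewhere in the module
host = '192.168.1.10'             # illustrative host from the platform config

# Mirrors the hass.services.register(...) calls in setup_platform.
service_name = SERVICE_LEARN + '_' + host.replace('.', '_')
print(service_name)               # -> 'learn_command_192_168_1_10'
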
a236d6c37eea148c7561c61d1998bb050e8920f7de10f4d1f36fca2422e9af17 | def __init__(self, device, name, command_on, command_off, icon):
'Initialize the switch.'
self._name = name
self._state = False
self._command_on = (command_on or None)
self._command_off = (command_off or None)
self._device = device
self._icon = icon | Initialize the switch. | custom_components/switch/mi_acpartner_ir.py | __init__ | mac-zhou/homeassistant-mi-acpartner | 135 | python | def __init__(self, device, name, command_on, command_off, icon):
self._name = name
self._state = False
self._command_on = (command_on or None)
self._command_off = (command_off or None)
self._device = device
self._icon = icon | def __init__(self, device, name, command_on, command_off, icon):
self._name = name
self._state = False
self._command_on = (command_on or None)
self._command_off = (command_off or None)
self._device = device
self._icon = icon<|docstring|>Initialize the switch.<|endoftext|> |
b266a171d582c1290c045c84c1a435b8e5deb36d2a825bfb56ab6f4c44a7e41a | @property
def icon(self):
'Return the icon to use for device if any.'
return self._icon | Return the icon to use for device if any. | custom_components/switch/mi_acpartner_ir.py | icon | mac-zhou/homeassistant-mi-acpartner | 135 | python | @property
def icon(self):
return self._icon | @property
def icon(self):
return self._icon<|docstring|>Return the icon to use for device if any.<|endoftext|> |
db2111d58663d126541e4a6d9f51b1697d1b084e7340679be31742a3030c3ce1 | @property
def name(self):
'Return the name of the switch.'
return self._name | Return the name of the switch. | custom_components/switch/mi_acpartner_ir.py | name | mac-zhou/homeassistant-mi-acpartner | 135 | python | @property
def name(self):
return self._name | @property
def name(self):
return self._name<|docstring|>Return the name of the switch.<|endoftext|> |
454edd0c9bd544b7c99a905d438af854cb21d01f1f906d621c0bd328f39e3b17 | @property
def assumed_state(self):
'Return true if unable to access real state of entity.'
return True | Return true if unable to access real state of entity. | custom_components/switch/mi_acpartner_ir.py | assumed_state | mac-zhou/homeassistant-mi-acpartner | 135 | python | @property
def assumed_state(self):
return True | @property
def assumed_state(self):
return True<|docstring|>Return true if unable to access real state of entity.<|endoftext|> |
53669033a44cc2b7f0c0eb1c203b1e7a7c81e72e96769d5c38bc62208b72137f | @property
def should_poll(self):
'No polling needed.'
return False | No polling needed. | custom_components/switch/mi_acpartner_ir.py | should_poll | mac-zhou/homeassistant-mi-acpartner | 135 | python | @property
def should_poll(self):
return False | @property
def should_poll(self):
return False<|docstring|>No polling needed.<|endoftext|> |
627e3004244c927368bebd4854458a6563ba3db2cadfff4b4804f631ffe09428 | @property
def is_on(self):
'Return true if device is on.'
return self._state | Return true if device is on. | custom_components/switch/mi_acpartner_ir.py | is_on | mac-zhou/homeassistant-mi-acpartner | 135 | python | @property
def is_on(self):
return self._state | @property
def is_on(self):
return self._state<|docstring|>Return true if device is on.<|endoftext|> |
c93e1f1feb18de07875a1f1efa91525deefa7d92a297b6f9abfd0b698e2583ec | def turn_on(self, **kwargs):
'Turn the device on.'
if self._sendpacket(self._command_on):
self._state = True
self.schedule_update_ha_state() | Turn the device on. | custom_components/switch/mi_acpartner_ir.py | turn_on | mac-zhou/homeassistant-mi-acpartner | 135 | python | def turn_on(self, **kwargs):
if self._sendpacket(self._command_on):
self._state = True
self.schedule_update_ha_state() | def turn_on(self, **kwargs):
if self._sendpacket(self._command_on):
self._state = True
self.schedule_update_ha_state()<|docstring|>Turn the device on.<|endoftext|> |
e0b811e93f3e1eb66f3ea8e8e46400bab787304e47507c2d31f2a020e9eb9a4a | def turn_off(self, **kwargs):
'Turn the device off.'
if self._sendpacket(self._command_off):
self._state = False
self.schedule_update_ha_state() | Turn the device off. | custom_components/switch/mi_acpartner_ir.py | turn_off | mac-zhou/homeassistant-mi-acpartner | 135 | python | def turn_off(self, **kwargs):
if self._sendpacket(self._command_off):
self._state = False
self.schedule_update_ha_state() | def turn_off(self, **kwargs):
if self._sendpacket(self._command_off):
self._state = False
self.schedule_update_ha_state()<|docstring|>Turn the device off.<|endoftext|> |
fefcc9d224f155aefc3614e58e6bf13cc44383b97057d7380e6bd3af6f09143e | def _sendpacket(self, packet):
'Send packet to device.'
if (packet is None):
_LOGGER.debug('Empty packet.')
return True
try:
self._device.send('send_ir_code', [str(packet)])
_LOGGER.info(str(packet))
except ValueError as error:
_LOGGER.error(error)
return False
return True | Send packet to device. | custom_components/switch/mi_acpartner_ir.py | _sendpacket | mac-zhou/homeassistant-mi-acpartner | 135 | python | def _sendpacket(self, packet):
if (packet is None):
_LOGGER.debug('Empty packet.')
return True
try:
self._device.send('send_ir_code', [str(packet)])
_LOGGER.info(str(packet))
except ValueError as error:
_LOGGER.error(error)
return False
return True | def _sendpacket(self, packet):
if (packet is None):
_LOGGER.debug('Empty packet.')
return True
try:
self._device.send('send_ir_code', [str(packet)])
_LOGGER.info(str(packet))
except ValueError as error:
_LOGGER.error(error)
return False
return True<|docstring|>Send packet to device.<|endoftext|> |
b3270baa21b5bef766d37052e1fb9a69849b78fff8fc034f633ccdf1e663e16c | def is_guild_admin(member: discord.Member) -> bool:
    '\n Shorthand for member.guild_permissions.administrator\n :param member: discord.Member to check if admin\n '
return member.guild_permissions.administrator | Shorthand for member.guild_permissions.administrator
    :param member: discord.Member to check if admin | src/permission_management/admin.py | is_guild_admin | kesslermaximilian/JustOneBot | 1 | python | def is_guild_admin(member: discord.Member) -> bool:
    '\n Shorthand for member.guild_permissions.administrator\n :param member: discord.Member to check if admin\n '
return member.guild_permissions.administrator | def is_guild_admin(member: discord.Member) -> bool:
    '\n Shorthand for member.guild_permissions.administrator\n :param member: discord.Member to check if admin\n '
return member.guild_permissions.administrator<|docstring|>Shorthand for member.guild_permissions.administrator
    :param member: discord.Member to check if admin<|endoftext|>
23c26c297f22cfbb3e147c5a4b5d395b9c6acc48425c632dfb46611a722e5269 | def session_spaces(self, kernel_space):
    ' Generates unique _MM_SESSION_SPACE objects\n referenced by active processes. \n \n @param space: a kernel AS for process enumeration\n \n @yields _MM_SESSION_SPACE instantiated from the \n session space native_vm. \n '
seen = []
for proc in tasks.pslist(kernel_space):
if ((proc.SessionId != None) and (proc.SessionId.v() not in seen)):
ps_ad = proc.get_process_address_space()
if (ps_ad != None):
seen.append(proc.SessionId.v())
                (yield obj.Object('_MM_SESSION_SPACE', offset=proc.Session.v(), vm=ps_ad)) | Generates unique _MM_SESSION_SPACE objects
referenced by active processes.
@param space: a kernel AS for process enumeration
@yields _MM_SESSION_SPACE instantiated from the
session space native_vm. | volatility/volatility/plugins/gui/sessions.py | session_spaces | williamclot/MemoryVisualizer | 2 | python | def session_spaces(self, kernel_space):
    ' Generates unique _MM_SESSION_SPACE objects\n referenced by active processes. \n \n @param space: a kernel AS for process enumeration\n \n @yields _MM_SESSION_SPACE instantiated from the \n session space native_vm. \n '
seen = []
for proc in tasks.pslist(kernel_space):
if ((proc.SessionId != None) and (proc.SessionId.v() not in seen)):
ps_ad = proc.get_process_address_space()
if (ps_ad != None):
seen.append(proc.SessionId.v())
(yield obj.Object('_MM_SESSION_SPACE', offset=proc.Session.v(), vm=ps_ad)) | def session_spaces(self, kernel_space):
    ' Generates unique _MM_SESSION_SPACE objects\n referenced by active processes. \n \n @param space: a kernel AS for process enumeration\n \n @yields _MM_SESSION_SPACE instantiated from the \n session space native_vm. \n '
seen = []
for proc in tasks.pslist(kernel_space):
if ((proc.SessionId != None) and (proc.SessionId.v() not in seen)):
ps_ad = proc.get_process_address_space()
if (ps_ad != None):
seen.append(proc.SessionId.v())
                (yield obj.Object('_MM_SESSION_SPACE', offset=proc.Session.v(), vm=ps_ad))<|docstring|>Generates unique _MM_SESSION_SPACE objects
referenced by active processes.
@param space: a kernel AS for process enumeration
@yields _MM_SESSION_SPACE instantiated from the
session space native_vm.<|endoftext|> |
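
In a Volatility 2 plugin, the generator above is typically driven from calculate() with a kernel address space. A hedged sketch; the Sessions base class and the utils.load_as helper are assumed from the surrounding code base:

import volatility.utils as utils
import volatility.plugins.gui.sessions as sessions

class SessionIdList(sessions.Sessions):
    """Toy plugin: yield the session IDs referenced by active processes."""

    def calculate(self):
        kernel_space = utils.load_as(self._config)
        for session in self.session_spaces(kernel_space):
            # Each session is an _MM_SESSION_SPACE read from its own process AS.
            yield session.SessionId
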
6518c3cfd1b825bea7322675bb87745581d8dd0d59e9a24258018cf11c81c228 | def find_session_space(self, kernel_space, session_id):
' Get a session address space by its ID. \n \n @param space: a kernel AS for process enumeration\n @param session_id: the session ID to find.\n \n @returns _MM_SESSION_SPACE instantiated from the \n session space native_vm. \n '
for proc in tasks.pslist(kernel_space):
if (proc.SessionId == session_id):
ps_ad = proc.get_process_address_space()
if (ps_ad != None):
return obj.Object('_MM_SESSION_SPACE', offset=proc.Session.v(), vm=ps_ad)
return obj.NoneObject('Cannot locate a session') | Get a session address space by its ID.
@param space: a kernel AS for process enumeration
@param session_id: the session ID to find.
@returns _MM_SESSION_SPACE instantiated from the
session space native_vm. | volatility/volatility/plugins/gui/sessions.py | find_session_space | williamclot/MemoryVisualizer | 2 | python | def find_session_space(self, kernel_space, session_id):
' Get a session address space by its ID. \n \n @param space: a kernel AS for process enumeration\n @param session_id: the session ID to find.\n \n @returns _MM_SESSION_SPACE instantiated from the \n session space native_vm. \n '
for proc in tasks.pslist(kernel_space):
if (proc.SessionId == session_id):
ps_ad = proc.get_process_address_space()
if (ps_ad != None):
return obj.Object('_MM_SESSION_SPACE', offset=proc.Session.v(), vm=ps_ad)
return obj.NoneObject('Cannot locate a session') | def find_session_space(self, kernel_space, session_id):
' Get a session address space by its ID. \n \n @param space: a kernel AS for process enumeration\n @param session_id: the session ID to find.\n \n @returns _MM_SESSION_SPACE instantiated from the \n session space native_vm. \n '
for proc in tasks.pslist(kernel_space):
if (proc.SessionId == session_id):
ps_ad = proc.get_process_address_space()
if (ps_ad != None):
return obj.Object('_MM_SESSION_SPACE', offset=proc.Session.v(), vm=ps_ad)
return obj.NoneObject('Cannot locate a session')<|docstring|>Get a session address space by its ID.
@param space: a kernel AS for process enumeration
@param session_id: the session ID to find.
@returns _MM_SESSION_SPACE instantiated from the
session space native_vm.<|endoftext|> |
a208e5e3a500767d57cfa6e54affc5cc4620d2e97248a80cb4d625c3cfc2bcd1 | def __init__(self, executor, job_id, qobj, backend_name, job_tags=None, job_name=None):
'Initialize a fake job.'
self._job_id = job_id
self._status = ApiJobStatus.CREATING
self.qobj = qobj
self._future = executor.submit(self._auto_progress)
self._result = None
self._backend_name = backend_name
self._job_tags = job_tags
self._job_name = job_name | Initialize a fake job. | test/fake_account_client.py | __init__ | jwoehr/qiskit-ibmq-provider | 199 | python | def __init__(self, executor, job_id, qobj, backend_name, job_tags=None, job_name=None):
self._job_id = job_id
self._status = ApiJobStatus.CREATING
self.qobj = qobj
self._future = executor.submit(self._auto_progress)
self._result = None
self._backend_name = backend_name
self._job_tags = job_tags
self._job_name = job_name | def __init__(self, executor, job_id, qobj, backend_name, job_tags=None, job_name=None):
self._job_id = job_id
self._status = ApiJobStatus.CREATING
self.qobj = qobj
self._future = executor.submit(self._auto_progress)
self._result = None
self._backend_name = backend_name
self._job_tags = job_tags
self._job_name = job_name<|docstring|>Initialize a fake job.<|endoftext|> |
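
A sketch of driving BaseFakeJob directly. The _job_progress sequence consumed by _auto_progress is defined elsewhere in the test module, so it is supplied on a subclass here; the ApiJobStatus import path is assumed from qiskit-ibmq-provider:

from concurrent.futures import ThreadPoolExecutor
from qiskit.providers.ibmq.apiconstants import ApiJobStatus  # assumed import path

class QuickFakeJob(BaseFakeJob):
    # _auto_progress sleeps briefly between each of these states.
    _job_progress = [ApiJobStatus.VALIDATING, ApiJobStatus.RUNNING, ApiJobStatus.COMPLETED]

executor = ThreadPoolExecutor()
job = QuickFakeJob(executor, 'job-001', {'experiments': [{}]}, 'fake_backend')
print(job.status())   # ApiJobStatus.CREATING until the worker thread advances it
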
f4db3b497a1f73ada98b4eac6e4863904a4ead9df16e7eeb35bb9f0db86098c5 | def _auto_progress(self):
'Automatically update job status.'
for status in self._job_progress:
time.sleep(0.5)
self._status = status
if (self._status == ApiJobStatus.COMPLETED):
new_result = copy.deepcopy(VALID_RESULT_RESPONSE)
for _ in range(len(self.qobj['experiments'])):
valid_result = copy.deepcopy(VALID_RESULT)
counts = randrange(1024)
valid_result['data']['counts'] = {'0x0': counts, '0x3': (1024 - counts)}
new_result['results'].append(valid_result)
new_result['job_id'] = self._job_id
new_result['backend_name'] = self._backend_name
self._result = new_result | Automatically update job status. | test/fake_account_client.py | _auto_progress | jwoehr/qiskit-ibmq-provider | 199 | python | def _auto_progress(self):
for status in self._job_progress:
time.sleep(0.5)
self._status = status
if (self._status == ApiJobStatus.COMPLETED):
new_result = copy.deepcopy(VALID_RESULT_RESPONSE)
for _ in range(len(self.qobj['experiments'])):
valid_result = copy.deepcopy(VALID_RESULT)
counts = randrange(1024)
valid_result['data']['counts'] = {'0x0': counts, '0x3': (1024 - counts)}
new_result['results'].append(valid_result)
new_result['job_id'] = self._job_id
new_result['backend_name'] = self._backend_name
self._result = new_result | def _auto_progress(self):
for status in self._job_progress:
time.sleep(0.5)
self._status = status
if (self._status == ApiJobStatus.COMPLETED):
new_result = copy.deepcopy(VALID_RESULT_RESPONSE)
for _ in range(len(self.qobj['experiments'])):
valid_result = copy.deepcopy(VALID_RESULT)
counts = randrange(1024)
valid_result['data']['counts'] = {'0x0': counts, '0x3': (1024 - counts)}
new_result['results'].append(valid_result)
new_result['job_id'] = self._job_id
new_result['backend_name'] = self._backend_name
self._result = new_result<|docstring|>Automatically update job status.<|endoftext|> |
715395286d4ca3c3811a9232a886bc4f6e552955ecc81955161e65baa9030209 | def data(self):
'Return job data.'
data = {'job_id': self._job_id, 'kind': 'q-object', 'status': self._status.value, 'creation_date': '2019-01-01T13:15:58.425972', '_backend_info': {'name': self._backend_name}}
if self._job_tags:
data['tags'] = self._job_tags.copy()
if self._job_name:
data['name'] = self._job_name
return data | Return job data. | test/fake_account_client.py | data | jwoehr/qiskit-ibmq-provider | 199 | python | def data(self):
data = {'job_id': self._job_id, 'kind': 'q-object', 'status': self._status.value, 'creation_date': '2019-01-01T13:15:58.425972', '_backend_info': {'name': self._backend_name}}
if self._job_tags:
data['tags'] = self._job_tags.copy()
if self._job_name:
data['name'] = self._job_name
return data | def data(self):
data = {'job_id': self._job_id, 'kind': 'q-object', 'status': self._status.value, 'creation_date': '2019-01-01T13:15:58.425972', '_backend_info': {'name': self._backend_name}}
if self._job_tags:
data['tags'] = self._job_tags.copy()
if self._job_name:
data['name'] = self._job_name
return data<|docstring|>Return job data.<|endoftext|> |
18cef473718005bcd6593f8bd0ca81aafe67790fab8e2b18e23ad8edd716f211 | def cancel(self):
'Cancel the job.'
self._future.cancel()
wait([self._future])
self._status = ApiJobStatus.CANCELLED
self._result = None | Cancel the job. | test/fake_account_client.py | cancel | jwoehr/qiskit-ibmq-provider | 199 | python | def cancel(self):
self._future.cancel()
wait([self._future])
self._status = ApiJobStatus.CANCELLED
self._result = None | def cancel(self):
self._future.cancel()
wait([self._future])
self._status = ApiJobStatus.CANCELLED
self._result = None<|docstring|>Cancel the job.<|endoftext|> |
cae6c557197eff0f9ae4fdd4df6417d877d65ebe116b6d8efcbb4bbf147ac36f | def result(self):
'Return job result.'
if (not self._result):
raise RequestsApiError('Result is not available')
return self._result | Return job result. | test/fake_account_client.py | result | jwoehr/qiskit-ibmq-provider | 199 | python | def result(self):
if (not self._result):
raise RequestsApiError('Result is not available')
return self._result | def result(self):
if (not self._result):
raise RequestsApiError('Result is not available')
return self._result<|docstring|>Return job result.<|endoftext|> |
526d5537655214e1ae490cf7930084c7bde1c8dcbe547526d9978f8da3b4bec0 | def status(self):
'Return job status.'
return self._status | Return job status. | test/fake_account_client.py | status | jwoehr/qiskit-ibmq-provider | 199 | python | def status(self):
return self._status | def status(self):
return self._status<|docstring|>Return job status.<|endoftext|> |
4b5288aed731366923415a8a27666f9b92beeb21764bae63540bd11175284abd | def name(self):
'Return job name.'
return self._job_name | Return job name. | test/fake_account_client.py | name | jwoehr/qiskit-ibmq-provider | 199 | python | def name(self):
return self._job_name | def name(self):
return self._job_name<|docstring|>Return job name.<|endoftext|> |
8c5f5a4a2526b3fbe87838964ed228c99bf314186a7d1656a3beed09c7f3200a | def data(self):
'Return job data.'
data = super().data()
data['new_field'] = 'foo'
return data | Return job data. | test/fake_account_client.py | data | jwoehr/qiskit-ibmq-provider | 199 | python | def data(self):
data = super().data()
data['new_field'] = 'foo'
return data | def data(self):
data = super().data()
data['new_field'] = 'foo'
return data<|docstring|>Return job data.<|endoftext|> |
50706f5866d5a35f5f758962f10943b795b0b847f411a628232a2a91c7aa962e | def data(self):
'Return job data.'
data = super().data()
del data['job_id']
return data | Return job data. | test/fake_account_client.py | data | jwoehr/qiskit-ibmq-provider | 199 | python | def data(self):
data = super().data()
del data['job_id']
return data | def data(self):
data = super().data()
del data['job_id']
return data<|docstring|>Return job data.<|endoftext|> |
f33605549d84ac194c42b814f1a9c6126642e50c56c59bd48c456c61db9e4176 | def data(self):
'Return job data.'
data = super().data()
if (self.status() == ApiJobStatus.ERROR_RUNNING_JOB):
data['error'] = {'message': 'Job failed.', 'code': 1234}
return data | Return job data. | test/fake_account_client.py | data | jwoehr/qiskit-ibmq-provider | 199 | python | def data(self):
data = super().data()
if (self.status() == ApiJobStatus.ERROR_RUNNING_JOB):
data['error'] = {'message': 'Job failed.', 'code': 1234}
return data | def data(self):
data = super().data()
if (self.status() == ApiJobStatus.ERROR_RUNNING_JOB):
data['error'] = {'message': 'Job failed.', 'code': 1234}
return data<|docstring|>Return job data.<|endoftext|> |
d8e0a22c6f29f825be41b7726d5303806a717fb7ad793c3354eb6a89d171e158 | def __init__(self, job_limit=(- 1), job_class=BaseFakeJob):
'Initialize a fake account client.'
self._jobs = {}
self._results_retrieved = set()
self._job_limit = job_limit
self._executor = ThreadPoolExecutor()
self._job_class = job_class
if isinstance(self._job_class, list):
self._job_class.reverse() | Initialize a fake account client. | test/fake_account_client.py | __init__ | jwoehr/qiskit-ibmq-provider | 199 | python | def __init__(self, job_limit=(- 1), job_class=BaseFakeJob):
self._jobs = {}
self._results_retrieved = set()
self._job_limit = job_limit
self._executor = ThreadPoolExecutor()
self._job_class = job_class
if isinstance(self._job_class, list):
self._job_class.reverse() | def __init__(self, job_limit=(- 1), job_class=BaseFakeJob):
self._jobs = {}
self._results_retrieved = set()
self._job_limit = job_limit
self._executor = ThreadPoolExecutor()
self._job_class = job_class
if isinstance(self._job_class, list):
self._job_class.reverse()<|docstring|>Initialize a fake account client.<|endoftext|> |
82398fac2189e16b9c10192f19ecf82d7e1a943ec16ae20b102a44a0b8a4833c | def list_jobs_statuses(self, limit, skip, descending=True, extra_filter=None):
'Return a list of statuses of jobs.'
job_data = []
for job in list(self._jobs.values())[skip:(skip + limit)]:
job_data.append(job.data())
if (not descending):
job_data.reverse()
return job_data | Return a list of statuses of jobs. | test/fake_account_client.py | list_jobs_statuses | jwoehr/qiskit-ibmq-provider | 199 | python | def list_jobs_statuses(self, limit, skip, descending=True, extra_filter=None):
job_data = []
for job in list(self._jobs.values())[skip:(skip + limit)]:
job_data.append(job.data())
if (not descending):
job_data.reverse()
return job_data | def list_jobs_statuses(self, limit, skip, descending=True, extra_filter=None):
job_data = []
for job in list(self._jobs.values())[skip:(skip + limit)]:
job_data.append(job.data())
if (not descending):
job_data.reverse()
return job_data<|docstring|>Return a list of statuses of jobs.<|endoftext|> |
6bdc6cdc5c3ff79547351cf3cac391e6cad98c09dc6a77d13857082353a7cb84 | def job_submit(self, backend_name, qobj_dict, job_name, job_tags, *_args, **_kwargs):
'Submit a Qobj to a device.'
if ((self._job_limit != (- 1)) and (self._unfinished_jobs() >= self._job_limit)):
raise RequestsApiError('400 Client Error: Bad Request for url: <url>. Reached maximum number of concurrent jobs, Error code: 3458.')
new_job_id = uuid.uuid4().hex
job_class = (self._job_class.pop() if isinstance(self._job_class, list) else self._job_class)
new_job = job_class(executor=self._executor, job_id=new_job_id, qobj=qobj_dict, backend_name=backend_name, job_tags=job_tags, job_name=job_name)
self._jobs[new_job_id] = new_job
return new_job.data() | Submit a Qobj to a device. | test/fake_account_client.py | job_submit | jwoehr/qiskit-ibmq-provider | 199 | python | def job_submit(self, backend_name, qobj_dict, job_name, job_tags, *_args, **_kwargs):
if ((self._job_limit != (- 1)) and (self._unfinished_jobs() >= self._job_limit)):
raise RequestsApiError('400 Client Error: Bad Request for url: <url>. Reached maximum number of concurrent jobs, Error code: 3458.')
new_job_id = uuid.uuid4().hex
job_class = (self._job_class.pop() if isinstance(self._job_class, list) else self._job_class)
new_job = job_class(executor=self._executor, job_id=new_job_id, qobj=qobj_dict, backend_name=backend_name, job_tags=job_tags, job_name=job_name)
self._jobs[new_job_id] = new_job
return new_job.data() | def job_submit(self, backend_name, qobj_dict, job_name, job_tags, *_args, **_kwargs):
if ((self._job_limit != (- 1)) and (self._unfinished_jobs() >= self._job_limit)):
raise RequestsApiError('400 Client Error: Bad Request for url: <url>. Reached maximum number of concurrent jobs, Error code: 3458.')
new_job_id = uuid.uuid4().hex
job_class = (self._job_class.pop() if isinstance(self._job_class, list) else self._job_class)
new_job = job_class(executor=self._executor, job_id=new_job_id, qobj=qobj_dict, backend_name=backend_name, job_tags=job_tags, job_name=job_name)
self._jobs[new_job_id] = new_job
return new_job.data()<|docstring|>Submit a Qobj to a device.<|endoftext|> |
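
Putting the client methods together (submit, wait, fetch) under the job-limit check described above. A sketch; it assumes the default job class progresses to COMPLETED:

client = FakeAccountClient(job_limit=2)
qobj = {'experiments': [{}]}   # minimal stub; only the experiment count is used

response = client.job_submit('fake_backend', qobj, job_name='demo', job_tags=['test'])
job_id = response['job_id']

client.job_final_status(job_id)      # blocks until the job reaches a final state
result = client.job_result(job_id)   # a second call would raise ValueError
print(result['job_id'], result['backend_name'])

client.tear_down()                   # cancel any leftover job threads
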
9031da2602ab6df90989c1a06eb115890642013ccbc2131b5867be03b8e639c4 | def job_download_qobj(self, job_id, *_args, **_kwargs):
'Retrieve and return a Qobj.'
return copy.deepcopy(self._get_job(job_id).qobj) | Retrieve and return a Qobj. | test/fake_account_client.py | job_download_qobj | jwoehr/qiskit-ibmq-provider | 199 | python | def job_download_qobj(self, job_id, *_args, **_kwargs):
return copy.deepcopy(self._get_job(job_id).qobj) | def job_download_qobj(self, job_id, *_args, **_kwargs):
return copy.deepcopy(self._get_job(job_id).qobj)<|docstring|>Retrieve and return a Qobj.<|endoftext|> |
8aa2195dcd00ac266b56b183930da04a246b6e8d82b98b32a7661d2586d217d7 | def job_result(self, job_id, *_args, **_kwargs):
'Return a random job result.'
if (job_id in self._results_retrieved):
raise ValueError('Result already retrieved for job {}!'.format(job_id))
self._results_retrieved.add(job_id)
return self._get_job(job_id).result() | Return a random job result. | test/fake_account_client.py | job_result | jwoehr/qiskit-ibmq-provider | 199 | python | def job_result(self, job_id, *_args, **_kwargs):
if (job_id in self._results_retrieved):
raise ValueError('Result already retrieved for job {}!'.format(job_id))
self._results_retrieved.add(job_id)
return self._get_job(job_id).result() | def job_result(self, job_id, *_args, **_kwargs):
if (job_id in self._results_retrieved):
raise ValueError('Result already retrieved for job {}!'.format(job_id))
self._results_retrieved.add(job_id)
return self._get_job(job_id).result()<|docstring|>Return a random job result.<|endoftext|> |
d856f4c6950da0b2ddc6ff7cf19281c656c89ff05117721284f05ddbae863089 | def job_get(self, job_id, *_args, **_kwargs):
'Return information about a job.'
return self._get_job(job_id).data() | Return information about a job. | test/fake_account_client.py | job_get | jwoehr/qiskit-ibmq-provider | 199 | python | def job_get(self, job_id, *_args, **_kwargs):
return self._get_job(job_id).data() | def job_get(self, job_id, *_args, **_kwargs):
return self._get_job(job_id).data()<|docstring|>Return information about a job.<|endoftext|> |
c7585b62c276fb09a7e8df066a82ccc8d79718d3d297009887b071b7988a2011 | def job_status(self, job_id, *_args, **_kwargs):
'Return the status of a job.'
return {'status': self._get_job(job_id).status().value} | Return the status of a job. | test/fake_account_client.py | job_status | jwoehr/qiskit-ibmq-provider | 199 | python | def job_status(self, job_id, *_args, **_kwargs):
return {'status': self._get_job(job_id).status().value} | def job_status(self, job_id, *_args, **_kwargs):
return {'status': self._get_job(job_id).status().value}<|docstring|>Return the status of a job.<|endoftext|> |
f937be69afbbd276396a3b36d1825c4ad1ebd022ca81132885c1f31d765a8c68 | def job_final_status(self, job_id, *_args, **_kwargs):
    'Wait until the job progresses to a final state.'
job = self._get_job(job_id)
status = job.status()
while (status not in API_JOB_FINAL_STATES):
time.sleep(0.5)
status = job.status()
if _kwargs.get('status_queue', None):
data = {'status': status.value}
if (status is ApiJobStatus.QUEUED):
data['infoQueue'] = {'status': 'PENDING_IN_QUEUE', 'position': 1}
_kwargs['status_queue'].put(data)
    return self.job_status(job_id) | Wait until the job progresses to a final state. | test/fake_account_client.py | job_final_status | jwoehr/qiskit-ibmq-provider | 199 | python | def job_final_status(self, job_id, *_args, **_kwargs):
job = self._get_job(job_id)
status = job.status()
while (status not in API_JOB_FINAL_STATES):
time.sleep(0.5)
status = job.status()
if _kwargs.get('status_queue', None):
data = {'status': status.value}
if (status is ApiJobStatus.QUEUED):
data['infoQueue'] = {'status': 'PENDING_IN_QUEUE', 'position': 1}
_kwargs['status_queue'].put(data)
return self.job_status(job_id) | def job_final_status(self, job_id, *_args, **_kwargs):
job = self._get_job(job_id)
status = job.status()
while (status not in API_JOB_FINAL_STATES):
time.sleep(0.5)
status = job.status()
if _kwargs.get('status_queue', None):
data = {'status': status.value}
if (status is ApiJobStatus.QUEUED):
data['infoQueue'] = {'status': 'PENDING_IN_QUEUE', 'position': 1}
_kwargs['status_queue'].put(data)
    return self.job_status(job_id)<|docstring|>Wait until the job progresses to a final state.<|endoftext|>
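
job_final_status can also report intermediate states through a queue, as the status_queue branch above shows. A sketch with a standard queue.Queue:

import queue

status_queue = queue.Queue()
client = FakeAccountClient()
resp = client.job_submit('fake_backend', {'experiments': [{}]}, job_name=None, job_tags=None)
client.job_final_status(resp['job_id'], status_queue=status_queue)
while not status_queue.empty():
    print(status_queue.get())   # e.g. {'status': 'RUNNING'}
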
a185881baedff00626b4bba40ade263d2729a7146a34b99db7e89b2d98a97c29 | def job_properties(self, *_args, **_kwargs):
'Return the backend properties of a job.'
return FakePoughkeepsie().properties() | Return the backend properties of a job. | test/fake_account_client.py | job_properties | jwoehr/qiskit-ibmq-provider | 199 | python | def job_properties(self, *_args, **_kwargs):
return FakePoughkeepsie().properties() | def job_properties(self, *_args, **_kwargs):
return FakePoughkeepsie().properties()<|docstring|>Return the backend properties of a job.<|endoftext|> |
db0e811ad3b3e3220e281db1c15b99c9a7847bebea37fa06a6c6997744566dd8 | def job_cancel(self, job_id, *_args, **_kwargs):
'Submit a request for cancelling a job.'
self._get_job(job_id).cancel()
return {'cancelled': True} | Submit a request for cancelling a job. | test/fake_account_client.py | job_cancel | jwoehr/qiskit-ibmq-provider | 199 | python | def job_cancel(self, job_id, *_args, **_kwargs):
self._get_job(job_id).cancel()
return {'cancelled': True} | def job_cancel(self, job_id, *_args, **_kwargs):
self._get_job(job_id).cancel()
return {'cancelled': True}<|docstring|>Submit a request for cancelling a job.<|endoftext|> |
ac0fe8769945c650da40d16f774d350c7dd5940ac9cfce67ea93376547957582 | def backend_job_limit(self, *_args, **_kwargs):
'Return the job limit for the backend.'
return {'maximumJobs': self._job_limit, 'runningJobs': self._unfinished_jobs()} | Return the job limit for the backend. | test/fake_account_client.py | backend_job_limit | jwoehr/qiskit-ibmq-provider | 199 | python | def backend_job_limit(self, *_args, **_kwargs):
return {'maximumJobs': self._job_limit, 'runningJobs': self._unfinished_jobs()} | def backend_job_limit(self, *_args, **_kwargs):
return {'maximumJobs': self._job_limit, 'runningJobs': self._unfinished_jobs()}<|docstring|>Return the job limit for the backend.<|endoftext|> |
290ad6d2c10f6d8e9c0aa13ddea91798208a67a498b38086e101134047e9096a | def job_update_attribute(self, job_id, attr_name, attr_value, *_args, **_kwargs):
'Update the specified job attribute with the given value.'
job = self._get_job(job_id)
if (attr_name == 'name'):
job._job_name = attr_value
if (attr_name == 'tags'):
job._job_tags = attr_value.copy()
return {attr_name: attr_value} | Update the specified job attribute with the given value. | test/fake_account_client.py | job_update_attribute | jwoehr/qiskit-ibmq-provider | 199 | python | def job_update_attribute(self, job_id, attr_name, attr_value, *_args, **_kwargs):
job = self._get_job(job_id)
if (attr_name == 'name'):
job._job_name = attr_value
if (attr_name == 'tags'):
job._job_tags = attr_value.copy()
return {attr_name: attr_value} | def job_update_attribute(self, job_id, attr_name, attr_value, *_args, **_kwargs):
job = self._get_job(job_id)
if (attr_name == 'name'):
job._job_name = attr_value
if (attr_name == 'tags'):
job._job_tags = attr_value.copy()
return {attr_name: attr_value}<|docstring|>Update the specified job attribute with the given value.<|endoftext|> |
ef5cefbac515a4f7db2a52aef51ad469c53c2da2dd36226854e569e451ba3a50 | def tear_down(self):
'Clean up job threads.'
for job_id in list(self._jobs.keys()):
try:
self._jobs[job_id].cancel()
except KeyError:
pass | Clean up job threads. | test/fake_account_client.py | tear_down | jwoehr/qiskit-ibmq-provider | 199 | python | def tear_down(self):
for job_id in list(self._jobs.keys()):
try:
self._jobs[job_id].cancel()
except KeyError:
pass | def tear_down(self):
for job_id in list(self._jobs.keys()):
try:
self._jobs[job_id].cancel()
except KeyError:
pass<|docstring|>Clean up job threads.<|endoftext|> |
59252f40e16571e58e1b8673f9cfc54236b11f203cd013eb4ce7ede44d418e27 | def _unfinished_jobs(self):
'Return the number of unfinished jobs.'
return sum((1 for job in self._jobs.values() if (job.status() not in API_JOB_FINAL_STATES))) | Return the number of unfinished jobs. | test/fake_account_client.py | _unfinished_jobs | jwoehr/qiskit-ibmq-provider | 199 | python | def _unfinished_jobs(self):
return sum((1 for job in self._jobs.values() if (job.status() not in API_JOB_FINAL_STATES))) | def _unfinished_jobs(self):
return sum((1 for job in self._jobs.values() if (job.status() not in API_JOB_FINAL_STATES)))<|docstring|>Return the number of unfinished jobs.<|endoftext|> |
e7e6312c71d29353e63e277291e3dbb45892ccf0a8e3ce151127244f9f41e169 | def _get_job(self, job_id):
'Return job if found.'
if (job_id not in self._jobs):
raise RequestsApiError('Job not found. Error code: 3250.')
return self._jobs[job_id] | Return job if found. | test/fake_account_client.py | _get_job | jwoehr/qiskit-ibmq-provider | 199 | python | def _get_job(self, job_id):
if (job_id not in self._jobs):
raise RequestsApiError('Job not found. Error code: 3250.')
return self._jobs[job_id] | def _get_job(self, job_id):
if (job_id not in self._jobs):
raise RequestsApiError('Job not found. Error code: 3250.')
return self._jobs[job_id]<|docstring|>Return job if found.<|endoftext|> |
06c5b4d8e4283b6b7c457e2491433ef38eeabeb8021415c725518e169a8fe570 | def __init__(self, max_fail_count=(- 1)):
'JobSubmitFailClient constructor.'
self._fail_count = max_fail_count
super().__init__() | JobSubmitFailClient constructor. | test/fake_account_client.py | __init__ | jwoehr/qiskit-ibmq-provider | 199 | python | def __init__(self, max_fail_count=(- 1)):
self._fail_count = max_fail_count
super().__init__() | def __init__(self, max_fail_count=(- 1)):
self._fail_count = max_fail_count
super().__init__()<|docstring|>JobSubmitFailClient constructor.<|endoftext|> |
c27110a172142b3f26aed663070531ae767e10e414b9741f5c19d6c9bf99cf68 | def job_submit(self, *_args, **_kwargs):
'Failing job submit.'
if (self._fail_count != 0):
self._fail_count -= 1
raise RequestsApiError('Job submit failed!')
return super().job_submit(*_args, **_kwargs) | Failing job submit. | test/fake_account_client.py | job_submit | jwoehr/qiskit-ibmq-provider | 199 | python | def job_submit(self, *_args, **_kwargs):
if (self._fail_count != 0):
self._fail_count -= 1
raise RequestsApiError('Job submit failed!')
return super().job_submit(*_args, **_kwargs) | def job_submit(self, *_args, **_kwargs):
if (self._fail_count != 0):
self._fail_count -= 1
raise RequestsApiError('Job submit failed!')
return super().job_submit(*_args, **_kwargs)<|docstring|>Failing job submit.<|endoftext|> |
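JobSubmitFailClient above raises RequestsApiError for the first max_fail_count submissions (or forever with the default of -1) before deferring to the parent client, which makes it a natural fixture for exercising retry logic. A hedged sketch of such a retry loop; the exception import path is what a qiskit-ibmq-provider test would typically use, but treat it as an assumption here:

# Assumed import path for the exception raised by the fake client.
from qiskit.providers.ibmq.api.exceptions import RequestsApiError

def submit_with_retry(client, retries=3, **job_kwargs):
    # Retry job_submit until it succeeds or the retry budget is spent.
    last_error = None
    for _attempt in range(retries):
        try:
            return client.job_submit(**job_kwargs)
        except RequestsApiError as err:
            last_error = err  # the fake client keeps failing until its count hits zero
    if last_error is None:
        raise RuntimeError('no submission attempts were made')
    raise last_error

# client = JobSubmitFailClient(max_fail_count=2)
# job = submit_with_retry(client)  # would succeed on the third attempt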
4a7dc3470947496bbcc219b8fa82af67fbb213e97bae13bff5b7102ece3aeef4 | def __init__(self, *args, max_fail_count=(- 1), **kwargs):
'JobTimeoutClient constructor.'
self._fail_count = max_fail_count
super().__init__(*args, **kwargs) | JobTimeoutClient constructor. | test/fake_account_client.py | __init__ | jwoehr/qiskit-ibmq-provider | 199 | python | def __init__(self, *args, max_fail_count=(- 1), **kwargs):
self._fail_count = max_fail_count
super().__init__(*args, **kwargs) | def __init__(self, *args, max_fail_count=(- 1), **kwargs):
self._fail_count = max_fail_count
super().__init__(*args, **kwargs)<|docstring|>JobTimeoutClient constructor.<|endoftext|> |
be725bbcdd80f862edabc14afc3f5fee5b945ede789f181ca59258536cebbb95 | def job_final_status(self, job_id, *_args, **_kwargs):
'Wait until the job progresses to a final state.'
if (self._fail_count != 0):
self._fail_count -= 1
raise UserTimeoutExceededError('Job timed out!')
return super().job_final_status(job_id, *_args, **_kwargs) | Wait until the job progresses to a final state. | test/fake_account_client.py | job_final_status | jwoehr/qiskit-ibmq-provider | 199 | python | def job_final_status(self, job_id, *_args, **_kwargs):
if (self._fail_count != 0):
self._fail_count -= 1
raise UserTimeoutExceededError('Job timed out!')
return super().job_final_status(job_id, *_args, **_kwargs) | def job_final_status(self, job_id, *_args, **_kwargs):
if (self._fail_count != 0):
self._fail_count -= 1
raise UserTimeoutExceededError('Job timed out!')
return super().job_final_status(job_id, *_args, **_kwargs)<|docstring|>Wait until the job progresses to a final state.<|endoftext|>
6ef4a166a0b64f8f408f5077ee1b958a0d7edc5bce3d3d9b693f43ac5347a37f | def get_quotes():
'\n function that gets the json response from the base url\n '
with urllib.request.urlopen(base_url) as url:
data = url.read()
response = json.loads(data)
results = process_quote(response)
return results | function that gets the json response from the base url | blog/request.py | get_quotes | petermirithu/blog_site | 0 | python | def get_quotes():
'\n \n '
with urllib.request.urlopen(base_url) as url:
data = url.read()
response = json.loads(data)
results = process_quote(response)
return results | def get_quotes():
'\n \n '
with urllib.request.urlopen(base_url) as url:
data = url.read()
response = json.loads(data)
results = process_quote(response)
return results<|docstring|>function that gets the json response from the base url<|endoftext|> |
41a4249abca8fbe5d53bf15727143b4c5668d8aabea31e071ce102986d22b925 | def process_quote(item):
'\n function that processes the response from json format\n '
results = []
author = item.get('author')
quote = item.get('quote')
quote_object = Quote(author, quote)
results.append(quote_object)
return results | function that processes the response from json format | blog/request.py | process_quote | petermirithu/blog_site | 0 | python | def process_quote(item):
'\n \n '
results = []
author = item.get('author')
quote = item.get('quote')
quote_object = Quote(author, quote)
results.append(quote_object)
return results | def process_quote(item):
'\n \n '
results = []
author = item.get('author')
quote = item.get('quote')
quote_object = Quote(author, quote)
results.append(quote_object)
return results<|docstring|>function that processes the response from json format<|endoftext|> |
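get_quotes and process_quote above rely on a module-level base_url and a Quote class defined elsewhere in blog/request.py. A self-contained sketch of the same fetch-and-wrap pattern, with both missing names filled in as explicit assumptions:

import json
import urllib.request

# Hypothetical endpoint returning {'author': ..., 'quote': ...}; the real
# base_url lives elsewhere in blog/request.py.
base_url = 'http://quotes.stormconsultancy.co.uk/random.json'

class Quote:
    # Minimal stand-in for the Quote model assumed by the records above.
    def __init__(self, author, quote):
        self.author = author
        self.quote = quote

with urllib.request.urlopen(base_url) as url:
    response = json.loads(url.read())

quote = Quote(response.get('author'), response.get('quote'))
print(quote.author, '-', quote.quote)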
e6ae353071262792e6edb2a6d24ed77fab70baa322ed67a364e689e044a94e44 | def kernel_zhao(s, s0=0.08333, theta=0.242):
'\n Calculates Zhao kernel for given value.\n\n :param s: time point to evaluate\n :param s0: initial reaction time\n :param theta: empirically determined constant\n :return: value at time point s\n '
c0 = ((1.0 / s0) / (1 - (1.0 / (- theta))))
if (s >= 0):
if (s <= s0):
return c0
else:
return (c0 * ((s / s0) ** (- (1.0 + theta))))
else:
return 0 | Calculates Zhao kernel for given value.
:param s: time point to evaluate
:param s0: initial reaction time
:param theta: empirically determined constant
:return: value at time point s | tideh/functions.py | kernel_zhao | sebaruehl/TiDeH | 0 | python | def kernel_zhao(s, s0=0.08333, theta=0.242):
'\n Calculates Zhao kernel for given value.\n\n :param s: time point to evaluate\n :param s0: initial reaction time\n :param theta: empirically determined constant\n :return: value at time point s\n '
c0 = ((1.0 / s0) / (1 - (1.0 / (- theta))))
if (s >= 0):
if (s <= s0):
return c0
else:
return (c0 * ((s / s0) ** (- (1.0 + theta))))
else:
return 0 | def kernel_zhao(s, s0=0.08333, theta=0.242):
'\n Calculates Zhao kernel for given value.\n\n :param s: time point to evaluate\n :param s0: initial reaction time\n :param theta: empirically determined constant\n :return: value at time point s\n '
c0 = ((1.0 / s0) / (1 - (1.0 / (- theta))))
if (s >= 0):
if (s <= s0):
return c0
else:
return (c0 * ((s / s0) ** (- (1.0 + theta))))
else:
return 0<|docstring|>Calculates Zhao kernel for given value.
:param s: time point to evaluate
:param s0: initial reaction time
:param theta: empirically determined constant
:return: value at time point s<|endoftext|> |
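Written out, the memory kernel that kernel_zhao implements is the piecewise power law below; the first line of the function computes the normalization constant c_0:

\[
c_0 = \frac{1}{s_0\left(1 + \tfrac{1}{\theta}\right)}, \qquad
\phi(s) =
\begin{cases}
0, & s < 0,\\
c_0, & 0 \le s \le s_0,\\
c_0 \left(\dfrac{s}{s_0}\right)^{-(1+\theta)}, & s > s_0.
\end{cases}
\]

With the defaults (s0 = 0.08333 hours, i.e. five minutes, and theta = 0.242) the kernel integrates to one over [0, infinity).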
56a686165733a9d1b52bbf74bfe0a280bda82cf45b5282f221235e12821c0288 | def kernel_zhao_vec(s, s0=0.08333, theta=0.242):
'\n Calculates Zhao kernel for given value.\n Optimized using nd-arrays and vectorization.\n\n :param s: time points to evaluate, should be a nd-array\n :param s0: initial reaction time\n :param theta: empirically determined constant\n :return: values at given time points\n '
c0 = ((1.0 / s0) / (1 - (1.0 / (- theta))))
res = np.copy(s)
res[(s < 0)] = 0
res[((s <= s0) & (s >= 0))] = c0
res[(s > s0)] = (c0 * ((res[(s > s0)] / s0) ** (- (1.0 + theta))))
return res | Calculates Zhao kernel for given value.
Optimized using nd-arrays and vectorization.
:param s: time points to evaluate, should be a nd-array
:param s0: initial reaction time
:param theta: empirically determined constant
:return: values at given time points | tideh/functions.py | kernel_zhao_vec | sebaruehl/TiDeH | 0 | python | def kernel_zhao_vec(s, s0=0.08333, theta=0.242):
'\n Calculates Zhao kernel for given value.\n Optimized using nd-arrays and vectorization.\n\n :param s: time points to evaluate, should be a nd-array\n :param s0: initial reaction time\n :param theta: empirically determined constant\n :return: values at given time points\n '
c0 = ((1.0 / s0) / (1 - (1.0 / (- theta))))
res = np.copy(s)
res[(s < 0)] = 0
res[((s <= s0) & (s >= 0))] = c0
res[(s > s0)] = (c0 * ((res[(s > s0)] / s0) ** (- (1.0 + theta))))
return res | def kernel_zhao_vec(s, s0=0.08333, theta=0.242):
'\n Calculates Zhao kernel for given value.\n Optimized using nd-arrays and vectorization.\n\n :param s: time points to evaluate, should be a nd-array\n :param s0: initial reaction time\n :param theta: empirically determined constant\n :return: values at given time points\n '
c0 = ((1.0 / s0) / (1 - (1.0 / (- theta))))
res = np.copy(s)
res[(s < 0)] = 0
res[((s <= s0) & (s >= 0))] = c0
res[(s > s0)] = (c0 * ((res[(s > s0)] / s0) ** (- (1.0 + theta))))
return res<|docstring|>Calculates Zhao kernel for given value.
Optimized using nd-arrays and vectorization.
:param s: time points to evaluate, should be a nd-array
:param s0: initial reaction time
:param theta: empirically determined constant
:return: values at given time points<|endoftext|> |
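A quick sketch checking that the vectorized kernel matches the scalar one elementwise; the tideh.functions import path is assumed from the record metadata:

import numpy as np
from tideh.functions import kernel_zhao, kernel_zhao_vec  # assumed import path

s = np.array([-1.0, 0.0, 0.05, 0.08333, 0.5, 10.0])
vec = kernel_zhao_vec(s)
scalar = np.array([kernel_zhao(x) for x in s])
assert np.allclose(vec, scalar)  # same values below, at, and beyond s0
print(vec)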
66fe2176b748fabb50b0ba8d2d9d136c0bc6f325d7925b8074c37ec33da2bc95 | def kernel_primitive_zhao(x, s0=0.08333, theta=0.242):
'\n Calculates the primitive of the Zhao kernel for given values.\n\n :param x: point to evaluate\n :param s0: initial reaction time\n :param theta: empirically determined constant\n :return: primitive evaluated at x\n '
c0 = ((1.0 / s0) / (1 - (1.0 / (- theta))))
if (x < 0):
return 0
elif (x <= s0):
return (c0 * x)
else:
return (c0 * (s0 + ((s0 * (1 - ((x / s0) ** (- theta)))) / theta))) | Calculates the primitive of the Zhao kernel for given values.
:param x: point to evaluate
:param s0: initial reaction time
:param theta: empirically determined constant
:return: primitive evaluated at x | tideh/functions.py | kernel_primitive_zhao | sebaruehl/TiDeH | 0 | python | def kernel_primitive_zhao(x, s0=0.08333, theta=0.242):
'\n Calculates the primitive of the Zhao kernel for given values.\n\n :param x: point to evaluate\n :param s0: initial reaction time\n :param theta: empirically determined constant\n :return: primitive evaluated at x\n '
c0 = ((1.0 / s0) / (1 - (1.0 / (- theta))))
if (x < 0):
return 0
elif (x <= s0):
return (c0 * x)
else:
return (c0 * (s0 + ((s0 * (1 - ((x / s0) ** (- theta)))) / theta))) | def kernel_primitive_zhao(x, s0=0.08333, theta=0.242):
'\n Calculates the primitive of the Zhao kernel for given values.\n\n :param x: point to evaluate\n :param s0: initial reaction time\n :param theta: empirically determined constant\n :return: primitive evaluated at x\n '
c0 = ((1.0 / s0) / (1 - (1.0 / (- theta))))
if (x < 0):
return 0
elif (x <= s0):
return (c0 * x)
else:
return (c0 * (s0 + ((s0 * (1 - ((x / s0) ** (- theta)))) / theta)))<|docstring|>Calculates the primitive of the Zhao kernel for given values.
:param x: point to evaluate
:param s0: initial reaction time
:param theta: empirically determined constant
:return: primitive evaluated at x<|endoftext|> |
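The closed form behind kernel_primitive_zhao is the running integral of the kernel above:

\[
\Phi(x) = \int_0^{x} \phi(s)\,ds =
\begin{cases}
0, & x < 0,\\
c_0 x, & 0 \le x \le s_0,\\
c_0\left(s_0 + \dfrac{s_0}{\theta}\left(1 - \left(\dfrac{x}{s_0}\right)^{-\theta}\right)\right), & x > s_0,
\end{cases}
\]

so \Phi(x) \to c_0 s_0 (1 + 1/\theta) = 1 as x \to \infty, consistent with the normalization noted above.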
f453187073d70bb3c91c14634e0efe33605e0dba44ec3d2f839abc190f0a3d1c | def kernel_primitive_zhao_vec(x, s0=0.08333, theta=0.242):
'\n Calculates the primitive of the Zhao kernel for given values.\n Optimized using nd-arrays and vectorization.\n\n :param x: points to evaluate, should be a nd-array\n :param s0: initial reaction time\n :param theta: empirically determined constant\n :return: primitives evaluated at given points\n '
c0 = ((1.0 / s0) / (1 - (1.0 / (- theta))))
res = np.copy(x)
res[(x < 0)] = 0
res[((x <= s0) & (x >= 0))] = (c0 * res[((x <= s0) & (x >= 0))])
res[(x > s0)] = (c0 * (s0 + ((s0 * (1 - ((res[(x > s0)] / s0) ** (- theta)))) / theta)))
return res | Calculates the primitive of the Zhao kernel for given values.
Optimized using nd-arrays and vectorization.
:param x: points to evaluate, should be a nd-array
:param s0: initial reaction time
:param theta: empirically determined constant
:return: primitives evaluated at given points | tideh/functions.py | kernel_primitive_zhao_vec | sebaruehl/TiDeH | 0 | python | def kernel_primitive_zhao_vec(x, s0=0.08333, theta=0.242):
'\n Calculates the primitive of the Zhao kernel for given values.\n Optimized using nd-arrays and vectorization.\n\n :param x: points to evaluate, should be a nd-array\n :param s0: initial reaction time\n :param theta: empirically determined constant\n :return: primitives evaluated at given points\n '
c0 = ((1.0 / s0) / (1 - (1.0 / (- theta))))
res = np.copy(x)
res[(x < 0)] = 0
res[((x <= s0) & (x >= 0))] = (c0 * res[((x <= s0) & (x >= 0))])
res[(x > s0)] = (c0 * (s0 + ((s0 * (1 - ((res[(x > s0)] / s0) ** (- theta)))) / theta)))
return res | def kernel_primitive_zhao_vec(x, s0=0.08333, theta=0.242):
'\n Calculates the primitive of the Zhao kernel for given values.\n Optimized using nd-arrays and vectorization.\n\n :param x: points to evaluate, should be a nd-array\n :param s0: initial reaction time\n :param theta: empirically determined constant\n :return: primitives evaluated at given points\n '
c0 = ((1.0 / s0) / (1 - (1.0 / (- theta))))
res = np.copy(x)
res[(x < 0)] = 0
res[((x <= s0) & (x >= 0))] = (c0 * res[((x <= s0) & (x >= 0))])
res[(x > s0)] = (c0 * (s0 + ((s0 * (1 - ((res[(x > s0)] / s0) ** (- theta)))) / theta)))
return res<|docstring|>Calculates the primitive of the Zhao kernel for given values.
Optimized using nd-arrays and vectorization.
:param x: points to evaluate, should be a nd-array
:param s0: initial reaction time
:param theta: empirically determined constant
:return: primitives evaluated at given points<|endoftext|> |
1b4b73e6c79f17c42c85f14bbb31fbbb23196962e37b981d63f43ab903fca7c4 | def integral_zhao(x1, x2, s0=0.08333, theta=0.242):
'\n Calculates definite integral of Zhao function.\n\n :param x1: start\n :param x2: end\n :param s0: initial reaction time\n :param theta: empirically determined constant\n :return: integral of Zhao function\n '
return (kernel_primitive_zhao(x2, s0, theta) - kernel_primitive_zhao(x1, s0, theta)) | Calculates definite integral of Zhao function.
:param x1: start
:param x2: end
:param s0: initial reaction time
:param theta: empirically determined constant
:return: integral of Zhao function | tideh/functions.py | integral_zhao | sebaruehl/TiDeH | 0 | python | def integral_zhao(x1, x2, s0=0.08333, theta=0.242):
'\n Calculates definite integral of Zhao function.\n\n :param x1: start\n :param x2: end\n :param s0: initial reaction time\n :param theta: empirically determined constant\n :return: integral of Zhao function\n '
return (kernel_primitive_zhao(x2, s0, theta) - kernel_primitive_zhao(x1, s0, theta)) | def integral_zhao(x1, x2, s0=0.08333, theta=0.242):
'\n Calculates definite integral of Zhao function.\n\n :param x1: start\n :param x2: end\n :param s0: initial reaction time\n :param theta: empirically determined constant\n :return: integral of Zhao function\n '
return (kernel_primitive_zhao(x2, s0, theta) - kernel_primitive_zhao(x1, s0, theta))<|docstring|>Calculates definite integral of Zhao function.
:param x1: start
:param x2: end
:param s0: initial reaction time
:param theta: empirically determined constant
:return: integral of Zhao function<|endoftext|> |
45d4f18d893a4a471a7e89977963e0e251fc1298817773bf6cf00a9f034b76a0 | def integral_zhao_vec(x1, x2, s0=0.08333, theta=0.242):
'\n Calculates definite integral of Zhao function.\n Optimized using nd-arrays and vectorization.\n\n x1 and x2 should be nd-arrays of same size.\n\n :param x1: start values\n :param x2: end values\n :param s0: initial reaction time\n :param theta: empirically determined constant\n :return: integrals of Zhao function\n '
return (kernel_primitive_zhao_vec(x2, s0, theta) - kernel_primitive_zhao_vec(x1, s0, theta)) | Calculates definite integral of Zhao function.
Optimized using nd-arrays and vectorization.
x1 and x2 should be nd-arrays of same size.
:param x1: start values
:param x2: end values
:param s0: initial reaction time
:param theta: empirically determined constant
:return: integrals of Zhao function | tideh/functions.py | integral_zhao_vec | sebaruehl/TiDeH | 0 | python | def integral_zhao_vec(x1, x2, s0=0.08333, theta=0.242):
'\n Calculates definite integral of Zhao function.\n Optimized using nd-arrays and vectorization.\n\n x1 and x2 should be nd-arrays of same size.\n\n :param x1: start values\n :param x2: end values\n :param s0: initial reaction time\n :param theta: empirically determined constant\n :return: integrals of Zhao function\n '
return (kernel_primitive_zhao_vec(x2, s0, theta) - kernel_primitive_zhao_vec(x1, s0, theta)) | def integral_zhao_vec(x1, x2, s0=0.08333, theta=0.242):
'\n Calculates definite integral of Zhao function.\n Optimized using nd-arrays and vectorization.\n\n x1 and x2 should be nd-arrays of same size.\n\n :param x1: start values\n :param x2: end values\n :param s0: initial reaction time\n :param theta: empirically determined constant\n :return: integrals of Zhao function\n '
return (kernel_primitive_zhao_vec(x2, s0, theta) - kernel_primitive_zhao_vec(x1, s0, theta))<|docstring|>Calculates definite integral of Zhao function.
Optimized using nd-arrays and vectorization.
x1 and x2 should be nd-arrays of same size.
:param x1: start values
:param x2: end values
:param s0: initial reaction time
:param theta: empirically determined constant
:return: integrals of Zhao function<|endoftext|> |
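A small numerical sanity check of the closed-form integral against a trapezoidal approximation of the kernel (import path again assumed):

import numpy as np
from tideh.functions import integral_zhao, kernel_zhao_vec  # assumed import path

x1, x2 = 0.0, 5.0
closed_form = integral_zhao(x1, x2)

grid = np.linspace(x1, x2, 200001)          # fine grid for the trapezoid rule
numeric = np.trapz(kernel_zhao_vec(grid), grid)

print(closed_form, numeric)                 # the two values should agree closely
assert abs(closed_form - numeric) < 1e-3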
8da6ba90113d86be0ceb56ae4ed72bc90a5a4046b019e071849117832e14c0ed | def infectious_rate_tweets(t, p0=0.001, r0=0.424, phi0=0.125, taum=2.0, t0=0, tm=24, bounds=None):
'\n Alternative form of infectious rate from paper. Supports bounds for r0 and taum. Bounds should be passed as an array\n in the form of [(lower r0, lower taum), (upper r0, upper taum)].\n Converted to hours.\n\n :param t: point to evaluate function at (in hours)\n :param p0: base rate\n :param r0: amplitude\n :param phi0: shift (in days)\n :param taum: decay/freshness (in days)\n :param t0: start time of observation (in hours)\n :param tm: cyclic property (after what time a full circle passed, in hours)\n :param bounds: bounds for r0 and taum\n :return: infectiousness for time t\n '
if (bounds is not None):
if (not (bounds[0][0] < r0 < bounds[1][0])):
r0 = max(bounds[0][0], (bounds[1][0] * sigmoid((taum / bounds[1][0]))))
if (not (bounds[0][1] < taum < bounds[1][1])):
taum = max(bounds[0][1], (bounds[1][1] * sigmoid((taum / bounds[1][1]))))
return ((p0 * (1.0 - (r0 * sin((((48 / tm) * pi) * (((t + t0) / 24) + phi0)))))) * exp(((- t) / (24 * taum)))) | Alternative form of infectious rate from paper. Supports bounds for r0 and taum. Bounds should be passed as an array
in the form of [(lower r0, lower taum), (upper r0, upper taum)].
Converted to hours.
:param t: point to evaluate function at (in hours)
:param p0: base rate
:param r0: amplitude
:param phi0: shift (in days)
:param taum: decay/freshness (in days)
:param t0: start time of observation (in hours)
:param tm: cyclic property (after what time a full circle passed, in hours)
:param bounds: bounds for r0 and taum
:return: infectiousness for time t | tideh/functions.py | infectious_rate_tweets | sebaruehl/TiDeH | 0 | python | def infectious_rate_tweets(t, p0=0.001, r0=0.424, phi0=0.125, taum=2.0, t0=0, tm=24, bounds=None):
'\n Alternative form of infectious rate from paper. Supports bounds for r0 and taum. Bounds should be passed as an array\n in the form of [(lower r0, lower taum), (upper r0, upper taum)].\n Converted to hours.\n\n :param t: point to evaluate function at (in hours)\n :param p0: base rate\n :param r0: amplitude\n :param phi0: shift (in days)\n :param taum: decay/freshness (in days)\n :param t0: start time of observation (in hours)\n :param tm: cyclic property (after what time a full circle passed, in hours)\n :param bounds: bounds for r0 and taum\n :return: infectiousness for time t\n '
if (bounds is not None):
if (not (bounds[0][0] < r0 < bounds[1][0])):
r0 = max(bounds[0][0], (bounds[1][0] * sigmoid((taum / bounds[1][0]))))
if (not (bounds[0][1] < taum < bounds[1][1])):
taum = max(bounds[0][1], (bounds[1][1] * sigmoid((taum / bounds[1][1]))))
return ((p0 * (1.0 - (r0 * sin((((48 / tm) * pi) * (((t + t0) / 24) + phi0)))))) * exp(((- t) / (24 * taum)))) | def infectious_rate_tweets(t, p0=0.001, r0=0.424, phi0=0.125, taum=2.0, t0=0, tm=24, bounds=None):
'\n Alternative form of infectious rate from paper. Supports bounds for r0 and taum. Bounds should be passed as an array\n in the form of [(lower r0, lower taum), (upper r0, upper taum)].\n Converted to hours.\n\n :param t: point to evaluate function at (in hours)\n :param p0: base rate\n :param r0: amplitude\n :param phi0: shift (in days)\n :param taum: decay/freshness (in days)\n :param t0: start time of observation (in hours)\n :param tm: cyclic property (after what time a full circle passed, in hours)\n :param bounds: bounds for r0 and taum\n :return: infectiousness for time t\n '
if (bounds is not None):
if (not (bounds[0][0] < r0 < bounds[1][0])):
r0 = max(bounds[0][0], (bounds[1][0] * sigmoid((taum / bounds[1][0]))))
if (not (bounds[0][1] < taum < bounds[1][1])):
taum = max(bounds[0][1], (bounds[1][1] * sigmoid((taum / bounds[1][1]))))
return ((p0 * (1.0 - (r0 * sin((((48 / tm) * pi) * (((t + t0) / 24) + phi0)))))) * exp(((- t) / (24 * taum))))<|docstring|>Alternative form of infectious rate from paper. Supports bounds for r0 and taum. Bounds should be passed as an array
in the form of [(lower r0, lower taum), (upper r0, upper taum)].
Converted to hours.
:param t: point to evaluate function at (in hours)
:param p0: base rate
:param r0: amplitude
:param phi0: shift (in days)
:param taum: decay/freshness (in days)
:param t0: start time of observation (in hours)
:param tm: cyclic property (after what time a full circle passed, in hours)
:param bounds: bounds for r0 and taum
:return: infectiousness for time t<|endoftext|> |
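In formula form, infectious_rate_tweets evaluates (t, t0, tm in hours; phi0 and taum in days):

\[
p(t) = p_0\left[1 - r_0 \sin\!\left(\frac{48\pi}{t_m}\left(\frac{t + t_0}{24} + \phi_0\right)\right)\right] e^{-t/(24\,\tau_m)},
\]

an exponentially decaying base rate modulated by a periodic (daily, for tm = 24) oscillation.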
cb51467bc9cd8c8401e8e8005c91db46ac03e376ef1ef12278ba3ab2489eedb3 | def infectious_rate_tweets_vec(t, p0=0.001, r0=0.424, phi0=0.125, taum=2.0, t0=0, tm=24.0, bounds=None):
'\n Alternative form of infectious rate from paper. Supports bounds for r0 and taum. Bounds should be passed as an array\n in the form of [(lower r0, lower taum), (upper r0, upper taum)].\n Converted to hours.\n Vectorized version.\n\n :param t: points to evaluate function at, should be a nd-array (in hours)\n :param p0: base rate\n :param r0: amplitude\n :param phi0: shift (in days)\n :param taum: decay/freshness (in days)\n :param t0: start time of observation (in hours)\n :param tm: cyclic property (after what time a full circle passed, in hours)\n :param bounds: bounds for r0 and taum\n :return: infectiousness for given t\n '
if (bounds is not None):
if (not (bounds[0][0] < r0 < bounds[1][0])):
r0 = max(bounds[0][0], (bounds[1][0] * sigmoid((taum / bounds[1][0]))))
if (not (bounds[0][1] < taum < bounds[1][1])):
taum = max(bounds[0][1], (bounds[1][1] * sigmoid((taum / bounds[1][1]))))
return ((p0 * (1.0 - (r0 * np.sin((((48.0 / tm) * np.pi) * (((t + t0) / 24.0) + phi0)))))) * np.exp(((- t) / (24.0 * taum)))) | Alternative form of infectious rate from paper. Supports bounds for r0 and taum. Bounds should be passed as an array
in the form of [(lower r0, lower taum), (upper r0, upper taum)].
Converted to hours.
Vectorized version.
:param t: points to evaluate function at, should be a nd-array (in hours)
:param p0: base rate
:param r0: amplitude
:param phi0: shift (in days)
:param taum: decay/freshness (in days)
:param t0: start time of observation (in hours)
:param tm: cyclic property (after what time a full circle passed, in hours)
:param bounds: bounds for r0 and taum
:return: infectiousness for given t | tideh/functions.py | infectious_rate_tweets_vec | sebaruehl/TiDeH | 0 | python | def infectious_rate_tweets_vec(t, p0=0.001, r0=0.424, phi0=0.125, taum=2.0, t0=0, tm=24.0, bounds=None):
'\n Alternative form of infectious rate from paper. Supports bounds for r0 and taum. Bounds should be passed as an array\n in the form of [(lower r0, lower taum), (upper r0, upper taum)].\n Converted to hours.\n Vectorized version.\n\n :param t: points to evaluate function at, should be a nd-array (in hours)\n :param p0: base rate\n :param r0: amplitude\n :param phi0: shift (in days)\n :param taum: decay/freshness (in days)\n :param t0: start time of observation (in hours)\n :param tm: cyclic property (after what time a full circle passed, in hours)\n :param bounds: bounds for r0 and taum\n :return: infectiousness for given t\n '
if (bounds is not None):
if (not (bounds[0][0] < r0 < bounds[1][0])):
r0 = max(bounds[0][0], (bounds[1][0] * sigmoid((taum / bounds[1][0]))))
if (not (bounds[0][1] < taum < bounds[1][1])):
taum = max(bounds[0][1], (bounds[1][1] * sigmoid((taum / bounds[1][1]))))
return ((p0 * (1.0 - (r0 * np.sin((((48.0 / tm) * np.pi) * (((t + t0) / 24.0) + phi0)))))) * np.exp(((- t) / (24.0 * taum)))) | def infectious_rate_tweets_vec(t, p0=0.001, r0=0.424, phi0=0.125, taum=2.0, t0=0, tm=24.0, bounds=None):
'\n Alternative form of infectious rate from paper. Supports bounds for r0 and taum. Bounds should be passed as an array\n in the form of [(lower r0, lower taum), (upper r0, upper taum)].\n Converted to hours.\n Vectorized version.\n\n :param t: points to evaluate function at, should be a nd-array (in hours)\n :param p0: base rate\n :param r0: amplitude\n :param phi0: shift (in days)\n :param taum: decay/freshness (in days)\n :param t0: start time of observation (in hours)\n :param tm: cyclic property (after what time a full circle passed, in hours)\n :param bounds: bounds for r0 and taum\n :return: infectiousness for given t\n '
if (bounds is not None):
if (not (bounds[0][0] < r0 < bounds[1][0])):
r0 = max(bounds[0][0], (bounds[1][0] * sigmoid((taum / bounds[1][0]))))
if (not (bounds[0][1] < taum < bounds[1][1])):
taum = max(bounds[0][1], (bounds[1][1] * sigmoid((taum / bounds[1][1]))))
return ((p0 * (1.0 - (r0 * np.sin((((48.0 / tm) * np.pi) * (((t + t0) / 24.0) + phi0)))))) * np.exp(((- t) / (24.0 * taum))))<|docstring|>Alternative form of infectious rate from paper. Supports bounds for r0 and taum. Bounds should be passed as an array
in the form of [(lower r0, lower taum), (upper r0, upper taum)].
Converted to hours.
Vectorized version.
:param t: points to evaluate function at, should be a nd-array (in hours)
:param p0: base rate
:param r0: amplitude
:param phi0: shift (in days)
:param taum: decay/freshness (in days)
:param t0: start time of observation (in hours)
:param tm: cyclic property (after what time a full circle passed, in hours)
:param bounds: bounds for r0 and taum
:return: infectiousness for given t<|endoftext|> |
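A short sketch evaluating the vectorized rate over two days, once with defaults and once with out-of-range parameters that get squashed back through the sigmoid-based clamping (import path assumed):

import numpy as np
from tideh.functions import infectious_rate_tweets_vec  # assumed import path

t = np.linspace(0, 48, 97)  # two days in half-hour steps
rate = infectious_rate_tweets_vec(t, p0=0.001, r0=0.424, phi0=0.125, taum=2.0)

# r0=5.0 violates the upper bound of 1.5, so the function clamps it internally.
clamped = infectious_rate_tweets_vec(t, r0=5.0, taum=2.0,
                                     bounds=[(0.0, 0.1), (1.5, 20.0)])
print(rate.max(), clamped.max())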
e099e83d297d7eff4cfa4a2ddb3441e3bf4c71ecbd4fffb2c10cf9fac909903b | def infectious_rate_dv_p0(t, r0=0.424, phi0=0.125, taum=2.0, t0=0, tm=24.0):
'\n Derivative of the infectious rate with respect to p0.\n\n Required for direct maximum likelihood estimation.\n\n :param t: points to evaluate function at, should be nd-arrays (in hours)\n :param r0: amplitude\n :param phi0: shift (in days)\n :param taum: decay/freshness (in days)\n :param t0: start time of observation (in hours)\n :param tm: cyclic property (after what time a full circle passed, in hours)\n :return: derivative of the infectious rate with respect to p0\n '
return ((1.0 - (r0 * np.sin((((48.0 / tm) * np.pi) * (((t + t0) / 24.0) + phi0))))) * np.exp(((- t) / (24.0 * taum)))) | Derivative of the infectious rate with respect to p0.
Required for direct maximum likelihood estimation.
:param t: points to evaluate function at, should be nd-arrays (in hours)
:param r0: amplitude
:param phi0: shift (in days)
:param taum: decay/freshness (in days)
:param t0: start time of observation (in hours)
:param tm: cyclic property (after what time a full circle passed, in hours)
:return: derivative of the infectious rate with respect to p0 | tideh/functions.py | infectious_rate_dv_p0 | sebaruehl/TiDeH | 0 | python | def infectious_rate_dv_p0(t, r0=0.424, phi0=0.125, taum=2.0, t0=0, tm=24.0):
'\n Derivative of the infectious rate with respect to p0.\n\n Required for direct maximum likelihood estimation.\n\n :param t: points to evaluate function at, should be nd-arrays (in hours)\n :param r0: amplitude\n :param phi0: shift (in days)\n :param taum: decay/freshness (in days)\n :param t0: start time of observation (in hours)\n :param tm: cyclic property (after what time a full circle passed, in hours)\n :return: derivative of the infectious rate with respect to p0\n '
return ((1.0 - (r0 * np.sin((((48.0 / tm) * np.pi) * (((t + t0) / 24.0) + phi0))))) * np.exp(((- t) / (24.0 * taum)))) | def infectious_rate_dv_p0(t, r0=0.424, phi0=0.125, taum=2.0, t0=0, tm=24.0):
'\n Derivative of the infectious rate with respect to p0.\n\n Required for direct maximum likelihood estimation.\n\n :param t: points to evaluate function at, should be nd-arrays (in hours)\n :param r0: amplitude\n :param phi0: shift (in days)\n :param taum: decay/freshness (in days)\n :param t0: start time of observation (in hours)\n :param tm: cyclic property (after what time a full circle passed, in hours)\n :return: derivative of the infectious rate with respect to p0\n '
return ((1.0 - (r0 * np.sin((((48.0 / tm) * np.pi) * (((t + t0) / 24.0) + phi0))))) * np.exp(((- t) / (24.0 * taum))))<|docstring|>Derivative of the infectious rate with respect to p0.
Required for direct maximum likelihood estimation.
:param t: points to evaluate function at, should be nd-arrays (in hours)
:param r0: amplitude
:param phi0: shift (in days)
:param taum: decay/freshness (in days)
:param t0: start time of observation (in hours)
:param tm: cyclic property (after what time a full circle passed, in hours)
:return: derivative of the infectious rate with respect to p0<|endoftext|>
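Since p(t) above is linear in p0, the derivative that infectious_rate_dv_p0 returns is simply the rate with the p0 factor dropped,

\[
\frac{\partial p(t)}{\partial p_0} = \frac{p(t)}{p_0},
\]

which is exactly the quantity a direct maximum-likelihood fit of p0 needs.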
c41601ec166ab822e27d8f57bd1ba57d74a9b7aa37d29cdd199a327085d59cbf | def sigmoid(x):
'\n Calculates sigmoid function for value x.\n '
return (1 / (1 + exp((- x)))) | Calculates sigmoid function for value x. | tideh/functions.py | sigmoid | sebaruehl/TiDeH | 0 | python | def sigmoid(x):
'\n \n '
return (1 / (1 + exp((- x)))) | def sigmoid(x):
'\n \n '
return (1 / (1 + exp((- x))))<|docstring|>Calculates sigmoid function for value x.<|endoftext|> |