code | docs |
---|---|
def add_fluctuations(hdf5_file, N_columns, N_processes):
random_state = np.random.RandomState(0)
slice_queue = multiprocessing.JoinableQueue()
pid_list = []
for i in range(N_processes):
worker = Fluctuations_worker(hdf5_file,
'/aff_prop_group/similarities', random_state,
N_columns, slice_queue)
worker.daemon = True
worker.start()
pid_list.append(worker.pid)
for rows_slice in chunk_generator(N_columns, 4 * N_processes):
slice_queue.put(rows_slice)
slice_queue.join()
slice_queue.close()
terminate_processes(pid_list)
gc.collect() | This procedure organizes the addition of small fluctuations on top of
a matrix of similarities at 'hdf5_file' across 'N_processes'
different processes. Each of those processes is an instance of the
class 'Fluctuations_worker' defined elsewhere in this module. |
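The same queue-and-daemon-worker pattern recurs in the responsibility and availability routines below. A minimal, self-contained sketch of that pattern, with the per-slice work and the counts being illustrative rather than taken from this module:

```python
import multiprocessing

def worker(queue):
    """Consume row slices until the parent process exits (daemon worker)."""
    while True:
        rows_slice = queue.get()
        try:
            pass  # process rows_slice here (illustrative placeholder)
        finally:
            queue.task_done()

if __name__ == '__main__':
    queue = multiprocessing.JoinableQueue()
    for _ in range(4):
        proc = multiprocessing.Process(target=worker, args=(queue,))
        proc.daemon = True
        proc.start()
    for start in range(0, 100, 10):
        queue.put(slice(start, start + 10))
    queue.join()   # returns once task_done() has been called for every slice
    queue.close()
```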
def compute_responsibilities(hdf5_file, N_columns, damping, N_processes):
slice_queue = multiprocessing.JoinableQueue()
pid_list = []
for i in range(N_processes):
worker = Responsibilities_worker(hdf5_file, '/aff_prop_group',
N_columns, damping, slice_queue)
worker.daemon = True
worker.start()
pid_list.append(worker.pid)
for rows_slice in chunk_generator(N_columns, 8 * N_processes):
slice_queue.put(rows_slice)
slice_queue.join()
slice_queue.close()
terminate_processes(pid_list) | Organize the computation and update of the responsibility matrix
for Affinity Propagation clustering with 'damping' as the eponymous
damping parameter. Each of the processes concurrently involved in this task
is an instance of the class 'Responsibilities_worker' defined above. |
def rows_sum_init(hdf5_file, path, out_lock, *numpy_args):
global g_hdf5_file, g_path, g_out, g_out_lock
g_hdf5_file, g_path, g_out_lock = hdf5_file, path, out_lock
g_out = to_numpy_array(*numpy_args) | Create global variables sharing the same objects as those pointed to by
'hdf5_file', 'path' and 'out_lock'.
Also create a NumPy array view of the multiprocessing.Array ctypes array
specified by '*numpy_args'. |
def to_numpy_array(multiprocessing_array, shape, dtype):
return np.frombuffer(multiprocessing_array.get_obj(),
dtype = dtype).reshape(shape) | Convert a shared multiprocessing array to a NumPy array.
No data copying is involved. |
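The zero-copy behaviour can be checked with a small, self-contained sketch; the shape and the values written below are illustrative:

```python
import multiprocessing
from ctypes import c_double

import numpy as np

def to_numpy_array(multiprocessing_array, shape, dtype):
    # np.frombuffer reuses the shared ctypes buffer, so nothing is copied.
    return np.frombuffer(multiprocessing_array.get_obj(),
                         dtype=dtype).reshape(shape)

if __name__ == '__main__':
    shared = multiprocessing.Array(c_double, 6, lock=True)
    view = to_numpy_array(shared, (2, 3), np.float64)
    view[:] = 1.0        # writes land directly in the shared buffer
    print(shared[:])     # [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
```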
def compute_rows_sum(hdf5_file, path, N_columns, N_processes, method = 'Pool'):
assert isinstance(method, str), "parameter 'method' must be a string"
assert method in ('Ordinary', 'Pool'), "parameter 'method' must be set to either 'Ordinary' or 'Pool'"
if method == 'Ordinary':
rows_sum = np.zeros(N_columns, dtype = float)
chunk_size = get_chunk_size(N_columns, 2)
with Worker.hdf5_lock:
with tables.open_file(hdf5_file, 'r+') as fileh:
hdf5_array = fileh.get_node(path)
N_rows = hdf5_array.nrows
assert N_columns == N_rows
for i in range(0, N_columns, chunk_size):
slc = slice(i, min(i+chunk_size, N_columns))
tmp = hdf5_array[:, slc]
rows_sum[slc] = tmp[:].sum(axis = 0)
else:
rows_sum_array = multiprocessing.Array(c_double, N_columns, lock = True)
chunk_size = get_chunk_size(N_columns, 2 * N_processes)
numpy_args = rows_sum_array, N_columns, np.float64
with closing(multiprocessing.Pool(N_processes,
initializer = rows_sum_init,
initargs = (hdf5_file, path, rows_sum_array.get_lock()) +
numpy_args)) as pool:
pool.map_async(multiprocessing_get_sum,
chunk_generator(N_columns, 2 * N_processes), chunk_size)
pool.close()
pool.join()
rows_sum = to_numpy_array(*numpy_args)
gc.collect()
return rows_sum | Parallel computation of the sums across the rows of a two-dimensional array
accessible at the node specified by 'path' in the 'hdf5_file'
hierarchical data format. |
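A hedged, self-contained sketch of the pattern used in the 'Pool' branch above: a shared ctypes array is handed to every worker through the pool initializer, each worker wraps it as a NumPy view, and updates happen under the array's lock. The toy accumulation below stands in for the HDF5 column sums:

```python
import multiprocessing
from ctypes import c_double

import numpy as np

def init_worker(shared, lock, length):
    # Globals set here are visible to every task executed by this worker.
    global g_out, g_lock
    g_out = np.frombuffer(shared.get_obj(), dtype=np.float64).reshape(length)
    g_lock = lock

def accumulate(value):
    with g_lock:
        g_out[:] += value   # illustrative stand-in for a partial row sum

if __name__ == '__main__':
    length = 8
    shared = multiprocessing.Array(c_double, length, lock=True)
    with multiprocessing.Pool(2, initializer=init_worker,
                              initargs=(shared, shared.get_lock(), length)) as pool:
        pool.map(accumulate, [1.0, 2.0, 3.0])
    print(np.frombuffer(shared.get_obj(), dtype=np.float64))  # every entry equals 6.0
```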
def compute_availabilities(hdf5_file, N_columns, damping, N_processes, rows_sum):
slice_queue = multiprocessing.JoinableQueue()
pid_list = []
for i in range(N_processes):
worker = Availabilities_worker(hdf5_file, '/aff_prop_group',
N_columns, damping, slice_queue, rows_sum)
worker.daemon = True
worker.start()
pid_list.append(worker.pid)
for rows_slice in chunk_generator(N_columns, 8 * N_processes):
slice_queue.put(rows_slice)
slice_queue.join()
slice_queue.close()
terminate_processes(pid_list)
gc.collect() | Coordinates the computation and update of the availability matrix
for Affinity Propagation clustering.
Parameters
----------
hdf5_file : string or file handle
Specify access to the hierarchical data format used throughout all the iterations
of message-passing between data-points involved in Affinity Propagation clustering.
N_columns : int
The number of samples in the data-set subjected to Affinity Propagation clustering.
damping : float
The damping parameter of Affinity Propagation clustering, typically set to 0.5.
N_processes : int
The number of subprocesses involved in the parallel computation and update of the
matrix of availabilities.
rows_sum : array of shape (N_columns,)
A vector containing, for each column of the similarities matrix, the sum
of its entries across the rows. |
def check_convergence(hdf5_file, iteration, convergence_iter, max_iter):
Worker.hdf5_lock.acquire()
with tables.open_file(hdf5_file, 'r+') as fileh:
A = fileh.root.aff_prop_group.availabilities
R = fileh.root.aff_prop_group.responsibilities
P = fileh.root.aff_prop_group.parallel_updates
N = A.nrows
diag_ind = np.diag_indices(N)
E = (A[diag_ind] + R[diag_ind]) > 0
P[:, iteration % convergence_iter] = E
e_mat = P[:]
K = E.sum(axis = 0)
Worker.hdf5_lock.release()
if iteration >= convergence_iter:
se = e_mat.sum(axis = 1)
unconverged = (np.sum((se == convergence_iter) + (se == 0)) != N)
if (not unconverged and (K > 0)) or (iteration == max_iter):
return True
return False | If the estimated number of clusters has not changed for 'convergence_iter'
consecutive iterations in a total of 'max_iter' rounds of message-passing,
the procedure herewith returns 'True'.
Otherwise, returns 'False'.
Parameter 'iteration' identifies the run of message-passing
that has just completed. |
def cluster_labels_A(hdf5_file, c, lock, I, rows_slice):
with Worker.hdf5_lock:
with tables.open_file(hdf5_file, 'r+') as fileh:
S = fileh.root.aff_prop_group.similarities
s = S[rows_slice, ...]
s = np.argmax(s[:, I], axis = 1)
with lock:
c[rows_slice] = s[:]
del s | One of the tasks to be performed by a pool of subprocesses, as the first
step in identifying the cluster labels and indices of the cluster centers
for Affinity Propagation clustering. |
def cluster_labels_B(hdf5_file, s_reduced, lock, I, ii, iix, rows_slice):
with Worker.hdf5_lock:
with tables.open_file(hdf5_file, 'r+') as fileh:
S = fileh.root.aff_prop_group.similarities
s = S[rows_slice, ...]
s = s[:, ii]
s = s[iix[rows_slice]]
with lock:
s_reduced += s[:].sum(axis = 0)
del s | Second task to be performed by a pool of subprocesses before
the cluster labels and cluster center indices can be identified. |
def output_clusters(labels, cluster_centers_indices):
here = os.getcwd()
try:
output_directory = os.path.join(here, 'concurrent_AP_output')
os.makedirs(output_directory)
except OSError:
if not os.path.isdir(output_directory):
print("ERROR: concurrent_AP: output_clusters: cannot create a directory "
"for storage of the results of Affinity Propagation clustering "
"in your current working directory")
sys.exit(1)
if any(np.isnan(labels)):
fmt = '%.1f'
else:
fmt = '%d'
with open(os.path.join(output_directory, 'labels.tsv'), 'w') as fh:
np.savetxt(fh, labels, fmt = fmt, delimiter = '\t')
if cluster_centers_indices is not None:
with open(os.path.join(output_directory, 'cluster_centers_indices.tsv'), 'w') as fh:
np.savetxt(fh, cluster_centers_indices, fmt = '%.1f',
delimiter = '\t') | Write the vectors of cluster identities and of cluster-center indices
to tab-separated files. |
def set_preference(data, chunk_size):
N_samples, N_features = data.shape
rng = np.arange(0, N_samples, dtype = int)
medians = []
for i in range(15):
selected_samples = np.random.choice(N_samples, size = chunk_size, replace = False)
samples = data[selected_samples, :]
S = - euclidean_distances(samples, data, squared = True)
n = chunk_size * N_samples - (chunk_size * (chunk_size + 1) / 2)
rows = np.zeros(0, dtype = int)
for j in range(chunk_size):
rows = np.append(rows, np.full(N_samples - j, j, dtype = int))
cols = np.zeros(0, dtype = int)
for j in range(chunk_size):
cols = np.append(cols, np.delete(rng, selected_samples[:j+1]))
triu_indices = tuple((rows, cols))
preference = np.median(S, overwrite_input = True)
medians.append(preference)
del S
if i % 4 == 3:
gc.collect()
preference = np.median(medians)
return preference | Return the median of the distribution of pairwise L2 Euclidean distances
between samples (the rows of 'data') as the default preference parameter
for Affinity Propagation clustering.
Parameters
----------
data : array of shape (N_samples, N_features)
The data-set submitted for Affinity Propagation clustering.
chunk_size : int
The size of random subsamples from the data-set whose similarity
matrix is computed. The resulting median of the distribution of
pairwise distances between the data-points selected as part of a
given subsample is stored into a list of medians.
Returns
-------
preference : float
The preference parameter for Affinity Propagation clustering is computed
as the median of the list of median pairwise distances between the data-points
selected as part of each of 15 rounds of random subsampling. |
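For a small data-set the subsampling above is not needed; a hedged, exact counterpart of the same default simply takes the median of all pairwise negative squared Euclidean distances. The helper name and sizes below are illustrative:

```python
import numpy as np
from sklearn.metrics.pairwise import euclidean_distances

def exact_preference(data):
    # Negative squared Euclidean distances, matching the similarity convention above.
    S = -euclidean_distances(data, data, squared=True)
    iu = np.triu_indices_from(S, k=1)   # strictly upper triangle: each pair counted once
    return np.median(S[iu])

data = np.random.rand(200, 4)
print(exact_preference(data))
```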
def getTerms(self, term=None, getFingerprint=None, startIndex=0, maxResults=10):
return self._terms.getTerm(self._retina, term, getFingerprint, startIndex, maxResults) | Get term objects
Args:
term, str: A term in the retina (optional)
getFingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional)
startIndex, int: The start-index for pagination (optional)
maxResults, int: Max results per page (optional)
Returns:
list of Term
Raises:
CorticalioException: if the request was not successful |
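A hedged usage sketch for this group of client wrappers; the retinasdk import, the FullClient constructor arguments, and the API key are assumptions rather than anything defined in this section:

```python
import retinasdk  # assumption: the cortical.io Python SDK is installed

# Hypothetical construction; replace the key and server with real values.
client = retinasdk.FullClient("your_api_key",
                              apiServer="http://api.cortical.io/rest",
                              retinaName="en_associative")

terms = client.getTerms(term="python", getFingerprint=False, maxResults=5)
for t in terms:
    print(t.term)
```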
def getContextsForTerm(self, term, getFingerprint=None, startIndex=0, maxResults=5):
return self._terms.getContextsForTerm(self._retina, term, getFingerprint, startIndex, maxResults) | Get the contexts for a given term
Args:
term, str: A term in the retina (required)
getFingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional)
startIndex, int: The start-index for pagination (optional)
maxResults, int: Max results per page (optional)
Returns:
list of Context
Raises:
CorticalioException: if the request was not successful |
def getSimilarTermsForTerm(self, term, contextId=None, posType=None, getFingerprint=None, startIndex=0, maxResults=10):
return self._terms.getSimilarTerms(self._retina, term, contextId, posType, getFingerprint, startIndex, maxResults) | Get the similar terms of a given term
Args:
term, str: A term in the retina (required)
contextId, int: The identifier of a context (optional)
posType, str: Part of speech (optional)
getFingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional)
startIndex, int: The start-index for pagination (optional)
maxResults, int: Max results per page (optional)
Returns:
list of Term
Raises:
CorticalioException: if the request was not successful |
def getTokensForText(self, body, POStags=None):
return self._text.getTokensForText(self._retina, body, POStags) | Get tokenized input text
Args:
body, str: The text to be tokenized (required)
POStags, str: Specify desired POS types (optional)
Returns:
list of str
Raises:
CorticalioException: if the request was not successful |
def getSlicesForText(self, body, getFingerprint=None, startIndex=0, maxResults=10):
return self._text.getSlicesForText(self._retina, body, getFingerprint, startIndex, maxResults) | Get a list of slices of the text
Args:
body, str: The text to be evaluated (required)
getFingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional)
startIndex, int: The start-index for pagination (optional)
maxResults, int: Max results per page (optional)
Returns:
list of Text
Raises:
CorticalioException: if the request was not successful |
def getFingerprintsForTexts(self, strings, sparsity=1.0):
body = [{"text": s} for s in strings]
return self._text.getRepresentationsForBulkText(self._retina, json.dumps(body), sparsity) | Bulk get Fingerprint for text.
Args:
strings, list(str): A list of texts to be evaluated (required)
sparsity, float: Sparsify the resulting expression to this percentage (optional)
Returns:
list of Fingerprint
Raises:
CorticalioException: if the request was not successful |
def getFingerprintForExpression(self, body, sparsity=1.0):
return self._expressions.resolveExpression(self._retina, body, sparsity) | Resolve an expression
Args:
body, ExpressionOperation: The JSON encoded expression to be evaluated (required)
sparsity, float: Sparsify the resulting expression to this percentage (optional)
Returns:
Fingerprint
Raises:
CorticalioException: if the request was not successful |
def getContextsForExpression(self, body, getFingerprint=None, startIndex=0, maxResults=5, sparsity=1.0):
return self._expressions.getContextsForExpression(self._retina, body, getFingerprint, startIndex, maxResults, sparsity) | Get semantic contexts for the input expression
Args:
body, ExpressionOperation: The JSON encoded expression to be evaluated (required)
getFingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional)
startIndex, int: The start-index for pagination (optional)
maxResults, int: Max results per page (optional)
sparsity, float: Sparsify the resulting expression to this percentage (optional)
Returns:
list of Context
Raises:
CorticalioException: if the request was not successful |
def getSimilarTermsForExpression(self, body, contextId=None, posType=None, getFingerprint=None, startIndex=0, maxResults=10, sparsity=1.0):
return self._expressions.getSimilarTermsForExpressionContext(self._retina, body, contextId, posType, getFingerprint, startIndex, maxResults, sparsity) | Get similar terms for the contexts of an expression
Args:
body, ExpressionOperation: The JSON encoded expression to be evaluated (required)
contextId, int: The identifier of a context (optional)
posType, str: Part of speech (optional)
getFingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional)
startIndex, int: The start-index for pagination (optional)
maxResults, int: Max results per page (optional)
sparsity, float: Sparsify the resulting expression to this percentage (optional)
Returns:
list of Term
Raises:
CorticalioException: if the request was not successful |
def getFingerprintsForExpressions(self, body, sparsity=1.0):
return self._expressions.resolveBulkExpression(self._retina, body, sparsity) | Bulk resolution of expressions
Args:
body, ExpressionOperation: The JSON encoded expression to be evaluated (required)
sparsity, float: Sparsify the resulting expression to this percentage (optional)
Returns:
list of Fingerprint
Raises:
CorticalioException: if the request was not successful |
def getContextsForExpressions(self, body, getFingerprint=None, startIndex=0, maxResults=5, sparsity=1.0):
return self._expressions.getContextsForBulkExpression(self._retina, body, getFingerprint, startIndex, maxResults, sparsity) | Bulk get contexts for input expressions
Args:
body, ExpressionOperation: The JSON encoded expression to be evaluated (required)
getFingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional)
startIndex, int: The start-index for pagination (optional)
maxResults, int: Max results per page (optional)
sparsity, float: Sparsify the resulting expression to this percentage (optional)
Returns:
list of Context
Raises:
CorticalioException: if the request was not successful |
def getSimilarTermsForExpressions(self, body, contextId=None, posType=None, getFingerprint=None, startIndex=0, maxResults=10, sparsity=1.0):
return self._expressions.getSimilarTermsForBulkExpressionContext(self._retina, body, contextId, posType, getFingerprint, startIndex, maxResults, sparsity) | Bulk get similar terms for input expressions
Args:
body, ExpressionOperation: The JSON encoded expression to be evaluated (required)
contextId, int: The identifier of a context (optional)
posType, str: Part of speech (optional)
getFingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional)
startIndex, int: The start-index for pagination (optional)
maxResults, int: Max results per page (optional)
sparsity, float: Sparsify the resulting expression to this percentage (optional)
Returns:
list of Term
Raises:
CorticalioException: if the request was not successful |
def getImage(self, body, imageScalar=2, plotShape="circle", imageEncoding="base64/png", sparsity=1.0):
return self._image.getImageForExpression(self._retina, body, imageScalar, plotShape, imageEncoding, sparsity) | Get images for expressions
Args:
body, ExpressionOperation: The JSON encoded expression to be evaluated (required)
imageScalar, int: The scale of the image (optional)
plotShape, str: The image shape (optional)
imageEncoding, str: The encoding of the returned image (optional)
sparsity, float: Sparsify the resulting expression to this percentage (optional)
Returns:
str with the raw byte data of the image
Raises:
CorticalioException: if the request was not successful |
def compareImage(self, body, plotShape="circle", imageScalar=2, imageEncoding="base64/png"):
return self._image.getOverlayImage(self._retina, body, plotShape, imageScalar, imageEncoding) | Get an overlay image for two expressions
Args:
body, ExpressionOperation: The JSON encoded comparison array to be evaluated (required)
plotShape, str: The image shape (optional)
imageScalar, int: The scale of the image (optional)
imageEncoding, str: The encoding of the returned image (optional)
Returns:
str with the raw byte data of the image
Raises:
CorticalioException: if the request was not successful |
def getImages(self, body, getFingerprint=None, imageScalar=2, plotShape="circle", sparsity=1.0):
return self._image.getImageForBulkExpressions(self._retina, body, getFingerprint, imageScalar, plotShape, sparsity) | Bulk get images for expressions
Args:
body, ExpressionOperation: The JSON encoded expression to be evaluated (required)
getFingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional)
imageScalar, int: The scale of the image (optional)
plotShape, str: The image shape (optional)
sparsity, float: Sparsify the resulting expression to this percentage (optional)
Returns:
list of Image
Raises:
CorticalioException: if the request was not successful |
def createCategoryFilter(self, filterName, positiveExamples, negativeExamples=[]):
samples = {"positiveExamples": [{"text": s} for s in positiveExamples],
"negativeExamples": [{"text": s} for s in negativeExamples]}
body = json.dumps(samples)
return self._classify.createCategoryFilter(self._retina, filterName, body) | Get a classifier filter (fingerprint) for positive and negative text samples
Args:
filterName, str: A unique name for the filter. (required)
positiveExamples, list(str): The list of positive example texts. (required)
negativeExamples, list(str): The list of negative example texts. (optional)
Returns:
CategoryFilter
Raises:
CorticalioException: if the request was not successful |
def getContextsForTerm(self, retina_name, term, get_fingerprint=None, start_index=0, max_results=5):
resourcePath = '/terms/contexts'
method = 'GET'
queryParams = {}
headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}
postData = None
queryParams['retina_name'] = retina_name
queryParams['term'] = term
queryParams['start_index'] = start_index
queryParams['max_results'] = max_results
queryParams['get_fingerprint'] = get_fingerprint
response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)
return [context.Context(**r) for r in response.json()] | Get the contexts for a given term
Args:
retina_name, str: The retina name (required)
term, str: A term in the retina (required)
get_fingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional)
start_index, int: The start-index for pagination (optional)
max_results, int: Max results per page (optional)
Returns: Array[Context] |
def getSimilarTerms(self, retina_name, term, context_id=None, pos_type=None, get_fingerprint=None, start_index=0, max_results=10):
resourcePath = '/terms/similar_terms'
method = 'GET'
queryParams = {}
headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}
postData = None
queryParams['retina_name'] = retina_name
queryParams['term'] = term
queryParams['context_id'] = context_id
queryParams['start_index'] = start_index
queryParams['max_results'] = max_results
queryParams['pos_type'] = pos_type
queryParams['get_fingerprint'] = get_fingerprint
response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)
return [Term(**r) for r in response.json()] | Get the similar terms of a given term
Args:
retina_name, str: The retina name (required)
term, str: A term in the retina (required)
context_id, int: The identifier of a context (optional)
pos_type, str: Part of speech (optional)
get_fingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional)
start_index, int: The start-index for pagination (optional)
max_results, int: Max results per page (optional)
Returns: Array[Term] |
def createCategoryFilter(self, retina_name, filter_name, body):
resourcePath = '/classify/create_category_filter'
method = 'POST'
queryParams = {}
headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}
postData = None
queryParams['retina_name'] = retina_name
queryParams['filter_name'] = filter_name
postData = body
response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)
return category_filter.CategoryFilter(**response.json()) | Get a filter (fingerprint) for the classifier
Args:
filter_name, str: A unique name for the filter. (required)
body, FilterTrainingObject: The list of positive and negative (optional) example items. (required)
retina_name, str: The retina name (required)
Returns: CategoryFilter |
def getImageForExpression(self, retina_name, body, image_scalar=2, plot_shape="circle", image_encoding="base64/png", sparsity=1.0):
resourcePath = '/image'
method = 'POST'
queryParams = {}
headerParams = {'Accept': 'image/png', 'Content-Type': 'application/json'}
postData = None
queryParams['retina_name'] = retina_name
queryParams['image_scalar'] = image_scalar
queryParams['plot_shape'] = plot_shape
queryParams['image_encoding'] = image_encoding
queryParams['sparsity'] = sparsity
postData = body
response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)
return response.content | Get images for expressions
Args:
retina_name, str: The retina name (required)
body, ExpressionOperation: The JSON encoded expression to be evaluated (required)
image_scalar, int: The scale of the image (optional)
plot_shape, str: The image shape (optional)
image_encoding, str: The encoding of the returned image (optional)
sparsity, float: Sparsify the resulting expression to this percentage (optional)
Returns: str with the raw byte data of the image |
def getImageForBulkExpressions(self, retina_name, body, get_fingerprint=None, image_scalar=2, plot_shape="circle", sparsity=1.0):
resourcePath = '/image/bulk'
method = 'POST'
queryParams = {}
headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}
postData = None
queryParams['retina_name'] = retina_name
queryParams['image_scalar'] = image_scalar
queryParams['plot_shape'] = plot_shape
queryParams['sparsity'] = sparsity
queryParams['get_fingerprint'] = get_fingerprint
postData = body
response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)
return [image.Image(**r) for r in response.json()] | Bulk get images for expressions
Args:
retina_name, str: The retina name (required)
body, ExpressionOperation: The JSON encoded expression to be evaluated (required)
get_fingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional)
image_scalar, int: The scale of the image (optional)
plot_shape, str: The image shape (optional)
sparsity, float: Sparsify the resulting expression to this percentage (optional)
Returns: Array[Image] |
def getKeywordsForText(self, retina_name, body):
resourcePath = '/text/keywords'
method = 'POST'
queryParams = {}
headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}
postData = None
queryParams['retina_name'] = retina_name
postData = body
response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)
return response.json() | Get a list of keywords from the text
Args:
retina_name, str: The retina name (required)
body, str: The text to be evaluated (required)
Returns: Array[str] |
def getSlicesForText(self, retina_name, body, get_fingerprint=None, start_index=0, max_results=10):
resourcePath = '/text/slices'
method = 'POST'
queryParams = {}
headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}
postData = None
queryParams['retina_name'] = retina_name
queryParams['start_index'] = start_index
queryParams['max_results'] = max_results
queryParams['get_fingerprint'] = get_fingerprint
postData = body
response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)
return [text.Text(**r) for r in response.json()] | Get a list of slices of the text
Args:
retina_name, str: The retina name (required)
body, str: The text to be evaluated (required)
get_fingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional)
start_index, int: The start-index for pagination (optional)
max_results, int: Max results per page (optional)
Returns: Array[Text] |
def getLanguage(self, body):
resourcePath = '/text/detect_language'
method = 'POST'
queryParams = {}
headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}
postData = None
postData = body
response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)
return language_rest.LanguageRest(**response.json()) | Detect the language of a text
Args:
body, str: Your input text (UTF-8) (required)
Returns: LanguageRest |
def stylesheet_declarations(string, is_merc=False, scale=1):
# everything is display: map by default
display_map = Declaration(Selector(SelectorElement(['*'], [])),
Property('display'), Value('map', False),
(False, (0, 0, 0), (0, 0)))
declarations = [display_map]
tokens = cssTokenizer().tokenize(string)
variables = {}
while True:
try:
for declaration in parse_rule(tokens, variables, [], [], is_merc):
if scale != 1:
declaration.scaleBy(scale)
declarations.append(declaration)
except StopIteration:
break
# sort by a css-like method
return sorted(declarations, key=operator.attrgetter('sort_key')) | Parse a string representing a stylesheet into a list of declarations.
Required boolean is_merc indicates whether the projection should
be interpreted as spherical mercator, so we know what to do with
zoom/scale-denominator in parse_rule(). |
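A hedged usage sketch of the parser above; the stylesheet text is illustrative, and the attribute accesses mirror the ones used in main() near the end of this section:

```python
import cascadenik

css = """
* { display: map; }
Layer[zoom>=10] { polygon-fill: #ff9900; }
"""

for dec in cascadenik.stylesheet_declarations(css, is_merc=True):
    # Declarations come back in cascade order, as returned by the parser.
    print('%s { %s: %s; }' % (dec.selector, dec.property.name, dec.value.value))
```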
def get_coin_list(coins='all'):
# convert single coins input to single element lists
if not isinstance(coins, list) and coins != 'all':
coins = [coins]
# load data
url = build_url('coinlist')
data = load_data(url)['Data']
# coins specified
if coins != 'all':
data = {c: data[c] for c in coins}
return data | Get general information about all the coins available on
cryptocompare.com.
Args:
coins: Default value of 'all' returns information about all the coins
available on the site. Otherwise a single string or list of coin
symbols can be used.
Returns:
The function returns a dictionary containing individual dictionaries
for the coins specified by the input. The key of the top dictionary
corresponds to the coin symbol. Each coin dictionary has the following
structure:
{coin_symbol1: {'Algorithm' : ...,
'CoinName': ...,
'FullName': ...,
'FullyPremined': ...,
'Id': ...,
'ImageUrl': ...,
'Name': ...,
'PreMinedValue': ...,
'ProofType': ...,
'SortOrder': ...,
'TotalCoinsFreeFloat': ...,
'TotalCoinSupply': ...,
'Url': ...},
coin_symbol2: {...},
...} |
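A hedged usage sketch of the call above, run from within the same module (or with the function imported from wherever this wrapper lives); the coin symbols are examples only, and a reachable cryptocompare.com endpoint is assumed:

```python
# Assumes get_coin_list() from above is in scope.
btc = get_coin_list(coins='BTC')
print(btc['BTC']['CoinName'])

subset = get_coin_list(coins=['BTC', 'ETH', 'LTC'])
print(sorted(subset.keys()))   # ['BTC', 'ETH', 'LTC']
```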
def get_coin_snapshot(fsym, tsym):
# load data
url = build_url('coinsnapshot', fsym=fsym, tsym=tsym)
data = load_data(url)['Data']
return data | Get blockchain information, aggregated data as well as data for the
individual exchanges available for the specified currency pair.
Args:
fsym: FROM symbol.
tsym: TO symbol.
Returns:
The function returns a dictionary containing blockchain as well as
trading information from the different exchanges where the specified
currency pair is available.
{'AggregatedData': dict,
'Algorithm': ...,
'BlockNumber': ...,
'BlockReward': ...,
'Exchanges': [dict1, dict2, ...],
'NetHashesPerSecond': ...,
'ProofType': ...,
'TotalCoinsMined': ...}
dict = {'FLAGS': ...,
'FROMSYMBOL': ...,
'HIGH24HOUR': ...,
'LASTMARKET': ...,
'LASTTRADEID': ...,
'LASTUPDATE': ...,
'LASTVOLUME': ...,
'LASTVOLUMETO': ...,
'LOW24HOUR': ...,
'MARKET': ...,
'OPEN24HOUR': ...,
'PRICE': ...,
'TOSYMBOL': ...,
'TYPE': ...,
'VOLUME24HOUR': ...,
'VOLUME24HOURTO': ...} |
def compareBulk(self, retina_name, body):
resourcePath = '/compare/bulk'
method = 'POST'
queryParams = {}
headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}
postData = None
queryParams['retina_name'] = retina_name
postData = body
response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)
return [metric.Metric(**r) for r in response.json()] | Bulk compare
Args:
retina_name, str: The retina name (required)
body, ExpressionOperation: Bulk comparison of elements 2 by 2 (required)
Returns: Array[Metric] |
def getRetinas(self, retina_name=None):
resourcePath = '/retinas'
method = 'GET'
queryParams = {}
headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}
postData = None
queryParams['retina_name'] = retina_name
response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)
return [retina.Retina(**r) for r in response.json()] | Information about retinas
Args:
retina_name, str: The retina name (optional)
Returns: Array[Retina] |
def specificity(self):
ids = sum(a.countIDs() for a in self.elements)
non_ids = sum((a.countNames() - a.countIDs()) for a in self.elements)
tests = sum(len(a.tests) for a in self.elements)
return (ids, non_ids, tests) | Loosely based on http://www.w3.org/TR/REC-CSS2/cascade.html#specificity |
def matches(self, tag, id, classes):
element = self.elements[0]
unmatched_ids = [name[1:] for name in element.names if name.startswith('#')]
unmatched_classes = [name[1:] for name in element.names if name.startswith('.')]
unmatched_tags = [name for name in element.names if name != '*' and not name.startswith('#') and not name.startswith('.')]
if tag and tag in unmatched_tags:
unmatched_tags.remove(tag)
if id and id in unmatched_ids:
unmatched_ids.remove(id)
for class_ in classes:
if class_ in unmatched_classes:
unmatched_classes.remove(class_)
if unmatched_tags or unmatched_ids or unmatched_classes:
return False
else:
return True | Given a tag, an id, and a list of classes, return True if this selector would match. |
def scaledBy(self, scale):
scaled = deepcopy(self)
for test in scaled.elements[0].tests:
if type(test.value) in (int, float):
if test.property == 'scale-denominator':
test.value /= scale
elif test.property == 'zoom':
test.value += log(scale)/log(2)
return scaled | Return a new Selector with scale denominators scaled by a number. |
def scaledBy(self, scale):
scaled = deepcopy(self)
if type(scaled.value) in (int, float):
scaled.value *= scale
elif isinstance(scaled.value, numbers):
scaled.value.values = tuple(v * scale for v in scaled.value.values)
return scaled | Return a new Value scaled by a given number for ints and floats. |
def load_map(map, src_file, output_dir, scale=1, cache_dir=None, datasources_cfg=None, user_styles=[], verbose=False):
scheme, n, path, p, q, f = urlparse(src_file)
if scheme in ('file', ''):
assert exists(src_file), "We'd prefer an input file that exists to one that doesn't"
if cache_dir is None:
cache_dir = expanduser(CACHE_DIR)
# only make the cache dir if it wasn't user-provided
if not isdir(cache_dir):
mkdir(cache_dir)
chmod(cache_dir, 0755)
dirs = Directories(output_dir, realpath(cache_dir), dirname(src_file))
compile(src_file, dirs, verbose, datasources_cfg=datasources_cfg, user_styles=user_styles, scale=scale).to_mapnik(map, dirs) | Apply a stylesheet source file to a given mapnik Map instance, like mapnik.load_map().
Parameters:
map:
Instance of mapnik.Map.
src_file:
Location of stylesheet .mml file. Can be relative path, absolute path,
or fully-qualified URL of a remote stylesheet.
output_dir:
...
Keyword Parameters:
scale:
Optional scale value for output map, 2 doubles the size for high-res displays.
cache_dir:
...
datasources_cfg:
...
user_styles:
An optional list of files or URLs that override styles defined in
the map source. These are evaluated in order, with declarations from
later styles overriding those from earlier styles.
verbose:
... |
def getSimilarTerms(self, textOrFingerprint):
expression = self._createDictionary(textOrFingerprint)
terms = self._fullClient.getSimilarTermsForExpression(json.dumps(expression), maxResults=20)
return [t.term for t in terms] | Get the similar terms for a given text or fingerprint
Args:
textOrFingerprint, str OR list of integers
Returns:
list of str: the 20 most similar terms
Raises:
CorticalioException: if the request was not successful |
def getFingerprint(self, text):
fp = self._fullClient.getFingerprintForText(text)
return fp.positions | Get the semantic fingerprint of the input text.
Args:
text, str: The text to be evaluated
Returns:
list of str: the positions of the semantic fingerprint
Raises:
CorticalioException: if the request was not successful |
def compare(self, textOrFingerprint1, textOrFingerprint2):
compareList = [self._createDictionary(textOrFingerprint1), self._createDictionary(textOrFingerprint2)]
metric = self._fullClient.compare(json.dumps(compareList))
return metric.cosineSimilarity | Returns the semantic similarity of texts or fingerprints. Each argument can be eiter a text or a fingerprint.
Args:
textOrFingerprint1, str OR list of integers
textOrFingerprint2, str OR list of integers
Returns:
float: the semantic similarity in the range [0;1]
Raises:
CorticalioException: if the request was not successful |
def createCategoryFilter(self, positiveExamples):
categoryFilter = self._fullClient.createCategoryFilter("CategoryFilter", positiveExamples)
return categoryFilter.positions | Creates a filter fingerprint.
Args:
positiveExamples, list(str): The list of positive example texts.
Returns:
list of int: the positions representing the filter representing the texts
Raises:
CorticalioException: if the request was not successful |
def get_mining_contracts():
# load data
url = build_url('miningcontracts')
data = load_data(url)
coin_data = data['CoinData']
mining_data = data['MiningData']
return coin_data, mining_data | Get all the mining contracts information available.
Returns:
This function returns two major dictionaries. The first one contains
information about the coins for which mining contracts data is
available:
coin_data:
{symbol1: {'BlockNumber': ...,
'BlockReward': ...,
'BlockRewardReduction': ...,
'BlockTime': ...,
'DifficultyAdjustment': ...,
'NetHashesPerSecond': ...,
'PreviousTotalCoinsMined': ...,
'PriceUSD': ...,
'Symbol': ...,
'TotalCoinsMined': ...},
symbol2: {...},
...}
The other one contains all the available mining contracts:
mining_data:
{id1: {'AffiliateURL': ...,
'Algorithm': ...,
'Company': ...,
'ContractLength': ...,
'Cost': ...,
'CurrenciesAvailable': ...,
'CurrenciesAvailableLogo': ...,
'CurrenciesAvailableName': ...,
'Currency': ...,
'FeePercentage': ...,
'FeeValue': ...,
'FeeValueCurrency': ...,
'HashesPerSecond': ...,
'Id': id1,
'LogoUrl': ...,
'Name': ...,
'ParentId': ...,
'Recommended': ...,
'Sponsored': ...,
'Url': ...},
id2: {...},
...} |
def get_mining_equipment():
# load data
url = build_url('miningequipment')
data = load_data(url)
coin_data = data['CoinData']
mining_data = data['MiningData']
return coin_data, mining_data | Get all the mining equipment information available.
Returns:
This function returns two major dictionaries. The first one contains information about the coins for which mining equipment data is available.
coin_data:
{symbol1: {'BlockNumber': ...,
'BlockReward': ...,
'BlockRewardReduction': ...,
'BlockTime': ...,
'DifficultyAdjustment': ...,
'NetHashesPerSecond': ...,
'PreviousTotalCoinsMined': ...,
'PriceUSD': ...,
'Symbol': ...,
'TotalCoinsMined': ...},
symbol2: {...},
...}
The other one contains all the available mining equipment.
mining_data:
{id1: {'AffiliateURL': ...,
'Algorithm': ...,
'Company': ...,
'Cost': ...,
'CurrenciesAvailable': ...,
'CurrenciesAvailableLogo': ...,
'CurrenciesAvailableName': ...,
'Currency': ...,
'EquipmentType': ...,
'HashesPerSecond': ...,
'Id': ...,
'LogoUrl': ...,
'Name': ...,
'ParentId': ...,
'PowerConsumption': ...,
'Recommended': ...,
'Sponsored': ...,
'Url': ...},
id2: {...},
...} |
def write(self, fp):
if self._defaults:
fp.write("[%s]\n" % ConfigParser.DEFAULTSECT)
for (key, value) in sorted(self._defaults.items(), key=lambda x: x[0]):
fp.write("%s = %s\n" % (key, str(value).replace('\n', '\n\t')))
fp.write("\n")
for section in sorted(self._sections):
fp.write("[%s]\n" % section)
for (key, value) in sorted(self._sections[section].items(), key=lambda x: x[0]):
if key != "__name__":
fp.write("%s = %s\n" %
(key, str(value).replace('\n', '\n\t')))
fp.write("\n") | Write an .ini-format representation of the configuration state. |
def main(src_file, dest_file, **kwargs):
mmap = mapnik.Map(1, 1)
# allow [zoom] filters to work
mmap.srs = '+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null'
load_kwargs = dict([(k, v) for (k, v) in kwargs.items() if k in ('cache_dir', 'scale', 'verbose', 'datasources_cfg', 'user_styles')])
cascadenik.load_map(mmap, src_file, dirname(realpath(dest_file)), **load_kwargs)
(handle, tmp_file) = tempfile.mkstemp(suffix='.xml', prefix='cascadenik-mapnik-')
os.close(handle)
mapnik.save_map(mmap, tmp_file)
if kwargs.get('pretty'):
doc = ElementTree.fromstring(open(tmp_file, 'rb').read())
cascadenik._compile.indent(doc)
f = open(tmp_file, 'wb')
ElementTree.ElementTree(doc).write(f)
f.close()
# manually unlinking seems to be required on windows
if os.path.exists(dest_file):
os.unlink(dest_file)
os.chmod(tmp_file, 0666^os.umask(0))
shutil.move(tmp_file, dest_file)
return 0 | Given an input layers file and a destination path, write the compiled
XML file to that destination and save any encountered external image
files to the destination's directory. |
def resolveExpression(self, retina_name, body, sparsity=1.0):
resourcePath = '/expressions'
method = 'POST'
queryParams = {}
headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}
postData = None
queryParams['retina_name'] = retina_name
queryParams['sparsity'] = sparsity
postData = body
response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)
return fingerprint.Fingerprint(**response.json()) | Resolve an expression
Args:
retina_name, str: The retina name (required)
body, ExpressionOperation: The JSON encoded expression to be evaluated (required)
sparsity, float: Sparsify the resulting expression to this percentage (optional)
Returns: Fingerprint |
def getContextsForExpression(self, retina_name, body, get_fingerprint=None, start_index=0, max_results=5, sparsity=1.0):
resourcePath = '/expressions/contexts'
method = 'POST'
queryParams = {}
headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}
postData = None
queryParams['retina_name'] = retina_name
queryParams['start_index'] = start_index
queryParams['max_results'] = max_results
queryParams['sparsity'] = sparsity
queryParams['get_fingerprint'] = get_fingerprint
postData = body
response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)
return [context.Context(**r) for r in response.json()] | Get semantic contexts for the input expression
Args:
retina_name, str: The retina name (required)
body, ExpressionOperation: The JSON encoded expression to be evaluated (required)
get_fingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional)
start_index, int: The start-index for pagination (optional)
max_results, int: Max results per page (optional)
sparsity, float: Sparsify the resulting expression to this percentage (optional)
Returns: Array[Context] |
def get_top_exchanges(fsym, tsym, limit=5):
# load data
url = build_url('exchanges', fsym=fsym, tsym=tsym, limit=limit)
data = load_data(url)
# price_data = data['Data']
# return [{'exchange': p['exchange'],
# 'volume24hto': p['volume24hTo']} for p in price_data]
return data['Data'] | Get top exchanges by 24 hour trading volume for the currency pair.
Args:
fsym: FROM symbol.
tsym: TO symbol.
limit: Number of results. Default value returns top 5 exchanges.
Returns:
Function returns a list containing a dictionary for each result:
[{'exchange': ..., 'fromSymbol': ..., 'toSymbol': ...,
'volume24h': ..., 'volume24hTo': ...},
{...},
...]
The list is ordered based on the volume of the FROM currency starting
with the highest value. |
def get_top_coins(tsym, limit=20):
# load data
url = build_url('volumes', tsym=tsym, limit=limit)
data = load_data(url)
return data['Data'] | Get top coins by 24 hour trading volume value in the requested currency.
Args:
tsym: TO symbol.
limit: Number of results. Default value returns top 20 coins.
Returns:
Function returns a list containing a dictionary for each result:
[{'SUPPLY': ..., 'SYMBOL': ..., 'VOLUME24HOURTO': ...},
{...},
...]
The list is ordered based on the volume of the TO currency starting with
the highest value. |
def get_top_pairs(fsym, limit=5):
# load data
url = build_url('pairs', fsym=fsym, limit=limit)
data = load_data(url)
return data['Data'] | Get top trading pairs by 24 hour aggregated volume for a currency.
Args:
fsym: FROM symbol.
limit: Number of results. Default value returns top 5 pairs.
Returns:
Function returns a list containing a dictionary for each result:
[{'exchange': ..., 'fromSymbol': ..., 'toSymbol': ..., 'volume24h': ...,
'volume24hTo': ...},
{...},
...]
The list is ordered based on the volume of the FROM currency starting
with the highest value. |
def key(base):
for root, dirs, files in os.walk(base, topdown=False):
for file in files:
yield os.path.join(root, file) | Walk 'base' bottom-up and yield the path of every file found beneath it. |
def chunk(url):
chunks = lambda l, n: [l[x: x+n] for x in xrange(0, len(l), n)]
url_64 = base64.urlsafe_b64encode(url)
return chunks(url_64, 255) | create filesystem-safe places for url-keyed data to be stored |
def main(filename):
input = open(filename, 'r').read()
declarations = cascadenik.stylesheet_declarations(input, is_merc=True)
for dec in declarations:
print dec.selector,
print '{',
print dec.property.name+':',
if cascadenik.style.properties[dec.property.name] in (cascadenik.style.color, cascadenik.style.boolean, cascadenik.style.numbers):
print str(dec.value.value)+';',
elif cascadenik.style.properties[dec.property.name] is cascadenik.style.uri:
print 'url("'+str(dec.value.value)+'");',
elif cascadenik.style.properties[dec.property.name] is str:
print '"'+str(dec.value.value)+'";',
elif cascadenik.style.properties[dec.property.name] in (int, float) or type(cascadenik.style.properties[dec.property.name]) is tuple:
print str(dec.value.value)+';',
print '}'
return 0 | Given an input file containing nothing but styles, print out an
unrolled list of declarations in cascade order. |
def validate_gps(value):
try:
latitude, longitude, altitude = value.split(',')
vol.Coerce(float)(latitude)
vol.Coerce(float)(longitude)
vol.Coerce(float)(altitude)
except (TypeError, ValueError, vol.Invalid):
raise vol.Invalid(
'GPS value should be of format "latitude,longitude,altitude"')
return value | Validate GPS value. |
def _check_connection(self):
if ((self.tcp_disconnect_timer + 2 * self.reconnect_timeout) <
time.time()):
self.tcp_disconnect_timer = time.time()
raise OSError('No response from {}. Disconnecting'.format(
self.server_address))
if (self.tcp_check_timer + self.reconnect_timeout) >= time.time():
return
msg = Message().modify(
child_id=255, type=self.const.MessageType.internal,
sub_type=self.const.Internal.I_VERSION)
self.add_job(msg.encode)
self.tcp_check_timer = time.time() | Check if connection is alive every reconnect_timeout seconds. |
def get_gateway_id(self):
host, _ = self.server_address
try:
ip_address = ipaddress.ip_address(host)
except ValueError:
# Only hosts using ip address supports unique id.
return None
if ip_address.version == 6:
mac = get_mac_address(ip6=host)
else:
mac = get_mac_address(ip=host)
return mac | Return a unique id for the gateway. |
def _connect(self):
while self.protocol:
_LOGGER.info('Trying to connect to %s', self.server_address)
try:
sock = socket.create_connection(
self.server_address, self.reconnect_timeout)
except socket.timeout:
_LOGGER.error(
'Connecting to socket timed out for %s',
self.server_address)
_LOGGER.info(
'Waiting %s secs before trying to connect again',
self.reconnect_timeout)
time.sleep(self.reconnect_timeout)
except OSError:
_LOGGER.error(
'Failed to connect to socket at %s', self.server_address)
_LOGGER.info(
'Waiting %s secs before trying to connect again',
self.reconnect_timeout)
time.sleep(self.reconnect_timeout)
else:
self.tcp_check_timer = time.time()
self.tcp_disconnect_timer = time.time()
transport = TCPTransport(
sock, lambda: self.protocol, self._check_connection)
poll_thread = threading.Thread(target=self._poll_queue)
self._stop_event.clear()
poll_thread.start()
transport.start()
transport.connect()
return | Connect to socket. This should be run in a new thread. |
def _connect(self):
try:
while True:
_LOGGER.info('Trying to connect to %s', self.server_address)
try:
yield from asyncio.wait_for(
self.loop.create_connection(
lambda: self.protocol, *self.server_address),
self.reconnect_timeout, loop=self.loop)
self.tcp_check_timer = time.time()
self.tcp_disconnect_timer = time.time()
self._check_connection()
return
except asyncio.TimeoutError:
_LOGGER.error(
'Connecting to socket timed out for %s',
self.server_address)
_LOGGER.info(
'Waiting %s secs before trying to connect again',
self.reconnect_timeout)
yield from asyncio.sleep(
self.reconnect_timeout, loop=self.loop)
except OSError:
_LOGGER.error(
'Failed to connect to socket at %s',
self.server_address)
_LOGGER.info(
'Waiting %s secs before trying to connect again',
self.reconnect_timeout)
yield from asyncio.sleep(
self.reconnect_timeout, loop=self.loop)
except asyncio.CancelledError:
_LOGGER.debug(
'Connect attempt to %s cancelled', self.server_address) | Connect to the socket. |
def _check_connection(self):
try:
super()._check_connection()
except OSError as exc:
_LOGGER.error(exc)
self.protocol.transport.close()
self.protocol.conn_lost_callback()
return
task = self.loop.call_later(
self.reconnect_timeout + 0.1, self._check_connection)
self.cancel_check_conn = task.cancel | Check if connection is alive every reconnect_timeout seconds. |
def connection_lost(self, exc):
_LOGGER.debug('Connection lost with %s', self.transport)
if self.gateway.cancel_check_conn:
self.gateway.cancel_check_conn()
self.gateway.cancel_check_conn = None
if exc:
_LOGGER.error(exc)
self.conn_lost_callback()
self.transport = None | Handle lost connection. |
def _check_socket(self, timeout=None):
sock = self.sock
available_socks = select.select([sock], [sock], [sock], timeout)
if available_socks[2]:
raise OSError
return available_socks | Check if socket is readable/writable. |
def run(self):
# pylint: disable=broad-except
self.protocol = self.protocol_factory()
try:
self.protocol.connection_made(self)
except Exception as exc:
self.alive = False
self.protocol.connection_lost(exc)
self._connection_made.set()
return
error = None
self._connection_made.set()
while self.alive:
data = None
try:
available_socks = self._check_socket()
if available_socks[0]:
data = self.sock.recv(120)
except Exception as exc:
error = exc
break
else:
if data:
try:
self.protocol.data_received(data)
except Exception as exc:
error = exc
break
try:
self._check_connection()
except OSError as exc:
error = exc
break
time.sleep(0.02) # short sleep to avoid burning 100% cpu
self.alive = False
self.protocol.connection_lost(error)
self.protocol = None | Transport thread loop. |
def register(self, name):
def decorator(func):
"""Register decorated function."""
self[name] = func
return func
return decorator | Return decorator to register item with a specific name. |
def _handle_subscription(self, topics):
if not isinstance(topics, list):
topics = [topics]
for topic in topics:
topic_levels = topic.split('/')
try:
qos = int(topic_levels[-2])
except ValueError:
qos = 0
try:
_LOGGER.debug('Subscribing to: %s, qos: %s', topic, qos)
self._sub_callback(topic, self.recv, qos)
except Exception as exception: # pylint: disable=broad-except
_LOGGER.exception(
'Subscribe to %s failed: %s', topic, exception) | Handle subscription of topics. |
def _init_topics(self):
_LOGGER.info('Setting up initial MQTT topic subscription')
init_topics = [
'{}/+/+/0/+/+'.format(self._in_prefix),
'{}/+/+/3/+/+'.format(self._in_prefix),
]
self._handle_subscription(init_topics)
if not self.persistence:
return
topics = [
'{}/{}/{}/{}/+/+'.format(
self._in_prefix, str(sensor.sensor_id), str(child.id),
msg_type) for sensor in self.sensors.values()
for child in sensor.children.values()
for msg_type in (int(self.const.MessageType.set),
int(self.const.MessageType.req))
]
topics.extend([
'{}/{}/+/{}/+/+'.format(
self._in_prefix, str(sensor.sensor_id),
int(self.const.MessageType.stream))
for sensor in self.sensors.values()])
self._handle_subscription(topics) | Set up initial subscription of mysensors topics. |
def _parse_mqtt_to_message(self, topic, payload, qos):
topic_levels = topic.split('/')
topic_levels = not_prefix = topic_levels[-5:]
prefix_end_idx = topic.find('/'.join(not_prefix)) - 1
prefix = topic[:prefix_end_idx]
if prefix != self._in_prefix:
return None
if qos and qos > 0:
ack = '1'
else:
ack = '0'
topic_levels[3] = ack
topic_levels.append(str(payload))
return ';'.join(topic_levels) | Parse a MQTT topic and payload.
Return a mysensors command string. |
def _parse_message_to_mqtt(self, data):
msg = Message(data, self)
payload = str(msg.payload)
msg.payload = ''
# prefix/node/child/type/ack/subtype : payload
return ('{}/{}'.format(self._out_prefix, msg.encode('/'))[:-2],
payload, msg.ack) | Parse a mysensors command string.
Return a MQTT topic, payload and qos-level as a tuple. |
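A standalone, hedged sketch of the translation these two helpers perform; the prefixes, node id, child id and payload below are illustrative:

```python
IN_PREFIX = 'mygateway1-in'
OUT_PREFIX = 'mygateway1-out'

def mqtt_to_command(topic, payload, qos):
    # prefix/node/child/type/ack/subtype -> 'node;child;type;ack;subtype;payload'
    node, child, msg_type, _, sub_type = topic.split('/')[-5:]
    ack = '1' if qos and qos > 0 else '0'
    return ';'.join([node, child, msg_type, ack, sub_type, str(payload)])

def command_to_mqtt(command):
    # 'node;child;type;ack;subtype;payload' -> (topic, payload, qos)
    node, child, msg_type, ack, sub_type, payload = command.rstrip().split(';')
    topic = '/'.join([OUT_PREFIX, node, child, msg_type, ack, sub_type])
    return topic, payload, int(ack)

print(mqtt_to_command('mygateway1-in/42/1/1/0/2', '20.5', 0))
# -> '42;1;1;0;2;20.5'
print(command_to_mqtt('42;1;1;0;2;20.5\n'))
# -> ('mygateway1-out/42/1/1/0/2', '20.5', 0)
```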
def _handle_presentation(self, msg):
ret_msg = handle_presentation(msg)
if msg.child_id == 255 or ret_msg is None:
return
# this is a presentation of a child sensor
topics = [
'{}/{}/{}/{}/+/+'.format(
self._in_prefix, str(msg.node_id), str(msg.child_id),
msg_type)
for msg_type in (int(self.const.MessageType.set),
int(self.const.MessageType.req))
]
topics.append('{}/{}/+/{}/+/+'.format(
self._in_prefix, str(msg.node_id),
int(self.const.MessageType.stream)))
self._handle_subscription(topics) | Process a MQTT presentation message. |
def recv(self, topic, payload, qos):
data = self._parse_mqtt_to_message(topic, payload, qos)
if data is None:
return
_LOGGER.debug('Receiving %s', data)
self.add_job(self.logic, data) | Receive a MQTT message.
Call this method when a message is received from the MQTT broker. |
def send(self, message):
if not message:
return
topic, payload, qos = self._parse_message_to_mqtt(message)
try:
_LOGGER.debug('Publishing %s', message.strip())
self._pub_callback(topic, payload, qos, self._retain)
except Exception as exception: # pylint: disable=broad-except
_LOGGER.exception('Publish to %s failed: %s', topic, exception) | Publish a command string to the gateway via MQTT. |
def start(self):
self._init_topics()
poll_thread = threading.Thread(target=self._poll_queue)
poll_thread.start() | Start the connection to a transport. |
def contribute_to_class(self, cls, name, virtual_only=False):
super(RegexField, self).contribute_to_class(cls, name, virtual_only)
setattr(cls, name, CastOnAssignDescriptor(self)) | Cast to the correct value every time the field is assigned. |
def to_python(self, value):
if isinstance(value, type(re.compile(''))):
return value
else:
if value is None and self.null:
return None
else:
try:
return self.get_compiled_regex(value)
except:
raise ValidationError('Invalid regex {0}'.format(value)) | Handles the following cases:
1. If the value is already the proper type (a regex), return it.
2. If the value is a string, compile and return the regex.
Raises: A ValidationError if the regex cannot be compiled. |
def run_validators(self, value):
value = self.to_python(value)
value = self.value_to_string(value)
return super(RegexField, self).run_validators(value) | Make sure value is a string so it can run through django validators |
def validate_hex(value):
try:
binascii.unhexlify(value)
except Exception:
raise vol.Invalid(
'{} is not of hex format'.format(value))
return value | Validate that value has hex format. |
def validate_v_rgb(value):
if len(value) != 6:
raise vol.Invalid(
'{} is not six characters long'.format(value))
return validate_hex(value) | Validate a V_RGB value. |
def validate_v_rgbw(value):
if len(value) != 8:
raise vol.Invalid(
'{} is not eight characters long'.format(value))
return validate_hex(value) | Validate a V_RGBW value. |
def copy(self, **kwargs):
msg = Message(self.encode(), self.gateway)
for key, val in kwargs.items():
setattr(msg, key, val)
return msg | Copy a message, optionally replace attributes with kwargs. |
def modify(self, **kwargs):
for key, val in kwargs.items():
setattr(self, key, val)
return self | Modify and return message, replace attributes with kwargs. |
def decode(self, data, delimiter=';'):
try:
list_data = data.rstrip().split(delimiter)
self.payload = list_data.pop()
(self.node_id,
self.child_id,
self.type,
self.ack,
self.sub_type) = [int(f) for f in list_data]
except ValueError:
_LOGGER.warning('Error decoding message from gateway, '
'bad data received: %s', data.rstrip())
raise | Decode a message from command string. |
def encode(self, delimiter=';'):
try:
return delimiter.join([str(f) for f in [
self.node_id,
self.child_id,
int(self.type),
self.ack,
int(self.sub_type),
self.payload,
]]) + '\n'
except ValueError:
_LOGGER.error('Error encoding message to gateway') | Encode a command string from message. |
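A short, standalone illustration of the ';'-delimited wire format that decode() and encode() handle; the ids and payload are illustrative (type 3 with sub_type 2 matches the internal I_VERSION message used by _check_connection earlier in this section):

```python
raw = '1;255;3;0;2;2.0\n'   # node_id;child_id;type;ack;sub_type;payload

fields = raw.rstrip().split(';')
payload = fields.pop()                       # payload is taken from the end
node_id, child_id, msg_type, ack, sub_type = [int(f) for f in fields]
print(node_id, child_id, msg_type, ack, sub_type, payload)   # 1 255 3 0 2 2.0

encoded = ';'.join(str(f) for f in
                   [node_id, child_id, msg_type, ack, sub_type, payload]) + '\n'
assert encoded == raw
```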
def _save_pickle(self, filename):
with open(filename, 'wb') as file_handle:
pickle.dump(self._sensors, file_handle, pickle.HIGHEST_PROTOCOL)
file_handle.flush()
os.fsync(file_handle.fileno()) | Save sensors to pickle file. |
def _load_pickle(self, filename):
with open(filename, 'rb') as file_handle:
self._sensors.update(pickle.load(file_handle)) | Load sensors from pickle file. |
def _save_json(self, filename):
with open(filename, 'w') as file_handle:
json.dump(self._sensors, file_handle, cls=MySensorsJSONEncoder,
indent=4)
file_handle.flush()
os.fsync(file_handle.fileno()) | Save sensors to json file. |
def _load_json(self, filename):
with open(filename, 'r') as file_handle:
self._sensors.update(json.load(
file_handle, cls=MySensorsJSONDecoder)) | Load sensors from json file. |
def save_sensors(self):
if not self.need_save:
return
fname = os.path.realpath(self.persistence_file)
exists = os.path.isfile(fname)
dirname = os.path.dirname(fname)
if (not os.access(dirname, os.W_OK) or exists and
not os.access(fname, os.W_OK)):
_LOGGER.error('Permission denied when writing to %s', fname)
return
split_fname = os.path.splitext(fname)
tmp_fname = '{}.tmp{}'.format(split_fname[0], split_fname[1])
_LOGGER.debug('Saving sensors to persistence file %s', fname)
self._perform_file_action(tmp_fname, 'save')
if exists:
os.rename(fname, self.persistence_bak)
os.rename(tmp_fname, fname)
if exists:
os.remove(self.persistence_bak)
self.need_save = False | Save sensors to file. |
def _load_sensors(self, path=None):
if path is None:
path = self.persistence_file
exists = os.path.isfile(path)
if exists and os.access(path, os.R_OK):
if path == self.persistence_bak:
os.rename(path, self.persistence_file)
path = self.persistence_file
_LOGGER.debug('Loading sensors from persistence file %s', path)
self._perform_file_action(path, 'load')
return True
_LOGGER.warning('File does not exist or is not readable: %s', path)
return False | Load sensors from file. |
def safe_load_sensors(self):
try:
loaded = self._load_sensors()
except (EOFError, ValueError):
_LOGGER.error('Bad file contents: %s', self.persistence_file)
loaded = False
if not loaded:
_LOGGER.warning('Trying backup file: %s', self.persistence_bak)
try:
if not self._load_sensors(self.persistence_bak):
_LOGGER.warning('Failed to load sensors from file: %s',
self.persistence_file)
except (EOFError, ValueError):
_LOGGER.error('Bad file contents: %s', self.persistence_file)
_LOGGER.warning('Removing file: %s', self.persistence_file)
os.remove(self.persistence_file) | Load sensors safely from file. |
def _perform_file_action(self, filename, action):
ext = os.path.splitext(filename)[1]
try:
func = getattr(self, '_{}_{}'.format(action, ext[1:]))
except AttributeError:
raise Exception('Unsupported file type {}'.format(ext[1:]))
func(filename) | Perform an action on a specific file type.
Dynamic dispatch function that selects the handler matching the
requested 'action' and the file extension of 'filename'. |
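A hedged, standalone sketch of the same extension-based dispatch outside the gateway class; the handler bodies are placeholders:

```python
import os

class PersistenceSketch:
    def _save_json(self, filename):
        print('would save JSON to', filename)

    def _save_pickle(self, filename):
        print('would save pickle to', filename)

    def _perform_file_action(self, filename, action):
        # Build the handler name '_<action>_<extension>' and look it up.
        ext = os.path.splitext(filename)[1]
        try:
            func = getattr(self, '_{}_{}'.format(action, ext[1:]))
        except AttributeError:
            raise Exception('Unsupported file type {}'.format(ext[1:]))
        func(filename)

PersistenceSketch()._perform_file_action('sensors.json', 'save')
# -> would save JSON to sensors.json
```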
def default(self, obj):
# pylint: disable=method-hidden, protected-access, arguments-differ
if isinstance(obj, Sensor):
return {
'sensor_id': obj.sensor_id,
'children': obj.children,
'type': obj.type,
'sketch_name': obj.sketch_name,
'sketch_version': obj.sketch_version,
'battery_level': obj.battery_level,
'protocol_version': obj.protocol_version,
'heartbeat': obj.heartbeat,
}
if isinstance(obj, ChildSensor):
return {
'id': obj.id,
'type': obj.type,
'description': obj.description,
'values': obj.values,
}
return json.JSONEncoder.default(self, obj) | Serialize obj into JSON. |
def dict_to_object(self, obj): # pylint: disable=no-self-use
if not isinstance(obj, dict):
return obj
if 'sensor_id' in obj:
sensor = Sensor(obj['sensor_id'])
for key, val in obj.items():
setattr(sensor, key, val)
return sensor
if all(k in obj for k in ['id', 'type', 'values']):
child = ChildSensor(
obj['id'], obj['type'], obj.get('description', ''))
child.values = obj['values']
return child
if all(k.isdigit() for k in obj.keys()):
return {int(k): v for k, v in obj.items()}
return obj | Return object from dict. |
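A hedged usage note mirroring the persistence helpers earlier in this section; it assumes the encoder/decoder classes and a populated sensors dict are in scope:

```python
import json

# Serialize the sensors dict and rebuild it with the custom classes.
text = json.dumps(sensors, cls=MySensorsJSONEncoder, indent=4)
restored = json.loads(text, cls=MySensorsJSONDecoder)
```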
def get_const(protocol_version):
path = next((
CONST_VERSIONS[const_version]
for const_version in sorted(CONST_VERSIONS, reverse=True)
if parse_ver(protocol_version) >= parse_ver(const_version)
), 'mysensors.const_14')
if path in LOADED_CONST:
return LOADED_CONST[path]
const = import_module(path)
LOADED_CONST[path] = const # Cache the module
return const | Return the const module for the protocol_version. |
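A standalone sketch of the version-selection rule above: pick the highest known constants version that does not exceed the requested protocol version, falling back to 1.4. The version table and the parse_ver stand-in are illustrative:

```python
CONST_VERSIONS = {'1.4': 'mysensors.const_14',
                  '1.5': 'mysensors.const_15',
                  '2.0': 'mysensors.const_20'}

def parse_ver(version):
    # Simple stand-in for a proper version parser.
    return tuple(int(part) for part in str(version).split('.'))

def pick_const(protocol_version):
    return next((CONST_VERSIONS[const_version]
                 for const_version in sorted(CONST_VERSIONS, reverse=True)
                 if parse_ver(protocol_version) >= parse_ver(const_version)),
                'mysensors.const_14')

print(pick_const('1.5.1'))   # mysensors.const_15
print(pick_const('2.2'))     # mysensors.const_20
print(pick_const('1.3'))     # mysensors.const_14 (fallback)
```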