Dataset columns: Unnamed: 0 (int64, 0–389k) · code (string, lengths 26–79.6k) · docstring (string, lengths 1–46.9k)
381,800
def cache_control_expires(num_hours):
    num_seconds = int(num_hours * 60 * 60)

    def decorator(func):
        @wraps(func)
        def inner(request, *args, **kwargs):
            response = func(request, *args, **kwargs)
            patch_response_headers(response, num_seconds)
            return response
        return inner

    return decorator
Set the appropriate Cache-Control and Expires headers for the given number of hours.
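A minimal usage sketch (the view name and response body are hypothetical; assumes a Django project where `wraps` and `patch_response_headers` are importable as in the function above):

from django.http import HttpResponse

@cache_control_expires(2)  # Cache-Control/Expires set two hours ahead
def homepage(request):
    return HttpResponse("hello")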
381,801
def to_meshpoint(meshcode, lat_multiplier, lon_multiplier):
    def mesh_cord(func_higher_cord, func_unit_cord, func_multiplier):
        return func_higher_cord() + func_unit_cord() * func_multiplier()

    lat_multiplier_lv = lambda: lat_multiplier
    lon_multiplier_lv = lambda: lon_multiplier
    lat_multiplier_lv1 = _functools.partial(lambda meshcode: int(meshcode[0:2]), meshcode=meshcode)
    lon_multiplier_lv1 = _functools.partial(lambda meshcode: int(meshcode[2:4]), meshcode=meshcode)
    lat_multiplier_40000 = _functools.partial(lambda meshcode: int(bin(int(meshcode[4:5])-1)[2:].zfill(2)[0:1]), meshcode=meshcode)
    lon_multiplier_40000 = _functools.partial(lambda meshcode: int(bin(int(meshcode[4:5])-1)[2:].zfill(2)[1:2]), meshcode=meshcode)
    lat_multiplier_20000 = _functools.partial(lambda meshcode: int(bin(int(meshcode[5:6])-1)[2:].zfill(2)[0:1]), meshcode=meshcode)
    lon_multiplier_20000 = _functools.partial(lambda meshcode: int(bin(int(meshcode[5:6])-1)[2:].zfill(2)[1:2]), meshcode=meshcode)
    lat_multiplier_16000 = _functools.partial(lambda meshcode: int(meshcode[4:5])/2, meshcode=meshcode)
    lon_multiplier_16000 = _functools.partial(lambda meshcode: int(meshcode[5:6])/2, meshcode=meshcode)
    lat_multiplier_lv2 = _functools.partial(lambda meshcode: int(meshcode[4:5]), meshcode=meshcode)
    lon_multiplier_lv2 = _functools.partial(lambda meshcode: int(meshcode[5:6]), meshcode=meshcode)
    lat_multiplier_8000 = _functools.partial(lambda meshcode: int(meshcode[4:5]), meshcode=meshcode)
    lon_multiplier_8000 = _functools.partial(lambda meshcode: int(meshcode[5:6]), meshcode=meshcode)
    lat_multiplier_5000 = _functools.partial(lambda meshcode: int(bin(int(meshcode[6:7])-1)[2:].zfill(2)[0:1]), meshcode=meshcode)
    lon_multiplier_5000 = _functools.partial(lambda meshcode: int(bin(int(meshcode[6:7])-1)[2:].zfill(2)[1:2]), meshcode=meshcode)
    lat_multiplier_4000 = _functools.partial(lambda meshcode: int(bin(int(meshcode[7:8])-1)[2:].zfill(2)[0:1]), meshcode=meshcode)
    lon_multiplier_4000 = _functools.partial(lambda meshcode: int(bin(int(meshcode[7:8])-1)[2:].zfill(2)[1:2]), meshcode=meshcode)
    lat_multiplier_2500 = _functools.partial(lambda meshcode: int(bin(int(meshcode[7:8])-1)[2:].zfill(2)[0:1]), meshcode=meshcode)
    lon_multiplier_2500 = _functools.partial(lambda meshcode: int(bin(int(meshcode[7:8])-1)[2:].zfill(2)[1:2]), meshcode=meshcode)
    lat_multiplier_2000 = _functools.partial(lambda meshcode: int(meshcode[6:7])/2, meshcode=meshcode)
    lon_multiplier_2000 = _functools.partial(lambda meshcode: int(meshcode[7:8])/2, meshcode=meshcode)
    lat_multiplier_lv3 = _functools.partial(lambda meshcode: int(meshcode[6:7]), meshcode=meshcode)
    lon_multiplier_lv3 = _functools.partial(lambda meshcode: int(meshcode[7:8]), meshcode=meshcode)
    lat_multiplier_lv4 = _functools.partial(lambda meshcode: int(bin(int(meshcode[8:9])-1)[2:].zfill(2)[0:1]), meshcode=meshcode)
    lon_multiplier_lv4 = _functools.partial(lambda meshcode: int(bin(int(meshcode[8:9])-1)[2:].zfill(2)[1:2]), meshcode=meshcode)
    lat_multiplier_lv5 = _functools.partial(lambda meshcode: int(bin(int(meshcode[9:10])-1)[2:].zfill(2)[0:1]), meshcode=meshcode)
    lon_multiplier_lv5 = _functools.partial(lambda meshcode: int(bin(int(meshcode[9:10])-1)[2:].zfill(2)[1:2]), meshcode=meshcode)
    lat_multiplier_lv6 = _functools.partial(lambda meshcode: int(bin(int(meshcode[10:11])-1)[2:].zfill(2)[0:1]), meshcode=meshcode)
    lon_multiplier_lv6 = _functools.partial(lambda meshcode: int(bin(int(meshcode[10:11])-1)[2:].zfill(2)[1:2]), meshcode=meshcode)

    mesh_lv1_default_lat = _functools.partial(mesh_cord, func_higher_cord=lambda: 0, func_unit_cord=_unit_lat_lv1, func_multiplier=lat_multiplier_lv1)
    mesh_lv1_default_lon = _functools.partial(mesh_cord, func_higher_cord=lambda: 100, func_unit_cord=_unit_lon_lv1, func_multiplier=lon_multiplier_lv1)
    mesh_40000_default_lat = _functools.partial(mesh_cord, func_higher_cord=mesh_lv1_default_lat, func_unit_cord=_unit_lat_40000, func_multiplier=lat_multiplier_40000)
    mesh_40000_default_lon = _functools.partial(mesh_cord, func_higher_cord=mesh_lv1_default_lon, func_unit_cord=_unit_lon_40000, func_multiplier=lon_multiplier_40000)
    mesh_20000_default_lat = _functools.partial(mesh_cord, func_higher_cord=mesh_40000_default_lat, func_unit_cord=_unit_lat_20000, func_multiplier=lat_multiplier_20000)
    mesh_20000_default_lon = _functools.partial(mesh_cord, func_higher_cord=mesh_40000_default_lon, func_unit_cord=_unit_lon_20000, func_multiplier=lon_multiplier_20000)
    mesh_16000_default_lat = _functools.partial(mesh_cord, func_higher_cord=mesh_lv1_default_lat, func_unit_cord=_unit_lat_16000, func_multiplier=lat_multiplier_16000)
    mesh_16000_default_lon = _functools.partial(mesh_cord, func_higher_cord=mesh_lv1_default_lon, func_unit_cord=_unit_lon_16000, func_multiplier=lon_multiplier_16000)
    mesh_lv2_default_lat = _functools.partial(mesh_cord, func_higher_cord=mesh_lv1_default_lat, func_unit_cord=_unit_lat_lv2, func_multiplier=lat_multiplier_lv2)
    mesh_lv2_default_lon = _functools.partial(mesh_cord, func_higher_cord=mesh_lv1_default_lon, func_unit_cord=_unit_lon_lv2, func_multiplier=lon_multiplier_lv2)
    mesh_8000_default_lat = _functools.partial(mesh_cord, func_higher_cord=mesh_lv1_default_lat, func_unit_cord=_unit_lat_8000, func_multiplier=lat_multiplier_8000)
    mesh_8000_default_lon = _functools.partial(mesh_cord, func_higher_cord=mesh_lv1_default_lon, func_unit_cord=_unit_lon_8000, func_multiplier=lon_multiplier_8000)
    mesh_5000_default_lat = _functools.partial(mesh_cord, func_higher_cord=mesh_lv2_default_lat, func_unit_cord=_unit_lat_5000, func_multiplier=lat_multiplier_5000)
    mesh_5000_default_lon = _functools.partial(mesh_cord, func_higher_cord=mesh_lv2_default_lon, func_unit_cord=_unit_lon_5000, func_multiplier=lon_multiplier_5000)
    mesh_4000_default_lat = _functools.partial(mesh_cord, func_higher_cord=mesh_8000_default_lat, func_unit_cord=_unit_lat_4000, func_multiplier=lat_multiplier_4000)
    mesh_4000_default_lon = _functools.partial(mesh_cord, func_higher_cord=mesh_8000_default_lon, func_unit_cord=_unit_lon_4000, func_multiplier=lon_multiplier_4000)
    mesh_2500_default_lat = _functools.partial(mesh_cord, func_higher_cord=mesh_5000_default_lat, func_unit_cord=_unit_lat_2500, func_multiplier=lat_multiplier_2500)
    mesh_2500_default_lon = _functools.partial(mesh_cord, func_higher_cord=mesh_5000_default_lon, func_unit_cord=_unit_lon_2500, func_multiplier=lon_multiplier_2500)
    mesh_2000_default_lat = _functools.partial(mesh_cord, func_higher_cord=mesh_lv2_default_lat, func_unit_cord=_unit_lat_2000, func_multiplier=lat_multiplier_2000)
    mesh_2000_default_lon = _functools.partial(mesh_cord, func_higher_cord=mesh_lv2_default_lon, func_unit_cord=_unit_lon_2000, func_multiplier=lon_multiplier_2000)
    mesh_lv3_default_lat = _functools.partial(mesh_cord, func_higher_cord=mesh_lv2_default_lat, func_unit_cord=_unit_lat_lv3, func_multiplier=lat_multiplier_lv3)
    mesh_lv3_default_lon = _functools.partial(mesh_cord, func_higher_cord=mesh_lv2_default_lon, func_unit_cord=_unit_lon_lv3, func_multiplier=lon_multiplier_lv3)
    mesh_lv4_default_lat = _functools.partial(mesh_cord, func_higher_cord=mesh_lv3_default_lat, func_unit_cord=_unit_lat_lv4, func_multiplier=lat_multiplier_lv4)
    mesh_lv4_default_lon = _functools.partial(mesh_cord, func_higher_cord=mesh_lv3_default_lon, func_unit_cord=_unit_lon_lv4, func_multiplier=lon_multiplier_lv4)
    mesh_lv5_default_lat = _functools.partial(mesh_cord, func_higher_cord=mesh_lv4_default_lat, func_unit_cord=_unit_lat_lv5, func_multiplier=lat_multiplier_lv5)
    mesh_lv5_default_lon = _functools.partial(mesh_cord, func_higher_cord=mesh_lv4_default_lon, func_unit_cord=_unit_lon_lv5, func_multiplier=lon_multiplier_lv5)
    mesh_lv6_default_lat = _functools.partial(mesh_cord, func_higher_cord=mesh_lv5_default_lat, func_unit_cord=_unit_lat_lv6, func_multiplier=lat_multiplier_lv6)
    mesh_lv6_default_lon = _functools.partial(mesh_cord, func_higher_cord=mesh_lv5_default_lon, func_unit_cord=_unit_lon_lv6, func_multiplier=lon_multiplier_lv6)

    mesh_lv1_lat = _functools.partial(mesh_cord, func_higher_cord=mesh_lv1_default_lat, func_unit_cord=_unit_lat_lv1, func_multiplier=lat_multiplier_lv)
    mesh_lv1_lon = _functools.partial(mesh_cord, func_higher_cord=mesh_lv1_default_lon, func_unit_cord=_unit_lon_lv1, func_multiplier=lon_multiplier_lv)
    mesh_40000_lat = _functools.partial(mesh_cord, func_higher_cord=mesh_40000_default_lat, func_unit_cord=_unit_lat_40000, func_multiplier=lat_multiplier_lv)
    mesh_40000_lon = _functools.partial(mesh_cord, func_higher_cord=mesh_40000_default_lon, func_unit_cord=_unit_lon_40000, func_multiplier=lon_multiplier_lv)
    mesh_20000_lat = _functools.partial(mesh_cord, func_higher_cord=mesh_20000_default_lat, func_unit_cord=_unit_lat_20000, func_multiplier=lat_multiplier_lv)
    mesh_20000_lon = _functools.partial(mesh_cord, func_higher_cord=mesh_20000_default_lon, func_unit_cord=_unit_lon_20000, func_multiplier=lon_multiplier_lv)
    mesh_16000_lat = _functools.partial(mesh_cord, func_higher_cord=mesh_16000_default_lat, func_unit_cord=_unit_lat_16000, func_multiplier=lat_multiplier_lv)
    mesh_16000_lon = _functools.partial(mesh_cord, func_higher_cord=mesh_16000_default_lon, func_unit_cord=_unit_lon_16000, func_multiplier=lon_multiplier_lv)
    mesh_lv2_lat = _functools.partial(mesh_cord, func_higher_cord=mesh_lv2_default_lat, func_unit_cord=_unit_lat_lv2, func_multiplier=lat_multiplier_lv)
    mesh_lv2_lon = _functools.partial(mesh_cord, func_higher_cord=mesh_lv2_default_lon, func_unit_cord=_unit_lon_lv2, func_multiplier=lon_multiplier_lv)
    mesh_8000_lat = _functools.partial(mesh_cord, func_higher_cord=mesh_8000_default_lat, func_unit_cord=_unit_lat_8000, func_multiplier=lat_multiplier_lv)
    mesh_8000_lon = _functools.partial(mesh_cord, func_higher_cord=mesh_8000_default_lon, func_unit_cord=_unit_lon_8000, func_multiplier=lon_multiplier_lv)
    mesh_5000_lat = _functools.partial(mesh_cord, func_higher_cord=mesh_5000_default_lat, func_unit_cord=_unit_lat_5000, func_multiplier=lat_multiplier_lv)
    mesh_5000_lon = _functools.partial(mesh_cord, func_higher_cord=mesh_5000_default_lon, func_unit_cord=_unit_lon_5000, func_multiplier=lon_multiplier_lv)
    mesh_4000_lat = _functools.partial(mesh_cord, func_higher_cord=mesh_4000_default_lat, func_unit_cord=_unit_lat_4000, func_multiplier=lat_multiplier_lv)
    mesh_4000_lon = _functools.partial(mesh_cord, func_higher_cord=mesh_4000_default_lon, func_unit_cord=_unit_lon_4000, func_multiplier=lon_multiplier_lv)
    mesh_2500_lat = _functools.partial(mesh_cord, func_higher_cord=mesh_2500_default_lat, func_unit_cord=_unit_lat_2500, func_multiplier=lat_multiplier_lv)
    mesh_2500_lon = _functools.partial(mesh_cord, func_higher_cord=mesh_2500_default_lon, func_unit_cord=_unit_lon_2500, func_multiplier=lon_multiplier_lv)
    mesh_2000_lat = _functools.partial(mesh_cord, func_higher_cord=mesh_2000_default_lat, func_unit_cord=_unit_lat_2000, func_multiplier=lat_multiplier_lv)
    mesh_2000_lon = _functools.partial(mesh_cord, func_higher_cord=mesh_2000_default_lon, func_unit_cord=_unit_lon_2000, func_multiplier=lon_multiplier_lv)
    mesh_lv3_lat = _functools.partial(mesh_cord, func_higher_cord=mesh_lv3_default_lat, func_unit_cord=_unit_lat_lv3, func_multiplier=lat_multiplier_lv)
    mesh_lv3_lon = _functools.partial(mesh_cord, func_higher_cord=mesh_lv3_default_lon, func_unit_cord=_unit_lon_lv3, func_multiplier=lon_multiplier_lv)
    mesh_lv4_lat = _functools.partial(mesh_cord, func_higher_cord=mesh_lv4_default_lat, func_unit_cord=_unit_lat_lv4, func_multiplier=lat_multiplier_lv)
    mesh_lv4_lon = _functools.partial(mesh_cord, func_higher_cord=mesh_lv4_default_lon, func_unit_cord=_unit_lon_lv4, func_multiplier=lon_multiplier_lv)
    mesh_lv5_lat = _functools.partial(mesh_cord, func_higher_cord=mesh_lv5_default_lat, func_unit_cord=_unit_lat_lv5, func_multiplier=lat_multiplier_lv)
    mesh_lv5_lon = _functools.partial(mesh_cord, func_higher_cord=mesh_lv5_default_lon, func_unit_cord=_unit_lon_lv5, func_multiplier=lon_multiplier_lv)
    mesh_lv6_lat = _functools.partial(mesh_cord, func_higher_cord=mesh_lv6_default_lat, func_unit_cord=_unit_lat_lv6, func_multiplier=lat_multiplier_lv)
    mesh_lv6_lon = _functools.partial(mesh_cord, func_higher_cord=mesh_lv6_default_lon, func_unit_cord=_unit_lon_lv6, func_multiplier=lon_multiplier_lv)

    level = to_meshlevel(meshcode)
    if level == 1:
        return mesh_lv1_lat(), mesh_lv1_lon()
    if level == 40000:
        return mesh_40000_lat(), mesh_40000_lon()
    if level == 20000:
        return mesh_20000_lat(), mesh_20000_lon()
    if level == 16000:
        return mesh_16000_lat(), mesh_16000_lon()
    if level == 2:
        return mesh_lv2_lat(), mesh_lv2_lon()
    if level == 8000:
        return mesh_8000_lat(), mesh_8000_lon()
    if level == 5000:
        return mesh_5000_lat(), mesh_5000_lon()
    if level == 4000:
        return mesh_4000_lat(), mesh_4000_lon()
    if level == 2500:
        return mesh_2500_lat(), mesh_2500_lon()
    if level == 2000:
        return mesh_2000_lat(), mesh_2000_lon()
    if level == 3:
        return mesh_lv3_lat(), mesh_lv3_lon()
    if level == 4:
        return mesh_lv4_lat(), mesh_lv4_lon()
    if level == 5:
        return mesh_lv5_lat(), mesh_lv5_lon()
    if level == 6:
        return mesh_lv6_lat(), mesh_lv6_lon()
    raise ValueError("the level is unsupported.")
Computes latitude/longitude from a Japanese regional mesh code (地域メッシュコード).

The following mesh levels are supported:
    Level 1 (80 km squares): 1
    40x (40 km squares): 40000
    20x (20 km squares): 20000
    16x (16 km squares): 16000
    Level 2 (10 km squares): 2
    8x (8 km squares): 8000
    5x (5 km squares): 5000
    4x (4 km squares): 4000
    2.5x (2.5 km squares): 2500
    2x (2 km squares): 2000
    Level 3 (1 km squares): 3
    Level 4 (500 m squares): 4
    Level 5 (250 m squares): 5
    Level 6 (125 m squares): 6

Args:
    meshcode: regional mesh code of the given level
    lat_multiplier: position of the point on the latitude axis, given as a multiple of
        the mesh's unit latitude measured from the mesh's reference point (south-west corner)
    lon_multiplier: position of the point on the longitude axis, given as a multiple of
        the mesh's unit longitude measured from the mesh's reference point (south-west corner)

Returns:
    lat: latitude in degrees (world geodetic system)
    lon: longitude in degrees (world geodetic system)
381,802
def MCLA(hdf5_file_name, cluster_runs, verbose=False, N_clusters_max=None):
    print()
    print()
    if N_clusters_max is None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1
    N_runs = cluster_runs.shape[0]
    N_samples = cluster_runs.shape[1]
    print("INFO: Cluster_Ensembles: MCLA: preparing graph for meta-clustering.")
    hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
    w = hypergraph_adjacency.sum(axis=1)
    N_rows = hypergraph_adjacency.shape[0]
    print("INFO: Cluster_Ensembles: MCLA: done filling hypergraph adjacency matrix. "
          "Starting computation of Jaccard similarity matrix.")
    # carray names and the 'r+' open mode are restored; the string literals were lost in extraction
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        FILTERS = get_compression_filter(4 * (N_rows ** 2))
        similarities_MCLA = fileh.create_carray(
            fileh.root.consensus_group, 'similarities_MCLA', tables.Float32Atom(),
            (N_rows, N_rows), "Matrix of pairwise Jaccard similarity scores",
            filters=FILTERS)
        scale_factor = 100.0
        print("INFO: Cluster_Ensembles: MCLA: "
              "starting computation of Jaccard similarity matrix.")
        squared_MCLA = hypergraph_adjacency.dot(hypergraph_adjacency.transpose())
        squared_sums = hypergraph_adjacency.sum(axis=1)
        squared_sums = np.squeeze(np.asarray(squared_sums))
        chunks_size = get_chunk_size(N_rows, 7)
        for i in range(0, N_rows, chunks_size):
            n_dim = min(chunks_size, N_rows - i)
            temp = squared_MCLA[i:min(i + chunks_size, N_rows), :].todense()
            temp = np.squeeze(np.asarray(temp))
            x = squared_sums[i:min(i + chunks_size, N_rows)]
            x = x.reshape(-1, 1)
            x = np.dot(x, np.ones((1, squared_sums.size)))
            y = np.dot(np.ones((n_dim, 1)), squared_sums.reshape(1, -1))
            temp = np.divide(temp, x + y - temp)
            temp *= scale_factor
            Jaccard_matrix = np.rint(temp)
            similarities_MCLA[i:min(i + chunks_size, N_rows)] = Jaccard_matrix
            del Jaccard_matrix, temp, x, y
            gc.collect()
    print("INFO: Cluster_Ensembles: MCLA: done computing the matrix of "
          "pairwise Jaccard similarity scores.")
    cluster_labels = cmetis(hdf5_file_name, N_clusters_max, w)
    cluster_labels = one_to_max(cluster_labels)
    N_consensus = np.amax(cluster_labels) + 1
    fileh = tables.open_file(hdf5_file_name, 'r+')
    FILTERS = get_compression_filter(4 * N_consensus * N_samples)
    clb_cum = fileh.create_carray(
        fileh.root.consensus_group, 'clb_cum', tables.Float32Atom(),
        (N_consensus, N_samples),
        "Matrix of mean cluster memberships per meta-cluster", filters=FILTERS)
    chunks_size = get_chunk_size(N_samples, 7)
    for i in range(0, N_consensus, chunks_size):
        x = min(chunks_size, N_consensus - i)
        matched_clusters = np.where(cluster_labels == np.reshape(
            np.arange(i, min(i + chunks_size, N_consensus)), newshape=(x, 1)))
        M = np.zeros((x, N_samples))
        for j in range(x):
            coord = np.where(matched_clusters[0] == j)[0]
            M[j] = np.asarray(
                hypergraph_adjacency[matched_clusters[1][coord], :].mean(axis=0))
        clb_cum[i:min(i + chunks_size, N_consensus)] = M
    del hypergraph_adjacency
    gc.collect()
    chunks_size = get_chunk_size(N_consensus, 4)
    N_chunks, remainder = divmod(N_samples, chunks_size)
    if N_chunks == 0:
        null_columns = np.where(clb_cum[:].sum(axis=0) == 0)[0]
    else:
        szumsz = np.zeros(0)
        for i in range(N_chunks):
            M = clb_cum[:, i * chunks_size:(i + 1) * chunks_size]
            szumsz = np.append(szumsz, M.sum(axis=0))
        if remainder != 0:
            M = clb_cum[:, N_chunks * chunks_size:N_samples]
            szumsz = np.append(szumsz, M.sum(axis=0))
        null_columns = np.where(szumsz == 0)[0]
    if null_columns.size != 0:
        print("INFO: Cluster_Ensembles: MCLA: {} objects with all zero associations "
              "in matrix of meta-clusters.".format(null_columns.size))
        clb_cum[:, null_columns] = np.random.rand(N_consensus, null_columns.size)
    random_state = np.random.RandomState()
    tmp = fileh.create_carray(
        fileh.root.consensus_group, 'tmp', tables.Float32Atom(),
        (N_consensus, N_samples),
        "Temporary matrix to help with collapsing to meta-hyper-edges",
        filters=FILTERS)
    chunks_size = get_chunk_size(N_samples, 2)
    N_chunks, remainder = divmod(N_consensus, chunks_size)
    if N_chunks == 0:
        tmp[:] = random_state.rand(N_consensus, N_samples)
    else:
        for i in range(N_chunks):
            tmp[i * chunks_size:(i + 1) * chunks_size] = random_state.rand(chunks_size, N_samples)
        if remainder != 0:
            tmp[N_chunks * chunks_size:N_consensus] = random_state.rand(remainder, N_samples)
    expr = tables.Expr("clb_cum + (tmp / 10000)")
    expr.set_output(clb_cum)
    expr.eval()
    expr = tables.Expr("abs(tmp)")
    expr.set_output(tmp)
    expr.eval()
    chunks_size = get_chunk_size(N_consensus, 2)
    N_chunks, remainder = divmod(N_samples, chunks_size)
    if N_chunks == 0:
        sum_diag = tmp[:].sum(axis=0)
    else:
        sum_diag = np.empty(0)
        for i in range(N_chunks):
            M = tmp[:, i * chunks_size:(i + 1) * chunks_size]
            sum_diag = np.append(sum_diag, M.sum(axis=0))
        if remainder != 0:
            M = tmp[:, N_chunks * chunks_size:N_samples]
            sum_diag = np.append(sum_diag, M.sum(axis=0))
    fileh.remove_node(fileh.root.consensus_group, "tmp")
    inv_sum_diag = np.reciprocal(sum_diag.astype(float))
    if N_chunks == 0:
        clb_cum *= inv_sum_diag
        max_entries = np.amax(clb_cum, axis=0)
    else:
        max_entries = np.zeros(N_samples)
        for i in range(N_chunks):
            clb_cum[:, i * chunks_size:(i + 1) * chunks_size] *= inv_sum_diag[i * chunks_size:(i + 1) * chunks_size]
            max_entries[i * chunks_size:(i + 1) * chunks_size] = np.amax(
                clb_cum[:, i * chunks_size:(i + 1) * chunks_size], axis=0)
        if remainder != 0:
            clb_cum[:, N_chunks * chunks_size:N_samples] *= inv_sum_diag[N_chunks * chunks_size:N_samples]
            max_entries[N_chunks * chunks_size:N_samples] = np.amax(
                clb_cum[:, N_chunks * chunks_size:N_samples], axis=0)
    cluster_labels = np.zeros(N_samples, dtype=int)
    winner_probabilities = np.zeros(N_samples)
    chunks_size = get_chunk_size(N_samples, 2)
    for i in reversed(range(0, N_consensus, chunks_size)):
        ind = np.where(np.tile(max_entries, (min(chunks_size, N_consensus - i), 1)) ==
                       clb_cum[i:min(i + chunks_size, N_consensus)])
        cluster_labels[ind[1]] = i + ind[0]
        winner_probabilities[ind[1]] = clb_cum[(ind[0] + i, ind[1])]
    cluster_labels = one_to_max(cluster_labels)
    print("INFO: Cluster_Ensembles: MCLA: delivering "
          "{} clusters.".format(np.unique(cluster_labels).size))
    print("INFO: Cluster_Ensembles: MCLA: average posterior "
          "probability is {}".format(np.mean(winner_probabilities)))
    if cluster_labels.size <= 7:
        print("INFO: Cluster_Ensembles: MCLA: the winning posterior probabilities are:")
        print(winner_probabilities)
        print("INFO: Cluster_Ensembles: MCLA: the full posterior probabilities are:")
        print(clb_cum)
    fileh.remove_node(fileh.root.consensus_group, "clb_cum")
    fileh.close()
    return cluster_labels
Meta-CLustering Algorithm for a consensus function.

Parameters
----------
hdf5_file_name : file handle or string
cluster_runs : array of shape (n_partitions, n_samples)
verbose : bool, optional (default = False)
N_clusters_max : int, optional (default = None)

Returns
-------
A vector specifying the cluster label to which each sample has been assigned
by the MCLA approximation algorithm for consensus clustering.

Reference
---------
A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework for
Combining Multiple Partitions". In: Journal of Machine Learning Research, 3,
pp. 583-617. 2002
381,803
def restore_data(self, data_dict):
    session[self._base_key] = data_dict
    self._data_dict = session[self._base_key]
Restore the data dict - update the flask session and this object
381,804
def add_copies(self, node, parent, elem, minel):
    rep = 0 if minel is None else int(minel.arg) - 1
    for _ in range(rep):
        parent.append(copy.deepcopy(elem))
Add appropriate number of `elem` copies to `parent`.
381,805
def get_service_status(service):
    dbs = db.get_session()
    srvs = dbs.query(db.ServiceStates).filter(db.ServiceStates.type == service)
    if srvs.count():
        return srvs[0].status
    return db.ServiceStatus.STOPPED
Return the status of a particular service from the database.
381,806
def incr(self, field, by=1):
    return self._client.hincrby(self.key_prefix, field, by)
See :meth:`RedisMap.incr`.
381,807
def decode_address(self, addr):
    # string literals (struct formats, dict keys, defaults) were lost in extraction
    # and are restored to the standard BACnet/IP conventions used elsewhere in bacpypes
    if _debug: Address._debug("decode_address %r (%s)", addr, type(addr))
    self.addrType = Address.localStationAddr
    self.addrNet = None

    if addr == "*":
        if _debug: Address._debug("    - localBroadcast")
        self.addrType = Address.localBroadcastAddr
        self.addrNet = None
        self.addrAddr = None
        self.addrLen = None
    elif addr == "*:*":
        if _debug: Address._debug("    - globalBroadcast")
        self.addrType = Address.globalBroadcastAddr
        self.addrNet = None
        self.addrAddr = None
        self.addrLen = None
    elif isinstance(addr, int):
        if _debug: Address._debug("    - int")
        if (addr < 0) or (addr >= 256):
            raise ValueError("address out of range")
        self.addrAddr = struct.pack('B', addr)
        self.addrLen = 1
    elif isinstance(addr, basestring):
        if _debug: Address._debug("    - str")
        m = ip_address_mask_port_re.match(addr)
        if m:
            if _debug: Address._debug("    - IP address")
            net, addr, mask, port = m.groups()
            if not mask:
                mask = '32'
            if not port:
                port = '47808'
            if _debug: Address._debug("    - net, addr, mask, port: %r, %r, %r, %r", net, addr, mask, port)
            if net:
                net = int(net)
                if (net >= 65535):
                    raise ValueError("network out of range")
                self.addrType = Address.remoteStationAddr
                self.addrNet = net
            self.addrPort = int(port)
            self.addrTuple = (addr, self.addrPort)
            addrstr = socket.inet_aton(addr)
            self.addrIP = struct.unpack('!L', addrstr)[0]
            self.addrMask = (_long_mask << (32 - int(mask))) & _long_mask
            self.addrHost = (self.addrIP & ~self.addrMask)
            self.addrSubnet = (self.addrIP & self.addrMask)
            bcast = (self.addrSubnet | ~self.addrMask)
            self.addrBroadcastTuple = (socket.inet_ntoa(struct.pack('!L', bcast & _long_mask)), self.addrPort)
            self.addrAddr = addrstr + struct.pack('!H', self.addrPort & _short_mask)
            self.addrLen = 6
        elif ethernet_re.match(addr):
            if _debug: Address._debug("    - ethernet")
            self.addrAddr = xtob(addr, ':')
            self.addrLen = len(self.addrAddr)
        elif re.match(r"^\d+$", addr):
            if _debug: Address._debug("    - int")
            addr = int(addr)
            if (addr > 255):
                raise ValueError("address out of range")
            self.addrAddr = struct.pack('B', addr)
            self.addrLen = 1
        elif re.match(r"^\d+:[*]$", addr):
            if _debug: Address._debug("    - remote broadcast")
            addr = int(addr[:-2])
            if (addr >= 65535):
                raise ValueError("network out of range")
            self.addrType = Address.remoteBroadcastAddr
            self.addrNet = addr
            self.addrAddr = None
            self.addrLen = None
        elif re.match(r"^\d+:\d+$", addr):
            if _debug: Address._debug("    - remote station")
            net, addr = addr.split(':')
            net = int(net)
            addr = int(addr)
            if (net >= 65535):
                raise ValueError("network out of range")
            if (addr > 255):
                raise ValueError("address out of range")
            self.addrType = Address.remoteStationAddr
            self.addrNet = net
            self.addrAddr = struct.pack('B', addr)
            self.addrLen = 1
        elif re.match(r"^0x([0-9A-Fa-f][0-9A-Fa-f])+$", addr):
            if _debug: Address._debug("    - modern hex string")
            self.addrAddr = xtob(addr[2:])
            self.addrLen = len(self.addrAddr)
        elif re.match(r"^X'([0-9A-Fa-f][0-9A-Fa-f])+'$", addr):
            if _debug: Address._debug("    - old school hex string")
            self.addrAddr = xtob(addr[2:-1])
            self.addrLen = len(self.addrAddr)
        elif re.match(r"^\d+:0x([0-9A-Fa-f][0-9A-Fa-f])+$", addr):
            if _debug: Address._debug("    - remote station with modern hex string")
            net, addr = addr.split(':')
            net = int(net)
            if (net >= 65535):
                raise ValueError("network out of range")
            self.addrType = Address.remoteStationAddr
            self.addrNet = net
            self.addrAddr = xtob(addr[2:])
            self.addrLen = len(self.addrAddr)
        elif re.match(r"^\d+:X'([0-9A-Fa-f][0-9A-Fa-f])+'$", addr):
            if _debug: Address._debug("    - remote station with old school hex string")
            net, addr = addr.split(':')
            net = int(net)
            if (net >= 65535):
                raise ValueError("network out of range")
            self.addrType = Address.remoteStationAddr
            self.addrNet = net
            self.addrAddr = xtob(addr[2:-1])
            self.addrLen = len(self.addrAddr)
        elif netifaces and interface_re.match(addr):
            if _debug: Address._debug("    - interface name with optional port")
            interface, port = interface_re.match(addr).groups()
            if port is not None:
                self.addrPort = int(port)
            else:
                self.addrPort = 47808
            interfaces = netifaces.interfaces()
            if interface not in interfaces:
                raise ValueError("not an interface: %s" % (interface,))
            if _debug: Address._debug("    - interfaces: %r", interfaces)
            ifaddresses = netifaces.ifaddresses(interface)
            if netifaces.AF_INET not in ifaddresses:
                raise ValueError("interface does not support IPv4: %s" % (interface,))
            ipv4addresses = ifaddresses[netifaces.AF_INET]
            if len(ipv4addresses) > 1:
                raise ValueError("interface supports multiple IPv4 addresses: %s" % (interface,))
            ifaddress = ipv4addresses[0]
            if _debug: Address._debug("    - ifaddress: %r", ifaddress)
            addr = ifaddress['addr']
            self.addrTuple = (addr, self.addrPort)
            if _debug: Address._debug("    - addrTuple: %r", self.addrTuple)
            addrstr = socket.inet_aton(addr)
            self.addrIP = struct.unpack('!L', addrstr)[0]
            if 'netmask' in ifaddress:
                maskstr = socket.inet_aton(ifaddress['netmask'])
                self.addrMask = struct.unpack('!L', maskstr)[0]
            else:
                self.addrMask = _long_mask
            self.addrHost = (self.addrIP & ~self.addrMask)
            self.addrSubnet = (self.addrIP & self.addrMask)
            if 'broadcast' in ifaddress:
                self.addrBroadcastTuple = (ifaddress['broadcast'], self.addrPort)
            else:
                self.addrBroadcastTuple = None
            if _debug: Address._debug("    - addrBroadcastTuple: %r", self.addrBroadcastTuple)
            self.addrAddr = addrstr + struct.pack('!H', self.addrPort & _short_mask)
            self.addrLen = 6
        else:
            raise ValueError("unrecognized format")
    elif isinstance(addr, tuple):
        addr, port = addr
        self.addrPort = int(port)
        if isinstance(addr, basestring):
            if not addr:
                addrstr = b'\x00\x00\x00\x00'
            else:
                addrstr = socket.inet_aton(addr)
            self.addrTuple = (addr, self.addrPort)
        elif isinstance(addr, (int, long)):
            addrstr = struct.pack('!L', addr & _long_mask)
            self.addrTuple = (socket.inet_ntoa(addrstr), self.addrPort)
        else:
            raise TypeError("tuple must be (string, port) or (long, port)")
        if _debug: Address._debug("    - addrstr: %r", addrstr)
        self.addrIP = struct.unpack('!L', addrstr)[0]
        self.addrMask = _long_mask
        self.addrHost = None
        self.addrSubnet = None
        self.addrBroadcastTuple = self.addrTuple
        self.addrAddr = addrstr + struct.pack('!H', self.addrPort & _short_mask)
        self.addrLen = 6
    else:
        raise TypeError("integer, string or tuple required")
Initialize the address from a string. Lots of different forms are supported.
381,808
def get_config_from_file(conf_properties_files):
    config = ExtendedConfigParser()
    logger = logging.getLogger(__name__)
    found = False
    # log/error messages are assumptions; the original string literals were lost in extraction
    files_list = conf_properties_files.split(',')
    for conf_properties_file in files_list:
        result = config.read(conf_properties_file)
        if len(result) == 0:
            message = 'Properties config file not found: %s'
            if len(files_list) == 1:
                logger.error(message, conf_properties_file)
                raise Exception(message % conf_properties_file)
            else:
                logger.debug(message, conf_properties_file)
        else:
            logger.debug('Reading properties from file: %s', conf_properties_file)
            found = True
    if not found:
        message = 'None of the properties config files was found'
        logger.error(message)
        raise Exception(message)
    return config
Reads properties files and saves them to a config object.

:param conf_properties_files: comma-separated list of properties files
:returns: config object
381,809
def write_th(self, s, header=False, indent=0, tags=None):
    if header and self.fmt.col_space is not None:
        tags = (tags or "")
        tags += ('style="min-width: {colspace};"'
                 .format(colspace=self.fmt.col_space))
    return self._write_cell(s, kind='th', indent=indent, tags=tags)
Method for writing a formatted <th> cell.

If col_space is set on the formatter then that is used for the value of min-width.

Parameters
----------
s : object
    The data to be written inside the cell.
header : boolean, default False
    Set to True if the <th> is for use inside <thead>. This will cause min-width to be set if there is one.
indent : int, default 0
    The indentation level of the cell.
tags : string, default None
    Tags to include in the cell.

Returns
-------
A written <th> cell.
381,810
def callback(self):
    if self._callback_func and callable(self._callback_func):
        self._callback_func(self)
Run callback.
381,811
def _retrieve_session(self, session_key):
    session_id = session_key.session_id
    if session_id is None:
        msg = ("Unable to resolve session ID from SessionKey [{0}]."
               "Returning null to indicate a session could not be "
               "found.".format(session_key))
        logger.debug(msg)
        return None
    session = self.session_store.read(session_id)
    if session is None:
        msg2 = "Could not find session with ID [{0}]".format(session_id)
        raise ValueError(msg2)
    return session
:type session_key: SessionKey
:returns: SimpleSession
381,812
def messages(self):
    offset = self.LENGTH
    while offset < len(self.mmap):
        yield Message(mm=self.mmap, offset=offset)
        offset += Message.LENGTH
a generator yielding the :class:`Message` structures in the index
381,813
def from_yaml(cls, yaml_str=None, str_or_buffer=None):
    # configuration keys are restored from context; the original string literals were lost
    cfg = yamlio.yaml_to_dict(yaml_str, str_or_buffer)

    default_model_expr = cfg['default_config']['model_expression']
    default_ytransform = cfg['default_config']['ytransform']

    seg = cls(
        cfg['segmentation_col'],
        cfg['fit_filters'],
        cfg['predict_filters'],
        default_model_expr,
        YTRANSFORM_MAPPING[default_ytransform],
        cfg['min_segment_size'],
        cfg['name'])

    if "models" not in cfg:
        cfg["models"] = {}

    for name, m in cfg['models'].items():
        m['model_expression'] = m.get('model_expression', default_model_expr)
        m['ytransform'] = m.get('ytransform', default_ytransform)
        m['fit_filters'] = None
        m['predict_filters'] = None
        reg = RegressionModel.from_yaml(yamlio.convert_to_yaml(m, None))
        seg._group.add_model(reg)

    logger.debug('loaded segmented regression model {} from YAML'.format(seg.name))
    return seg
Create a SegmentedRegressionModel instance from a saved YAML configuration.
Arguments are mutually exclusive.

Parameters
----------
yaml_str : str, optional
    A YAML string from which to load model.
str_or_buffer : str or file like, optional
    File name or buffer from which to load YAML.

Returns
-------
SegmentedRegressionModel
381,814
def Barr_1981(Re, eD):
    fd = (-2*log10(eD/3.7 + 4.518*log10(Re/7.)/(Re*(1 + Re**0.52/29*eD**0.7))))**-2
    return fd
Calculates Darcy friction factor using the method in Barr (1981) [2]_ as shown in [1]_.

.. math::
    \frac{1}{\sqrt{f_d}} = -2\log\left\{\frac{\epsilon}{3.7D} +
    \frac{4.518\log(\frac{Re}{7})}{Re\left[1+\frac{Re^{0.52}}{29}
    \left(\frac{\epsilon}{D}\right)^{0.7}\right]}\right\}

Parameters
----------
Re : float
    Reynolds number, [-]
eD : float
    Relative roughness, [-]

Returns
-------
fd : float
    Darcy friction factor [-]

Notes
-----
No range of validity specified for this equation.

Examples
--------
>>> Barr_1981(1E5, 1E-4)
0.01849836032779929

References
----------
.. [1] Winning, H. and T. Coole. "Explicit Friction Factor Accuracy and Computational
   Efficiency for Turbulent Flow in Pipes." Flow, Turbulence and Combustion 90,
   no. 1 (January 1, 2013): 1-27. doi:10.1007/s10494-012-9419-7
.. [2] Barr, Dih, and Colebrook White. "Technical Note. Solutions of the Colebrook-White
   Function for Resistance to Uniform Turbulent Flow." ICE Proceedings 71, no. 2
   (January 6, 1981): 529-35. doi:10.1680/iicep.1981.1895.
381,815
def block_idxmat_shuffle(numdraws, numblocks, random_state=None):
    import numpy as np
    blocksize = int(numdraws / numblocks)
    if random_state:
        np.random.seed(random_state)
    obsidx = np.random.permutation(numdraws)
    numdrop = numdraws % numblocks
    dropped = obsidx[:numdrop]
    obsidx = obsidx[numdrop:]
    idxmat = obsidx.reshape((blocksize, numblocks))
    return idxmat, dropped
Create K columns with unique random integers from 0 to N-1.

Purpose:
    Create K blocks for k-fold cross-validation.

Parameters:
    numdraws : int
        number of observations N or sample size N
    numblocks : int
        number of blocks K

Example:
    import pandas as pd
    import numpy as np
    import oxyba as ox

    X = np.random.normal(size=(7, 5), scale=50).round(1)
    N, _ = X.shape
    K = 3  # number of blocks
    idxmat, dropped = ox.block_idxmat_shuffle(N, K)
    for b in range(K):
        print('\nBlock:', b)
        print(pd.DataFrame(X[idxmat[:, b], :], index=idxmat[:, b]))
    print('\nDropped observations\n', X[dropped, :])
    print('\nrow indices of dropped observations:', dropped, '\n')

Why is this useful?
    - Avoid creating copies of dataset X during run time
    - Shuffle the indices of a data point rather than the data points themselves

Links:
    - How numpy's permutation works: https://stackoverflow.com/a/15474335
381,816
def do_mo(self):
    log.debug("Start updating mo files ...")
    for po_dir_path in self._iter_po_dir():
        po_path = (po_dir_path / self._basename).with_suffix(".po")
        lc_path = self._mo_path / po_dir_path.name / "LC_MESSAGES"
        lc_path.mkdir(parents=True, exist_ok=True)
        mo_path = (lc_path / self._basename).with_suffix(".mo")
        log.debug("Creating from {po}: {mo}".format(po=str(po_path), mo=str(mo_path)))
        check_call(["msgfmt", str(po_path), "-o", str(mo_path)])
    log.debug("All mo files updated")
Generate mo files for all po files.
381,817
def ndfrom2d(xtr, rsi):
    xts = rsi[0]
    prm = rsi[1]
    xt = xtr.reshape(xts)
    x = np.transpose(xt, np.argsort(prm))
    return x
Undo the array shape conversion applied by :func:`ndto2d`, returning the input 2D
array to its original shape.

Parameters
----------
xtr : array_like
    Two-dimensional input array
rsi : tuple
    A tuple containing the shape of the axis-permuted array and the permutation order
    applied in :func:`ndto2d`.

Returns
-------
x : ndarray
    Multi-dimensional output array
381,818
def mktime_tz(data):
    if data[9] is None:
        return time.mktime(data[:8] + (-1,))
    else:
        t = calendar.timegm(data)
        return t - data[9]
Turn a 10-tuple as returned by parsedate_tz() into a POSIX timestamp.
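A usage sketch pairing it with the standard library's parser, which produces the same 10-tuple:

from email.utils import parsedate_tz

t = parsedate_tz("Tue, 01 Jul 2003 10:52:37 +0200")
print(mktime_tz(t))  # POSIX timestamp, corrected for the +0200 offset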
381,819
def validated_value(self, raw_value):
    value = self.value(raw_value)
    try:
        for validator in self.validators:
            validator(value)
    except:
        raise
    else:
        return value
Return parsed parameter value and run validation handlers.

The error message included in the exception will be included in the http error response.

Args:
    raw_value: raw parameter value to parse and validate

Returns:
    the parsed and validated value

Note:
    The concept of validation for params is understood here as a process of checking if
    data of valid type (successfully parsed/processed by the ``.value()`` handler) meets
    some other constraints (length, bounds, uniqueness, etc.). It will internally call
    its ``value()`` handler.
381,820
def inject_settings(mixed: Union[str, Settings],
                    context: MutableMapping[str, Any],
                    fail_silently: bool = False) -> None:
    if isinstance(mixed, str):
        try:
            mixed = import_module(mixed)
        except Exception:
            if fail_silently:
                return
            raise
    for key, value in iter_settings(mixed):
        context[key] = value
Inject settings values to given context.

:param mixed:
    Settings can be a string (in which case it will be read from the Python path), a
    Python module, or a dict-like instance.
:param context:
    Context to assign settings key values. It should support dict-like item assignment.
:param fail_silently:
    When enabled and reading settings from a Python path, ignore errors if the given
    Python path could not be loaded.
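A minimal usage sketch (the module path and key are hypothetical):

context = {}
inject_settings('myproject.settings', context, fail_silently=True)
print(context.get('DEBUG'))  # None if the module could not be loaded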
381,821
def get_posts_with_limits(self, include_draft=False, **limits):
    filter_funcs = []

    # the attribute-name tuples below are assumptions; the original string literals were lost
    for attr in ('title', 'layout', 'author', 'email', 'tags', 'categories'):
        if limits.get(attr):
            filter_set = set(to_list(limits.get(attr)))

            def get_filter_func(filter_set_, attr_):
                return lambda p: filter_set_.intersection(to_list(getattr(p, attr_)))

            filter_funcs.append(get_filter_func(filter_set, attr))

    for attr in ('created', 'updated'):
        interval = limits.get(attr)
        if isinstance(interval, (list, tuple)) and len(interval) == 2 \
                and isinstance(interval[0], date) and isinstance(interval[1], date):
            start, end = interval
            start = to_datetime(start)
            if not isinstance(end, datetime):
                end = datetime.strptime('%d-%d-%d' % (end.year, end.month, end.day), '%Y-%m-%d')
                end += timedelta(days=1)

            def get_filter_func(attr_, start_dt, end_dt):
                return lambda p: start_dt <= getattr(p, attr_) < end_dt

            filter_funcs.append(get_filter_func(attr, start, end))

    return self.get_posts(include_draft=include_draft, filter_functions=filter_funcs)
Get all posts and filter them as needed. :param include_draft: return draft posts or not :param limits: other limits to the attrs of the result, should be a dict with string or list values :return: an iterable of Post objects
381,822
def fol_fc_ask(KB, alpha):
    while True:
        new = {}
        for r in KB.clauses:
            ps, q = parse_definite_clause(standardize_variables(r))
            raise NotImplementedError
Inefficient forward chaining for first-order logic. [Fig. 9.3] KB is a FolKB and alpha must be an atomic sentence.
381,823
def validate_schema(schema_name):
    def decorator(f):
        @wraps(f)
        def wrapper(*args, **kw):
            instance = args[0]
            try:
                instance.validator(instance.schemas[schema_name]).validate(request.get_json())
            except ValidationError as e:
                ret_dict = instance._create_ret_object(instance.FAILURE, None, True,
                                                       instance.BAD_SCHEMA, e.message)
                instance.logger.error("Invalid Schema", ret_dict)
                return jsonify(ret_dict), 400
            instance.logger.debug("Schema is valid")
            return f(*args, **kw)
        return wrapper
    return decorator
Validate the JSON against a required schema_name.
381,824
def release():
    # git arguments, file names, and messages are assumptions; the original string literals were lost
    with settings(warn_only=True):
        r = local(clom.git['diff-index']('--quiet', 'HEAD', '--'))
        if r.return_code != 0:
            abort('There are uncommitted changes, commit or stash them first.')

    version = open('VERSION').read().strip()

    existing_tag = local(clom.git.tag('-l', version), capture=True)
    if not existing_tag.strip():
        print('Tagging version %s' % version)
        local(clom.git.tag(version))

    if confirm('Push and release version %s to PyPI?' % version, default=True):
        local(clom.git.push('origin', 'master'))
        local(clom.git.push('origin', version))
        local(clom.python('setup.py', 'sdist', 'upload'))
Release current version to pypi
381,825
def _iupac_ambiguous_equal(ambig_base, unambig_base):
    # The 17 dict entries were lost in extraction; restored here from the standard
    # IUPAC nucleotide ambiguity codes, plus U and the gap character.
    iupac_translation = {
        'A': 'A', 'C': 'C', 'G': 'G', 'T': 'T',
        'R': 'AG', 'Y': 'CT', 'S': 'CG', 'W': 'AT', 'K': 'GT', 'M': 'AC',
        'B': 'CGT', 'D': 'AGT', 'H': 'ACT', 'V': 'ACG',
        'N': 'ACGT', 'U': 'T', '-': '-',
    }
    for i in (ambig_base, unambig_base):
        if not len(i) == 1:
            raise ValueError("only one base may be passed.")
    return unambig_base.upper() in iupac_translation[ambig_base.upper()]
Tests two bases for equality, accounting for IUPAC ambiguous DNA: the ambiguous base
may be any IUPAC code; the unambiguous base must be one of ACGT.
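A quick sketch of the resulting behaviour (these are standard IUPAC facts):

print(_iupac_ambiguous_equal('R', 'A'))  # True: R covers A and G
print(_iupac_ambiguous_equal('R', 'C'))  # False
print(_iupac_ambiguous_equal('N', 'T'))  # True: N covers any base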
381,826
def predict_epitopes_from_args(args):
    mhc_model = mhc_binding_predictor_from_args(args)
    variants = variant_collection_from_args(args)
    gene_expression_dict = rna_gene_expression_dict_from_args(args)
    transcript_expression_dict = rna_transcript_expression_dict_from_args(args)
    predictor = TopiaryPredictor(
        mhc_model=mhc_model,
        padding_around_mutation=args.padding_around_mutation,
        ic50_cutoff=args.ic50_cutoff,
        percentile_cutoff=args.percentile_cutoff,
        min_transcript_expression=args.rna_min_transcript_expression,
        min_gene_expression=args.rna_min_gene_expression,
        only_novel_epitopes=args.only_novel_epitopes,
        raise_on_error=not args.skip_variant_errors)
    return predictor.predict_from_variants(
        variants=variants,
        transcript_expression_dict=transcript_expression_dict,
        gene_expression_dict=gene_expression_dict)
Returns an epitope collection from the given commandline arguments.

Parameters
----------
args : argparse.Namespace
    Parsed commandline arguments for Topiary
381,827
def set_h264_frm_ref_mode(self, mode=1, callback=None):
    params = {'mode': mode}
    # command name is an assumption inferred from the getter's naming convention
    return self.execute_command('setH264FrmRefMode', params, callback)
Set frame shipping reference mode of the H264-encoded stream.

params:
    `mode`: see the docstring of :meth:`get_h264_frm_ref_mode`
381,828
def get_fun(fun):
    conn = _get_conn(ret=None)
    cur = conn.cursor()
    # The SQL text was a string literal that did not survive extraction; a
    # representative query over the salt_returns table is assumed here.
    sql = '''SELECT s.id, s.jid, s.full_ret
             FROM salt_returns s
             JOIN (SELECT MAX(jid) AS jid FROM salt_returns GROUP BY fun, id) max
             ON s.jid = max.jid
             WHERE s.fun = ?'''
    cur.execute(sql, (fun,))
    data = cur.fetchall()
    ret = {}
    if data:
        for minion, _, retval in data:
            ret[minion] = salt.utils.json.loads(retval)
    _close_conn(conn)
    return ret
Return a dict of the last function called for all minions
381,829
def find_pattern_on_line(lines, n, max_wrap_lines):
    for typ, regexes in COMPILED_PATTERN_MAP.items():
        for regex in regexes:
            for m in range(max_wrap_lines):
                match_line = join_wrapped_lines(lines[n:n + 1 + m])
                if match_line.startswith('>'):  # strip one level of quoting
                    match_line = match_line[1:].strip()
                if regex.match(match_line.strip()):
                    return n + m, typ
    return None, None
Finds a forward/reply pattern within the given lines on text on the given line number and returns a tuple with the type ('reply' or 'forward') and line number of where the pattern ends. The returned line number may be different from the given line number in case the pattern wraps over multiple lines. Returns (None, None) if no pattern was found.
381,830
def get_nested_schema_object(self, fully_qualified_parent_name: str,
                             nested_item_name: str) -> Optional['SchemaObject']:
    # the return annotation's type argument was lost; 'SchemaObject' is an assumption
    return self.get_schema_object(
        self.get_fully_qualified_name(fully_qualified_parent_name, nested_item_name))
Used to generate a schema object from the given fully_qualified_parent_name and the nested_item_name.

:param fully_qualified_parent_name: The fully qualified name of the parent.
:param nested_item_name: The nested item name.
:return: An initialized schema object of the nested item.
381,831
def _set_fcoe_intf_enode_bind_type(self, v, load=False):
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # The three enumeration labels were lost in extraction (their values were 2, 1 and 3);
        # u'...' marks the gaps. Namespace/module strings follow the YANG names in this setter.
        t = YANGDynClass(
            v,
            base=RestrictedClassType(
                base_type=unicode,
                restriction_type="dict_key",
                restriction_arg={u'...': {'value': 2}, u'...': {'value': 1}, u'...': {'value': 3}},
            ),
            is_leaf=True,
            yang_name="fcoe-intf-enode-bind-type",
            rest_name="fcoe-intf-enode-bind-type",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=False,
            namespace='urn:brocade.com:mgmt:brocade-fcoe-ext',
            defining_module='brocade-fcoe-ext',
            yang_type='fcoe-binding-type',
            is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': 'fcoe_intf_enode_bind_type must be of a type compatible with fcoe-binding-type',
            'defined-type': "brocade-fcoe-ext:fcoe-binding-type",
            'generated-type': 'YANGDynClass(...)',
        })
    self.__fcoe_intf_enode_bind_type = t
    if hasattr(self, '_set'):
        self._set()
Setter method for fcoe_intf_enode_bind_type, mapped from YANG variable /brocade_fcoe_ext_rpc/fcoe_get_interface/output/fcoe_intf_list/fcoe_intf_enode_bind_type (fcoe-binding-type) If this variable is read-only (config: false) in the source YANG file, then _set_fcoe_intf_enode_bind_type is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_fcoe_intf_enode_bind_type() directly. YANG Description: This indicates if the enode is statically bound to the fcoe interface or not i.e. if statically bound this enode's mac will always login on the same fcoe port.
381,832
def validate_blob_uri_contents(contents: bytes, blob_uri: str) -> None:
    blob_path = parse.urlparse(blob_uri).path
    blob_hash = blob_path.split("/")[-1]
    contents_str = to_text(contents)
    content_length = len(contents_str)
    hashable_contents = "blob " + str(content_length) + "\0" + contents_str
    hash_object = hashlib.sha1(to_bytes(text=hashable_contents))
    if hash_object.hexdigest() != blob_hash:
        raise ValidationError(
            f"Hash of contents fetched from {blob_uri} do not match its hash: {blob_hash}."
        )
Raises an exception if the sha1 hash of the contents does not match the hash found in the blob_uri.

Formula for how git calculates the hash found here:
http://alblue.bandlem.com/2011/08/git-tip-of-week-objects.html
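The same formula can be checked against git itself; `git hash-object` reports exactly this digest for a file containing "hello\n":

import hashlib

contents = b"hello\n"
hashable = b"blob " + str(len(contents)).encode() + b"\0" + contents
print(hashlib.sha1(hashable).hexdigest())
# ce013625030ba8dba906f756967f9e9ca394464a, matching `git hash-object`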
381,833
def modify_target(self, to_state, to_outcome=None):
    if not (to_state is None and (to_outcome is not int and to_outcome is not None)):
        if not isinstance(to_state, string_types):
            raise ValueError("Invalid transition target port: to_state must be a string")
        if not isinstance(to_outcome, int) and to_outcome is not None:
            raise ValueError("Invalid transition target port: to_outcome must be of type int or None (if to_state "
                             "is of type str)")
    old_to_state = self.to_state
    old_to_outcome = self.to_outcome
    self._to_state = to_state
    self._to_outcome = to_outcome
    valid, message = self._check_validity()
    if not valid:
        self._to_state = old_to_state
        self._to_outcome = old_to_outcome
        raise ValueError("The transition target could not be changed: {0}".format(message))
Set both to_state and to_outcome at the same time to modify the transition target.

:param str to_state: State id of the target state
:param int to_outcome: Outcome id of the target port
:raises exceptions.ValueError: If parameters have wrong types or the new transition is not valid
381,834
def set_attribute_mapping(resource_attr_a, resource_attr_b, **kwargs):
    user_id = kwargs.get('user_id')
    ra_1 = get_resource_attribute(resource_attr_a)
    ra_2 = get_resource_attribute(resource_attr_b)
    mapping = ResourceAttrMap(resource_attr_id_a=resource_attr_a,
                              resource_attr_id_b=resource_attr_b,
                              network_a_id=ra_1.get_network().id,
                              network_b_id=ra_2.get_network().id)
    db.DBSession.add(mapping)
    db.DBSession.flush()
    return mapping
Define one resource attribute from one network as being the same as that from another network.
381,835
def color(out_string, color='grn'):
    # dict keys restored from the color names listed in the docstring
    c = {
        'blk': Fore.BLACK,
        'blu': Fore.BLUE,
        'cyn': Fore.CYAN,
        'grn': Fore.GREEN,
        'mag': Fore.MAGENTA,
        'red': Fore.RED,
        'wht': Fore.WHITE,
        'yel': Fore.YELLOW,
    }
    try:
        init()
        return (c[color] + Style.BRIGHT + out_string + Fore.RESET + Style.NORMAL)
    except AttributeError:
        return out_string
Highlight string for terminal color coding.

Purpose: We use this utility function to insert an ANSI/win32 color code and Bright
style marker before a string, and reset the color and style after the string. We then
return the string with these codes inserted.

@param out_string: the string to be colored
@type out_string: str
@param color: a string signifying which color to use. Defaults to 'grn'.
    Accepts the following colors: ['blk', 'blu', 'cyn', 'grn', 'mag', 'red', 'wht', 'yel']
@type color: str
@returns: the modified string, including the ANSI/win32 color codes.
@rtype: str
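A usage sketch (assumes colorama is installed, providing `Fore`, `Style`, and `init` as used above):

from colorama import Fore, Style, init

print(color('all good', 'grn') + ' / ' + color('problem', 'red'))
print(color('default is green'))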
381,836
def get_endpoint_descriptor(self, dev, ep, intf, alt, config):
    _not_implemented(self.get_endpoint_descriptor)
Return an endpoint descriptor of the given device.

The object returned is required to have all the Endpoint Descriptor fields accessible
as member variables. They must be convertible (but not required to be equal) to the
int type.

The ep parameter is the endpoint logical index (not the bEndpointAddress field) of the
endpoint descriptor desired. dev, intf, alt and config are the same values already
described in the get_interface_descriptor() method.
381,837
def trainingDataLink(data_1, data_2, common_key, training_size=50000):
    identified_records = collections.defaultdict(lambda: [[], []])
    matched_pairs = set()
    distinct_pairs = set()

    for record_id, record in data_1.items():
        identified_records[record[common_key]][0].append(record_id)
    for record_id, record in data_2.items():
        identified_records[record[common_key]][1].append(record_id)

    for keys_1, keys_2 in identified_records.values():
        if keys_1 and keys_2:
            matched_pairs.update(itertools.product(keys_1, keys_2))

    keys_1 = list(data_1.keys())
    keys_2 = list(data_2.keys())

    random_pairs = [(keys_1[i], keys_2[j])
                    for i, j in randomPairsMatch(len(data_1), len(data_2), training_size)]
    distinct_pairs = (pair for pair in random_pairs if pair not in matched_pairs)

    matched_records = [(data_1[key_1], data_2[key_2]) for key_1, key_2 in matched_pairs]
    distinct_records = [(data_1[key_1], data_2[key_2]) for key_1, key_2 in distinct_pairs]

    training_pairs = {'match': matched_records, 'distinct': distinct_records}
    return training_pairs
Construct training data for consumption by the ActiveLearning markPairs method from already linked datasets.

Arguments:
    data_1 -- Dictionary of records from first dataset, where the keys are record_ids
              and the values are dictionaries with the keys being field names
    data_2 -- Dictionary of records from second dataset, same form as data_1
    common_key -- The name of the record field that uniquely identifies a match
    training_size -- the rough limit of the number of training examples, defaults to 50000

Warning:
    Every match must be identified by the sharing of a common key. This function assumes
    that if two records do not share a common key then they are distinct records.
381,838
def css(self, filelist):
    # the rel/type/media literals are restored to the conventional stylesheet values
    if isinstance(filelist, basestring):
        self.link(href=filelist, rel='stylesheet', type='text/css', media='all')
    else:
        for file in filelist:
            self.link(href=file, rel='stylesheet', type='text/css', media='all')
This convenience function is only useful for html. It adds css stylesheet(s) to the document via the <link> element.
381,839
def check_token(self, respond):
    if respond.status_code == 401:
        self.credential.obtain_token(config=self.config)
        return False
    return True
Check if the user's token is valid.
381,840
def find_studies(self, query_dict=None, exact=False, verbose=False, **kwargs):
    # endpoint path fragments are assumptions; the original string literals were lost
    if self.use_v1:
        uri = '{p}/singlePropertySearchForStudies'.format(p=self.query_prefix)
    else:
        uri = '{p}/studies/find_studies'.format(p=self.query_prefix)
    return self._do_query(uri,
                          query_dict=query_dict,
                          exact=exact,
                          verbose=verbose,
                          valid_keys=self.study_search_term_set,
                          kwargs=kwargs)
Query on study properties. See documentation for _OTIWrapper class.
381,841
def html_path(builder, pagename=None):
    return builder.get_relative_uri(
        pagename or builder.current_docname,
        os.path.join(
            builder.app.config.slide_html_relative_path,
            pagename or builder.current_docname,
        ))
Calculate the relative path to the Slides for pagename.
381,842
def copy_SRM_file(destination=None, config='DEFAULT'):
    # file-name fragments are reconstructed from the docstring; the original literals were lost
    conf = read_configuration()
    src = pkgrs.resource_filename('latools', conf['srmfile'])
    if destination is None:
        destination = './LAtools_' + conf['config'] + '_SRMTable.csv'
    if os.path.isdir(destination):
        destination += '/LAtools_' + conf['config'] + '_SRMTable.csv'
    copyfile(src, destination)
    print(src + ' copied to ' + destination)
    return
Creates a copy of the default SRM table at the specified location.

Parameters
----------
destination : str
    The save location for the SRM file. If no location specified, saves it as
    'LAtools_[config]_SRMTable.csv' in the current working directory.
config : str
    It's possible to set up different configurations with different SRM files. This
    specifies the name of the configuration that you want to copy the SRM file from.
    If not specified, the 'DEFAULT' configuration is used.
381,843
def create_label(self, label, doc=None, callback=dummy_progress_cb):
    if doc:
        clone = doc.clone()
        r = self.index.create_label(label, doc=clone)
    return r
Create a new label.

Arguments:
    doc --- first document on which the label must be added (required for now)
381,844
def get_operation_full_job_id(op):
    # field names inferred from the docstring; the original string literals were lost
    job_id = op.get_field('job-id')
    task_id = op.get_field('task-id')
    if task_id:
        return '%s.%s' % (job_id, task_id)
    else:
        return job_id
Returns the job-id or job-id.task-id for the operation.
381,845
def log_once(key):
    global _last_logged
    if _disabled:
        return False
    elif key not in _logged:
        _logged.add(key)
        _last_logged = time.time()
        return True
    elif _periodic_log and time.time() - _last_logged > 60.0:
        _logged.clear()
        _last_logged = time.time()
        return False
    else:
        return False
Returns True if this is the "first" call for a given key.

Various logging settings can adjust the definition of "first".

Example:
    >>> if log_once("some_key"):
    ...     logger.info("Some verbose logging statement")
381,846
def OnNodeSelected(self, event):
    try:
        node = self.sorted[event.GetIndex()]
    except IndexError as err:
        # warning text is an assumption; the original string literal was lost
        log.warn(_('Invalid index in node selected: %(index)s'),
                 index=event.GetIndex())
    else:
        if node is not self.selected_node:
            wx.PostEvent(
                self,
                squaremap.SquareSelectionEvent(node=node, point=None, map=None)
            )
We have selected a node with the list control, tell the world
381,847
def parallel_map(func, *arg_iterable, **kwargs):
    # kwarg names are recovered from the variables they are assigned to
    chunksize = kwargs.pop('chunksize', 1)
    func_pre_args = kwargs.pop('func_pre_args', ())
    func_kwargs = kwargs.pop('func_kwargs', {})
    max_workers = kwargs.pop('max_workers', None)
    parallel = kwargs.pop('parallel', True)
    parallel_warning = kwargs.pop('parallel_warning', True)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    func_to_map = functools.partial(func, *func_pre_args, **func_kwargs)
    if parallel:
        pool = concurrent.futures.ProcessPoolExecutor(max_workers=max_workers)
        return list(pool.map(func_to_map, *arg_iterable, chunksize=chunksize))
    else:
        if parallel_warning:
            warnings.warn(('parallel_map has parallel=False - turn on '
                           'parallelisation for faster processing'), UserWarning)
        return list(map(func_to_map, *arg_iterable))
Apply function to iterable with parallel map, and hence returns results in order.

functools.partial is used to freeze func_pre_args and func_kwargs, meaning that the
iterable argument must be the last positional argument.

Roughly equivalent to
>>> [func(*func_pre_args, x, **func_kwargs) for x in arg_iterable]

Parameters
----------
func: function
    Function to apply to list of args.
arg_iterable: iterable
    argument to iterate over.
chunksize: int, optional
    Perform function in batches
func_pre_args: tuple, optional
    Positional arguments to place before the iterable argument in func.
func_kwargs: dict, optional
    Additional keyword arguments for func.
parallel: bool, optional
    To turn off parallelisation if needed.
parallel_warning: bool, optional
    To turn off warning for no parallelisation if needed.
max_workers: int or None, optional
    Number of processes. If max_workers is None then
    concurrent.futures.ProcessPoolExecutor defaults to using the number of processors
    of the machine. N.B. If max_workers=None and running on supercomputer clusters
    with multiple nodes, this may default to the number of processors on a single node.

Returns
-------
results_list: list of function outputs
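A minimal usage sketch (run as a script so the mapped function is importable by worker processes):

def square(x):
    return x ** 2

print(parallel_map(square, range(6), max_workers=2))
# [0, 1, 4, 9, 16, 25] - order is preserved
print(parallel_map(square, range(6), parallel=False, parallel_warning=False))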
381,848
def on_sdl_keydown(self, event):
    "press ESCAPE to quit the application"
    key = event.key.keysym.sym
    if key == SDLK_ESCAPE:
        self.running = False
press ESCAPE to quit the application
381,849
def folderitem(self, obj, item, index):
    url = item.get("url")
    title = item.get("Title")
    creator = obj.Creator()
    item["replace"]["Title"] = get_link(url, value=title)
    item["created"] = self.localize_date(obj.created())
    item["getType"] = _(obj.getType()[0])
    item["creator"] = ""
    if creator:
        props = api.get_user_properties(creator)
        name = props.get("fullname", creator)
        item["creator"] = name
    return item
Augment folder listing item
381,850
def clear(self, payload):
    self.logger.rotate(self.queue)
    self.queue.clear()
    self.logger.write(self.queue)
    # response keys/message are assumptions; the original string literals were lost
    answer = {'message': 'Queue cleared', 'status': 'success'}
    return answer
Clear the queue of any `done` or `failed` entries.

The log will be rotated once; otherwise we would lose all logs from those finished
processes.
381,851
def get_all_items_of_credit_note(self, credit_note_id):
    return self._iterate_through_pages(
        get_function=self.get_items_of_credit_note_per_page,
        resource=CREDIT_NOTE_ITEMS,
        **{'credit_note_id': credit_note_id}
    )
Get all items of a credit note.

This will iterate over all pages until it gets all elements. So if the rate limit is
exceeded it will throw an exception and you will get nothing.

:param credit_note_id: the credit note id
:return: list
381,852
def findAllBycolumn(self, target):
    column_matches = []
    for column_index in range(self._raster[1]):
        column = self.getRow(column_index)
        column_matches.append(column.findAll(target))
    return column_matches
Returns an array of columns in the region (defined by the raster), each column containing all matches in that column for the target pattern.
381,853
def enable_branching_model(self, project, repository):
    # dict keys and branch-type values follow Bitbucket's standard branching model;
    # the original string literals were lost in extraction
    default_model_data = {
        'development': {'refId': None, 'useDefault': True},
        'types': [
            {'displayName': 'Bugfix', 'enabled': True, 'id': 'BUGFIX', 'prefix': 'bugfix/'},
            {'displayName': 'Feature', 'enabled': True, 'id': 'FEATURE', 'prefix': 'feature/'},
            {'displayName': 'Hotfix', 'enabled': True, 'id': 'HOTFIX', 'prefix': 'hotfix/'},
            {'displayName': 'Release', 'enabled': True, 'id': 'RELEASE', 'prefix': 'release/'},
        ]}
    return self.set_branching_model(project, repository, default_model_data)
Enable branching model by setting it with default configuration :param project: :param repository: :return:
381,854
def nla_put_nested(msg, attrtype, nested):
    # debug format string is an assumption matching the three logged values
    _LOGGER.debug('msg 0x%x: attr type %d: adding msg 0x%x as nested attribute',
                  id(msg), attrtype, id(nested))
    return nla_put(msg, attrtype, nlmsg_datalen(nested.nm_nlh), nlmsg_data(nested.nm_nlh))
Add nested attributes to Netlink message.

https://github.com/thom311/libnl/blob/libnl3_2_25/lib/attr.c#L772

Takes the attributes found in the `nested` message and appends them to the message
`msg` nested in a container of the type `attrtype`. The `nested` message may not have
a family specific header.

Positional arguments:
    msg -- Netlink message (nl_msg class instance).
    attrtype -- attribute type (integer).
    nested -- message containing attributes to be nested (nl_msg class instance).

Returns:
    0 on success or a negative error code.
381,855
def export_html(html, filename, image_tag=None, inline=True):
    if image_tag is None:
        image_tag = default_image_tag
    else:
        image_tag = ensure_utf8(image_tag)

    if inline:
        path = None
    else:
        root, ext = os.path.splitext(filename)
        path = root + "_files"
        if os.path.isfile(path):
            raise OSError("%s exists, but is not a directory." % path)

    with open(filename, 'w') as f:
        html = fix_html(html)
        f.write(IMG_RE.sub(lambda x: image_tag(x, path=path, format="png"), html))
Export the contents of the ConsoleWidget as HTML.

Parameters
----------
html : str
    A utf-8 encoded Python string containing the Qt HTML to export.
filename : str
    The file to be saved.
image_tag : callable, optional (default None)
    Used to convert images. See ``default_image_tag()`` for information.
inline : bool, optional [default True]
    If True, include images as inline PNGs. Otherwise, include them as links to
    external PNG files, mimicking web browsers' "Web Page, Complete" behavior.
381,856
def cancel_all_linking(self):
    self.logger.info("Cancel_all_linking for device %s", self.device_id)
    # Insteon command bytes lost in extraction; '08' (Exit Linking Mode)
    # with an empty second byte is an assumption.
    self.hub.direct_command(self.device_id, '08', '00')
Cancel all linking
381,857
def switch_from_external_to_main_wf(self):
    self.run()
Main workflow switcher. This method recreates the main workflow from the `main wf` dict which was set by the external workflow switcher previously.
381,858
def which(name, env_path=ENV_PATH, env_path_ext=ENV_PATHEXT,
          is_executable_fnc=isexec, path_join_fnc=os.path.join,
          os_name=os.name):
    for path in env_path:
        for suffix in env_path_ext:
            exe_file = path_join_fnc(path, name) + suffix
            if is_executable_fnc(exe_file):
                return exe_file
    return None
Get command absolute path. :param name: name of executable command :type name: str :param env_path: OS environment executable paths, defaults to autodetected :type env_path: list of str :param env_path_ext: OS environment executable suffixes, defaults to autodetected :type env_path_ext: list of str :param is_executable_fnc: callable will be used to detect if path is executable, defaults to `isexec` :type is_executable_fnc: Callable :param path_join_fnc: callable will be used to join path components :type path_join_fnc: Callable :param os_name: os name, defaults to os.name :type os_name: str :return: absolute path :rtype: str or None
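A minimal usage sketch of the which() above; the executable name is illustrative only:

# Hypothetical usage: locate an interpreter on the current PATH.
path = which("python")
if path is not None:
    print("found executable at", path)
else:
    print("not on PATH")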
381,859
def fit(self, X, y):
    # The padding token literal was lost in extraction; '<PAD>' is assumed.
    word_vector_transformer = WordVectorTransformer(padding='<PAD>')
    X = word_vector_transformer.fit_transform(X)
    X = LongTensor(X)
    self.word_vector_transformer = word_vector_transformer

    y_transformer = LabelEncoder()
    y = y_transformer.fit_transform(y)
    y = torch.from_numpy(y)
    self.y_transformer = y_transformer

    dataset = CategorizedDataset(X, y)
    dataloader = DataLoader(dataset,
                            batch_size=self.batch_size,
                            shuffle=True,
                            num_workers=4)

    KERNEL_SIZES = self.kernel_sizes
    NUM_KERNEL = self.num_kernel
    EMBEDDING_DIM = self.embedding_dim

    model = TextCNN(
        vocab_size=word_vector_transformer.get_vocab_size(),
        embedding_dim=EMBEDDING_DIM,
        output_size=len(self.y_transformer.classes_),
        kernel_sizes=KERNEL_SIZES,
        num_kernel=NUM_KERNEL)

    if USE_CUDA:
        model = model.cuda()

    EPOCH = self.epoch
    LR = self.lr
    loss_function = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=LR)

    for epoch in range(EPOCH):
        losses = []
        for i, data in enumerate(dataloader):
            X, y = data
            X, y = Variable(X), Variable(y)
            optimizer.zero_grad()
            model.train()
            output = model(X)
            loss = loss_function(output, y)
            losses.append(loss.data.tolist()[0])
            loss.backward()
            optimizer.step()

            if i % 100 == 0:
                print("[%d/%d] mean_loss : %0.2f" % (
                    epoch, EPOCH, np.mean(losses)))
                losses = []
    self.model = model
Fit KimCNNClassifier according to X, y Parameters ---------- X : list of string each item is a raw text y : list of string each item is a label
381,860
def dispatch(self):
    # Option names and help strings were lost in extraction; reconstructed
    # minimally from the serve() call below.
    parser = argparse.ArgumentParser(description='Run the server.')
    parser.add_argument('addr', metavar='ADDR', type=str,
                        help='address to bind to')
    parser.add_argument('port', metavar='PORT', type=int,
                        help='port to listen on')
    args = parser.parse_args()
    self.serve(port=args.port, address=args.addr)
Command-line dispatch.
381,861
def draw(self):
    self.ax.plot(self.k_values_, self.k_scores_, marker="D")
    if self.locate_elbow and self.elbow_value_ is not None:
        elbow_label = r"$elbow\ at\ k={}, score={:0.3f}$".format(
            self.elbow_value_, self.elbow_score_)
        self.ax.axvline(self.elbow_value_, c=LINE_COLOR,
                        linestyle="--", label=elbow_label)
    return self.ax
Draw the elbow curve for the specified scores and values of K.
381,862
def print(*a):
    try:
        _print(*a)
        return a[0] if len(a) == 1 else a
    except:
        _print(*a)
A print that returns what you give it instead of None.
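Because the wrapper returns its argument, it can sit inside an expression for quick tracing; a small illustration:

# Prints 6 and still binds the value, unlike the builtin print().
total = print(sum([1, 2, 3]))
assert total == 6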
381,863
def _get(self, timeout):
    if timeout is None:
        while self._empty():
            self.not_empty.wait()
    else:
        if timeout < 0:
            # The quoted argument name was lost in extraction; 'timeout' assumed.
            raise ValueError("'timeout' must be a positive number")
        endtime = _time() + timeout
        while self._empty():
            remaining = endtime - _time()
            if remaining <= 0.0:
                raise Empty()
            self.not_empty.wait(remaining)
    self.in_progress += 1
    return self.queue.popleft()
Non thread-safe utility function of self.get() doing the real work.
381,864
def snapshot(self, name):
    return self.get_data(
        "volumes/%s/snapshots/" % self.id,
        type=POST,
        params={"name": name}
    )
Create a snapshot of the volume. Args: name: string - a human-readable name for the snapshot
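A usage sketch, assuming a volume object from the same client library; the snapshot name is a placeholder:

# Hypothetical: create a named snapshot of an attached volume.
snap = volume.snapshot(name="pre-upgrade-backup")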
381,865
def cmp(self, other):
    if isinstance(other, Range):
        start = (self.start.replace(tzinfo=other.start.tz)
                 if other.start.tz and self.start.tz is None else self.start)
        end = (self.end.replace(tzinfo=other.end.tz)
               if other.end.tz and self.end.tz is None else self.end)
        if start == other.start and end == other.end:
            return 0
        elif start < other.start:
            return -1
        else:
            return 1
    elif isinstance(other, Date):
        if other.tz and self.start.tz is None:
            # NOTE: the original read other.start.tz here, which a Date
            # lacks; other.tz is assumed.
            return (0 if other == self.start.replace(tzinfo=other.tz)
                    else -1 if other > self.start.replace(tzinfo=other.tz)
                    else 1)
        return 0 if other == self.start else -1 if other > self.start else 1
    else:
        return self.cmp(Range(other, tz=self.start.tz))
*Note: checks Range.start() only* Key: self = [], other = {} * [ {----]----} => -1 * {---[---} ] => 1 * [---] {---} => -1 * [---] same as {---} => 0 * [--{-}--] => -1
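An illustrative sketch of the comparison key, assuming the Range constructor of the same module accepts two date strings:

a = Range("2018-01-01", "2018-01-10")
b = Range("2018-01-05", "2018-01-15")
assert a.cmp(b) == -1  # a starts before b: [ {----]----} => -1
assert b.cmp(a) == 1
assert a.cmp(a) == 0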
381,866
def cli(wio, send):
    # A fragment of the docstring had leaked into the extracted code body;
    # removed here (the full text is kept in the docstring below).
    command = send
    click.echo("UDP command: {}".format(command))
    result = udp.common_send(command)
    if result is None:
        return debug_error()
    else:
        click.echo(result)
Sends a UDP command to the wio device. \b DOES: Support "VERSION", "SCAN", "Blank?", "DEBUG", "ENDEBUG: 1", "ENDEBUG: 0" "APCFG: AP\\tPWDs\\tTOKENs\\tSNs\\tSERVER_Domains\\tXSERVER_Domain\\t\\r\\n", Note: 1. Ensure your device is in Configure Mode. 2. Change your computer network to Wio's AP. \b EXAMPLE: wio udp --send [command], send UDP command
381,867
def code(self):
    def uniq(seq):
        seen = set()
        seen_add = seen.add
        return [x for x in seq if x not in seen and not seen_add(x)]

    data_set = uniq(i for i in self.autos if i is not None)
    error_list = uniq(i for i in self.errors if i is not None)
    if error_list:
        return "\n".join(error_list)
    return "\n".join(data_set)
Removes duplicate values in the auto and error lists.
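The order-preserving de-duplication idiom used by uniq() works standalone; a self-contained check:

def uniq(seq):
    # seen_add is bound once to avoid an attribute lookup per element.
    seen = set()
    seen_add = seen.add
    return [x for x in seq if x not in seen and not seen_add(x)]

assert uniq(["b", "a", "b", "c", "a"]) == ["b", "a", "c"]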
381,868
def get_name(self, tag):
    # Reconstructed from extraction residue ("name", "kind", "class",
    # "struct", "namespace", "::"); the BeautifulSoup calls are a
    # best-effort guess at the original.
    name = tag.findChild('name').contents[0]
    if self.include_parent_scopes:
        parent = tag.findParent()
        if parent.get('kind') in ('class', 'struct', 'namespace'):
            name = parent.findChild('name').contents[0] + '::' + name
    return name
Extract and return a representative "name" from a tag. Override as necessary. get_name's output can be controlled through keyword arguments that are provided when initializing a TagProcessor. For instance, a member of a class or namespace can have its parent scope included in the name by passing include_parent_scopes=True to __init__(). Args: tag: A BeautifulSoup Tag that satisfies match_criterion. Returns: A string that would be appropriate to use as an entry name in a Zeal database.
381,869
def keep_negative_mask(X, y, model_generator, method_name, num_fcounts=11):
    return __run_measure(measures.keep_mask, X, y, model_generator,
                         method_name, -1, num_fcounts, __mean_pred)
Keep Negative (mask) xlabel = "Max fraction of features kept" ylabel = "Negative mean model output" transform = "negate" sort_order = 5
381,870
def query_cached_package_list(self):
    if self.debug:
        self.logger.debug("DEBUG: reading pickled cache file")
    return cPickle.load(open(self.pkg_cache_file, "r"))
Return list of pickled package names from PYPI
381,871
def check_parallel_run(self):
    # Platform literal lost in extraction; 'nt' (Windows) assumed from the
    # warning message.
    if os.name == 'nt':
        logger.warning("The parallel daemon check is not available on Windows")
        self.__open_pidfile(write=True)
        return

    self.__open_pidfile()
    try:
        pid_var = self.fpid.readline().strip()
        if pid_var:
            pid = int(pid_var)
            logger.info("Found an existing pid (%s): '%s'",
                        self.pid_filename, pid_var)
        else:
            logger.debug("Not found an existing pid: %s", self.pid_filename)
            return
    except (IOError, ValueError) as err:
        logger.warning("PID file is empty or has an invalid content: %s",
                       self.pid_filename)
        return

    if pid == os.getpid():
        self.pid = pid
        return

    try:
        logger.debug("Testing if the process is running: '%s'", pid)
        os.kill(pid, 0)
    except OSError:
        self.pre_log.append(("DEBUG", "No former instance to replace"))
        logger.info("A stale pid file exists, reusing the same file")
        return

    if not self.do_replace:
        self.exit_on_error("A valid pid file still exists (pid=%s) and "
                           "I am not allowed to replace. Exiting!" % pid,
                           exit_code=3)

    self.pre_log.append(("DEBUG", "Replacing former instance: %d" % pid))
    try:
        pgid = os.getpgid(pid)
        os.killpg(pgid, signal.SIGQUIT)
    except os.error as err:
        if err.errno != errno.ESRCH:
            raise

    self.fpid.close()
    time.sleep(1)
    self.__open_pidfile(write=True)
Check (in pid file) if there isn't already a daemon running. If yes and do_replace: kill it. Keep in self.fpid the File object to the pid file. Will be used by writepid. :return: None
381,872
def size(ctx, dataset, kwargs):
    "Show dataset size"
    kwargs = parse_kwargs(kwargs)
    print(data(dataset, **ctx.obj).get(**kwargs).complete_set.size)
Show dataset size
381,873
def all(self):
    tests = list()
    for testclass in self.classes:
        tests.extend(self.classes[testclass].cases)
    return tests
Return all testcases :return:
381,874
def _build_zmat(self, construction_table):
    # Column-name literals were lost in extraction; reconstructed from the
    # conventional Zmatrix columns (atom, b, bond, a, angle, d, dihedral).
    c_table = construction_table
    default_cols = ['atom', 'b', 'bond', 'a', 'angle', 'd', 'dihedral']
    optional_cols = list(set(self.columns) - {'atom', 'x', 'y', 'z'})

    zmat_frame = pd.DataFrame(columns=default_cols + optional_cols,
                              dtype='float', index=c_table.index)
    zmat_frame.loc[:, optional_cols] = self.loc[c_table.index, optional_cols]
    zmat_frame.loc[:, 'atom'] = self.loc[c_table.index, 'atom']
    zmat_frame.loc[:, ['b', 'a', 'd']] = c_table
    zmat_values = self._calculate_zmat_values(c_table)
    zmat_frame.loc[:, ['bond', 'angle', 'dihedral']] = zmat_values
    zmatrix = Zmat(zmat_frame, metadata=self.metadata,
                   _metadata={'last_valid_cartesian': self.copy()})
    return zmatrix
Create the Zmatrix from a construction table. Args: Construction table (pd.DataFrame): Returns: Zmat: A new instance of :class:`Zmat`.
381,875
def fetch(self, **kwargs) -> 'FetchContextManager':
    # The assert message and header-name literals were lost in extraction;
    # reconstructed from context.
    assert self.method in self._allowed_methods, \
        'Disallowed HTTP method: {}'.format(self.method)
    self.date = datetime.now(tzutc())
    self.headers['Date'] = self.date.isoformat()
    if self.content_type is not None:
        self.headers['Content-Type'] = self.content_type
    full_url = self._build_url()
    self._sign(full_url.relative())
    rqst_ctx = self.session.aiohttp_session.request(
        self.method,
        str(full_url),
        data=self._pack_content(),
        timeout=_default_request_timeout,
        headers=self.headers)
    return FetchContextManager(self.session, rqst_ctx, **kwargs)
Sends the request to the server and reads the response. You may use this method either with plain synchronous Session or AsyncSession. Both the followings patterns are valid: .. code-block:: python3 from ai.backend.client.request import Request from ai.backend.client.session import Session with Session() as sess: rqst = Request(sess, 'GET', ...) with rqst.fetch() as resp: print(resp.text()) .. code-block:: python3 from ai.backend.client.request import Request from ai.backend.client.session import AsyncSession async with AsyncSession() as sess: rqst = Request(sess, 'GET', ...) async with rqst.fetch() as resp: print(await resp.text())
381,876
def get_geo_info(filename, band=1):
    sourceds = gdal.Open(filename, GA_ReadOnly)
    ndv = sourceds.GetRasterBand(band).GetNoDataValue()
    xsize = sourceds.RasterXSize
    ysize = sourceds.RasterYSize
    geot = sourceds.GetGeoTransform()
    projection = osr.SpatialReference()
    projection.ImportFromWkt(sourceds.GetProjectionRef())
    datatype = sourceds.GetRasterBand(band).DataType
    datatype = gdal.GetDataTypeName(datatype)
    return ndv, xsize, ysize, geot, projection, datatype
Gets information from a Raster data set
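A hedged usage sketch; 'dem.tif' is a placeholder path, and the GDAL bindings are assumed to be imported as in the function above:

# Hypothetical: inspect a single-band GeoTIFF.
ndv, xsize, ysize, geot, projection, datatype = get_geo_info("dem.tif")
print("%dx%d raster, nodata=%s, dtype=%s" % (xsize, ysize, ndv, datatype))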
381,877
def face_adjacency_radius(self):
    radii, span = graph.face_adjacency_radius(mesh=self)
    # Cache key lost in extraction; 'face_adjacency_span' assumed.
    self._cache['face_adjacency_span'] = span
    return radii
The approximate radius of a cylinder that fits inside adjacent faces. Returns ------------ radii : (len(self.face_adjacency),) float Approximate radius formed by triangle pair
381,878
def sibling(self, offs=1):
    indx = self.pindex + offs
    if indx < 0:
        return None
    if indx >= len(self.parent.kids):
        return None
    return self.parent.kids[indx]
Return sibling node by relative offset from self.
381,879
def image(self, url, title="", width=800):
    title = title.strip()
    caption = title
    now = datetime.now()
    figId = now.strftime("%Y%m%dt%H%M%S.%f")
    if len(title):
        figId = "%(title)s %(figId)s" % locals()
    # The template literal was lost in extraction; reconstructed from the
    # example output in the docstring.
    imageLink = """![%(caption)s][%(figId)s]

[%(figId)s]: %(url)s "%(title)s" width=%(width)spx""" % locals()
    return imageLink
*create MMD image link* **Key Arguments:** - ``title`` -- the title for the image - ``url`` -- the image URL - ``width`` -- the width in pixels of the image. Default *800* **Return:** - ``imageLink`` -- the MMD image link **Usage:** To create a MMD image link: .. code-block:: python imageLink = md.image( "http://www.thespacedoctor.co.uk/images/thespacedoctor_icon_white_circle.png", "thespacedoctor icon", 400) print imageLink # OUTPUT: # ![thespacedoctor icon][thespacedoctor icon 20170228t130146.472262] # # [thespacedoctor icon 20170228t130146.472262]: http://www.thespacedoctor.co.uk/images/thespacedoctor_icon_white_circle.png "thespacedoctor icon" width=400px #
381,880
def create_identity(user_id, curve_name):
    # String literals lost in extraction; 'gpg://' scheme and 'host' key
    # reconstructed.
    result = interface.Identity(identity_str='gpg://', curve_name=curve_name)
    result.identity_dict['host'] = user_id
    return result
Create GPG identity for hardware device.
381,881
def compare_mean_curves(calc_ref, calc, nsigma=3):
    # Datastore keys, error messages, and plot labels were lost in
    # extraction; reconstructed from context.
    dstore_ref = datastore.read(calc_ref)
    dstore = datastore.read(calc)
    imtls = dstore_ref['oqparam'].imtls
    if dstore['oqparam'].imtls != imtls:
        raise RuntimeError('The IMTs and levels are different between '
                           'calculation %d and %d' % (calc_ref, calc))
    sitecol_ref = dstore_ref['sitecol']
    sitecol = dstore['sitecol']
    site_id_ref = {(lon, lat): sid for sid, lon, lat in zip(
        sitecol_ref.sids, sitecol_ref.lons, sitecol_ref.lats)}
    site_id = {(lon, lat): sid for sid, lon, lat in zip(
        sitecol.sids, sitecol.lons, sitecol.lats)}
    common = set(site_id_ref) & set(site_id)
    if not common:
        raise RuntimeError('There are no common sites between calculation '
                           '%d and %d' % (calc_ref, calc))
    pmap_ref = PmapGetter(dstore_ref, sids=[site_id_ref[lonlat]
                                            for lonlat in common]).get_mean()
    pmap = PmapGetter(dstore, sids=[site_id[lonlat]
                                    for lonlat in common]).get_mean()
    for lonlat in common:
        mean, std = pmap[site_id[lonlat]].array.T
        mean_ref, std_ref = pmap_ref[site_id_ref[lonlat]].array.T
        err = numpy.sqrt(std**2 + std_ref**2)
        for imt in imtls:
            sl = imtls(imt)
            ok = (numpy.abs(mean[sl] - mean_ref[sl]) < nsigma * err[sl]).all()
            if not ok:
                md = (numpy.abs(mean[sl] - mean_ref[sl])).max()
                plt.title('point=%s, imt=%s, maxdiff=%.2e' % (lonlat, imt, md))
                plt.loglog(imtls[imt], mean_ref[sl] + std_ref[sl],
                           label=str(calc_ref), color='black')
                plt.loglog(imtls[imt], mean_ref[sl] - std_ref[sl],
                           color='black')
                plt.loglog(imtls[imt], mean[sl] + std[sl],
                           label=str(calc), color='red')
                plt.loglog(imtls[imt], mean[sl] - std[sl], color='red')
                plt.legend()
                plt.show()
Compare the hazard curves coming from two different calculations.
381,882
def create_from_request_pdu(pdu):
    # Struct format lost in extraction; '>BHH' (function code, address,
    # value) matches the Write Single Coil request layout.
    _, address, value = struct.unpack('>BHH', pdu)
    value = 1 if value == 0xFF00 else value

    instance = WriteSingleCoil()
    instance.address = address
    instance.value = value
    return instance
Create instance from request PDU. :param pdu: A request PDU.
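A sketch of building a matching request PDU by hand, assuming the '>BHH' layout (function code, address, value) noted above:

import struct

# Write Single Coil: turn coil 19 ON (0xFF00 means ON in Modbus).
pdu = struct.pack('>BHH', 0x05, 19, 0xFF00)
request = create_from_request_pdu(pdu)  # request.address == 19, request.value == 1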
381,883
def execute_wait(self, cmd, walltime=2, envs={}):
    stdin, stdout, stderr = self.ssh_client.exec_command(
        self.prepend_envs(cmd, envs), bufsize=-1, timeout=walltime
    )
    exit_status = stdout.channel.recv_exit_status()
    return (exit_status,
            stdout.read().decode("utf-8"),
            stderr.read().decode("utf-8"))
Synchronously execute a commandline string on the shell. Args: - cmd (string) : Commandline string to execute - walltime (int) : walltime in seconds Kwargs: - envs (dict) : Dictionary of env variables Returns: - retcode : Return code from the execution, -1 on fail - stdout : stdout string - stderr : stderr string Raises: None.
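A hypothetical call; `channel` stands in for an object holding a connected paramiko SSHClient as in the method above:

status, out, err = channel.execute_wait("uname -a", walltime=5,
                                        envs={"LC_ALL": "C"})
if status != 0:
    print("remote command failed:", err)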
381,884
def stop_sync(self):
    if self.scanning:
        self.stop_scan()
    for connection_id in list(self.connections.get_connections()):
        self.disconnect_sync(connection_id)
    self.bable.stop()
    self.connections.stop()
    self.stopped = True
Safely stop this BLED112 instance without leaving it in a weird state
381,885
def personByEmailAddress(self, address):
    email = self.store.findUnique(EmailAddress,
                                  EmailAddress.address == address,
                                  default=None)
    if email is not None:
        return email.person
Retrieve the L{Person} item for the given email address (or return None if no such person exists) @type address: C{unicode}
381,886
async def write_and_drain(self, data: bytes, timeout: NumType = None) -> None:
    if self._stream_writer is None:
        raise SMTPServerDisconnected("Client not connected")

    self._stream_writer.write(data)
    async with self._io_lock:
        await self._drain_writer(timeout)
Format a command and send it to the server.
381,887
def get_requests_request_name(self, request_name):
    query_parameters = {}
    if request_name is not None:
        # Serializer literals lost in extraction; reconstructed from the
        # azure-devops client conventions.
        query_parameters['requestName'] = self._serialize.query(
            'request_name', request_name, 'str')
    response = self._send(http_method='GET',
                          location_id='...',  # route GUID lost in extraction
                          version='5.0-preview.1',
                          query_parameters=query_parameters)
    return self._deserialize('Request', response)
GetRequestsRequestName. [Preview API] Get a symbol request by request name. :param str request_name: :rtype: :class:`<Request> <azure.devops.v5_0.symbol.models.Request>`
381,888
def logSumExp(A, B, out=None):
    if out is None:
        out = numpy.zeros(A.shape)

    indicator1 = A >= B
    indicator2 = numpy.logical_not(indicator1)
    out[indicator1] = A[indicator1] + numpy.log1p(
        numpy.exp(B[indicator1] - A[indicator1]))
    out[indicator2] = B[indicator2] + numpy.log1p(
        numpy.exp(A[indicator2] - B[indicator2]))
    return out
returns log(exp(A) + exp(B)). A and B are numpy arrays
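A quick numerical check of why the log1p formulation matters; the naive form underflows where this one stays finite:

import numpy

a = numpy.array([-1000.0])
b = numpy.array([-1000.0])
print(numpy.log(numpy.exp(a) + numpy.exp(b)))  # -inf: exp() underflows to 0
print(logSumExp(a, b))                         # ~ -999.3069 (= -1000 + log 2)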
381,889
def BytesDecoder(field_number, is_repeated, is_packed, key, new_default):
    local_DecodeVarint = _DecodeVarint
    assert not is_packed
    if is_repeated:
        tag_bytes = encoder.TagBytes(field_number,
                                     wire_format.WIRETYPE_LENGTH_DELIMITED)
        tag_len = len(tag_bytes)

        def DecodeRepeatedField(buffer, pos, end, message, field_dict):
            value = field_dict.get(key)
            if value is None:
                value = field_dict.setdefault(key, new_default(message))
            while 1:
                (size, pos) = local_DecodeVarint(buffer, pos)
                new_pos = pos + size
                if new_pos > end:
                    # Error message lost in extraction; reconstructed.
                    raise _DecodeError('Truncated string.')
                value.append(buffer[pos:new_pos])
                pos = new_pos + tag_len
                if buffer[new_pos:pos] != tag_bytes or new_pos == end:
                    return new_pos
        return DecodeRepeatedField
    else:
        def DecodeField(buffer, pos, end, message, field_dict):
            (size, pos) = local_DecodeVarint(buffer, pos)
            new_pos = pos + size
            if new_pos > end:
                raise _DecodeError('Truncated string.')
            field_dict[key] = buffer[pos:new_pos]
            return new_pos
        return DecodeField
Returns a decoder for a bytes field.
381,890
def graph_query(kind, source, target=None, neighbor_limit=1,
                database_filter=None):
    # The datasource names and parameter keys below were lost in extraction
    # and are reconstructed from the Pathway Commons v2 API conventions.
    default_databases = ['wp', 'smpdb', 'reconx', 'reactome', 'psp', 'pid',
                         'panther', 'netpath', 'msigdb', 'mirtarbase', 'kegg',
                         'intact', 'inoh', 'humancyc', 'hprd', 'drugbank',
                         'dip', 'corum']
    if not database_filter:
        query_databases = default_databases
    else:
        query_databases = database_filter
    params = {}
    params['format'] = 'BIOPAX'
    params['organism'] = '9606'
    params['datasource'] = query_databases
    kind_str = kind.lower()
    if kind_str not in ['neighborhood', 'pathsbetween', 'pathsfromto']:
        logger.warn('Invalid query type %s' % kind_str)
        return None
    params['kind'] = kind_str
    if isinstance(source, basestring):
        source_str = source
    else:
        source_str = ','.join(source)
    params['source'] = source_str
    try:
        neighbor_limit = int(neighbor_limit)
        params['limit'] = neighbor_limit
    except (TypeError, ValueError):
        logger.warn('Invalid neighborhood limit %s' % neighbor_limit)
        return None
    if target is not None:
        if isinstance(target, basestring):
            target_str = target
        else:
            target_str = ','.join(target)
        params['target'] = target_str

    logger.info('Sending Pathway Commons query with parameters: ')
    for k, v in params.items():
        logger.info(' %s: %s' % (k, v))
    logger.info('Sending query, waiting for response')
    res = requests.get(pc2_url + 'graph', params=params)
    if not res.status_code == 200:
        logger.error('Response is HTTP code %d' % res.status_code)
        if res.status_code == 500:
            logger.error('Note: HTTP code 500 can mean empty results '
                         'for a valid query.')
        return None
    # The step turning the response into a BioPAX model was lost in
    # extraction; owl_str_to_model is a plausible reconstruction.
    model = owl_str_to_model(res.text)
    return model
Perform a graph query on PathwayCommons. For more information on these queries, see http://www.pathwaycommons.org/pc2/#graph Parameters ---------- kind : str The kind of graph query to perform. Currently 3 options are implemented, 'neighborhood', 'pathsbetween' and 'pathsfromto'. source : list[str] A list of gene names which are the source set for the graph query. target : Optional[list[str]] A list of gene names which are the target set for the graph query. Only needed for 'pathsfromto' queries. neighbor_limit : Optional[int] This limits the length of the longest path considered in the graph query. Default: 1 Returns ------- model : org.biopax.paxtools.model.Model A BioPAX model (java object).
381,891
def add_parameter(self, name, value, meta=None):
    parameter = Parameter(name, value)
    if meta:
        parameter.meta = meta
    self.parameters.append(parameter)
Add a parameter to the parameter list. :param name: New parameter's name. :type name: str :param value: New parameter's value. :type value: float :param meta: New parameter's meta property. :type meta: dict
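A brief usage sketch; the parameter name, value, and meta dict are illustrative, and `model` stands in for any object exposing add_parameter():

# Hypothetical: attach a rate constant with unit metadata.
model.add_parameter("k_cat", 0.35, meta={"units": "1/s"})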
381,892
def open_file(self, info):
    if not info.initialized:
        return

    dlg = FileDialog(
        action="open",
        wildcard="Graphviz Files (*.dot, *.xdot, *.txt)|"
                 "*.dot;*.xdot;*.txt|Dot Files (*.dot)|*.dot|"
                 "All Files (*.*)|*.*|")
    if dlg.open() == OK:
        parser = GodotDataParser()
        model = parser.parse_dot_file(dlg.path)
        if model is not None:
            self.model = model
        else:
            print "error parsing: %s" % dlg.path
        self.save_file = dlg.path
    del dlg
Handles the open action.
381,893
def _connect(self, database=None):
    # Config key names were lost in extraction; standard psycopg2 argument
    # names reconstructed.
    conn_args = {
        'host': self.config['host'],
        'user': self.config['user'],
        'password': self.config['password'],
        'port': self.config['port'],
        'sslmode': self.config['sslmode'],
    }
    if database:
        conn_args['database'] = database
    else:
        conn_args['database'] = 'postgres'
    if self.config['password'] == '':
        del conn_args['password']
    try:
        conn = psycopg2.connect(**conn_args)
    except Exception as e:
        self.log.error(e)
        raise e
    conn.set_isolation_level(0)
    return conn
Connect to given database
381,894
def insert_before(self, text):
    selection_state = self.selection
    if selection_state:
        selection_state = SelectionState(
            original_cursor_position=(
                selection_state.original_cursor_position + len(text)),
            type=selection_state.type)
    return Document(
        text=text + self.text,
        cursor_position=self.cursor_position + len(text),
        selection=selection_state)
Create a new document, with this text inserted before the buffer. It keeps selection ranges and cursor position in sync.
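A small check of the cursor bookkeeping, assuming prompt_toolkit-style Document semantics as in the method above:

doc = Document(text="world", cursor_position=5)
new_doc = doc.insert_before("hello ")
assert new_doc.text == "hello world"
assert new_doc.cursor_position == 11  # shifted by len("hello ")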
381,895
def trigger_event(self, event, client, args, force_dispatch=False):
    self.controller.process_event(event, client, args,
                                  force_dispatch=force_dispatch)
Trigger a new event that will be dispatched to all modules.
381,896
def rand_bytes(length):
    # Error-message literals were lost in extraction; reconstructed from the
    # documented checks.
    if not isinstance(length, int_types):
        raise TypeError(pretty_message(
            '''
            length must be an integer, not %s
            ''',
            type_name(length)
        ))
    if length < 1:
        raise ValueError('length must be greater than 0')
    if length > 1024:
        raise ValueError('length must not be greater than 1024')

    buffer = buffer_from_bytes(length)
    result = Security.SecRandomCopyBytes(Security.kSecRandomDefault,
                                         length, buffer)
    if result != 0:
        raise OSError(_extract_error())
    return bytes_from_buffer(buffer)
Returns a number of random bytes suitable for cryptographic purposes :param length: The desired number of bytes :raises: ValueError - when any of the parameters contain an invalid value TypeError - when any of the parameters are of the wrong type OSError - when an error is returned by the OS crypto library :return: A byte string
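A usage sketch; note the validated bounds (1..1024 bytes) enforced above:

key = rand_bytes(32)  # e.g. material for an AES-256 key
assert len(key) == 32 and isinstance(key, bytes)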
381,897
def ntowfv2(domain, user, password):
    # Algorithm name and encodings were lost in extraction; MD4 over the
    # password with UTF-16-LE user/domain, per MS-NLMP, reconstructed.
    md4 = hashlib.new('md4')
    md4.update(password)
    hmac_context = hmac.HMAC(md4.digest(), hashes.MD5(),
                             backend=default_backend())
    hmac_context.update(user.upper().encode('utf-16le'))
    hmac_context.update(domain.encode('utf-16le'))
    return hmac_context.finalize()
NTOWFv2() Implementation [MS-NLMP] v20140502 NT LAN Manager (NTLM) Authentication Protocol 3.3.2 NTLM v2 Authentication :param domain: The windows domain name :param user: The windows username :param password: The users password :return: Hash Data
381,898
def clear(self):
    not_removed = []
    for fn in os.listdir(self.base):
        fn = os.path.join(self.base, fn)
        try:
            if os.path.islink(fn) or os.path.isfile(fn):
                os.remove(fn)
            elif os.path.isdir(fn):
                shutil.rmtree(fn)
        except Exception:
            not_removed.append(fn)
    return not_removed
Clear the cache.
381,899
def store_drop(cls, resource: str, session: Optional[Session] = None) -> 'Action':
    # Return annotation lost in extraction; 'Action' inferred from the
    # cls.make_drop() factory call.
    action = cls.make_drop(resource)
    _store_helper(action, session=session)
    return action
Store a "drop" event. :param resource: The normalized name of the resource to store Example: >>> from bio2bel.models import Action >>> Action.store_drop('hgnc')