def cache_control_expires(num_hours):
"""
Set the appropriate Cache-Control and Expires headers for the given
number of hours.
"""
num_seconds = int(num_hours * 60 * 60)
def decorator(func):
@wraps(func)
def inner(request, *args, **kwargs):
response = func(request, *args, **kwargs)
patch_response_headers(response, num_seconds)
return response
return inner
return decorator
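# Usage sketch (editor's addition, not from the original source): applying the
# decorator to a hypothetical Django view. Assumes Django is installed and that
# `cache_control_expires`, `functools.wraps` and Django's `patch_response_headers`
# are importable as in the function above.
from django.http import HttpResponse

@cache_control_expires(6)
def article_detail(request, slug):
    # The response leaves with Cache-Control: max-age=21600 and a matching Expires header.
    return HttpResponse("cached for six hours")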
def to_meshpoint(meshcode, lat_multiplier, lon_multiplier):
"""地域メッシュコードから緯度経度を算出する。
下記のメッシュに対応している。
1次(80km四方):1
40倍(40km四方):40000
20倍(20km四方):20000
16倍(16km四方):16000
2次(10km四方):2
8倍(8km四方):8000
5倍(5km四方):5000
4倍(4km四方):4000
2.5倍(2.5km四方):2500
2倍(2km四方):2000
3次(1km四方):3
4次(500m四方):4
5次(250m四方):5
6次(125m四方):6
Args:
meshcode: 指定次の地域メッシュコード
lat_multiplier: 当該メッシュの基準点(南西端)から、緯度座標上の点の位置を当該メッシュの単位緯度の倍数で指定
lon_multiplier: 当該メッシュの基準点(南西端)から、経度座標上の点の位置を当該メッシュの単位経度の倍数で指定
Return:
lat: 世界測地系の緯度(度単位)
lon: 世界測地系の経度(度単位)
"""
def mesh_cord(func_higher_cord, func_unit_cord, func_multiplier):
return func_higher_cord() + func_unit_cord() * func_multiplier()
lat_multiplier_lv = lambda: lat_multiplier
lon_multiplier_lv = lambda: lon_multiplier
lat_multiplier_lv1 = _functools.partial(
lambda meshcode: int(meshcode[0:2]), meshcode=meshcode)
lon_multiplier_lv1 = _functools.partial(
lambda meshcode: int(meshcode[2:4]), meshcode=meshcode)
lat_multiplier_40000 = _functools.partial(
lambda meshcode: int(bin(int(meshcode[4:5])-1)[2:].zfill(2)[0:1]), meshcode=meshcode)
lon_multiplier_40000 = _functools.partial(
lambda meshcode: int(bin(int(meshcode[4:5])-1)[2:].zfill(2)[1:2]), meshcode=meshcode)
lat_multiplier_20000 = _functools.partial(
lambda meshcode: int(bin(int(meshcode[5:6])-1)[2:].zfill(2)[0:1]), meshcode=meshcode)
lon_multiplier_20000 = _functools.partial(
lambda meshcode: int(bin(int(meshcode[5:6])-1)[2:].zfill(2)[1:2]), meshcode=meshcode)
lat_multiplier_16000 = _functools.partial(
lambda meshcode: int(meshcode[4:5])/2, meshcode=meshcode)
lon_multiplier_16000 = _functools.partial(
lambda meshcode: int(meshcode[5:6])/2, meshcode=meshcode)
lat_multiplier_lv2 = _functools.partial(
lambda meshcode: int(meshcode[4:5]), meshcode=meshcode)
lon_multiplier_lv2 = _functools.partial(
lambda meshcode: int(meshcode[5:6]), meshcode=meshcode)
lat_multiplier_8000 = _functools.partial(
lambda meshcode: int(meshcode[4:5]), meshcode=meshcode)
lon_multiplier_8000 = _functools.partial(
lambda meshcode: int(meshcode[5:6]), meshcode=meshcode)
lat_multiplier_5000 = _functools.partial(
lambda meshcode: int(bin(int(meshcode[6:7])-1)[2:].zfill(2)[0:1]), meshcode=meshcode)
lon_multiplier_5000 = _functools.partial(
lambda meshcode: int(bin(int(meshcode[6:7])-1)[2:].zfill(2)[1:2]), meshcode=meshcode)
lat_multiplier_4000 = _functools.partial(
lambda meshcode: int(bin(int(meshcode[7:8])-1)[2:].zfill(2)[0:1]), meshcode=meshcode)
lon_multiplier_4000 = _functools.partial(
lambda meshcode: int(bin(int(meshcode[7:8])-1)[2:].zfill(2)[1:2]), meshcode=meshcode)
lat_multiplier_2500 = _functools.partial(
lambda meshcode: int(bin(int(meshcode[7:8])-1)[2:].zfill(2)[0:1]), meshcode=meshcode)
lon_multiplier_2500 = _functools.partial(
lambda meshcode: int(bin(int(meshcode[7:8])-1)[2:].zfill(2)[1:2]), meshcode=meshcode)
lat_multiplier_2000 = _functools.partial(
lambda meshcode: int(meshcode[6:7])/2, meshcode=meshcode)
lon_multiplier_2000 = _functools.partial(
lambda meshcode: int(meshcode[7:8])/2, meshcode=meshcode)
lat_multiplier_lv3 = _functools.partial(
lambda meshcode: int(meshcode[6:7]), meshcode=meshcode)
lon_multiplier_lv3 = _functools.partial(
lambda meshcode: int(meshcode[7:8]), meshcode=meshcode)
lat_multiplier_lv4 = _functools.partial(
lambda meshcode: int(bin(int(meshcode[8:9])-1)[2:].zfill(2)[0:1]), meshcode=meshcode)
lon_multiplier_lv4 = _functools.partial(
lambda meshcode: int(bin(int(meshcode[8:9])-1)[2:].zfill(2)[1:2]), meshcode=meshcode)
lat_multiplier_lv5 = _functools.partial(
lambda meshcode: int(bin(int(meshcode[9:10])-1)[2:].zfill(2)[0:1]), meshcode=meshcode)
lon_multiplier_lv5 = _functools.partial(
lambda meshcode: int(bin(int(meshcode[9:10])-1)[2:].zfill(2)[1:2]), meshcode=meshcode)
lat_multiplier_lv6 = _functools.partial(
lambda meshcode: int(bin(int(meshcode[10:11])-1)[2:].zfill(2)[0:1]), meshcode=meshcode)
lon_multiplier_lv6 = _functools.partial(
lambda meshcode: int(bin(int(meshcode[10:11])-1)[2:].zfill(2)[1:2]), meshcode=meshcode)
mesh_lv1_default_lat = _functools.partial(
mesh_cord,
func_higher_cord=lambda: 0,
func_unit_cord=_unit_lat_lv1,
func_multiplier=lat_multiplier_lv1)
mesh_lv1_default_lon = _functools.partial(
mesh_cord,
func_higher_cord=lambda: 100,
func_unit_cord=_unit_lon_lv1,
func_multiplier=lon_multiplier_lv1)
mesh_40000_default_lat = _functools.partial(
mesh_cord,
func_higher_cord=mesh_lv1_default_lat,
func_unit_cord=_unit_lat_40000,
func_multiplier=lat_multiplier_40000)
mesh_40000_default_lon = _functools.partial(
mesh_cord,
func_higher_cord=mesh_lv1_default_lon,
func_unit_cord=_unit_lon_40000,
func_multiplier=lon_multiplier_40000)
mesh_20000_default_lat = _functools.partial(
mesh_cord,
func_higher_cord=mesh_40000_default_lat,
func_unit_cord=_unit_lat_20000,
func_multiplier=lat_multiplier_20000)
mesh_20000_default_lon = _functools.partial(
mesh_cord,
func_higher_cord=mesh_40000_default_lon,
func_unit_cord=_unit_lon_20000,
func_multiplier=lon_multiplier_20000)
mesh_16000_default_lat = _functools.partial(
mesh_cord,
func_higher_cord=mesh_lv1_default_lat,
func_unit_cord=_unit_lat_16000,
func_multiplier=lat_multiplier_16000)
mesh_16000_default_lon = _functools.partial(
mesh_cord,
func_higher_cord=mesh_lv1_default_lon,
func_unit_cord=_unit_lon_16000,
func_multiplier=lon_multiplier_16000)
mesh_lv2_default_lat = _functools.partial(
mesh_cord,
func_higher_cord=mesh_lv1_default_lat,
func_unit_cord=_unit_lat_lv2,
func_multiplier=lat_multiplier_lv2)
mesh_lv2_default_lon = _functools.partial(
mesh_cord,
func_higher_cord=mesh_lv1_default_lon,
func_unit_cord=_unit_lon_lv2,
func_multiplier=lon_multiplier_lv2)
mesh_8000_default_lat = _functools.partial(
mesh_cord,
func_higher_cord=mesh_lv1_default_lat,
func_unit_cord=_unit_lat_8000,
func_multiplier=lat_multiplier_8000)
mesh_8000_default_lon = _functools.partial(
mesh_cord,
func_higher_cord=mesh_lv1_default_lon,
func_unit_cord=_unit_lon_8000,
func_multiplier=lon_multiplier_8000)
mesh_5000_default_lat = _functools.partial(
mesh_cord,
func_higher_cord=mesh_lv2_default_lat,
func_unit_cord=_unit_lat_5000,
func_multiplier=lat_multiplier_5000)
mesh_5000_default_lon = _functools.partial(
mesh_cord,
func_higher_cord=mesh_lv2_default_lon,
func_unit_cord=_unit_lon_5000,
func_multiplier=lon_multiplier_5000)
mesh_4000_default_lat = _functools.partial(
mesh_cord,
func_higher_cord=mesh_8000_default_lat,
func_unit_cord=_unit_lat_4000,
func_multiplier=lat_multiplier_4000)
mesh_4000_default_lon = _functools.partial(
mesh_cord,
func_higher_cord=mesh_8000_default_lon,
func_unit_cord=_unit_lon_4000,
func_multiplier=lon_multiplier_4000)
mesh_2500_default_lat = _functools.partial(
mesh_cord,
func_higher_cord=mesh_5000_default_lat,
func_unit_cord=_unit_lat_2500,
func_multiplier=lat_multiplier_2500)
mesh_2500_default_lon = _functools.partial(
mesh_cord,
func_higher_cord=mesh_5000_default_lon,
func_unit_cord=_unit_lon_2500,
func_multiplier=lon_multiplier_2500)
mesh_2000_default_lat = _functools.partial(
mesh_cord,
func_higher_cord=mesh_lv2_default_lat,
func_unit_cord=_unit_lat_2000,
func_multiplier=lat_multiplier_2000)
mesh_2000_default_lon = _functools.partial(
mesh_cord,
func_higher_cord=mesh_lv2_default_lon,
func_unit_cord=_unit_lon_2000,
func_multiplier=lon_multiplier_2000)
mesh_lv3_default_lat = _functools.partial(
mesh_cord,
func_higher_cord=mesh_lv2_default_lat,
func_unit_cord=_unit_lat_lv3,
func_multiplier=lat_multiplier_lv3)
mesh_lv3_default_lon = _functools.partial(
mesh_cord,
func_higher_cord=mesh_lv2_default_lon,
func_unit_cord=_unit_lon_lv3,
func_multiplier=lon_multiplier_lv3)
mesh_lv4_default_lat = _functools.partial(
mesh_cord,
func_higher_cord=mesh_lv3_default_lat,
func_unit_cord=_unit_lat_lv4,
func_multiplier=lat_multiplier_lv4)
mesh_lv4_default_lon = _functools.partial(
mesh_cord,
func_higher_cord=mesh_lv3_default_lon,
func_unit_cord=_unit_lon_lv4,
func_multiplier=lon_multiplier_lv4)
mesh_lv5_default_lat = _functools.partial(
mesh_cord,
func_higher_cord=mesh_lv4_default_lat,
func_unit_cord=_unit_lat_lv5,
func_multiplier=lat_multiplier_lv5)
mesh_lv5_default_lon = _functools.partial(
mesh_cord,
func_higher_cord=mesh_lv4_default_lon,
func_unit_cord=_unit_lon_lv5,
func_multiplier=lon_multiplier_lv5)
mesh_lv6_default_lat = _functools.partial(
mesh_cord,
func_higher_cord=mesh_lv5_default_lat,
func_unit_cord=_unit_lat_lv6,
func_multiplier=lat_multiplier_lv6)
mesh_lv6_default_lon = _functools.partial(
mesh_cord,
func_higher_cord=mesh_lv5_default_lon,
func_unit_cord=_unit_lon_lv6,
func_multiplier=lon_multiplier_lv6)
mesh_lv1_lat = _functools.partial(
mesh_cord,
func_higher_cord=mesh_lv1_default_lat,
func_unit_cord=_unit_lat_lv1,
func_multiplier=lat_multiplier_lv)
mesh_lv1_lon = _functools.partial(
mesh_cord,
func_higher_cord=mesh_lv1_default_lon,
func_unit_cord=_unit_lon_lv1,
func_multiplier=lon_multiplier_lv)
mesh_40000_lat = _functools.partial(
mesh_cord,
func_higher_cord=mesh_40000_default_lat,
func_unit_cord=_unit_lat_40000,
func_multiplier=lat_multiplier_lv)
mesh_40000_lon = _functools.partial(
mesh_cord,
func_higher_cord=mesh_40000_default_lon,
func_unit_cord=_unit_lon_40000,
func_multiplier=lon_multiplier_lv)
mesh_20000_lat = _functools.partial(
mesh_cord,
func_higher_cord=mesh_20000_default_lat,
func_unit_cord=_unit_lat_20000,
func_multiplier=lat_multiplier_lv)
mesh_20000_lon = _functools.partial(
mesh_cord,
func_higher_cord=mesh_20000_default_lon,
func_unit_cord=_unit_lon_20000,
func_multiplier=lon_multiplier_lv)
mesh_16000_lat = _functools.partial(
mesh_cord,
func_higher_cord=mesh_16000_default_lat,
func_unit_cord=_unit_lat_16000,
func_multiplier=lat_multiplier_lv)
mesh_16000_lon = _functools.partial(
mesh_cord,
func_higher_cord=mesh_16000_default_lon,
func_unit_cord=_unit_lon_16000,
func_multiplier=lon_multiplier_lv)
mesh_lv2_lat = _functools.partial(
mesh_cord,
func_higher_cord=mesh_lv2_default_lat,
func_unit_cord=_unit_lat_lv2,
func_multiplier=lat_multiplier_lv)
mesh_lv2_lon = _functools.partial(
mesh_cord,
func_higher_cord=mesh_lv2_default_lon,
func_unit_cord=_unit_lon_lv2,
func_multiplier=lon_multiplier_lv)
mesh_8000_lat = _functools.partial(
mesh_cord,
func_higher_cord=mesh_8000_default_lat,
func_unit_cord=_unit_lat_8000,
func_multiplier=lat_multiplier_lv)
mesh_8000_lon = _functools.partial(
mesh_cord,
func_higher_cord=mesh_8000_default_lon,
func_unit_cord=_unit_lon_8000,
func_multiplier=lon_multiplier_lv)
mesh_5000_lat = _functools.partial(
mesh_cord,
func_higher_cord=mesh_5000_default_lat,
func_unit_cord=_unit_lat_5000,
func_multiplier=lat_multiplier_lv)
mesh_5000_lon = _functools.partial(
mesh_cord,
func_higher_cord=mesh_5000_default_lon,
func_unit_cord=_unit_lon_5000,
func_multiplier=lon_multiplier_lv)
mesh_4000_lat = _functools.partial(
mesh_cord,
func_higher_cord=mesh_4000_default_lat,
func_unit_cord=_unit_lat_4000,
func_multiplier=lat_multiplier_lv)
mesh_4000_lon = _functools.partial(
mesh_cord,
func_higher_cord=mesh_4000_default_lon,
func_unit_cord=_unit_lon_4000,
func_multiplier=lon_multiplier_lv)
mesh_2500_lat = _functools.partial(
mesh_cord,
func_higher_cord=mesh_2500_default_lat,
func_unit_cord=_unit_lat_2500,
func_multiplier=lat_multiplier_lv)
mesh_2500_lon = _functools.partial(
mesh_cord,
func_higher_cord=mesh_2500_default_lon,
func_unit_cord=_unit_lon_2500,
func_multiplier=lon_multiplier_lv)
mesh_2000_lat = _functools.partial(
mesh_cord,
func_higher_cord=mesh_2000_default_lat,
func_unit_cord=_unit_lat_2000,
func_multiplier=lat_multiplier_lv)
mesh_2000_lon = _functools.partial(
mesh_cord,
func_higher_cord=mesh_2000_default_lon,
func_unit_cord=_unit_lon_2000,
func_multiplier=lon_multiplier_lv)
mesh_lv3_lat = _functools.partial(
mesh_cord,
func_higher_cord=mesh_lv3_default_lat,
func_unit_cord=_unit_lat_lv3,
func_multiplier=lat_multiplier_lv)
mesh_lv3_lon = _functools.partial(
mesh_cord,
func_higher_cord=mesh_lv3_default_lon,
func_unit_cord=_unit_lon_lv3,
func_multiplier=lon_multiplier_lv)
mesh_lv4_lat = _functools.partial(
mesh_cord,
func_higher_cord=mesh_lv4_default_lat,
func_unit_cord=_unit_lat_lv4,
func_multiplier=lat_multiplier_lv)
mesh_lv4_lon = _functools.partial(
mesh_cord,
func_higher_cord=mesh_lv4_default_lon,
func_unit_cord=_unit_lon_lv4,
func_multiplier=lon_multiplier_lv)
mesh_lv5_lat = _functools.partial(
mesh_cord,
func_higher_cord=mesh_lv5_default_lat,
func_unit_cord=_unit_lat_lv5,
func_multiplier=lat_multiplier_lv)
mesh_lv5_lon = _functools.partial(
mesh_cord,
func_higher_cord=mesh_lv5_default_lon,
func_unit_cord=_unit_lon_lv5,
func_multiplier=lon_multiplier_lv)
mesh_lv6_lat = _functools.partial(
mesh_cord,
func_higher_cord=mesh_lv6_default_lat,
func_unit_cord=_unit_lat_lv6,
func_multiplier=lat_multiplier_lv)
mesh_lv6_lon = _functools.partial(
mesh_cord,
func_higher_cord=mesh_lv6_default_lon,
func_unit_cord=_unit_lon_lv6,
func_multiplier=lon_multiplier_lv)
level = to_meshlevel(meshcode)
if level == 1:
return mesh_lv1_lat(), mesh_lv1_lon()
if level == 40000:
return mesh_40000_lat(), mesh_40000_lon()
if level == 20000:
return mesh_20000_lat(), mesh_20000_lon()
if level == 16000:
return mesh_16000_lat(), mesh_16000_lon()
if level == 2:
return mesh_lv2_lat(), mesh_lv2_lon()
if level == 8000:
return mesh_8000_lat(), mesh_8000_lon()
if level == 5000:
return mesh_5000_lat(), mesh_5000_lon()
if level == 4000:
return mesh_4000_lat(), mesh_4000_lon()
if level == 2500:
return mesh_2500_lat(), mesh_2500_lon()
if level == 2000:
return mesh_2000_lat(), mesh_2000_lon()
if level == 3:
return mesh_lv3_lat(), mesh_lv3_lon()
if level == 4:
return mesh_lv4_lat(), mesh_lv4_lon()
if level == 5:
return mesh_lv5_lat(), mesh_lv5_lon()
if level == 6:
return mesh_lv6_lat(), mesh_lv6_lon()
raise ValueError("the level is unsupported.") | 地域メッシュコードから緯度経度を算出する。
下記のメッシュに対応している。
1次(80km四方):1
40倍(40km四方):40000
20倍(20km四方):20000
16倍(16km四方):16000
2次(10km四方):2
8倍(8km四方):8000
5倍(5km四方):5000
4倍(4km四方):4000
2.5倍(2.5km四方):2500
2倍(2km四方):2000
3次(1km四方):3
4次(500m四方):4
5次(250m四方):5
6次(125m四方):6
Args:
meshcode: 指定次の地域メッシュコード
lat_multiplier: 当該メッシュの基準点(南西端)から、緯度座標上の点の位置を当該メッシュの単位緯度の倍数で指定
lon_multiplier: 当該メッシュの基準点(南西端)から、経度座標上の点の位置を当該メッシュの単位経度の倍数で指定
Return:
lat: 世界測地系の緯度(度単位)
lon: 世界測地系の経度(度単位) |
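# Usage sketch (editor's addition): for a level-3 (1 km) mesh code the multipliers
# select a point inside the mesh; 0/0 is the south-west corner and 0.5/0.5 the
# centre. The mesh code below is hypothetical, and the returned values depend on
# the module-level _unit_lat_*/_unit_lon_* constants, so none are asserted here.
sw_lat, sw_lon = to_meshpoint('53393599', 0, 0)        # south-west corner of the mesh
c_lat, c_lon = to_meshpoint('53393599', 0.5, 0.5)      # centre of the 1 km mesh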
def MCLA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
"""Meta-CLustering Algorithm for a consensus function.
Parameters
----------
hdf5_file_name : file handle or string
cluster_runs : array of shape (n_partitions, n_samples)
verbose : bool, optional (default = False)
N_clusters_max : int, optional (default = None)
Returns
-------
A vector specifying the cluster label to which each sample has been assigned
by the MCLA approximation algorithm for consensus clustering.
Reference
---------
A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
"""
print('\n*****')
print('INFO: Cluster_Ensembles: MCLA: consensus clustering using MCLA.')
if N_clusters_max == None:
N_clusters_max = int(np.nanmax(cluster_runs)) + 1
N_runs = cluster_runs.shape[0]
N_samples = cluster_runs.shape[1]
print("INFO: Cluster_Ensembles: MCLA: preparing graph for meta-clustering.")
hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
w = hypergraph_adjacency.sum(axis = 1)
N_rows = hypergraph_adjacency.shape[0]
print("INFO: Cluster_Ensembles: MCLA: done filling hypergraph adjacency matrix. "
"Starting computation of Jaccard similarity matrix.")
# Next, obtain a matrix of pairwise Jaccard similarity scores between the rows of the hypergraph adjacency matrix.
with tables.open_file(hdf5_file_name, 'r+') as fileh:
FILTERS = get_compression_filter(4 * (N_rows ** 2))
similarities_MCLA = fileh.create_carray(fileh.root.consensus_group,
'similarities_MCLA', tables.Float32Atom(),
(N_rows, N_rows), "Matrix of pairwise Jaccard "
"similarity scores", filters = FILTERS)
scale_factor = 100.0
print("INFO: Cluster_Ensembles: MCLA: "
"starting computation of Jaccard similarity matrix.")
squared_MCLA = hypergraph_adjacency.dot(hypergraph_adjacency.transpose())
squared_sums = hypergraph_adjacency.sum(axis = 1)
squared_sums = np.squeeze(np.asarray(squared_sums))
chunks_size = get_chunk_size(N_rows, 7)
for i in range(0, N_rows, chunks_size):
n_dim = min(chunks_size, N_rows - i)
temp = squared_MCLA[i:min(i+chunks_size, N_rows), :].todense()
temp = np.squeeze(np.asarray(temp))
x = squared_sums[i:min(i+chunks_size, N_rows)]
x = x.reshape(-1, 1)
x = np.dot(x, np.ones((1, squared_sums.size)))
y = np.dot(np.ones((n_dim, 1)), squared_sums.reshape(1, -1))
temp = np.divide(temp, x + y - temp)
temp *= scale_factor
Jaccard_matrix = np.rint(temp)
similarities_MCLA[i:min(i+chunks_size, N_rows)] = Jaccard_matrix
del Jaccard_matrix, temp, x, y
gc.collect()
# Done computing the matrix of pairwise Jaccard similarity scores.
print("INFO: Cluster_Ensembles: MCLA: done computing the matrix of "
"pairwise Jaccard similarity scores.")
cluster_labels = cmetis(hdf5_file_name, N_clusters_max, w)
cluster_labels = one_to_max(cluster_labels)
# After 'cmetis' returns, we are done with clustering hyper-edges
# We are now ready to start the procedure meant to collapse meta-clusters.
N_consensus = np.amax(cluster_labels) + 1
fileh = tables.open_file(hdf5_file_name, 'r+')
FILTERS = get_compression_filter(4 * N_consensus * N_samples)
clb_cum = fileh.create_carray(fileh.root.consensus_group, 'clb_cum',
tables.Float32Atom(), (N_consensus, N_samples),
'Matrix of mean memberships, forming meta-clusters',
filters = FILTERS)
chunks_size = get_chunk_size(N_samples, 7)
for i in range(0, N_consensus, chunks_size):
x = min(chunks_size, N_consensus - i)
matched_clusters = np.where(cluster_labels == np.reshape(np.arange(i, min(i + chunks_size, N_consensus)), newshape = (x, 1)))
M = np.zeros((x, N_samples))
for j in range(x):
coord = np.where(matched_clusters[0] == j)[0]
M[j] = np.asarray(hypergraph_adjacency[matched_clusters[1][coord], :].mean(axis = 0))
clb_cum[i:min(i+chunks_size, N_consensus)] = M
# Done with collapsing the hyper-edges into a single meta-hyper-edge,
# for each of the (N_consensus - 1) meta-clusters.
del hypergraph_adjacency
gc.collect()
# Each object will now be assigned to its most associated meta-cluster.
chunks_size = get_chunk_size(N_consensus, 4)
N_chunks, remainder = divmod(N_samples, chunks_size)
if N_chunks == 0:
null_columns = np.where(clb_cum[:].sum(axis = 0) == 0)[0]
else:
szumsz = np.zeros(0)
for i in range(N_chunks):
M = clb_cum[:, i*chunks_size:(i+1)*chunks_size]
szumsz = np.append(szumsz, M.sum(axis = 0))
if remainder != 0:
M = clb_cum[:, N_chunks*chunks_size:N_samples]
szumsz = np.append(szumsz, M.sum(axis = 0))
null_columns = np.where(szumsz == 0)[0]
if null_columns.size != 0:
print("INFO: Cluster_Ensembles: MCLA: {} objects with all zero associations "
"in 'clb_cum' matrix of meta-clusters.".format(null_columns.size))
clb_cum[:, null_columns] = np.random.rand(N_consensus, null_columns.size)
random_state = np.random.RandomState()
tmp = fileh.create_carray(fileh.root.consensus_group, 'tmp', tables.Float32Atom(),
(N_consensus, N_samples), "Temporary matrix to help with "
"collapsing to meta-hyper-edges", filters = FILTERS)
chunks_size = get_chunk_size(N_samples, 2)
N_chunks, remainder = divmod(N_consensus, chunks_size)
if N_chunks == 0:
tmp[:] = random_state.rand(N_consensus, N_samples)
else:
for i in range(N_chunks):
tmp[i*chunks_size:(i+1)*chunks_size] = random_state.rand(chunks_size, N_samples)
if remainder !=0:
tmp[N_chunks*chunks_size:N_consensus] = random_state.rand(remainder, N_samples)
expr = tables.Expr("clb_cum + (tmp / 10000)")
expr.set_output(clb_cum)
expr.eval()
expr = tables.Expr("abs(tmp)")
expr.set_output(tmp)
expr.eval()
chunks_size = get_chunk_size(N_consensus, 2)
N_chunks, remainder = divmod(N_samples, chunks_size)
if N_chunks == 0:
sum_diag = tmp[:].sum(axis = 0)
else:
sum_diag = np.empty(0)
for i in range(N_chunks):
M = tmp[:, i*chunks_size:(i+1)*chunks_size]
sum_diag = np.append(sum_diag, M.sum(axis = 0))
if remainder != 0:
M = tmp[:, N_chunks*chunks_size:N_samples]
sum_diag = np.append(sum_diag, M.sum(axis = 0))
fileh.remove_node(fileh.root.consensus_group, "tmp")
# The corresponding disk space will be freed after a call to 'fileh.close()'.
inv_sum_diag = np.reciprocal(sum_diag.astype(float))
if N_chunks == 0:
clb_cum *= inv_sum_diag
max_entries = np.amax(clb_cum, axis = 0)
else:
max_entries = np.zeros(N_samples)
for i in range(N_chunks):
clb_cum[:, i*chunks_size:(i+1)*chunks_size] *= inv_sum_diag[i*chunks_size:(i+1)*chunks_size]
max_entries[i*chunks_size:(i+1)*chunks_size] = np.amax(clb_cum[:, i*chunks_size:(i+1)*chunks_size], axis = 0)
if remainder != 0:
clb_cum[:, N_chunks*chunks_size:N_samples] *= inv_sum_diag[N_chunks*chunks_size:N_samples]
max_entries[N_chunks*chunks_size:N_samples] = np.amax(clb_cum[:, N_chunks*chunks_size:N_samples], axis = 0)
cluster_labels = np.zeros(N_samples, dtype = int)
winner_probabilities = np.zeros(N_samples)
chunks_size = get_chunk_size(N_samples, 2)
for i in reversed(range(0, N_consensus, chunks_size)):
ind = np.where(np.tile(max_entries, (min(chunks_size, N_consensus - i), 1)) == clb_cum[i:min(i+chunks_size, N_consensus)])
cluster_labels[ind[1]] = i + ind[0]
winner_probabilities[ind[1]] = clb_cum[(ind[0] + i, ind[1])]
# Done with competing for objects.
cluster_labels = one_to_max(cluster_labels)
print("INFO: Cluster_Ensembles: MCLA: delivering "
"{} clusters.".format(np.unique(cluster_labels).size))
print("INFO: Cluster_Ensembles: MCLA: average posterior "
"probability is {}".format(np.mean(winner_probabilities)))
if cluster_labels.size <= 7:
print("INFO: Cluster_Ensembles: MCLA: the winning posterior probabilities are:")
print(winner_probabilities)
print("'INFO: Cluster_Ensembles: MCLA: the full posterior probabilities are:")
print(clb_cum)
fileh.remove_node(fileh.root.consensus_group, "clb_cum")
fileh.close()
return cluster_labels
def restore_data(self, data_dict):
"""
Restore the data dict - update the flask session and this object
"""
session[self._base_key] = data_dict
self._data_dict = session[self._base_key]
def add_copies(self, node, parent, elem, minel):
"""Add appropriate number of `elem` copies to `parent`."""
rep = 0 if minel is None else int(minel.arg) - 1
for i in range(rep):
parent.append(copy.deepcopy(elem))
def get_service_status(service):
'''Return the status of a particular service from the database.
'''
dbs = db.get_session()
srvs = dbs.query(db.ServiceStates).filter(db.ServiceStates.type == service)
if srvs.count():
return srvs[0].status
return db.ServiceStatus.STOPPED
def incr(self, field, by=1):
""" :see::meth:RedisMap.incr """
return self._client.hincrby(self.key_prefix, field, by)
def decode_address(self, addr):
"""Initialize the address from a string. Lots of different forms are supported."""
if _debug: Address._debug("decode_address %r (%s)", addr, type(addr))
# start out assuming this is a local station
self.addrType = Address.localStationAddr
self.addrNet = None
if addr == "*":
if _debug: Address._debug(" - localBroadcast")
self.addrType = Address.localBroadcastAddr
self.addrNet = None
self.addrAddr = None
self.addrLen = None
elif addr == "*:*":
if _debug: Address._debug(" - globalBroadcast")
self.addrType = Address.globalBroadcastAddr
self.addrNet = None
self.addrAddr = None
self.addrLen = None
elif isinstance(addr, int):
if _debug: Address._debug(" - int")
if (addr < 0) or (addr >= 256):
raise ValueError("address out of range")
self.addrAddr = struct.pack('B', addr)
self.addrLen = 1
elif isinstance(addr, basestring):
if _debug: Address._debug(" - str")
m = ip_address_mask_port_re.match(addr)
if m:
if _debug: Address._debug(" - IP address")
net, addr, mask, port = m.groups()
if not mask: mask = '32'
if not port: port = '47808'
if _debug: Address._debug(" - net, addr, mask, port: %r, %r, %r, %r", net, addr, mask, port)
if net:
net = int(net)
if (net >= 65535):
raise ValueError("network out of range")
self.addrType = Address.remoteStationAddr
self.addrNet = net
self.addrPort = int(port)
self.addrTuple = (addr, self.addrPort)
addrstr = socket.inet_aton(addr)
self.addrIP = struct.unpack('!L', addrstr)[0]
self.addrMask = (_long_mask << (32 - int(mask))) & _long_mask
self.addrHost = (self.addrIP & ~self.addrMask)
self.addrSubnet = (self.addrIP & self.addrMask)
bcast = (self.addrSubnet | ~self.addrMask)
self.addrBroadcastTuple = (socket.inet_ntoa(struct.pack('!L', bcast & _long_mask)), self.addrPort)
self.addrAddr = addrstr + struct.pack('!H', self.addrPort & _short_mask)
self.addrLen = 6
elif ethernet_re.match(addr):
if _debug: Address._debug(" - ethernet")
self.addrAddr = xtob(addr, ':')
self.addrLen = len(self.addrAddr)
elif re.match(r"^\d+$", addr):
if _debug: Address._debug(" - int")
addr = int(addr)
if (addr > 255):
raise ValueError("address out of range")
self.addrAddr = struct.pack('B', addr)
self.addrLen = 1
elif re.match(r"^\d+:[*]$", addr):
if _debug: Address._debug(" - remote broadcast")
addr = int(addr[:-2])
if (addr >= 65535):
raise ValueError("network out of range")
self.addrType = Address.remoteBroadcastAddr
self.addrNet = addr
self.addrAddr = None
self.addrLen = None
elif re.match(r"^\d+:\d+$",addr):
if _debug: Address._debug(" - remote station")
net, addr = addr.split(':')
net = int(net)
addr = int(addr)
if (net >= 65535):
raise ValueError("network out of range")
if (addr > 255):
raise ValueError("address out of range")
self.addrType = Address.remoteStationAddr
self.addrNet = net
self.addrAddr = struct.pack('B', addr)
self.addrLen = 1
elif re.match(r"^0x([0-9A-Fa-f][0-9A-Fa-f])+$",addr):
if _debug: Address._debug(" - modern hex string")
self.addrAddr = xtob(addr[2:])
self.addrLen = len(self.addrAddr)
elif re.match(r"^X'([0-9A-Fa-f][0-9A-Fa-f])+'$",addr):
if _debug: Address._debug(" - old school hex string")
self.addrAddr = xtob(addr[2:-1])
self.addrLen = len(self.addrAddr)
elif re.match(r"^\d+:0x([0-9A-Fa-f][0-9A-Fa-f])+$",addr):
if _debug: Address._debug(" - remote station with modern hex string")
net, addr = addr.split(':')
net = int(net)
if (net >= 65535):
raise ValueError("network out of range")
self.addrType = Address.remoteStationAddr
self.addrNet = net
self.addrAddr = xtob(addr[2:])
self.addrLen = len(self.addrAddr)
elif re.match(r"^\d+:X'([0-9A-Fa-f][0-9A-Fa-f])+'$",addr):
if _debug: Address._debug(" - remote station with old school hex string")
net, addr = addr.split(':')
net = int(net)
if (net >= 65535):
raise ValueError("network out of range")
self.addrType = Address.remoteStationAddr
self.addrNet = net
self.addrAddr = xtob(addr[2:-1])
self.addrLen = len(self.addrAddr)
elif netifaces and interface_re.match(addr):
if _debug: Address._debug(" - interface name with optional port")
interface, port = interface_re.match(addr).groups()
if port is not None:
self.addrPort = int(port)
else:
self.addrPort = 47808
interfaces = netifaces.interfaces()
if interface not in interfaces:
raise ValueError("not an interface: %s" % (interface,))
if _debug: Address._debug(" - interfaces: %r", interfaces)
ifaddresses = netifaces.ifaddresses(interface)
if netifaces.AF_INET not in ifaddresses:
raise ValueError("interface does not support IPv4: %s" % (interface,))
ipv4addresses = ifaddresses[netifaces.AF_INET]
if len(ipv4addresses) > 1:
raise ValueError("interface supports multiple IPv4 addresses: %s" % (interface,))
ifaddress = ipv4addresses[0]
if _debug: Address._debug(" - ifaddress: %r", ifaddress)
addr = ifaddress['addr']
self.addrTuple = (addr, self.addrPort)
if _debug: Address._debug(" - addrTuple: %r", self.addrTuple)
addrstr = socket.inet_aton(addr)
self.addrIP = struct.unpack('!L', addrstr)[0]
if 'netmask' in ifaddress:
maskstr = socket.inet_aton(ifaddress['netmask'])
self.addrMask = struct.unpack('!L', maskstr)[0]
else:
self.addrMask = _long_mask
self.addrHost = (self.addrIP & ~self.addrMask)
self.addrSubnet = (self.addrIP & self.addrMask)
if 'broadcast' in ifaddress:
self.addrBroadcastTuple = (ifaddress['broadcast'], self.addrPort)
else:
self.addrBroadcastTuple = None
if _debug: Address._debug(" - addrBroadcastTuple: %r", self.addrBroadcastTuple)
self.addrAddr = addrstr + struct.pack('!H', self.addrPort & _short_mask)
self.addrLen = 6
else:
raise ValueError("unrecognized format")
elif isinstance(addr, tuple):
addr, port = addr
self.addrPort = int(port)
if isinstance(addr, basestring):
if not addr:
# when ('', n) is passed it is the local host address, but that
# could be more than one on a multihomed machine, the empty string
# means "any".
addrstr = '\0\0\0\0'
else:
addrstr = socket.inet_aton(addr)
self.addrTuple = (addr, self.addrPort)
elif isinstance(addr, (int, long)):
addrstr = struct.pack('!L', addr & _long_mask)
self.addrTuple = (socket.inet_ntoa(addrstr), self.addrPort)
else:
raise TypeError("tuple must be (string, port) or (long, port)")
if _debug: Address._debug(" - addrstr: %r", addrstr)
self.addrIP = struct.unpack('!L', addrstr)[0]
self.addrMask = _long_mask
self.addrHost = None
self.addrSubnet = None
self.addrBroadcastTuple = self.addrTuple
self.addrAddr = addrstr + struct.pack('!H', self.addrPort & _short_mask)
self.addrLen = 6
else:
raise TypeError("integer, string or tuple required") | Initialize the address from a string. Lots of different forms are supported. |
def get_config_from_file(conf_properties_files):
"""Reads properties files and saves them to a config object
:param conf_properties_files: semicolon-separated list of properties files
:returns: config object
"""
# Initialize the config object
config = ExtendedConfigParser()
logger = logging.getLogger(__name__)
# Configure properties (last files could override properties)
found = False
files_list = conf_properties_files.split(';')
for conf_properties_file in files_list:
result = config.read(conf_properties_file)
if len(result) == 0:
message = 'Properties config file not found: %s'
if len(files_list) == 1:
logger.error(message, conf_properties_file)
raise Exception(message % conf_properties_file)
else:
logger.debug(message, conf_properties_file)
else:
logger.debug('Reading properties from file: %s', conf_properties_file)
found = True
if not found:
message = 'None of the properties config files could be found'
logger.error(message)
raise Exception(message)
return config
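# Usage sketch (editor's addition): later files in the semicolon-separated list
# override properties from earlier ones. The file names are hypothetical.
config = get_config_from_file('conf/properties.cfg;conf/local-properties.cfg')
print(config.sections())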
def write_th(self, s, header=False, indent=0, tags=None):
"""
Method for writing a formatted <th> cell.
If col_space is set on the formatter then that is used for
the value of min-width.
Parameters
----------
s : object
The data to be written inside the cell.
header : boolean, default False
Set to True if the <th> is for use inside <thead>. This will
cause min-width to be set if there is one.
indent : int, default 0
The indentation level of the cell.
tags : string, default None
Tags to include in the cell.
Returns
-------
A written <th> cell.
"""
if header and self.fmt.col_space is not None:
tags = (tags or "")
tags += ('style="min-width: {colspace};"'
.format(colspace=self.fmt.col_space))
return self._write_cell(s, kind='th', indent=indent, tags=tags)
def callback(self):
"""Run callback."""
if self._callback_func and callable(self._callback_func):
self._callback_func(self)
def _retrieve_session(self, session_key):
"""
:type session_key: SessionKey
:returns: SimpleSession
"""
session_id = session_key.session_id
if (session_id is None):
msg = ("Unable to resolve session ID from SessionKey [{0}]."
"Returning null to indicate a session could not be "
"found.".format(session_key))
logger.debug(msg)
return None
session = self.session_store.read(session_id)
if (session is None):
# session ID was provided, meaning one is expected to be found,
# but we couldn't find one:
msg2 = "Could not find session with ID [{0}]".format(session_id)
raise ValueError(msg2)
return session
def messages(self):
'''a generator yielding the :class:`Message` structures in the index'''
# the file contains the fixed-size file header followed by
# fixed-size message structures. start after the file header and
# then simply return the message structures in sequence until the
# end of the file.
offset = self.LENGTH
while offset < len(self.mmap):
yield Message(mm=self.mmap, offset=offset)
offset += Message.LENGTH
def from_yaml(cls, yaml_str=None, str_or_buffer=None):
"""
Create a SegmentedRegressionModel instance from a saved YAML
configuration. Arguments are mutually exclusive.
Parameters
----------
yaml_str : str, optional
A YAML string from which to load model.
str_or_buffer : str or file like, optional
File name or buffer from which to load YAML.
Returns
-------
SegmentedRegressionModel
"""
cfg = yamlio.yaml_to_dict(yaml_str, str_or_buffer)
default_model_expr = cfg['default_config']['model_expression']
default_ytransform = cfg['default_config']['ytransform']
seg = cls(
cfg['segmentation_col'], cfg['fit_filters'],
cfg['predict_filters'], default_model_expr,
YTRANSFORM_MAPPING[default_ytransform], cfg['min_segment_size'],
cfg['name'])
if "models" not in cfg:
cfg["models"] = {}
for name, m in cfg['models'].items():
m['model_expression'] = m.get(
'model_expression', default_model_expr)
m['ytransform'] = m.get('ytransform', default_ytransform)
m['fit_filters'] = None
m['predict_filters'] = None
reg = RegressionModel.from_yaml(yamlio.convert_to_yaml(m, None))
seg._group.add_model(reg)
logger.debug(
'loaded segmented regression model {} from yaml'.format(seg.name))
return seg
def Barr_1981(Re, eD):
r'''Calculates Darcy friction factor using the method in Barr (1981) [2]_
as shown in [1]_.
.. math::
\frac{1}{\sqrt{f_d}} = -2\log\left\{\frac{\epsilon}{3.7D} +
\frac{4.518\log(\frac{Re}{7})}{Re\left[1+\frac{Re^{0.52}}{29}
\left(\frac{\epsilon}{D}\right)^{0.7}\right]}\right\}
Parameters
----------
Re : float
Reynolds number, [-]
eD : float
Relative roughness, [-]
Returns
-------
fd : float
Darcy friction factor [-]
Notes
-----
No range of validity specified for this equation.
Examples
--------
>>> Barr_1981(1E5, 1E-4)
0.01849836032779929
References
----------
.. [1] Winning, H. and T. Coole. "Explicit Friction Factor Accuracy and
Computational Efficiency for Turbulent Flow in Pipes." Flow, Turbulence
and Combustion 90, no. 1 (January 1, 2013): 1-27.
doi:10.1007/s10494-012-9419-7
.. [2] Barr, Dih, and Colebrook White."Technical Note. Solutions Of The
Colebrook-White Function For Resistance To Uniform Turbulent Flow."
ICE Proceedings 71, no. 2 (January 6, 1981): 529-35.
doi:10.1680/iicep.1981.1895.
'''
fd = (-2*log10(eD/3.7 + 4.518*log10(Re/7.)/(Re*(1+Re**0.52/29*eD**0.7))))**-2
return fd
def block_idxmat_shuffle(numdraws, numblocks, random_state=None):
"""Create K columns with unique random integers from 0 to N-1
Purpose:
--------
- Create K blocks for k-fold cross-validation
Parameters:
-----------
numdraws : int
number of observations N or sample size N
numblocks : int
number of blocks K
Example:
--------
import pandas as pd
import numpy as np
import oxyba as ox
X = np.random.normal(size=(7,5), scale=50).round(1)
N,_ = X.shape
K = 3; #number of blocks
idxmat, dropped = ox.block_idxmat_shuffle(N,K)
for b in range(K):
print('\nBlock:',b)
print(pd.DataFrame(X[idxmat[:,b],:], index=idxmat[:,b]))
print('\nDropped observations\n', X[dropped,:] )
print('\nrow indices of dropped observations:', dropped, '\n')
Why is this useful?
-------------------
- Avoid creating copies of dataset X during run time
- Shuffle the indices of the data points rather than the
data points themselves
Links:
------
- How numpy's permutation works, https://stackoverflow.com/a/15474335
"""
# load modules
import numpy as np
# minimum block size: bz=int(N/K)
blocksize = int(numdraws / numblocks)
# shuffle vector indices: from 0 to N-1
if random_state:
np.random.seed(random_state)
obsidx = np.random.permutation(numdraws)
# how many to drop? i.e. "numdrop = N - bz*K"
numdrop = numdraws % numblocks
dropped = obsidx[:numdrop]
obsidx = obsidx[numdrop:]
# reshape the remaining vector indices into a matrix
idxmat = obsidx.reshape((blocksize, numblocks))
# output the indices for the blocks, and indices of dropped observations
return idxmat, dropped
def do_mo(self):
"""
Generate mo files for all po files.
"""
log.debug("Start updating mo files ...")
for po_dir_path in self._iter_po_dir():
po_path = (po_dir_path / self._basename).with_suffix(".po")
lc_path = self._mo_path / po_dir_path.name / "LC_MESSAGES"
lc_path.mkdir(parents=True, exist_ok=True)
mo_path = (lc_path / self._basename).with_suffix(".mo")
log.debug("Creating from {po}: {mo}".format(po=str(po_path), mo=str(mo_path)))
check_call(["msgfmt", str(po_path), "-o", str(mo_path)])
log.debug("All mo files updated") | Generate mo files for all po files. |
def ndfrom2d(xtr, rsi):
"""Undo the array shape conversion applied by :func:`ndto2d`,
returning the input 2D array to its original shape.
Parameters
----------
xtr : array_like
Two-dimensional input array
rsi : tuple
A tuple containing the shape of the axis-permuted array and the
permutation order applied in :func:`ndto2d`.
Returns
-------
x : ndarray
Multi-dimensional output array
"""
# Extract components of conversion information tuple
xts = rsi[0]
prm = rsi[1]
# Reshape x to the shape obtained after permuting axes in ndto2d
xt = xtr.reshape(xts)
# Undo axis permutation performed in ndto2d
x = np.transpose(xt, np.argsort(prm))
# Return array with shape corresponding to that of the input to ndto2d
return x
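# Round-trip sketch (editor's addition): assumes the companion ndto2d(x, axis)
# helper from the same module, which returns the 2D array plus the
# (shape, permutation) tuple consumed by ndfrom2d.
import numpy as np
x = np.random.randn(4, 5, 6)
xtr, rsi = ndto2d(x, axis=2)
assert np.allclose(ndfrom2d(xtr, rsi), x)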
def mktime_tz(data):
"""Turn a 10-tuple as returned by parsedate_tz() into a POSIX timestamp."""
if data[9] is None:
# No zone info, so localtime is better assumption than GMT
return time.mktime(data[:8] + (-1,))
else:
t = calendar.timegm(data)
return t - data[9]
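# Worked example (editor's addition): paired with the standard library's
# parsedate_tz(), which produces the 10-tuple this function expects.
from email.utils import parsedate_tz
timestamp = mktime_tz(parsedate_tz('Fri, 09 Nov 2001 01:08:47 -0000'))
# timestamp == 1005268127 (seconds since the epoch, UTC)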
def validated_value(self, raw_value):
"""Return parsed parameter value and run validation handlers.
Error message included in exception will be included in http error
response
Args:
value: raw parameter value to parse and validate
Returns:
the validated value
Note:
Concept of validation for params is understood here as a process
of checking if data of valid type (successfully parsed/processed by
``.value()`` handler) does meet some other constraints
(length, bounds, uniqueness, etc.). It will internally call its
``value()`` handler.
"""
value = self.value(raw_value)
try:
for validator in self.validators:
validator(value)
except:
raise
else:
return value
def inject_settings(mixed: Union[str, Settings],
context: MutableMapping[str, Any],
fail_silently: bool = False) -> None:
"""Inject settings values to given context.
:param mixed:
Settings can be a string (that it will be read from Python path),
Python module or dict-like instance.
:param context:
Context to assign settings key values. It should support dict-like item
assignment.
:param fail_silently:
When enabled and reading settings from Python path ignore errors if
given Python path couldn't be loaded.
"""
if isinstance(mixed, str):
try:
mixed = import_module(mixed)
except Exception:
if fail_silently:
return
raise
for key, value in iter_settings(mixed):
context[key] = value
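# Usage sketch (editor's addition): 'myproject.settings' is a hypothetical module
# path; any mutable mapping works as the context, including a plain dict. With
# fail_silently=True a missing module is simply ignored.
context = {}
inject_settings('myproject.settings', context, fail_silently=True)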
def get_posts_with_limits(self, include_draft=False, **limits):
"""
Get all posts and filter them as needed.
:param include_draft: return draft posts or not
:param limits: other limits to the attrs of the result,
should be a dict with string or list values
:return: an iterable of Post objects
"""
filter_funcs = []
for attr in ('title', 'layout', 'author',
'email', 'tags', 'categories'):
if limits.get(attr):
filter_set = set(to_list(limits.get(attr)))
def get_filter_func(filter_set_, attr_):
return lambda p: filter_set_.intersection(
to_list(getattr(p, attr_)))
filter_funcs.append(get_filter_func(filter_set, attr))
for attr in ('created', 'updated'):
interval = limits.get(attr)
if isinstance(interval, (list, tuple)) and len(interval) == 2 \
and isinstance(interval[0], date) and isinstance(
interval[1], date):
# [start date(time), end date(time)]
start, end = interval
start = to_datetime(start)
if not isinstance(end, datetime):
# 'end' is a date,
# we should convert it to 00:00:00 of the next day,
# so that posts of that day will be included
end = datetime.strptime(
'%04d-%02d-%02d' % (end.year, end.month, end.day),
'%Y-%m-%d')
end += timedelta(days=1)
def get_filter_func(attr_, start_dt, end_dt):
return lambda p: start_dt <= getattr(p, attr_) < end_dt
filter_funcs.append(get_filter_func(attr, start, end))
return self.get_posts(include_draft=include_draft,
filter_functions=filter_funcs)
def fol_fc_ask(KB, alpha):
"""Inefficient forward chaining for first-order logic. [Fig. 9.3]
KB is a FolKB and alpha must be an atomic sentence."""
while True:
new = {}
for r in KB.clauses:
ps, q = parse_definite_clause(standardize_variables(r))
raise NotImplementedError
def validate_schema(schema_name):
"""Validate the JSON against a required schema_name."""
def decorator(f):
@wraps(f)
def wrapper(*args, **kw):
instance = args[0]
try:
instance.validator(instance.schemas[schema_name]).validate(request.get_json())
except ValidationError as e:
ret_dict = instance._create_ret_object(instance.FAILURE,
None, True,
instance.BAD_SCHEMA,
e.message)
instance.logger.error("Invalid Schema", ret_dict)
return jsonify(ret_dict), 400
instance.logger.debug("Schema is valid")
return f(*args, **kw)
return wrapper
return decorator
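# Usage sketch (editor's addition): the decorator targets methods of a Flask-based
# API object that provides the `validator`, `schemas`, `logger`, `_create_ret_object`,
# `FAILURE` and `BAD_SCHEMA` attributes referenced above. The class name and schema
# key below are hypothetical.
class FeedEndpoint(object):
    @validate_schema('feed_request')
    def feed(self):
        # Reached only after request.get_json() validates against schemas['feed_request'].
        return jsonify({'status': 'ok'})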
def release():
"""
Release current version to pypi
"""
with settings(warn_only=True):
r = local(clom.git['diff-files']('--quiet', '--ignore-submodules', '--'))
if r.return_code != 0:
abort('There are uncommitted changes, commit or stash them before releasing')
version = open('VERSION.txt').read().strip()
existing_tag = local(clom.git.tag('-l', version), capture=True)
if not existing_tag.strip():
print('Releasing %s...' % version)
local(clom.git.tag(version))
if confirm('Push %s to pypi?' % version, default=True):
local(clom.git.push('origin', 'HEAD'))
local(clom.git.push('origin', version))
local(clom.python('setup.py', 'sdist', 'upload'))
def _iupac_ambiguous_equal(ambig_base, unambig_base):
"""
Tests two bases for equality, accounting for IUPAC ambiguous DNA
ambiguous base may be IUPAC ambiguous, unambiguous must be one of ACGT
"""
iupac_translation = {
'A': 'A',
'C': 'C',
'G': 'G',
'T': 'T',
'U': 'U',
'R': 'AG',
'Y': 'CT',
'S': 'GC',
'W': 'AT',
'K': 'GT',
'M': 'AC',
'B': 'CGT',
'D': 'AGT',
'H': 'ACT',
'V': 'ACG',
'N': 'ACGT',
'-': '-'
}
for i in (ambig_base, unambig_base):
if not len(i) == 1:
raise ValueError("only one base may be passed.")
return unambig_base.upper() in iupac_translation[ambig_base.upper()]
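# Quick checks (editor's addition), derived from the translation table above:
# 'R' covers A/G and 'N' covers any base.
assert _iupac_ambiguous_equal('R', 'A') is True
assert _iupac_ambiguous_equal('R', 'C') is False
assert _iupac_ambiguous_equal('N', 'T') is True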
def predict_epitopes_from_args(args):
"""
Returns an epitope collection from the given commandline arguments.
Parameters
----------
args : argparse.Namespace
Parsed commandline arguments for Topiary
"""
mhc_model = mhc_binding_predictor_from_args(args)
variants = variant_collection_from_args(args)
gene_expression_dict = rna_gene_expression_dict_from_args(args)
transcript_expression_dict = rna_transcript_expression_dict_from_args(args)
predictor = TopiaryPredictor(
mhc_model=mhc_model,
padding_around_mutation=args.padding_around_mutation,
ic50_cutoff=args.ic50_cutoff,
percentile_cutoff=args.percentile_cutoff,
min_transcript_expression=args.rna_min_transcript_expression,
min_gene_expression=args.rna_min_gene_expression,
only_novel_epitopes=args.only_novel_epitopes,
raise_on_error=not args.skip_variant_errors)
return predictor.predict_from_variants(
variants=variants,
transcript_expression_dict=transcript_expression_dict,
gene_expression_dict=gene_expression_dict)
def set_h264_frm_ref_mode(self, mode=1, callback=None):
'''
Set frame shipping reference mode of H264 encode stream.
params:
`mode`: see docstr of meth::get_h264_frm_ref_mode
'''
params = {'mode': mode}
return self.execute_command('setH264FrmRefMode', params, callback)
def get_fun(fun):
'''
Return a dict of the last function called for all minions
'''
conn = _get_conn(ret=None)
cur = conn.cursor()
sql = '''SELECT s.id,s.jid, s.full_ret
FROM salt_returns s
JOIN ( SELECT MAX(jid) AS jid FROM salt_returns GROUP BY fun, id) max
ON s.jid = max.jid
WHERE s.fun = ?
'''
cur.execute(sql, (fun,))
data = cur.fetchall()
ret = {}
if data:
for minion, _, retval in data:
ret[minion] = salt.utils.json.loads(retval)
_close_conn(conn)
return ret
def find_pattern_on_line(lines, n, max_wrap_lines):
"""
Finds a forward/reply pattern within the given lines on text on the given
line number and returns a tuple with the type ('reply' or 'forward') and
line number of where the pattern ends. The returned line number may be
different from the given line number in case the pattern wraps over
multiple lines.
Returns (None, None) if no pattern was found.
"""
for typ, regexes in COMPILED_PATTERN_MAP.items():
for regex in regexes:
for m in range(max_wrap_lines):
match_line = join_wrapped_lines(lines[n:n+1+m])
if match_line.startswith('>'):
match_line = match_line[1:].strip()
if regex.match(match_line.strip()):
return n+m, typ
return None, None
def get_nested_schema_object(self, fully_qualified_parent_name: str,
nested_item_name: str) -> Optional['BaseSchema']:
"""
Used to generate a schema object from the given fully_qualified_parent_name
and the nested_item_name.
:param fully_qualified_parent_name: The fully qualified name of the parent.
:param nested_item_name: The nested item name.
:return: An initialized schema object of the nested item.
"""
return self.get_schema_object(
self.get_fully_qualified_name(fully_qualified_parent_name, nested_item_name))
def _set_fcoe_intf_enode_bind_type(self, v, load=False):
"""
Setter method for fcoe_intf_enode_bind_type, mapped from YANG variable /brocade_fcoe_ext_rpc/fcoe_get_interface/output/fcoe_intf_list/fcoe_intf_enode_bind_type (fcoe-binding-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_fcoe_intf_enode_bind_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_fcoe_intf_enode_bind_type() directly.
YANG Description: This indicates if the enode is statically bound to
the fcoe interface or not i.e. if statically bound
this enode's mac will always login on the same fcoe
port.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'Dynamic': {'value': 2}, u'Static': {'value': 1}, u'Unknown': {'value': 3}},), is_leaf=True, yang_name="fcoe-intf-enode-bind-type", rest_name="fcoe-intf-enode-bind-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-fcoe-ext', defining_module='brocade-fcoe-ext', yang_type='fcoe-binding-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """fcoe_intf_enode_bind_type must be of a type compatible with fcoe-binding-type""",
'defined-type': "brocade-fcoe-ext:fcoe-binding-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'Dynamic': {'value': 2}, u'Static': {'value': 1}, u'Unknown': {'value': 3}},), is_leaf=True, yang_name="fcoe-intf-enode-bind-type", rest_name="fcoe-intf-enode-bind-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-fcoe-ext', defining_module='brocade-fcoe-ext', yang_type='fcoe-binding-type', is_config=True)""",
})
self.__fcoe_intf_enode_bind_type = t
if hasattr(self, '_set'):
self._set()
def validate_blob_uri_contents(contents: bytes, blob_uri: str) -> None:
"""
Raises an exception if the sha1 hash of the contents does not match the hash found in the
blob_uri. Formula for how git calculates the hash found here:
http://alblue.bandlem.com/2011/08/git-tip-of-week-objects.html
"""
blob_path = parse.urlparse(blob_uri).path
blob_hash = blob_path.split("/")[-1]
contents_str = to_text(contents)
content_length = len(contents_str)
hashable_contents = "blob " + str(content_length) + "\0" + contents_str
hash_object = hashlib.sha1(to_bytes(text=hashable_contents))
if hash_object.hexdigest() != blob_hash:
raise ValidationError(
f"Hash of contents fetched from {blob_uri} do not match its hash: {blob_hash}."
)
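# Worked example (editor's addition) of the git blob formula used above:
# sha1("blob <length>\0<contents>") reproduces `git hash-object` for a file
# containing "hello\n".
import hashlib
contents = "hello\n"
digest = hashlib.sha1(("blob %d\0" % len(contents) + contents).encode()).hexdigest()
# digest == "ce013625030ba8dba906f756967f9e9ca394464a"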
def modify_target(self, to_state, to_outcome=None):
"""Set both to_state and to_outcome at the same time to modify transition target
:param str to_state: State id of the target state
:param int to_outcome: Outcome id of the target port
:raises exceptions.ValueError: If parameters have wrong types or the new transition is not valid
"""
if not (to_state is None and (to_outcome is not int and to_outcome is not None)):
if not isinstance(to_state, string_types):
raise ValueError("Invalid transition target port: to_state must be a string")
if not isinstance(to_outcome, int) and to_outcome is not None:
raise ValueError("Invalid transition target port: to_outcome must be of type int or None (if to_state "
"is of type str)")
old_to_state = self.to_state
old_to_outcome = self.to_outcome
self._to_state = to_state
self._to_outcome = to_outcome
valid, message = self._check_validity()
if not valid:
self._to_state = old_to_state
self._to_outcome = old_to_outcome
raise ValueError("The transition target could not be changed: {0}".format(message)) | Set both to_state and to_outcome at the same time to modify transition target
:param str to_state: State id of the target state
:param int to_outcome: Outcome id of the target port
:raises exceptions.ValueError: If parameters have wrong types or the new transition is not valid |
def set_attribute_mapping(resource_attr_a, resource_attr_b, **kwargs):
"""
Define one resource attribute from one network as being the same as
that from another network.
"""
user_id = kwargs.get('user_id')
ra_1 = get_resource_attribute(resource_attr_a)
ra_2 = get_resource_attribute(resource_attr_b)
mapping = ResourceAttrMap(resource_attr_id_a = resource_attr_a,
resource_attr_id_b = resource_attr_b,
network_a_id = ra_1.get_network().id,
network_b_id = ra_2.get_network().id )
db.DBSession.add(mapping)
db.DBSession.flush()
return mapping | Define one resource attribute from one network as being the same as
that from another network. |
def color(out_string, color='grn'):
""" Highlight string for terminal color coding.
    Purpose: We use this utility function to insert an ANSI/win32 color code
| and Bright style marker before a string, and reset the color and
| style after the string. We then return the string with these
| codes inserted.
@param out_string: the string to be colored
@type out_string: str
@param color: a string signifying which color to use. Defaults to 'grn'.
| Accepts the following colors:
| ['blk', 'blu', 'cyn', 'grn', 'mag', 'red', 'wht', 'yel']
@type color: str
@returns: the modified string, including the ANSI/win32 color codes.
@rtype: str
"""
c = {
'blk': Fore.BLACK,
'blu': Fore.BLUE,
'cyn': Fore.CYAN,
'grn': Fore.GREEN,
'mag': Fore.MAGENTA,
'red': Fore.RED,
'wht': Fore.WHITE,
'yel': Fore.YELLOW,
}
try:
init()
return (c[color] + Style.BRIGHT + out_string + Fore.RESET + Style.NORMAL)
except AttributeError:
return out_string | Highlight string for terminal color coding.
    Purpose: We use this utility function to insert an ANSI/win32 color code
| and Bright style marker before a string, and reset the color and
| style after the string. We then return the string with these
| codes inserted.
@param out_string: the string to be colored
@type out_string: str
@param color: a string signifying which color to use. Defaults to 'grn'.
| Accepts the following colors:
| ['blk', 'blu', 'cyn', 'grn', 'mag', 'red', 'wht', 'yel']
@type color: str
@returns: the modified string, including the ANSI/win32 color codes.
@rtype: str |
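A usage sketch of the same highlighting, assuming the colorama package is installed; the message text is illustrative:

from colorama import Fore, Style, init

init()  # enable ANSI handling on win32 consoles
message = "tests passed"  # hypothetical string to highlight
print(Fore.GREEN + Style.BRIGHT + message + Fore.RESET + Style.NORMAL)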
def get_endpoint_descriptor(self, dev, ep, intf, alt, config):
r"""Return an endpoint descriptor of the given device.
The object returned is required to have all the Endpoint Descriptor
fields acessible as member variables. They must be convertible (but
not required to be equal) to the int type.
The ep parameter is the endpoint logical index (not the bEndpointAddress
field) of the endpoint descriptor desired. dev, intf, alt and config are the same
values already described in the get_interface_descriptor() method.
"""
_not_implemented(self.get_endpoint_descriptor) | r"""Return an endpoint descriptor of the given device.
The object returned is required to have all the Endpoint Descriptor
    fields accessible as member variables. They must be convertible (but
not required to be equal) to the int type.
The ep parameter is the endpoint logical index (not the bEndpointAddress
field) of the endpoint descriptor desired. dev, intf, alt and config are the same
values already described in the get_interface_descriptor() method. |
def trainingDataLink(data_1, data_2, common_key, training_size=50000): # pragma: nocover
'''
Construct training data for consumption by the ActiveLearning
markPairs method from already linked datasets.
Arguments :
data_1 -- Dictionary of records from first dataset, where the keys
are record_ids and the values are dictionaries with the
keys being field names
data_2 -- Dictionary of records from second dataset, same form as
data_1
common_key -- The name of the record field that uniquely identifies
a match
training_size -- the rough limit of the number of training examples,
defaults to 50000
Warning:
Every match must be identified by the sharing of a common key.
This function assumes that if two records do not share a common key
then they are distinct records.
'''
identified_records = collections.defaultdict(lambda: [[], []])
matched_pairs = set()
distinct_pairs = set()
for record_id, record in data_1.items():
identified_records[record[common_key]][0].append(record_id)
for record_id, record in data_2.items():
identified_records[record[common_key]][1].append(record_id)
for keys_1, keys_2 in identified_records.values():
if keys_1 and keys_2:
matched_pairs.update(itertools.product(keys_1, keys_2))
keys_1 = list(data_1.keys())
keys_2 = list(data_2.keys())
random_pairs = [(keys_1[i], keys_2[j])
for i, j
in randomPairsMatch(len(data_1), len(data_2),
training_size)]
distinct_pairs = (
pair for pair in random_pairs if pair not in matched_pairs)
matched_records = [(data_1[key_1], data_2[key_2])
for key_1, key_2 in matched_pairs]
distinct_records = [(data_1[key_1], data_2[key_2])
for key_1, key_2 in distinct_pairs]
training_pairs = {'match': matched_records,
'distinct': distinct_records}
return training_pairs | Construct training data for consumption by the ActiveLearning
markPairs method from already linked datasets.
Arguments :
data_1 -- Dictionary of records from first dataset, where the keys
are record_ids and the values are dictionaries with the
keys being field names
data_2 -- Dictionary of records from second dataset, same form as
data_1
common_key -- The name of the record field that uniquely identifies
a match
training_size -- the rough limit of the number of training examples,
defaults to 50000
Warning:
Every match must be identified by the sharing of a common key.
This function assumes that if two records do not share a common key
then they are distinct records. |
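A minimal sketch of the matched-pair construction above with throwaway records; the random sampling of distinct pairs (randomPairsMatch) is left out:

import collections
import itertools

data_1 = {"a1": {"ssn": "111", "name": "Ann"}, "a2": {"ssn": "222", "name": "Bob"}}
data_2 = {"b1": {"ssn": "111", "name": "Ann B."}, "b2": {"ssn": "333", "name": "Cy"}}

identified = collections.defaultdict(lambda: [[], []])
for record_id, record in data_1.items():
    identified[record["ssn"]][0].append(record_id)
for record_id, record in data_2.items():
    identified[record["ssn"]][1].append(record_id)

matched_pairs = set()
for keys_1, keys_2 in identified.values():
    if keys_1 and keys_2:
        matched_pairs.update(itertools.product(keys_1, keys_2))
print(matched_pairs)  # {('a1', 'b1')}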
def css( self, filelist ):
"""This convenience function is only useful for html.
It adds css stylesheet(s) to the document via the <link> element."""
if isinstance( filelist, basestring ):
self.link( href=filelist, rel='stylesheet', type='text/css', media='all' )
else:
for file in filelist:
self.link( href=file, rel='stylesheet', type='text/css', media='all' ) | This convenience function is only useful for html.
It adds css stylesheet(s) to the document via the <link> element. |
def check_token(self, respond):
"""
        Check if the user's token is valid
"""
if respond.status_code == 401:
self.credential.obtain_token(config=self.config)
return False
        return True | Check if the user's token is valid |
def find_studies(self, query_dict=None, exact=False, verbose=False, **kwargs):
"""Query on study properties. See documentation for _OTIWrapper class."""
if self.use_v1:
uri = '{p}/singlePropertySearchForStudies'.format(p=self.query_prefix)
else:
uri = '{p}/find_studies'.format(p=self.query_prefix)
return self._do_query(uri,
query_dict=query_dict,
exact=exact,
verbose=verbose,
valid_keys=self.study_search_term_set,
kwargs=kwargs) | Query on study properties. See documentation for _OTIWrapper class. |
def html_path(builder, pagename=None):
"""Calculate the relative path to the Slides for pagename."""
return builder.get_relative_uri(
pagename or builder.current_docname,
os.path.join(
builder.app.config.slide_html_relative_path,
pagename or builder.current_docname,
)) | Calculate the relative path to the Slides for pagename. |
def copy_SRM_file(destination=None, config='DEFAULT'):
"""
Creates a copy of the default SRM table at the specified location.
Parameters
----------
destination : str
The save location for the SRM file. If no location specified,
saves it as 'LAtools_[config]_SRMTable.csv' in the current working
directory.
config : str
It's possible to set up different configurations with different
SRM files. This specifies the name of the configuration that you
want to copy the SRM file from. If not specified, the 'DEFAULT'
configuration is used.
"""
# find SRM file from configuration
conf = read_configuration()
src = pkgrs.resource_filename('latools', conf['srmfile'])
# work out destination path (if not given)
if destination is None:
destination = './LAtools_' + conf['config'] + '_SRMTable.csv'
if os.path.isdir(destination):
destination += 'LAtools_' + conf['config'] + '_SRMTable.csv'
copyfile(src, destination)
print(src + ' \n copied to:\n ' + destination)
return | Creates a copy of the default SRM table at the specified location.
Parameters
----------
destination : str
The save location for the SRM file. If no location specified,
saves it as 'LAtools_[config]_SRMTable.csv' in the current working
directory.
config : str
It's possible to set up different configurations with different
SRM files. This specifies the name of the configuration that you
want to copy the SRM file from. If not specified, the 'DEFAULT'
configuration is used. |
def create_label(self, label, doc=None, callback=dummy_progress_cb):
"""
Create a new label
Arguments:
doc --- first document on which the label must be added (required
for now)
"""
if doc:
clone = doc.clone() # make sure it's serializable
r = self.index.create_label(label, doc=clone)
return r | Create a new label
Arguments:
doc --- first document on which the label must be added (required
for now) |
def get_operation_full_job_id(op):
"""Returns the job-id or job-id.task-id for the operation."""
job_id = op.get_field('job-id')
task_id = op.get_field('task-id')
if task_id:
return '%s.%s' % (job_id, task_id)
else:
return job_id | Returns the job-id or job-id.task-id for the operation. |
def log_once(key):
"""Returns True if this is the "first" call for a given key.
Various logging settings can adjust the definition of "first".
Example:
>>> if log_once("some_key"):
... logger.info("Some verbose logging statement")
"""
global _last_logged
if _disabled:
return False
elif key not in _logged:
_logged.add(key)
_last_logged = time.time()
return True
elif _periodic_log and time.time() - _last_logged > 60.0:
_logged.clear()
_last_logged = time.time()
return False
else:
return False | Returns True if this is the "first" call for a given key.
Various logging settings can adjust the definition of "first".
Example:
>>> if log_once("some_key"):
... logger.info("Some verbose logging statement") |
def OnNodeSelected(self, event):
"""We have selected a node with the list control, tell the world"""
try:
node = self.sorted[event.GetIndex()]
except IndexError, err:
log.warn(_('Invalid index in node selected: %(index)s'),
index=event.GetIndex())
else:
if node is not self.selected_node:
wx.PostEvent(
self,
squaremap.SquareSelectionEvent(node=node, point=None,
map=None)
) | We have selected a node with the list control, tell the world |
def parallel_map(func, *arg_iterable, **kwargs):
"""Apply function to iterable with parallel map, and hence returns
results in order. functools.partial is used to freeze func_pre_args and
func_kwargs, meaning that the iterable argument must be the last positional
argument.
Roughly equivalent to
>>> [func(*func_pre_args, x, **func_kwargs) for x in arg_iterable]
Parameters
----------
func: function
Function to apply to list of args.
arg_iterable: iterable
argument to iterate over.
chunksize: int, optional
Perform function in batches
func_pre_args: tuple, optional
Positional arguments to place before the iterable argument in func.
func_kwargs: dict, optional
Additional keyword arguments for func.
parallel: bool, optional
To turn off parallelisation if needed.
parallel_warning: bool, optional
To turn off warning for no parallelisation if needed.
max_workers: int or None, optional
Number of processes.
If max_workers is None then concurrent.futures.ProcessPoolExecutor
defaults to using the number of processors of the machine.
N.B. If max_workers=None and running on supercomputer clusters with
multiple nodes, this may default to the number of processors on a
single node.
Returns
-------
results_list: list of function outputs
"""
chunksize = kwargs.pop('chunksize', 1)
func_pre_args = kwargs.pop('func_pre_args', ())
func_kwargs = kwargs.pop('func_kwargs', {})
max_workers = kwargs.pop('max_workers', None)
parallel = kwargs.pop('parallel', True)
parallel_warning = kwargs.pop('parallel_warning', True)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
func_to_map = functools.partial(func, *func_pre_args, **func_kwargs)
if parallel:
pool = concurrent.futures.ProcessPoolExecutor(max_workers=max_workers)
return list(pool.map(func_to_map, *arg_iterable, chunksize=chunksize))
else:
if parallel_warning:
warnings.warn(('parallel_map has parallel=False - turn on '
'parallelisation for faster processing'),
UserWarning)
return list(map(func_to_map, *arg_iterable)) | Apply function to iterable with parallel map, and hence returns
results in order. functools.partial is used to freeze func_pre_args and
func_kwargs, meaning that the iterable argument must be the last positional
argument.
Roughly equivalent to
>>> [func(*func_pre_args, x, **func_kwargs) for x in arg_iterable]
Parameters
----------
func: function
Function to apply to list of args.
arg_iterable: iterable
argument to iterate over.
chunksize: int, optional
Perform function in batches
func_pre_args: tuple, optional
Positional arguments to place before the iterable argument in func.
func_kwargs: dict, optional
Additional keyword arguments for func.
parallel: bool, optional
To turn off parallelisation if needed.
parallel_warning: bool, optional
To turn off warning for no parallelisation if needed.
max_workers: int or None, optional
Number of processes.
If max_workers is None then concurrent.futures.ProcessPoolExecutor
defaults to using the number of processors of the machine.
N.B. If max_workers=None and running on supercomputer clusters with
multiple nodes, this may default to the number of processors on a
single node.
Returns
-------
results_list: list of function outputs |
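A self-contained sketch of the same pattern (leading arguments frozen with functools.partial, then mapped with a process pool); the worker and inputs are made up:

import concurrent.futures
import functools

def scale(factor, x):
    return factor * x

if __name__ == "__main__":
    func_to_map = functools.partial(scale, 10)  # freeze the leading argument
    with concurrent.futures.ProcessPoolExecutor(max_workers=2) as pool:
        results = list(pool.map(func_to_map, range(5), chunksize=1))
    print(results)  # [0, 10, 20, 30, 40]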
def on_sdl_keydown ( self, event ):
"press ESCAPE to quit the application"
key = event.key.keysym.sym
if key == SDLK_ESCAPE:
self.running = False | press ESCAPE to quit the application |
def folderitem(self, obj, item, index):
"""Augment folder listing item
"""
url = item.get("url")
title = item.get("Title")
creator = obj.Creator()
item["replace"]["Title"] = get_link(url, value=title)
item["created"] = self.localize_date(obj.created())
item["getType"] = _(obj.getType()[0])
item["creator"] = ""
if creator:
props = api.get_user_properties(creator)
name = props.get("fullname", creator)
item["creator"] = name
return item | Augment folder listing item |
def clear(self, payload):
"""Clear queue from any `done` or `failed` entries.
        The log will be rotated once. Otherwise we would lose all logs from
        those finished processes.
"""
self.logger.rotate(self.queue)
self.queue.clear()
self.logger.write(self.queue)
answer = {'message': 'Finished entries have been removed.', 'status': 'success'}
return answer | Clear queue from any `done` or `failed` entries.
        The log will be rotated once. Otherwise we would lose all logs from
        those finished processes. |
def get_all_items_of_credit_note(self, credit_note_id):
"""
Get all items of credit note
This will iterate over all pages until it gets all elements.
        So if the rate limit is exceeded it will throw an Exception and you will get nothing
:param credit_note_id: the credit note id
:return: list
"""
return self._iterate_through_pages(
get_function=self.get_items_of_credit_note_per_page,
resource=CREDIT_NOTE_ITEMS,
**{'credit_note_id': credit_note_id}
) | Get all items of credit note
This will iterate over all pages until it gets all elements.
        So if the rate limit is exceeded it will throw an Exception and you will get nothing
:param credit_note_id: the credit note id
:return: list |
def findAllBycolumn(self, target):
""" Returns an array of columns in the region (defined by the raster), each
column containing all matches in that column for the target pattern. """
column_matches = []
for column_index in range(self._raster[1]):
column = self.getRow(column_index)
            # append to the list; indexing into an initially empty list would raise IndexError
            column_matches.append(column.findAll(target))
return column_matches | Returns an array of columns in the region (defined by the raster), each
column containing all matches in that column for the target pattern. |
def enable_branching_model(self, project, repository):
"""
Enable branching model by setting it with default configuration
:param project:
:param repository:
:return:
"""
default_model_data = {'development': {'refId': None, 'useDefault': True},
'types': [{'displayName': 'Bugfix',
'enabled': True,
'id': 'BUGFIX',
'prefix': 'bugfix/'},
{'displayName': 'Feature',
'enabled': True,
'id': 'FEATURE',
'prefix': 'feature/'},
{'displayName': 'Hotfix',
'enabled': True,
'id': 'HOTFIX',
'prefix': 'hotfix/'},
{'displayName': 'Release',
'enabled': True,
'id': 'RELEASE',
'prefix': 'release/'}]}
return self.set_branching_model(project,
repository,
default_model_data) | Enable branching model by setting it with default configuration
:param project:
:param repository:
:return: |
def nla_put_nested(msg, attrtype, nested):
"""Add nested attributes to Netlink message.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/attr.c#L772
Takes the attributes found in the `nested` message and appends them to the message `msg` nested in a container of
the type `attrtype`. The `nested` message may not have a family specific header.
Positional arguments:
msg -- Netlink message (nl_msg class instance).
attrtype -- attribute type (integer).
nested -- message containing attributes to be nested (nl_msg class instance).
Returns:
0 on success or a negative error code.
"""
_LOGGER.debug('msg 0x%x: attr <> %d: adding msg 0x%x as nested attribute', id(msg), attrtype, id(nested))
return nla_put(msg, attrtype, nlmsg_datalen(nested.nm_nlh), nlmsg_data(nested.nm_nlh)) | Add nested attributes to Netlink message.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/attr.c#L772
Takes the attributes found in the `nested` message and appends them to the message `msg` nested in a container of
the type `attrtype`. The `nested` message may not have a family specific header.
Positional arguments:
msg -- Netlink message (nl_msg class instance).
attrtype -- attribute type (integer).
nested -- message containing attributes to be nested (nl_msg class instance).
Returns:
0 on success or a negative error code. |
def export_html(html, filename, image_tag = None, inline = True):
""" Export the contents of the ConsoleWidget as HTML.
Parameters:
-----------
html : str,
A utf-8 encoded Python string containing the Qt HTML to export.
filename : str
The file to be saved.
image_tag : callable, optional (default None)
Used to convert images. See ``default_image_tag()`` for information.
inline : bool, optional [default True]
If True, include images as inline PNGs. Otherwise, include them as
links to external PNG files, mimicking web browsers' "Web Page,
Complete" behavior.
"""
if image_tag is None:
image_tag = default_image_tag
else:
image_tag = ensure_utf8(image_tag)
if inline:
path = None
else:
root,ext = os.path.splitext(filename)
path = root + "_files"
if os.path.isfile(path):
raise OSError("%s exists, but is not a directory." % path)
with open(filename, 'w') as f:
html = fix_html(html)
f.write(IMG_RE.sub(lambda x: image_tag(x, path = path, format = "png"),
html)) | Export the contents of the ConsoleWidget as HTML.
Parameters:
-----------
html : str,
A utf-8 encoded Python string containing the Qt HTML to export.
filename : str
The file to be saved.
image_tag : callable, optional (default None)
Used to convert images. See ``default_image_tag()`` for information.
inline : bool, optional [default True]
If True, include images as inline PNGs. Otherwise, include them as
links to external PNG files, mimicking web browsers' "Web Page,
Complete" behavior. |
def cancel_all_linking(self):
"""Cancel all linking"""
self.logger.info("Cancel_all_linking for device %s", self.device_id)
self.hub.direct_command(self.device_id, '02', '65') | Cancel all linking |
def switch_from_external_to_main_wf(self):
"""
Main workflow switcher.
This method recreates main workflow from `main wf` dict which
        was set by the external workflow switcher previously.
"""
# in external assigned as True in switch_to_external_wf.
# external_wf should finish EndEvent and it's name should be
# also EndEvent for switching again to main wf.
if self.wf_state['in_external'] and self.current.task_type == 'EndEvent' and \
self.current.task_name == 'EndEvent':
# main_wf information was copied in switch_to_external_wf and it takes this information.
main_wf = self.wf_state['main_wf']
# main_wf_name is assigned to current workflow name again.
self.current.workflow_name = main_wf['name']
# For external WF, check permission and authentication. But after cleaning current task.
self._clear_current_task()
# check for auth and perm. current task cleared, do against new workflow_name
self.check_for_authentication()
self.check_for_permission()
# WF knowledge is taken for main wf.
self.workflow_spec = self.get_worfklow_spec()
        # WF instance is resumed where it left off.
self.workflow = self.deserialize_workflow(main_wf['step'])
# Current WF is this WF instance.
self.current.workflow = self.workflow
# in_external is assigned as False
self.wf_state['in_external'] = False
# finished is assigned as False, because still in progress.
self.wf_state['finished'] = False
# pool info of main_wf is assigned.
self.wf_state['pool'] = main_wf['pool']
self.current.pool = self.wf_state['pool']
# With main_wf is executed.
self.run() | Main workflow switcher.
This method recreates main workflow from `main wf` dict which
        was set by the external workflow switcher previously. |
def which(name,
env_path=ENV_PATH,
env_path_ext=ENV_PATHEXT,
is_executable_fnc=isexec,
path_join_fnc=os.path.join,
os_name=os.name):
'''
Get command absolute path.
:param name: name of executable command
:type name: str
:param env_path: OS environment executable paths, defaults to autodetected
:type env_path: list of str
:param is_executable_fnc: callable will be used to detect if path is
executable, defaults to `isexec`
:type is_executable_fnc: Callable
:param path_join_fnc: callable will be used to join path components
:type path_join_fnc: Callable
:param os_name: os name, defaults to os.name
:type os_name: str
:return: absolute path
:rtype: str or None
'''
for path in env_path:
for suffix in env_path_ext:
exe_file = path_join_fnc(path, name) + suffix
if is_executable_fnc(exe_file):
return exe_file
return None | Get command absolute path.
:param name: name of executable command
:type name: str
:param env_path: OS environment executable paths, defaults to autodetected
:type env_path: list of str
:param is_executable_fnc: callable will be used to detect if path is
executable, defaults to `isexec`
:type is_executable_fnc: Callable
:param path_join_fnc: callable will be used to join path components
:type path_join_fnc: Callable
:param os_name: os name, defaults to os.name
:type os_name: str
:return: absolute path
:rtype: str or None |
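For comparison, a minimal self-contained lookup against the live environment (POSIX-flavoured defaults; the command name is illustrative):

import os

def simple_which(name, suffixes=("",)):
    for directory in os.environ.get("PATH", "").split(os.pathsep):
        for suffix in suffixes:
            candidate = os.path.join(directory, name) + suffix
            if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
                return candidate
    return None

print(simple_which("python3"))  # e.g. '/usr/bin/python3', or None if not found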
def fit(self, X, y):
"""Fit KimCNNClassifier according to X, y
Parameters
----------
X : list of string
each item is a raw text
y : list of string
each item is a label
"""
####################
# Data Loader
####################
word_vector_transformer = WordVectorTransformer(padding='max')
X = word_vector_transformer.fit_transform(X)
X = LongTensor(X)
self.word_vector_transformer = word_vector_transformer
y_transformer = LabelEncoder()
y = y_transformer.fit_transform(y)
y = torch.from_numpy(y)
self.y_transformer = y_transformer
dataset = CategorizedDataset(X, y)
dataloader = DataLoader(dataset,
batch_size=self.batch_size,
shuffle=True,
num_workers=4)
####################
# Model
####################
KERNEL_SIZES = self.kernel_sizes
NUM_KERNEL = self.num_kernel
EMBEDDING_DIM = self.embedding_dim
model = TextCNN(
vocab_size=word_vector_transformer.get_vocab_size(),
embedding_dim=EMBEDDING_DIM,
output_size=len(self.y_transformer.classes_),
kernel_sizes=KERNEL_SIZES,
num_kernel=NUM_KERNEL)
if USE_CUDA:
model = model.cuda()
####################
# Train
####################
EPOCH = self.epoch
LR = self.lr
loss_function = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=LR)
for epoch in range(EPOCH):
losses = []
for i, data in enumerate(dataloader):
X, y = data
X, y = Variable(X), Variable(y)
optimizer.zero_grad()
model.train()
output = model(X)
loss = loss_function(output, y)
losses.append(loss.data.tolist()[0])
loss.backward()
optimizer.step()
if i % 100 == 0:
print("[%d/%d] mean_loss : %0.2f" % (
epoch, EPOCH, np.mean(losses)))
losses = []
self.model = model | Fit KimCNNClassifier according to X, y
Parameters
----------
X : list of string
each item is a raw text
y : list of string
each item is a label |
def dispatch(self):
"""Command-line dispatch."""
parser = argparse.ArgumentParser(description='Run an Inbox server.')
parser.add_argument('addr', metavar='addr', type=str, help='addr to bind to')
parser.add_argument('port', metavar='port', type=int, help='port to bind to')
args = parser.parse_args()
self.serve(port=args.port, address=args.addr) | Command-line dispatch. |
def draw(self):
"""
Draw the elbow curve for the specified scores and values of K.
"""
# Plot the silhouette score against k
self.ax.plot(self.k_values_, self.k_scores_, marker="D")
        if self.locate_elbow and self.elbow_value_ is not None:
elbow_label = "$elbow\ at\ k={}, score={:0.3f}$".format(self.elbow_value_, self.elbow_score_)
self.ax.axvline(self.elbow_value_, c=LINE_COLOR, linestyle="--", label=elbow_label)
# If we're going to plot the timings, create a twinx axis
if self.timings:
self.axes = [self.ax, self.ax.twinx()]
self.axes[1].plot(
self.k_values_, self.k_timers_, label="fit time",
c='g', marker="o", linestyle="--", alpha=0.75,
)
return self.ax | Draw the elbow curve for the specified scores and values of K. |
def print(*a):
""" print just one that returns what you give it instead of None """
try:
_print(*a)
return a[0] if len(a) == 1 else a
except:
_print(*a) | print just one that returns what you give it instead of None |
def _get (self, timeout):
"""Non thread-safe utility function of self.get() doing the real
work."""
if timeout is None:
while self._empty():
self.not_empty.wait()
else:
if timeout < 0:
raise ValueError("'timeout' must be a positive number")
endtime = _time() + timeout
while self._empty():
remaining = endtime - _time()
if remaining <= 0.0:
raise Empty()
self.not_empty.wait(remaining)
self.in_progress += 1
return self.queue.popleft() | Non thread-safe utility function of self.get() doing the real
work. |
def snapshot(self, name):
"""
Create a snapshot of the volume.
Args:
name: string - a human-readable name for the snapshot
"""
return self.get_data(
"volumes/%s/snapshots/" % self.id,
type=POST,
params={"name": name}
) | Create a snapshot of the volume.
Args:
name: string - a human-readable name for the snapshot |
def cmp(self, other):
"""*Note: checks Range.start() only*
Key: self = [], other = {}
* [ {----]----} => -1
* {---[---} ] => 1
* [---] {---} => -1
* [---] same as {---} => 0
* [--{-}--] => -1
"""
if isinstance(other, Range):
# other has tz, I dont, so replace the tz
start = self.start.replace(tzinfo=other.start.tz) if other.start.tz and self.start.tz is None else self.start
end = self.end.replace(tzinfo=other.end.tz) if other.end.tz and self.end.tz is None else self.end
if start == other.start and end == other.end:
return 0
elif start < other.start:
return -1
else:
return 1
elif isinstance(other, Date):
if other.tz and self.start.tz is None:
                return 0 if other == self.start.replace(tzinfo=other.tz) else -1 if other > self.start.replace(tzinfo=other.tz) else 1
return 0 if other == self.start else -1 if other > self.start else 1
else:
return self.cmp(Range(other, tz=self.start.tz)) | *Note: checks Range.start() only*
Key: self = [], other = {}
* [ {----]----} => -1
* {---[---} ] => 1
* [---] {---} => -1
* [---] same as {---} => 0
* [--{-}--] => -1 |
def cli(wio, send):
'''
Sends a UDP command to the wio device.
\b
DOES:
Support "VERSION", "SCAN", "Blank?", "DEBUG", "ENDEBUG: 1", "ENDEBUG: 0"
"APCFG: AP\\tPWDs\\tTOKENs\\tSNs\\tSERVER_Domains\\tXSERVER_Domain\\t\\r\\n",
Note:
        1. Ensure your device is in Configure Mode.
2. Change your computer network to Wio's AP.
\b
EXAMPLE:
        wio udp --send [command], send UDP command
'''
command = send
click.echo("UDP command: {}".format(command))
result = udp.common_send(command)
if result is None:
return debug_error()
else:
click.echo(result) | Sends a UDP command to the wio device.
\b
DOES:
Support "VERSION", "SCAN", "Blank?", "DEBUG", "ENDEBUG: 1", "ENDEBUG: 0"
"APCFG: AP\\tPWDs\\tTOKENs\\tSNs\\tSERVER_Domains\\tXSERVER_Domain\\t\\r\\n",
Note:
        1. Ensure your device is in Configure Mode.
2. Change your computer network to Wio's AP.
\b
EXAMPLE:
        wio udp --send [command], send UDP command |
def code(self):
"""
        Removes duplicate values in the auto and error lists.
parameters.
"""
def uniq(seq):
"""
Utility function that removes duplicate.
"""
seen = set()
seen_add = seen.add
# This way removes duplicates while preserving the order.
return [x for x in seq if x not in seen and not seen_add(x)]
data_set = uniq(i for i in self.autos if i is not None)
error_list = uniq(i for i in self.errors if i is not None)
if error_list:
return "\n".join(error_list)
return "\n".join(data_set) | Removes duplicates values in auto and error list.
parameters. |
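The order-preserving dedup used by uniq() above, shown standalone with throwaway data:

def uniq(seq):
    seen = set()
    seen_add = seen.add
    # keeps the first occurrence of each item, unlike set(), which loses order
    return [x for x in seq if x not in seen and not seen_add(x)]

print(uniq(["b", "a", "b", "c", "a"]))  # ['b', 'a', 'c']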
def get_name(self, tag):
'''Extract and return a representative "name" from a tag.
Override as necessary. get_name's output can be controlled through
keyword arguments that are provided when initializing a
TagProcessor. For instance, a member of a class or namespace can have
its parent scope included in the name by passing
include_parent_scopes=True to __init__().
Args:
tag: A BeautifulSoup Tag that satisfies match_criterion.
Returns:
A string that would be appropriate to use as an entry name in a
Zeal database.
'''
name = tag.findChild('name').contents[0]
if self.include_parent_scopes:
# Include parent scope in returned name
parent_tag = tag.findParent()
if parent_tag.get('kind') in ['class', 'struct', 'namespace']:
name = parent_tag.findChild('name').contents[0] + '::' + name
return name | Extract and return a representative "name" from a tag.
Override as necessary. get_name's output can be controlled through
keyword arguments that are provided when initializing a
TagProcessor. For instance, a member of a class or namespace can have
its parent scope included in the name by passing
include_parent_scopes=True to __init__().
Args:
tag: A BeautifulSoup Tag that satisfies match_criterion.
Returns:
A string that would be appropriate to use as an entry name in a
Zeal database. |
def keep_negative_mask(X, y, model_generator, method_name, num_fcounts=11):
""" Keep Negative (mask)
xlabel = "Max fraction of features kept"
ylabel = "Negative mean model output"
transform = "negate"
sort_order = 5
"""
return __run_measure(measures.keep_mask, X, y, model_generator, method_name, -1, num_fcounts, __mean_pred) | Keep Negative (mask)
xlabel = "Max fraction of features kept"
ylabel = "Negative mean model output"
transform = "negate"
sort_order = 5 |
def query_cached_package_list(self):
"""Return list of pickled package names from PYPI"""
if self.debug:
self.logger.debug("DEBUG: reading pickled cache file")
return cPickle.load(open(self.pkg_cache_file, "r")) | Return list of pickled package names from PYPI |
def check_parallel_run(self): # pragma: no cover, not with unit tests...
"""Check (in pid file) if there isn't already a daemon running.
If yes and do_replace: kill it.
Keep in self.fpid the File object to the pid file. Will be used by writepid.
:return: None
"""
# TODO: other daemon run on nt
if os.name == 'nt': # pragma: no cover, not currently tested with Windows...
logger.warning("The parallel daemon check is not available on Windows")
self.__open_pidfile(write=True)
return
# First open the pid file in open mode
self.__open_pidfile()
try:
pid_var = self.fpid.readline().strip(' \r\n')
if pid_var:
pid = int(pid_var)
logger.info("Found an existing pid (%s): '%s'", self.pid_filename, pid_var)
else:
logger.debug("Not found an existing pid: %s", self.pid_filename)
return
except (IOError, ValueError) as err:
logger.warning("PID file is empty or has an invalid content: %s", self.pid_filename)
return
if pid == os.getpid():
self.pid = pid
return
try:
logger.debug("Testing if the process is running: '%s'", pid)
os.kill(pid, 0)
except OSError:
# consider any exception as a stale pid file.
# this includes :
# * PermissionError when a process with same pid exists but is executed by another user
# * ProcessLookupError: [Errno 3] No such process
self.pre_log.append(("DEBUG", "No former instance to replace"))
logger.info("A stale pid file exists, reusing the same file")
return
if not self.do_replace:
self.exit_on_error("A valid pid file still exists (pid=%s) and "
"I am not allowed to replace. Exiting!" % pid, exit_code=3)
self.pre_log.append(("DEBUG", "Replacing former instance: %d" % pid))
try:
pgid = os.getpgid(pid)
# SIGQUIT to terminate and dump core
os.killpg(pgid, signal.SIGQUIT)
except os.error as err:
if err.errno != errno.ESRCH:
raise
self.fpid.close()
# TODO: give some time to wait that previous instance finishes?
time.sleep(1)
# we must also reopen the pid file in write mode
# because the previous instance should have deleted it!!
self.__open_pidfile(write=True) | Check (in pid file) if there isn't already a daemon running.
If yes and do_replace: kill it.
Keep in self.fpid the File object to the pid file. Will be used by writepid.
:return: None |
def size(ctx, dataset, kwargs):
"Show dataset size"
kwargs = parse_kwargs(kwargs)
(print)(data(dataset, **ctx.obj).get(**kwargs).complete_set.size) | Show dataset size |
def all(self):
"""
Return all testcases
:return:
"""
tests = list()
for testclass in self.classes:
tests.extend(self.classes[testclass].cases)
return tests | Return all testcases
:return: |
def _build_zmat(self, construction_table):
"""Create the Zmatrix from a construction table.
Args:
Construction table (pd.DataFrame):
Returns:
Zmat: A new instance of :class:`Zmat`.
"""
c_table = construction_table
default_cols = ['atom', 'b', 'bond', 'a', 'angle', 'd', 'dihedral']
optional_cols = list(set(self.columns) - {'atom', 'x', 'y', 'z'})
zmat_frame = pd.DataFrame(columns=default_cols + optional_cols,
dtype='float', index=c_table.index)
zmat_frame.loc[:, optional_cols] = self.loc[c_table.index,
optional_cols]
zmat_frame.loc[:, 'atom'] = self.loc[c_table.index, 'atom']
zmat_frame.loc[:, ['b', 'a', 'd']] = c_table
zmat_values = self._calculate_zmat_values(c_table)
zmat_frame.loc[:, ['bond', 'angle', 'dihedral']] = zmat_values
zmatrix = Zmat(zmat_frame, metadata=self.metadata,
_metadata={'last_valid_cartesian': self.copy()})
return zmatrix | Create the Zmatrix from a construction table.
Args:
Construction table (pd.DataFrame):
Returns:
Zmat: A new instance of :class:`Zmat`. |
def fetch(self, **kwargs) -> 'FetchContextManager':
'''
Sends the request to the server and reads the response.
You may use this method either with plain synchronous Session or
        AsyncSession. Both the following patterns are valid:
.. code-block:: python3
from ai.backend.client.request import Request
from ai.backend.client.session import Session
with Session() as sess:
rqst = Request(sess, 'GET', ...)
with rqst.fetch() as resp:
print(resp.text())
.. code-block:: python3
from ai.backend.client.request import Request
from ai.backend.client.session import AsyncSession
async with AsyncSession() as sess:
rqst = Request(sess, 'GET', ...)
async with rqst.fetch() as resp:
print(await resp.text())
'''
assert self.method in self._allowed_methods, \
'Disallowed HTTP method: {}'.format(self.method)
self.date = datetime.now(tzutc())
self.headers['Date'] = self.date.isoformat()
if self.content_type is not None:
self.headers['Content-Type'] = self.content_type
full_url = self._build_url()
self._sign(full_url.relative())
rqst_ctx = self.session.aiohttp_session.request(
self.method,
str(full_url),
data=self._pack_content(),
timeout=_default_request_timeout,
headers=self.headers)
return FetchContextManager(self.session, rqst_ctx, **kwargs) | Sends the request to the server and reads the response.
You may use this method either with plain synchronous Session or
        AsyncSession. Both the following patterns are valid:
.. code-block:: python3
from ai.backend.client.request import Request
from ai.backend.client.session import Session
with Session() as sess:
rqst = Request(sess, 'GET', ...)
with rqst.fetch() as resp:
print(resp.text())
.. code-block:: python3
from ai.backend.client.request import Request
from ai.backend.client.session import AsyncSession
async with AsyncSession() as sess:
rqst = Request(sess, 'GET', ...)
async with rqst.fetch() as resp:
print(await resp.text()) |
def get_geo_info(filename, band=1):
''' Gets information from a Raster data set
'''
sourceds = gdal.Open(filename, GA_ReadOnly)
ndv = sourceds.GetRasterBand(band).GetNoDataValue()
xsize = sourceds.RasterXSize
ysize = sourceds.RasterYSize
geot = sourceds.GetGeoTransform()
projection = osr.SpatialReference()
projection.ImportFromWkt(sourceds.GetProjectionRef())
datatype = sourceds.GetRasterBand(band).DataType
datatype = gdal.GetDataTypeName(datatype)
return ndv, xsize, ysize, geot, projection, datatype | Gets information from a Raster data set |
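A hedged usage sketch, assuming the GDAL Python bindings are installed and that 'example.tif' is a hypothetical single-band raster on disk:

from osgeo import gdal, osr
from osgeo.gdalconst import GA_ReadOnly

ds = gdal.Open("example.tif", GA_ReadOnly)  # hypothetical file name
band = ds.GetRasterBand(1)
print("no-data value:", band.GetNoDataValue())
print("size:", ds.RasterXSize, "x", ds.RasterYSize)
print("geotransform:", ds.GetGeoTransform())
srs = osr.SpatialReference()
srs.ImportFromWkt(ds.GetProjectionRef())
print("datatype:", gdal.GetDataTypeName(band.DataType))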
def face_adjacency_radius(self):
"""
The approximate radius of a cylinder that fits inside adjacent faces.
Returns
------------
radii : (len(self.face_adjacency),) float
Approximate radius formed by triangle pair
"""
radii, span = graph.face_adjacency_radius(mesh=self)
self._cache['face_adjacency_span'] = span
return radii | The approximate radius of a cylinder that fits inside adjacent faces.
Returns
------------
radii : (len(self.face_adjacency),) float
Approximate radius formed by triangle pair |
def sibling(self, offs=1):
'''
Return sibling node by relative offset from self.
'''
indx = self.pindex + offs
if indx < 0:
return None
if indx >= len(self.parent.kids):
return None
return self.parent.kids[indx] | Return sibling node by relative offset from self. |
def image(
self,
url,
title="",
width=800):
"""*create MMD image link*
**Key Arguments:**
- ``title`` -- the title for the image
- ``url`` -- the image URL
- ``width`` -- the width in pixels of the image. Default *800*
**Return:**
- ``imageLink`` -- the MMD image link
**Usage:**
To create a MMD image link:
.. code-block:: python
imageLink = md.image(
"http://www.thespacedoctor.co.uk/images/thespacedoctor_icon_white_circle.png", "thespacedoctor icon", 400)
print imageLink
# OUTPUT:
# ![thespacedoctor icon][thespacedoctor icon 20170228t130146.472262]
#
# [thespacedoctor icon 20170228t130146.472262]: http://www.thespacedoctor.co.uk/images/thespacedoctor_icon_white_circle.png "thespacedoctor icon" width=400px
#
"""
title = title.strip()
caption = title
now = datetime.now()
figId = now.strftime("%Y%m%dt%H%M%S.%f")
if len(title):
figId = "%(title)s %(figId)s" % locals()
imageLink = """\n\n![%(caption)s][%(figId)s]
[%(figId)s]: %(url)s "%(title)s" width=%(width)spx\n\n""" % locals()
return imageLink | *create MMD image link*
**Key Arguments:**
- ``title`` -- the title for the image
- ``url`` -- the image URL
- ``width`` -- the width in pixels of the image. Default *800*
**Return:**
- ``imageLink`` -- the MMD image link
**Usage:**
To create a MMD image link:
.. code-block:: python
imageLink = md.image(
"http://www.thespacedoctor.co.uk/images/thespacedoctor_icon_white_circle.png", "thespacedoctor icon", 400)
print imageLink
# OUTPUT:
# ![thespacedoctor icon][thespacedoctor icon 20170228t130146.472262]
#
# [thespacedoctor icon 20170228t130146.472262]: http://www.thespacedoctor.co.uk/images/thespacedoctor_icon_white_circle.png "thespacedoctor icon" width=400px
# |
def create_identity(user_id, curve_name):
"""Create GPG identity for hardware device."""
result = interface.Identity(identity_str='gpg://', curve_name=curve_name)
result.identity_dict['host'] = user_id
return result | Create GPG identity for hardware device. |
def compare_mean_curves(calc_ref, calc, nsigma=3):
"""
Compare the hazard curves coming from two different calculations.
"""
dstore_ref = datastore.read(calc_ref)
dstore = datastore.read(calc)
imtls = dstore_ref['oqparam'].imtls
if dstore['oqparam'].imtls != imtls:
raise RuntimeError('The IMTs and levels are different between '
'calculation %d and %d' % (calc_ref, calc))
sitecol_ref = dstore_ref['sitecol']
sitecol = dstore['sitecol']
site_id_ref = {(lon, lat): sid for sid, lon, lat in zip(
sitecol_ref.sids, sitecol_ref.lons, sitecol_ref.lats)}
site_id = {(lon, lat): sid for sid, lon, lat in zip(
sitecol.sids, sitecol.lons, sitecol.lats)}
common = set(site_id_ref) & set(site_id)
if not common:
raise RuntimeError('There are no common sites between calculation '
'%d and %d' % (calc_ref, calc))
pmap_ref = PmapGetter(dstore_ref, sids=[site_id_ref[lonlat]
for lonlat in common]).get_mean()
pmap = PmapGetter(dstore, sids=[site_id[lonlat]
for lonlat in common]).get_mean()
for lonlat in common:
mean, std = pmap[site_id[lonlat]].array.T # shape (2, N)
mean_ref, std_ref = pmap_ref[site_id_ref[lonlat]].array.T
err = numpy.sqrt(std**2 + std_ref**2)
for imt in imtls:
sl = imtls(imt)
ok = (numpy.abs(mean[sl] - mean_ref[sl]) < nsigma * err[sl]).all()
if not ok:
md = (numpy.abs(mean[sl] - mean_ref[sl])).max()
plt.title('point=%s, imt=%s, maxdiff=%.2e' % (lonlat, imt, md))
plt.loglog(imtls[imt], mean_ref[sl] + std_ref[sl],
label=str(calc_ref), color='black')
plt.loglog(imtls[imt], mean_ref[sl] - std_ref[sl],
color='black')
plt.loglog(imtls[imt], mean[sl] + std[sl], label=str(calc),
color='red')
plt.loglog(imtls[imt], mean[sl] - std[sl], color='red')
plt.legend()
plt.show() | Compare the hazard curves coming from two different calculations. |
def create_from_request_pdu(pdu):
""" Create instance from request PDU.
:param pdu: A response PDU.
"""
_, address, value = struct.unpack('>BHH', pdu)
value = 1 if value == 0xFF00 else value
instance = WriteSingleCoil()
instance.address = address
instance.value = value
return instance | Create instance from request PDU.
    :param pdu: A request PDU. |
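A worked round-trip for the PDU layout assumed above ('>BHH' is function code, address, value; in Modbus, the value 0xFF00 means switch the coil on). Only struct is needed:

import struct

# Build a Write Single Coil request: function code 0x05, coil address 100, coil ON.
pdu = struct.pack(">BHH", 0x05, 100, 0xFF00)

function_code, address, raw_value = struct.unpack(">BHH", pdu)
value = 1 if raw_value == 0xFF00 else raw_value
print(function_code, address, value)  # 5 100 1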
def execute_wait(self, cmd, walltime=2, envs={}):
''' Synchronously execute a commandline string on the shell.
Args:
- cmd (string) : Commandline string to execute
- walltime (int) : walltime in seconds
Kwargs:
- envs (dict) : Dictionary of env variables
Returns:
- retcode : Return code from the execution, -1 on fail
- stdout : stdout string
- stderr : stderr string
Raises:
None.
'''
# Execute the command
stdin, stdout, stderr = self.ssh_client.exec_command(
self.prepend_envs(cmd, envs), bufsize=-1, timeout=walltime
)
# Block on exit status from the command
exit_status = stdout.channel.recv_exit_status()
return exit_status, stdout.read().decode("utf-8"), stderr.read().decode("utf-8") | Synchronously execute a commandline string on the shell.
Args:
- cmd (string) : Commandline string to execute
- walltime (int) : walltime in seconds
Kwargs:
- envs (dict) : Dictionary of env variables
Returns:
- retcode : Return code from the execution, -1 on fail
- stdout : stdout string
- stderr : stderr string
Raises:
None. |
def stop_sync(self):
"""Safely stop this BLED112 instance without leaving it in a weird state"""
# Stop to scan
if self.scanning:
self.stop_scan()
# Disconnect all connected devices
for connection_id in list(self.connections.get_connections()):
self.disconnect_sync(connection_id)
# Stop the baBLE interface
self.bable.stop()
# Stop the connection manager
self.connections.stop()
self.stopped = True | Safely stop this BLED112 instance without leaving it in a weird state |
def personByEmailAddress(self, address):
"""
Retrieve the L{Person} item for the given email address
(or return None if no such person exists)
        @type address: C{unicode}
"""
email = self.store.findUnique(EmailAddress,
EmailAddress.address == address,
default=None)
if email is not None:
return email.person | Retrieve the L{Person} item for the given email address
(or return None if no such person exists)
        @type address: C{unicode} |
async def write_and_drain(self, data: bytes, timeout: NumType = None) -> None:
"""
Format a command and send it to the server.
"""
if self._stream_writer is None:
raise SMTPServerDisconnected("Client not connected")
self._stream_writer.write(data)
async with self._io_lock:
await self._drain_writer(timeout) | Format a command and send it to the server. |
def get_requests_request_name(self, request_name):
"""GetRequestsRequestName.
[Preview API] Get a symbol request by request name.
:param str request_name:
:rtype: :class:`<Request> <azure.devops.v5_0.symbol.models.Request>`
"""
query_parameters = {}
if request_name is not None:
query_parameters['requestName'] = self._serialize.query('request_name', request_name, 'str')
response = self._send(http_method='GET',
location_id='ebc09fe3-1b20-4667-abc5-f2b60fe8de52',
version='5.0-preview.1',
query_parameters=query_parameters)
return self._deserialize('Request', response) | GetRequestsRequestName.
[Preview API] Get a symbol request by request name.
:param str request_name:
:rtype: :class:`<Request> <azure.devops.v5_0.symbol.models.Request>` |
def logSumExp(A, B, out=None):
""" returns log(exp(A) + exp(B)). A and B are numpy arrays"""
if out is None:
out = numpy.zeros(A.shape)
indicator1 = A >= B
indicator2 = numpy.logical_not(indicator1)
out[indicator1] = A[indicator1] + numpy.log1p(numpy.exp(B[indicator1]-A[indicator1]))
out[indicator2] = B[indicator2] + numpy.log1p(numpy.exp(A[indicator2]-B[indicator2]))
return out | returns log(exp(A) + exp(B)). A and B are numpy arrays |
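A quick numeric check of the identity and of why the rearranged form is used: the naive log(exp(A) + exp(B)) overflows for large inputs, while max(A, B) + log1p(exp(-|A - B|)) does not. The values below are arbitrary:

import numpy

A = numpy.array([0.0, 1000.0])
B = numpy.array([1.0, 999.0])

naive = numpy.log(numpy.exp(A) + numpy.exp(B))  # overflows to inf in the 2nd slot
stable = numpy.maximum(A, B) + numpy.log1p(numpy.exp(-numpy.abs(A - B)))
print(naive)   # [1.31326169        inf]
print(stable)  # [   1.31326169 1000.31326169]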
def BytesDecoder(field_number, is_repeated, is_packed, key, new_default):
"""Returns a decoder for a bytes field."""
local_DecodeVarint = _DecodeVarint
assert not is_packed
if is_repeated:
tag_bytes = encoder.TagBytes(field_number,
wire_format.WIRETYPE_LENGTH_DELIMITED)
tag_len = len(tag_bytes)
def DecodeRepeatedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated string.')
value.append(buffer[pos:new_pos])
# Predict that the next tag is another copy of the same repeated field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos == end:
# Prediction failed. Return.
return new_pos
return DecodeRepeatedField
else:
def DecodeField(buffer, pos, end, message, field_dict):
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated string.')
field_dict[key] = buffer[pos:new_pos]
return new_pos
return DecodeField | Returns a decoder for a bytes field. |
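For context, a minimal read of a length-delimited field using the protobuf varint scheme (standalone; the sample buffer is hand-built and hypothetical):

def decode_varint(buf, pos):
    # little-endian base-128: 7 payload bits per byte, high bit marks continuation
    result = shift = 0
    while True:
        byte = buf[pos]
        result |= (byte & 0x7F) << shift
        pos += 1
        if not byte & 0x80:
            return result, pos
        shift += 7

buf = b"\x03abc"                 # payload b'abc' preceded by its varint length (3)
size, pos = decode_varint(buf, 0)
print(buf[pos:pos + size])       # b'abc'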
def graph_query(kind, source, target=None, neighbor_limit=1,
database_filter=None):
"""Perform a graph query on PathwayCommons.
For more information on these queries, see
http://www.pathwaycommons.org/pc2/#graph
Parameters
----------
kind : str
The kind of graph query to perform. Currently 3 options are
implemented, 'neighborhood', 'pathsbetween' and 'pathsfromto'.
source : list[str]
A list of gene names which are the source set for the graph query.
target : Optional[list[str]]
A list of gene names which are the target set for the graph query.
Only needed for 'pathsfromto' queries.
neighbor_limit : Optional[int]
This limits the length of the longest path considered in
the graph query. Default: 1
Returns
-------
model : org.biopax.paxtools.model.Model
A BioPAX model (java object).
"""
default_databases = ['wp', 'smpdb', 'reconx', 'reactome', 'psp', 'pid',
'panther', 'netpath', 'msigdb', 'mirtarbase', 'kegg',
'intact', 'inoh', 'humancyc', 'hprd',
'drugbank', 'dip', 'corum']
if not database_filter:
query_databases = default_databases
else:
query_databases = database_filter
# excluded: ctd
params = {}
params['format'] = 'BIOPAX'
params['organism'] = '9606'
params['datasource'] = query_databases
# Get the "kind" string
kind_str = kind.lower()
if kind not in ['neighborhood', 'pathsbetween', 'pathsfromto']:
logger.warn('Invalid query type %s' % kind_str)
return None
params['kind'] = kind_str
# Get the source string
if isinstance(source, basestring):
source_str = source
else:
source_str = ','.join(source)
params['source'] = source_str
try:
neighbor_limit = int(neighbor_limit)
params['limit'] = neighbor_limit
except (TypeError, ValueError):
logger.warn('Invalid neighborhood limit %s' % neighbor_limit)
return None
if target is not None:
if isinstance(target, basestring):
target_str = target
else:
target_str = ','.join(target)
params['target'] = target_str
logger.info('Sending Pathway Commons query with parameters: ')
for k, v in params.items():
logger.info(' %s: %s' % (k, v))
logger.info('Sending Pathway Commons query...')
res = requests.get(pc2_url + 'graph', params=params)
if not res.status_code == 200:
logger.error('Response is HTTP code %d.' % res.status_code)
if res.status_code == 500:
logger.error('Note: HTTP code 500 can mean empty '
'results for a valid query.')
return None
# We don't decode to Unicode here because owl_str_to_model expects
# a byte stream
model = owl_str_to_model(res.content)
if model is not None:
logger.info('Pathway Commons query returned a model...')
return model | Perform a graph query on PathwayCommons.
For more information on these queries, see
http://www.pathwaycommons.org/pc2/#graph
Parameters
----------
kind : str
The kind of graph query to perform. Currently 3 options are
implemented, 'neighborhood', 'pathsbetween' and 'pathsfromto'.
source : list[str]
A list of gene names which are the source set for the graph query.
target : Optional[list[str]]
A list of gene names which are the target set for the graph query.
Only needed for 'pathsfromto' queries.
neighbor_limit : Optional[int]
This limits the length of the longest path considered in
the graph query. Default: 1
Returns
-------
model : org.biopax.paxtools.model.Model
A BioPAX model (java object). |
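A hedged sketch of the underlying HTTP request (network access and the public Pathway Commons v2 endpoint are assumed; the gene names are illustrative):

import requests

params = {
    "format": "BIOPAX",
    "organism": "9606",
    "kind": "pathsbetween",
    "source": "BRAF,MAP2K1",
    "limit": 1,
}
res = requests.get("http://www.pathwaycommons.org/pc2/graph", params=params)
print(res.status_code, len(res.content))  # 200 and an OWL/BioPAX payload on success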
def add_parameter(self, name, value, meta=None):
"""Add a parameter to the parameter list.
:param name: New parameter's name.
:type name: str
:param value: New parameter's value.
:type value: float
:param meta: New parameter's meta property.
:type meta: dict
"""
parameter = Parameter(name, value)
if meta: parameter.meta = meta
self.parameters.append(parameter) | Add a parameter to the parameter list.
:param name: New parameter's name.
:type name: str
:param value: New parameter's value.
:type value: float
:param meta: New parameter's meta property.
:type meta: dict |
def open_file(self, info):
""" Handles the open action. """
if not info.initialized: return # Escape.
# retval = self.edit_traits(parent=info.ui.control, view="file_view")
dlg = FileDialog( action = "open",
wildcard = "Graphviz Files (*.dot, *.xdot, *.txt)|"
"*.dot;*.xdot;*.txt|Dot Files (*.dot)|*.dot|"
"All Files (*.*)|*.*|")
if dlg.open() == OK:
parser = GodotDataParser()
model = parser.parse_dot_file(dlg.path)
if model is not None:
self.model = model
else:
print "error parsing: %s" % dlg.path
self.save_file = dlg.path
del dlg | Handles the open action. |
def _connect(self, database=None):
"""
Connect to given database
"""
conn_args = {
'host': self.config['host'],
'user': self.config['user'],
'password': self.config['password'],
'port': self.config['port'],
'sslmode': self.config['sslmode'],
}
if database:
conn_args['database'] = database
else:
conn_args['database'] = 'postgres'
# libpq will use ~/.pgpass only if no password supplied
if self.config['password_provider'] == 'pgpass':
del conn_args['password']
try:
conn = psycopg2.connect(**conn_args)
except Exception as e:
self.log.error(e)
raise e
# Avoid using transactions, set isolation level to autocommit
conn.set_isolation_level(0)
return conn | Connect to given database |
def insert_before(self, text):
"""
Create a new document, with this text inserted before the buffer.
It keeps selection ranges and cursor position in sync.
"""
selection_state = self.selection
if selection_state:
selection_state = SelectionState(
original_cursor_position=selection_state.original_cursor_position + len(text),
type=selection_state.type)
return Document(
text=text + self.text,
cursor_position=self.cursor_position + len(text),
selection=selection_state) | Create a new document, with this text inserted before the buffer.
It keeps selection ranges and cursor position in sync. |
def trigger_event(self, event, client, args, force_dispatch=False):
"""Trigger a new event that will be dispatched to all modules."""
self.controller.process_event(event, client, args, force_dispatch=force_dispatch) | Trigger a new event that will be dispatched to all modules. |
def rand_bytes(length):
"""
Returns a number of random bytes suitable for cryptographic purposes
:param length:
The desired number of bytes
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string
"""
if not isinstance(length, int_types):
raise TypeError(pretty_message(
'''
length must be an integer, not %s
''',
type_name(length)
))
if length < 1:
raise ValueError('length must be greater than 0')
if length > 1024:
raise ValueError('length must not be greater than 1024')
buffer = buffer_from_bytes(length)
result = Security.SecRandomCopyBytes(Security.kSecRandomDefault, length, buffer)
if result != 0:
raise OSError(_extract_error())
return bytes_from_buffer(buffer) | Returns a number of random bytes suitable for cryptographic purposes
:param length:
The desired number of bytes
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string |
def ntowfv2(domain, user, password):
"""
NTOWFv2() Implementation
[MS-NLMP] v20140502 NT LAN Manager (NTLM) Authentication Protocol
3.3.2 NTLM v2 Authentication
:param domain: The windows domain name
:param user: The windows username
:param password: The users password
:return: Hash Data
"""
md4 = hashlib.new('md4')
md4.update(password)
hmac_context = hmac.HMAC(md4.digest(), hashes.MD5(), backend=default_backend())
hmac_context.update(user.upper().encode('utf-16le'))
hmac_context.update(domain.encode('utf-16le'))
return hmac_context.finalize() | NTOWFv2() Implementation
[MS-NLMP] v20140502 NT LAN Manager (NTLM) Authentication Protocol
3.3.2 NTLM v2 Authentication
:param domain: The windows domain name
:param user: The windows username
:param password: The users password
:return: Hash Data |
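The same NTOWFv2 computation sketched with only the standard library, following [MS-NLMP] (the password is UTF-16LE encoded before MD4; note that hashlib's 'md4' depends on the underlying OpenSSL build and may be unavailable on some systems). The credentials are throwaway:

import hashlib
import hmac

def ntowfv2_stdlib(domain, user, password):
    # NTOWFv2 = HMAC-MD5(MD4(UTF-16LE(password)), UTF-16LE(upper(user) + domain))
    nt_hash = hashlib.new("md4", password.encode("utf-16le")).digest()
    return hmac.new(nt_hash,
                    (user.upper() + domain).encode("utf-16le"),
                    hashlib.md5).digest()

print(ntowfv2_stdlib("CONTOSO", "alice", "P@ssw0rd").hex())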
def clear(self):
"""
Clear the cache.
"""
not_removed = []
for fn in os.listdir(self.base):
fn = os.path.join(self.base, fn)
try:
if os.path.islink(fn) or os.path.isfile(fn):
os.remove(fn)
elif os.path.isdir(fn):
shutil.rmtree(fn)
except Exception:
not_removed.append(fn)
return not_removed | Clear the cache. |
def store_drop(cls, resource: str, session: Optional[Session] = None) -> 'Action':
"""Store a "drop" event.
:param resource: The normalized name of the resource to store
Example:
>>> from bio2bel.models import Action
>>> Action.store_drop('hgnc')
"""
action = cls.make_drop(resource)
_store_helper(action, session=session)
return action | Store a "drop" event.
:param resource: The normalized name of the resource to store
Example:
>>> from bio2bel.models import Action
>>> Action.store_drop('hgnc') |