max_stars_repo_path (stringlengths 4-286) | max_stars_repo_name (stringlengths 5-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.03M) | content_cleaned (stringlengths 6-1.03M) | language (stringclasses, 111 values) | language_score (float64 0.03-1) | comments (stringlengths 0-556k) | edu_score (float64 0.32-5.03) | edu_int_score (int64 0-5)
---|---|---|---|---|---|---|---|---|---|---
hermetrics/damerau_levenshtein.py | SoldAI/hermetrics | 3 | 7600 |
from .levenshtein import Levenshtein
class DamerauLevenshtein(Levenshtein):
def __init__(self, name='Damerau-Levenshtein'):
super().__init__(name=name)
def distance(self, source, target, cost=(1, 1, 1, 1)):
"""Damerau-Levenshtein distance with costs for deletion, insertion, substitution and transposition"""
s_len = len(source)
t_len = len(target)
if isinstance(cost, (int, float)):
del_cost = ins_cost = sub_cost = tra_cost = cost
else:
del_cost, ins_cost, sub_cost, tra_cost = cost
# Be sure to exceed maximum value
#INF = float('inf')
UPPER = max(del_cost, ins_cost, sub_cost, tra_cost) * (s_len + t_len)
# Initialize matrix (s_len + 2) X (t_len + 2)
D = [[UPPER for j in range(t_len + 2)]]
D += [[UPPER] + [j*ins_cost for j in range(t_len + 1)]]
D += [[UPPER, i] + [0]*t_len for i in range(1, s_len + 1)]
# Holds last row each element was encountered
last_row = {}
for i in range(1, s_len + 1):
# Current symbol in source
s_symbol = source[i-1]
# Column of last match on this row
last_match_col = 0
for j in range(1, t_len + 1):
# Current symbol in target
t_symbol = target[j-1]
# Last row with matching character
last_match_row = last_row.get(t_symbol, 0)
# Cost of substitution
opt_sub_cost = 0 if s_symbol == t_symbol else sub_cost
# Compute different options
deletion = D[i][j+1] + del_cost
insertion = D[i+1][j] + ins_cost
substitution = D[i][j] + opt_sub_cost
# Cost before transposition
# + cost of operations between transposed letters
# + cost of transposition
# transposition = D[last_match_row][last_match_col] + \
# (i-last_match_row-1) * del_cost + \
# (j-last_match_col-1) * ins_cost + \
# tra_cost
transposition = D[last_match_row][last_match_col] + \
max((i-last_match_row-1) * del_cost, \
(j-last_match_col-1) * ins_cost) + tra_cost
D[i+1][j+1] = min(deletion, insertion, substitution, transposition)
if opt_sub_cost == 0:
last_match_col = j
last_row[s_symbol] = i
return D[-1][-1]
def max_distance(self, source, target, cost=(1,1,1,1)):
"""Damerau-Levenshtein maximum distance value (same as Levenshtein to account for difference in operations)"""
if isinstance(cost, (int, float)):
lev_cost = cost
else:
lev_cost = cost[:3]
return super().max_distance(source, target, lev_cost)
if __name__ == '__main__':
print("Damerau-Levenshtein distance")
| en | 0.805677 | 3.395736 | 3 |
etna/analysis/outliers/hist_outliers.py | Carlosbogo/etna | 1 | 7601 |
import typing
from copy import deepcopy
from typing import TYPE_CHECKING
from typing import List
import numba
import numpy as np
import pandas as pd
if TYPE_CHECKING:
from etna.datasets import TSDataset
@numba.jit(nopython=True)
def optimal_sse(left: int, right: int, p: np.ndarray, pp: np.ndarray) -> float:
"""
Count the approximation error by 1 bin from left to right elements.
Parameters
----------
left:
left border
right:
right border
p:
array of sums of elements, p[i] - sum from first to i elements
pp:
array of sums of squares of elements, pp[i] - sum of squares from first to i elements
Returns
-------
result: float
approximation error
"""
if left == 0:
avg = p[right]
return pp[right] - avg ** 2 / (right - left + 1)
avg = p[right] - p[left - 1]
return pp[right] - pp[left - 1] - avg ** 2 / (right - left + 1)
@numba.jit(nopython=True)
def adjust_estimation(i: int, k: int, sse: np.ndarray, sse_one_bin: np.ndarray) -> float:
"""
Count sse_one_bin[i][k] using binary search.
Parameters
----------
i:
left border of series
k:
number of bins
sse:
array of approximation errors
sse_one_bin:
array of approximation errors with one bin
Returns
-------
result: float
calculated sse_one_bin[i][k]
"""
now_evaluated = sse[i - 1][k - 1]
first_evaluated = sse[i - 1][k - 1]
idx_prev = np.inf
idx_now = 0
left = 0
while idx_now != idx_prev:
right = i
idx_prev = idx_now
while right - left > 1:
if sse_one_bin[(left + right) // 2][i] > now_evaluated:
left = (left + right) // 2
else:
right = (left + right) // 2
idx_now = left
now_evaluated = first_evaluated - sse[idx_now][k - 1]
now_min = np.inf
for j in range(idx_now, i):
now = sse[j][k - 1] + sse_one_bin[j + 1][i]
now_min = min(now_min, now)
return now_min
@numba.jit(nopython=True)
def v_optimal_hist(series: np.ndarray, bins_number: int, p: np.ndarray, pp: np.ndarray) -> np.ndarray:
"""
Count an approximation error of a series with [1, bins_number] bins.
http://www.vldb.org/conf/1998/p275.pdf
Parameters
----------
series:
array to count an approximation error with bins_number bins
bins_number:
number of bins
p:
array of sums of elements, p[i] - sum from 0th to i elements
pp:
array of sums of squares of elements, pp[i] - sum of squares from 0th to i elements
Returns
-------
error: np.ndarray
approximation error of a series with [1, bins_number] bins
"""
sse = np.zeros((len(series), bins_number))
for i in range(len(series)):
sse[i][0] = optimal_sse(0, i, p, pp)
sse_one_bin = np.zeros((len(series), len(series)))
for i in range(len(series)):
for j in range(i, len(series)):
sse_one_bin[i][j] = optimal_sse(i, j, p, pp)
for tmp_bins_number in range(1, bins_number):
for i in range(tmp_bins_number, len(series)):
sse[i][tmp_bins_number] = adjust_estimation(i, tmp_bins_number, sse, sse_one_bin)
return sse
def compute_f(series: np.ndarray, k: int, p: np.ndarray, pp: np.ndarray) -> typing.Tuple[np.ndarray, list]:
"""
Compute F. F[a][b][k] - minimum approximation error on series[a:b+1] with k outliers.
http://www.vldb.org/conf/1999/P9.pdf
Parameters
----------
series:
array to count F
k:
number of outliers
p:
array of sums of elements, p[i] - sum from 0th to i elements
pp:
array of sums of squares of elements, pp[i] - sum of squares from 0th to i elements
Returns
-------
result: np.ndarray
array F, outliers_indices
"""
f = np.zeros((len(series), len(series), k + 1))
s: list = [[[[] for i in range(k + 1)] for j in range(len(series))] for s in range(len(series))]
ss: list = [[[[] for i in range(k + 1)] for j in range(len(series))] for s in range(len(series))]
outliers_indices: list = [[[[] for i in range(k + 1)] for j in range(len(series))] for s in range(len(series))]
for right_border in range(0, len(series)):
f[0][right_border][0] = optimal_sse(0, right_border, p, pp)
s[0][right_border][0] = [p[right_border]]
ss[0][right_border][0] = [pp[right_border]]
for left_border in range(1, len(series)):
for right_border in range(left_border, len(series)):
f[left_border][right_border][0] = optimal_sse(left_border, right_border, p, pp)
s[left_border][right_border][0] = [p[right_border] - p[left_border - 1]]
ss[left_border][right_border][0] = [pp[right_border] - pp[left_border - 1]]
for left_border in range(0, len(series)):
for right_border in range(left_border, min(len(series), left_border + k)):
s[left_border][right_border][right_border - left_border + 1] = [0]
ss[left_border][right_border][right_border - left_border + 1] = [0]
outliers_indices[left_border][right_border][right_border - left_border + 1] = [
list(np.arange(left_border, right_border + 1))
]
for left_border in range(len(series)):
for right_border in range(left_border + 1, len(series)):
for outlier_number in range(1, min(right_border - left_border + 1, k + 1)):
f1 = f[left_border][right_border - 1][outlier_number - 1]
tmp_ss = []
tmp_s = []
f2 = []
now_min = np.inf
now_outliers_indices = []
where = 0
for i in range(len(ss[left_border][right_border - 1][outlier_number])):
tmp_ss.append(ss[left_border][right_border - 1][outlier_number][i] + series[right_border] ** 2)
tmp_s.append(s[left_border][right_border - 1][outlier_number][i] + series[right_border])
now_outliers_indices.append(
deepcopy(outliers_indices[left_border][right_border - 1][outlier_number][i])
)
f2.append(tmp_ss[-1] - tmp_s[-1] ** 2 / (right_border - left_border + 1 - outlier_number))
if f2[-1] < now_min:
now_min = f2[-1]
where = i
if f1 < now_min:
f[left_border][right_border][outlier_number] = f1
s[left_border][right_border][outlier_number] = deepcopy(
s[left_border][right_border - 1][outlier_number - 1]
)
ss[left_border][right_border][outlier_number] = deepcopy(
ss[left_border][right_border - 1][outlier_number - 1]
)
outliers_indices[left_border][right_border][outlier_number] = deepcopy(
outliers_indices[left_border][right_border - 1][outlier_number - 1]
)
if len(outliers_indices[left_border][right_border][outlier_number]):
for i in range(len(outliers_indices[left_border][right_border][outlier_number])):
outliers_indices[left_border][right_border][outlier_number][i].append(right_border)
else:
outliers_indices[left_border][right_border][outlier_number].append([right_border])
elif f1 > now_min:
f[left_border][right_border][outlier_number] = f2[where]
s[left_border][right_border][outlier_number] = tmp_s
ss[left_border][right_border][outlier_number] = tmp_ss
outliers_indices[left_border][right_border][outlier_number] = now_outliers_indices
else:
f[left_border][right_border][outlier_number] = f1
tmp_s.extend(s[left_border][right_border - 1][outlier_number - 1])
tmp_ss.extend(ss[left_border][right_border - 1][outlier_number - 1])
s[left_border][right_border][outlier_number] = tmp_s
ss[left_border][right_border][outlier_number] = tmp_ss
tmp = deepcopy(outliers_indices[left_border][right_border - 1][outlier_number - 1])
if len(tmp):
for i in range(len(tmp)):
tmp[i].append(right_border)
else:
tmp = [[right_border]]
outliers_indices[left_border][right_border][outlier_number].extend(now_outliers_indices)
outliers_indices[left_border][right_border][outlier_number].extend(deepcopy(tmp))
return f, outliers_indices
def hist(series: np.ndarray, bins_number: int) -> np.ndarray:
"""
Compute outlier indices according to the hist rule.
http://www.vldb.org/conf/1999/P9.pdf
Parameters
----------
series:
array in which to search for outliers
bins_number:
number of bins
Returns
-------
indices: np.ndarray
outliers indices
"""
approximation_error = np.zeros((len(series), bins_number + 1, bins_number))
anomalies: list = [[[[] for i in range(bins_number)] for j in range(bins_number + 1)] for s in range(len(series))]
p, pp = np.empty_like(series), np.empty_like(series)
p[0] = series[0]
pp[0] = series[0] ** 2
for i in range(1, len(series)):
p[i] = p[i - 1] + series[i]
pp[i] = pp[i - 1] + series[i] ** 2
f, outliers_indices = compute_f(series, bins_number - 1, p, pp)
approximation_error[:, 1:, 0] = v_optimal_hist(series, bins_number, p, pp)
approximation_error[:, 1, :] = f[0]
for right_border in range(len(series)):
for outlier_number in range(1, bins_number):
if len(outliers_indices[0][right_border][outlier_number]):
anomalies[right_border][1][outlier_number] = deepcopy(
outliers_indices[0][right_border][outlier_number][0]
)
for right_border in range(1, len(series)):
for tmp_bins_number in range(2, min(bins_number + 1, right_border + 2)):
for outlier_number in range(1, min(bins_number, right_border + 2 - tmp_bins_number)):  # see the formula above
tmp_approximation_error = approximation_error[:right_border, tmp_bins_number - 1, : outlier_number + 1]
tmp_f = f[1 : right_border + 1, right_border, : outlier_number + 1][:, ::-1]
approximation_error[right_border][tmp_bins_number][outlier_number] = np.min(
tmp_approximation_error + tmp_f
)
where = np.where(
tmp_approximation_error + tmp_f
== approximation_error[right_border][tmp_bins_number][outlier_number]
)
if where[1][0] != outlier_number:
anomalies[right_border][tmp_bins_number][outlier_number].extend(
deepcopy(outliers_indices[1 + where[0][0]][right_border][outlier_number - where[1][0]][0])
)
anomalies[right_border][tmp_bins_number][outlier_number].extend(
deepcopy(anomalies[where[0][0]][tmp_bins_number - 1][where[1][0]])
)
count = 0
now_min = approximation_error[-1][-1][0]
for outlier_number in range(1, min(approximation_error.shape[1], approximation_error.shape[2])):
if approximation_error[-1][approximation_error.shape[1] - 1 - outlier_number][outlier_number] <= now_min:
count = outlier_number
now_min = approximation_error[-1][approximation_error.shape[1] - 1 - outlier_number][outlier_number]
return np.array(sorted(anomalies[-1][approximation_error.shape[1] - 1 - count][count]))
def get_anomalies_hist(
ts: "TSDataset", in_column: str = "target", bins_number: int = 10
) -> typing.Dict[str, List[pd.Timestamp]]:
"""
Get point outliers in time series using histogram model.
Outliers are all points that, when removed, result in a histogram with a lower approximation error,
even with the number of bins less than the number of outliers.
Parameters
----------
ts:
TSDataset with timeseries data
in_column:
name of the column in which to search for outliers
bins_number:
number of bins
Returns
-------
dict of outliers: typing.Dict[str, typing.List[pd.Timestamp]]
dict of outliers in format {segment: [outliers_timestamps]}
"""
outliers_per_segment = {}
segments = ts.segments
for seg in segments:
segment_df = ts.df[seg].reset_index()
values = segment_df[in_column].values
timestamp = segment_df["timestamp"].values
anomalies = hist(values, bins_number)
outliers_per_segment[seg] = [timestamp[i] for i in anomalies]
return outliers_per_segment
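# Illustrative check (not part of the original module): run the hist rule on a synthetic
# series with two planted spikes; the values below are made up for demonstration.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    demo_series = rng.normal(loc=10.0, scale=0.5, size=60)
    demo_series[17] += 25.0   # planted outlier
    demo_series[42] -= 25.0   # planted outlier
    print(hist(demo_series, bins_number=10))   # the planted indices are expected to be flagged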
| en | 0.663076 | 2.72692 | 3 |
aws/securityGroup.py | emanueleleyland/sabd-project2 | 0 | 7602 |
def createKafkaSecurityGroup(ec2, vpc):
sec_group_kafka = ec2.create_security_group(
GroupName='kafka', Description='kafka sec group', VpcId=vpc.id)
sec_group_kafka.authorize_ingress(
IpPermissions=[{'IpProtocol': 'icmp', 'FromPort': -1, 'ToPort': -1, 'IpRanges': [{'CidrIp': '0.0.0.0/0'}]},
{'IpProtocol': 'tcp', 'FromPort': 22, 'ToPort': 22, 'IpRanges': [{'CidrIp': '0.0.0.0/0'}]},
{'IpProtocol': 'tcp', 'FromPort': 9092, 'ToPort': 9092, 'IpRanges': [{'CidrIp': '0.0.0.0/0'}]}]
)
print(sec_group_kafka.id)
return sec_group_kafka
def createZookeeperSecurityGroup(ec2, vpc):
sec_group_zookeeper = ec2.create_security_group(
GroupName='zookeeper', Description='zookeeper', VpcId=vpc.id)
sec_group_zookeeper.authorize_ingress(
IpPermissions=[{'IpProtocol': 'icmp', 'FromPort': -1, 'ToPort': -1, 'IpRanges': [{'CidrIp': '0.0.0.0/0'}]},
{'IpProtocol': 'tcp', 'FromPort': 22, 'ToPort': 22, 'IpRanges': [{'CidrIp': '0.0.0.0/0'}]},
{'IpProtocol': 'tcp', 'FromPort': 2181, 'ToPort': 2181, 'IpRanges': [{'CidrIp': '0.0.0.0/0'}]},
{'IpProtocol': 'tcp', 'FromPort': 2888, 'ToPort': 2888, 'IpRanges': [{'CidrIp': '0.0.0.0/0'}]},
{'IpProtocol': 'tcp', 'FromPort': 3888, 'ToPort': 3888, 'IpRanges': [{'CidrIp': '0.0.0.0/0'}]}]
)
print(sec_group_zookeeper.id)
return sec_group_zookeeper
def create_redis_security_group(ec2, vpc):
sec_group_redis = ec2.create_security_group(
GroupName='redis', Description='redis', VpcId=vpc.id)
sec_group_redis.authorize_ingress(
IpPermissions=[{'IpProtocol': 'icmp', 'FromPort': -1, 'ToPort': -1, 'IpRanges': [{'CidrIp': '0.0.0.0/0'}]},
{'IpProtocol': 'tcp', 'FromPort': 22, 'ToPort': 22, 'IpRanges': [{'CidrIp': '0.0.0.0/0'}]},
{'IpProtocol': 'tcp', 'FromPort': 6379, 'ToPort': 6379, 'IpRanges': [{'CidrIp': '0.0.0.0/0'}]}]
)
print(sec_group_redis.id)
return sec_group_redis
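# Illustrative usage sketch (not part of the original file): assumes boto3 is installed,
# AWS credentials and a default region are configured, and the VPC id below is a placeholder.
if __name__ == "__main__":
    import boto3
    ec2 = boto3.resource("ec2")
    vpc = ec2.Vpc("vpc-0123456789abcdef0")    # hypothetical VPC id
    createKafkaSecurityGroup(ec2, vpc)        # opens SSH (22) and Kafka (9092)
    createZookeeperSecurityGroup(ec2, vpc)    # opens SSH and ZooKeeper 2181/2888/3888
    create_redis_security_group(ec2, vpc)     # opens SSH and Redis (6379)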
sec_group_kafka = ec2.create_security_group(
GroupName='kafka', Description='kafka sec group', VpcId=vpc.id)
sec_group_kafka.authorize_ingress(
IpPermissions=[{'IpProtocol': 'icmp', 'FromPort': -1, 'ToPort': -1, 'IpRanges': [{'CidrIp': '0.0.0.0/0'}]},
{'IpProtocol': 'tcp', 'FromPort': 22, 'ToPort': 22, 'IpRanges': [{'CidrIp': '0.0.0.0/0'}]},
{'IpProtocol': 'tcp', 'FromPort': 9092, 'ToPort': 9092, 'IpRanges': [{'CidrIp': '0.0.0.0/0'}]}]
)
print(sec_group_kafka.id)
return sec_group_kafka
def createZookeeperSecurityGroup(ec2, vpc):
sec_group_zookeeper = ec2.create_security_group(
GroupName='zookeeper', Description='zookeeper', VpcId=vpc.id)
sec_group_zookeeper.authorize_ingress(
IpPermissions=[{'IpProtocol': 'icmp', 'FromPort': -1, 'ToPort': -1, 'IpRanges': [{'CidrIp': '0.0.0.0/0'}]},
{'IpProtocol': 'tcp', 'FromPort': 22, 'ToPort': 22, 'IpRanges': [{'CidrIp': '0.0.0.0/0'}]},
{'IpProtocol': 'tcp', 'FromPort': 2181, 'ToPort': 2181, 'IpRanges': [{'CidrIp': '0.0.0.0/0'}]},
{'IpProtocol': 'tcp', 'FromPort': 2888, 'ToPort': 2888, 'IpRanges': [{'CidrIp': '0.0.0.0/0'}]},
{'IpProtocol': 'tcp', 'FromPort': 3888, 'ToPort': 3888, 'IpRanges': [{'CidrIp': '0.0.0.0/0'}]}]
)
print(sec_group_zookeeper.id)
return sec_group_zookeeper
def create_redis_security_group(ec2, vpc):
sec_group_redis = ec2.create_security_group(
GroupName='redis', Description='redis', VpcId=vpc.id)
sec_group_redis.authorize_ingress(
IpPermissions=[{'IpProtocol': 'icmp', 'FromPort': -1, 'ToPort': -1, 'IpRanges': [{'CidrIp': '0.0.0.0/0'}]},
{'IpProtocol': 'tcp', 'FromPort': 22, 'ToPort': 22, 'IpRanges': [{'CidrIp': '0.0.0.0/0'}]},
{'IpProtocol': 'tcp', 'FromPort': 6379, 'ToPort': 6379, 'IpRanges': [{'CidrIp': '0.0.0.0/0'}]}]
)
print(sec_group_redis.id)
return sec_group_redis | none | 1 | 1.997952 | 2 |
|
virtual/lib/python3.6/site-packages/django_pusher/context_processors.py | petermirithu/hooby_lab | 2 | 7603 |
from django.conf import settings
def pusher(request):
return {
"PUSHER_KEY": getattr(settings, "PUSHER_KEY", ""),
}
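# Illustrative check (not part of the original package). In a real project this processor is
# listed under the "context_processors" option of the TEMPLATES setting as
# "django_pusher.context_processors.pusher", after which templates can reference {{ PUSHER_KEY }}.
# The key below is a placeholder.
if __name__ == "__main__":
    if not settings.configured:
        settings.configure(PUSHER_KEY="hypothetical-app-key")
    print(pusher(None))   # -> {'PUSHER_KEY': 'hypothetical-app-key'}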
| none | 1 | 1.595303 | 2 |
inconnu/character/update/parse.py | tiltowait/inconnu | 4 | 7604 |
"""character/update/parse.py - Defines an interface for updating character traits."""
# pylint: disable=too-many-arguments
import re
import discord
from discord_ui.components import LinkButton
from . import paramupdate
from ..display import display
from ... import common, constants
from ...log import Log
from ...vchar import VChar
__MATCHES = {}
__KEYS = {
"name": "The character's name",
"health": "The character's max Health",
"willpower": "The character's max Willpower",
"humanity": "The character's Humanity",
"splat": "The type of character: `vampire`, `mortal`, or `ghoul`",
"sh": "+/- Superficial Health damage",
"ah": "+/- Aggravated Health damage",
"sw": "+/- Superficial Willpower damage",
"aw": "+/- Aggravated Willpower damage",
"stains": "+/- Stains",
"unspent_xp": "+/- Unspent XP",
"lifetime_xp": "+/- Total Lifetime XP",
"hunger": "+/- The character's Hunger",
"potency": "+/- The character's Blood Potency"
}
__HELP_URL = "https://www.inconnu-bot.com/#/character-tracking?id=tracker-updates"
async def update(
ctx, parameters: str, character=None, color=None, update_message=None, player=None
):
"""
Process the user's arguments.
Allow the user to omit a character if they have only one.
"""
args = re.sub(r":", r"=", parameters) # Some people think colons work ...
args = re.sub(r"(\w)\s*([+-])\s*(\w)", r"\g<1>=\g<2>\g<3>", args) # Stop the sh+3 madness
args = re.sub(r"\s*([+-])\s*=\s*", r"=\g<1>", args) # Let +/-= work, for the CS nerds
args = re.sub(r"\s*=\s*([+-])\s*", r"=\g<1>", args) # Remove gaps between keys and values
args = list(args.split()) # To allow element removal
if len(args) == 0:
await update_help(ctx)
return
try:
owner = await common.player_lookup(ctx, player)
tip = f"`/character update` `parameters:{parameters}` `character:CHARACTER`"
character = await common.fetch_character(ctx, character, tip, __HELP_URL, owner=owner)
parameters = __parse_arguments(*args)
updates = []
for parameter, new_value in parameters.items():
update_msg = __update_character(character, parameter, new_value)
updates.append(update_msg)
Log.log("update",
user=ctx.author.id,
guild=ctx.guild.id,
charid=character.id,
syntax=" ".join(args)
)
# Ignore generated output if we got a custom message
if update_message is None:
update_message = "\n".join(updates)
await display(ctx, character, color=color, owner=player, message=update_message)
except (SyntaxError, ValueError) as err:
Log.log("update_error",
user=ctx.author.id,
guild=ctx.guild.id,
charid=character.id,
syntax=" ".join(args)
)
await update_help(ctx, err)
except LookupError as err:
await common.present_error(ctx, err, help_url=__HELP_URL)
except common.FetchError:
pass
def __parse_arguments(*arguments):
"""
Parse the user's arguments.
Raises SyntaxError or ValueError on malformed input.
"""
if len(arguments) == 0:
raise ValueError("You must supply some parameters!")
parameters = {}
for argument in arguments:
split = argument.split("=")
key = split[0].lower()
if len(split) != 2:
err = "Parameters must be in `key = value` pairs."
if key not in __KEYS:
err += f" Also, `{key}` is not a valid option."
raise SyntaxError(err)
if key in parameters:
raise ValueError(f"You cannot use `{key}` more than once.")
if key not in __MATCHES:
raise ValueError(f"Unknown parameter: `{key}`.")
key = __MATCHES[key] # Get the canonical key
value = split[1]
if len(value) == 0:
raise ValueError(f"No value given for `{key}`.")
parameters[key] = value # Don't do any validation here
return parameters
def __update_character(character: VChar, param: str, value: str) -> str:
"""
Update one of a character's parameters.
Args:
character (VChar): The character being updated
param (str): The parameter to update
value (str): The parameter's new value
Raises ValueError if the parameter's value is invalid.
"""
return getattr(paramupdate, f"update_{param}")(character, value)
async def update_help(ctx, err=None, hidden=True):
"""Display a help message that details the available keys."""
embed = discord.Embed(
title="Character Tracking",
)
embed.set_author(name=ctx.author.display_name, icon_url=ctx.author.display_avatar)
if err is not None:
embed.add_field(name="Error", value=str(err), inline=False)
inst = "To update a character, use `/character update` with one or more `KEY=VALUE` pairs."
embed.add_field(name="Instructions", value=inst, inline=False)
parameters = [f"***{key}:*** {val}" for key, val in __KEYS.items()]
parameters = "\n".join(parameters)
embed.add_field(name="Keys", value=parameters, inline=False)
embed.add_field(
name="Example",
value="Character takes 4 Superficial Health damage:```/character update parameters:sh+4```"
)
embed.set_footer(text="You may modify more than one tracker at a time.")
documentation = LinkButton(
"http://www.inconnu-bot.com/#/character-tracking?id=tracker-updates",
label="Full Documentation"
)
support = LinkButton(constants.SUPPORT_URL, "Support")
await ctx.respond(embed=embed, components=[documentation, support], hidden=hidden)
# We do flexible matching for the keys. Many of these are the same as RoD's
# keys, while others have been observed in syntax error logs. This should be
# a little more user-friendly.
def __setup_matches():
"""Register all the update keys."""
__register_keys("name")
__register_keys("health", "hp")
__register_keys("willpower", "wp", "w")
__register_keys("humanity", "hm")
__register_keys("splat", "type")
__register_keys(
"sh", "sd", "shp", "suphp", "suph", "supd", "superficialhealth",
"superficialdamage"
)
__register_keys("ah", "ad", "ahp", "agghp", "aggd", "aggh", "agghealth", "aggdamage")
__register_keys("sw", "swp", "supwp", "supw", "superficialwillpower")
__register_keys("aw", "awp", "aggwp", "aggw", "aggwillpower")
__register_keys("stains", "stain", "s")
__register_keys(
"current_xp", "xp_current", "current_exp", "exp_current", "currentxp",
"currentexp", "xpcurrent", "expcurrent", "cxp",
"unspent_xp", "xp_unspent", "unspent_exp", "exp_unspent", "unspentxp",
"unspentexp", "xpunspent", "expunspent", "uxp"
)
__register_keys(
"total_xp", "xp_total", "total_exp", "exp_total", "totalxp",
"totalexp", "xptotal", "exptotal", "txp",
"lifetimexp", "xplifetime", "explifetime", "lxp", "lifetime_xp", "life_time_xp"
)
__register_keys("hunger", "h")
__register_keys("potency", "bp", "p")
def __register_keys(canonical, *alternates):
"""Register an update key along with some alternates."""
__MATCHES[canonical] = canonical
for alternate in alternates:
if alternate in __MATCHES:
raise KeyError(f"{alternate} is already an update parameter.")
__MATCHES[alternate] = canonical
__setup_matches()
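# Illustrative sketch (not part of the original module): the same regex pipeline used at the
# top of update(), applied to a made-up argument string to show how shorthand is normalized
# before __parse_arguments() splits it into key=value pairs. The module itself uses relative
# imports, so this block is purely illustrative.
if __name__ == "__main__":
    sample = "sh+3 aw - 1 hunger:2"
    normalized = re.sub(r":", r"=", sample)
    normalized = re.sub(r"(\w)\s*([+-])\s*(\w)", r"\g<1>=\g<2>\g<3>", normalized)
    normalized = re.sub(r"\s*([+-])\s*=\s*", r"=\g<1>", normalized)
    normalized = re.sub(r"\s*=\s*([+-])\s*", r"=\g<1>", normalized)
    print(normalized.split())   # ['sh=+3', 'aw=-1', 'hunger=2']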
| """character/update/parse.py - Defines an interface for updating character traits."""
# pylint: disable=too-many-arguments
import re
import discord
from discord_ui.components import LinkButton
from . import paramupdate
from ..display import display
from ... import common, constants
from ...log import Log
from ...vchar import VChar
__MATCHES = {}
__KEYS = {
"name": "The character's name",
"health": "The character's max Health",
"willpower": "The character's max Willpower",
"humanity": "The character's Humanity",
"splat": "The type of character: `vampire`, `mortal`, or `ghoul`",
"sh": "+/- Superficial Health damage",
"ah": "+/- Aggravated Health damage",
"sw": "+/- Superficial Willpower damage",
"aw": "+/- Aggravated Willpower damage",
"stains": "+/- Stains",
"unspent_xp": "+/- Unspent XP",
"lifetime_xp": "+/- Total Lifetime XP",
"hunger": "+/- The character's Hunger",
"potency": "+/- The character's Blood Potency"
}
__HELP_URL = "https://www.inconnu-bot.com/#/character-tracking?id=tracker-updates"
async def update(
ctx, parameters: str, character=None, color=None, update_message=None, player=None
):
"""
Process the user's arguments.
Allow the user to omit a character if they have only one.
"""
args = re.sub(r":", r"=", parameters) # Some people think colons work ...
args = re.sub(r"(\w)\s*([+-])\s*(\w)", r"\g<1>=\g<2>\g<3>", args) # Stop the sh+3 madness
args = re.sub(r"\s*([+-])\s*=\s*", r"=\g<1>", args) # Let +/-= work, for the CS nerds
args = re.sub(r"\s*=\s*([+-])\s*", r"=\g<1>", args) # Remove gaps between keys and values
args = list(args.split()) # To allow element removal
if len(args) == 0:
await update_help(ctx)
return
try:
owner = await common.player_lookup(ctx, player)
tip = f"`/character update` `parameters:{parameters}` `character:CHARACTER`"
character = await common.fetch_character(ctx, character, tip, __HELP_URL, owner=owner)
parameters = __parse_arguments(*args)
updates = []
for parameter, new_value in parameters.items():
update_msg = __update_character(character, parameter, new_value)
updates.append(update_msg)
Log.log("update",
user=ctx.author.id,
guild=ctx.guild.id,
charid=character.id,
syntax=" ".join(args)
)
# Ignore generated output if we got a custom message
if update_message is None:
update_message = "\n".join(updates)
await display(ctx, character, color=color, owner=player, message=update_message)
except (SyntaxError, ValueError) as err:
Log.log("update_error",
user=ctx.author.id,
guild=ctx.guild.id,
charid=character.id,
syntax=" ".join(args)
)
await update_help(ctx, err)
except LookupError as err:
await common.present_error(ctx, err, help_url=__HELP_URL)
except common.FetchError:
pass
def __parse_arguments(*arguments):
"""
Parse the user's arguments.
Raises ValueErrors and KeyErrors on exceptions.
"""
if len(arguments) == 0:
raise ValueError("You must supply some parameters!")
parameters = {}
for argument in arguments:
split = argument.split("=")
key = split[0].lower()
if len(split) != 2:
err = "Parameters must be in `key = value` pairs."
if key not in __KEYS:
err += f" Also, `{key}` is not a valid option."
raise SyntaxError(err)
if key in parameters:
raise ValueError(f"You cannot use `{key}` more than once.")
if key not in __MATCHES:
raise ValueError(f"Unknown parameter: `{key}`.")
key = __MATCHES[key] # Get the canonical key
value = split[1]
if len(value) == 0:
raise ValueError(f"No value given for `{key}`.")
parameters[key] = value # Don't do any validation here
return parameters
def __update_character(character: VChar, param: str, value: str) -> str:
"""
Update one of a character's parameters.
Args:
character (VChar): The character being updated
param (str): The parameter to update
value (str): The parameter's new value
Raises ValueError if the parameter's value is invalid.
"""
return getattr(paramupdate, f"update_{param}")(character, value)
async def update_help(ctx, err=None, hidden=True):
"""Display a help message that details the available keys."""
embed = discord.Embed(
title="Character Tracking",
)
embed.set_author(name=ctx.author.display_name, icon_url=ctx.author.display_avatar)
if err is not None:
embed.add_field(name="Error", value=str(err), inline=False)
inst = "To update a character, use `/character update` with one or more `KEY=VALUE` pairs."
embed.add_field(name="Instructions", value=inst, inline=False)
parameters = [f"***{key}:*** {val}" for key, val in __KEYS.items()]
parameters = "\n".join(parameters)
embed.add_field(name="Keys", value=parameters, inline=False)
embed.add_field(
name="Example",
value="Character takes 4 Superficial Health damage:```/character update parameters:sh+4```"
)
embed.set_footer(text="You may modify more than one tracker at a time.")
documentation = LinkButton(
"http://www.inconnu-bot.com/#/character-tracking?id=tracker-updates",
label="Full Documentation"
)
support = LinkButton(constants.SUPPORT_URL, "Support")
await ctx.respond(embed=embed, components=[documentation, support], hidden=hidden)
# We do flexible matching for the keys. Many of these are the same as RoD's
# keys, while others have been observed in syntax error logs. This should be
# a little more user-friendly.
def __setup_matches():
"""Register all the update keys."""
__register_keys("name")
__register_keys("health", "hp")
__register_keys("willpower", "wp", "w")
__register_keys("humanity", "hm")
__register_keys("splat", "type")
__register_keys(
"sh", "sd", "shp", "suphp", "suph", "supd", "superficialhealth",
"superficialdamage"
)
__register_keys("ah", "ad", "ahp", "agghp", "aggd", "aggh", "agghealth", "aggdamage")
__register_keys("sw", "swp", "supwp", "supw", "superficialwillpower")
__register_keys("aw", "awp", "aggwp", "aggw", "aggwillpower")
__register_keys("stains", "stain", "s")
__register_keys(
"current_xp", "xp_current", "current_exp", "exp_current", "currentxp",
"currentexp", "xpcurrent", "expcurrent", "cxp",
"unspent_xp", "xp_unspent", "unspent_exp", "exp_unspent", "unspentxp",
"unspentexp", "xpunspent", "expunspent", "uxp"
)
__register_keys(
"total_xp", "xp_total", "total_exp", "exp_total", "totalxp",
"totalexp", "xptotal", "exptotal", "txp",
"lifetimexp", "xplifetime", "explifetime", "lxp", "lifetime_xp", "life_time_xp"
)
__register_keys("hunger", "h")
__register_keys("potency", "bp", "p")
def __register_keys(canonical, *alternates):
"""Register an update key along with some alternates."""
__MATCHES[canonical] = canonical
for alternate in alternates:
if alternate in __MATCHES:
raise KeyError(f"{alternate} is already an update parameter.")
__MATCHES[alternate] = canonical
__setup_matches() | en | 0.751347 | character/update/parse.py - Defines an interface for updating character traits. # pylint: disable=too-many-arguments #/character-tracking?id=tracker-updates" Process the user's arguments. Allow the user to omit a character if they have only one. # Some people think colons work ... # Stop the sh+3 madness # Let +/-= work, for the CS nerds # Remove gaps between keys and values # To allow element removal # Ignore generated output if we got a custom message Parse the user's arguments. Raises ValueErrors and KeyErrors on exceptions. # Get the canonical key # Don't do any validation here Update one of a character's parameters. Args: character (VChar): The character being updated param (str): The parameter to update value (str): The parameter's new value Raises ValueError if the parameter's value is invalid. Display a help message that details the available keys. #/character-tracking?id=tracker-updates", # We do flexible matching for the keys. Many of these are the same as RoD's # keys, while others have been observed in syntax error logs. This should be # a little more user-friendly. Register all the update keys. Register an update key along with some alternates. | 2.677846 | 3 |
src/models/train_search_multi_deep.py | smadha/MlTrio | 0 | 7605 |
'''
Uses flattened features in the feature directory and runs a grid search over deep neural networks on them.
'''
from keras.layers import Dense
from keras.models import Sequential
import keras.regularizers as Reg
from keras.optimizers import SGD, RMSprop
from keras.callbacks import EarlyStopping
import cPickle as pickle
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score
import theano
from models.down_sampling import balanced_subsample
theano.config.openmp = True
import os
os.environ.setdefault("OMP_NUM_THREADS", "16")  # OpenMP reads this from the environment; a bare OMP_NUM_THREADS variable has no effect
users_va_te_dict = dict([ (v,idx) for (idx,v) in enumerate(pickle.load(open("../../bytecup2016data/users_va_te.p"))) ])
print "users_va_te_dict created ", len(users_va_te_dict)
def normalize(X_tr):
''' Normalize training and test data features
Args:
X_tr: Unnormalized training features
Output:
X_tr: Normalized training features
'''
X_mu = np.mean(X_tr, axis=0)
X_tr = X_tr - X_mu
X_sig = np.std(X_tr, axis=0)
X_tr = X_tr/X_sig
return X_tr, X_mu, X_sig
def genmodel(num_units, actfn='relu', reg_coeff=0.0, last_act='softmax'):
''' Generate a neural network model of appropriate architecture
Args:
num_units: architecture of network in the format [n1, n2, ... , nL]
actfn: activation function for hidden layers ('relu'/'sigmoid'/'linear'/'softmax')
reg_coeff: L2-regularization coefficient
last_act: activation function for final layer ('relu'/'sigmoid'/'linear'/'softmax')
Output:
model: Keras sequential model with appropriate fully-connected architecture
'''
model = Sequential()
for i in range(1, len(num_units)):
if i == 1 and i < len(num_units) - 1:
model.add(Dense(input_dim=num_units[0], output_dim=num_units[i], activation=actfn,
W_regularizer=Reg.l2(l=reg_coeff), init='glorot_normal'))
elif i == 1 and i == len(num_units) - 1:
model.add(Dense(input_dim=num_units[0], output_dim=num_units[i], activation=last_act,
W_regularizer=Reg.l2(l=reg_coeff), init='glorot_normal'))
elif i < len(num_units) - 1:
model.add(Dense(output_dim=num_units[i], activation=actfn,
W_regularizer=Reg.l2(l=reg_coeff), init='glorot_normal'))
elif i == len(num_units) - 1:
model.add(Dense(output_dim=num_units[i], activation=last_act,
W_regularizer=Reg.l2(l=reg_coeff), init='glorot_normal'))
return model
def transform_label(labels):
labels_new_arr = []
for idx,label in enumerate(labels):
label_new = [0] * len(users_va_te_dict) * 2
if label[1] == '0' :
label_new[ users_va_te_dict[label[0]] ] = 1
else :
label_new[ users_va_te_dict[label[0]] + 1 ] = 1
labels_new_arr.append(label_new)
# if (idx+1) % 1000 == 0:
# break
print "labels_new_arr created" , len(labels_new_arr)
return labels_new_arr
def original_label(label):
return [ l.index(1) for l in label]
def get_transform_label():
'''
Returns (labels, del_rows).
labels: one vector of length 2 * len(users_va_te_dict) per training row, with the user's
index set to 1 when the label is '0' and the index + 1 set to 1 when the label is '1'.
del_rows: indices of training rows whose user is not in users_va_te_dict.
'''
count = 0
users_order = []
## rows to be deleted from the feature matrix
del_rows = []
with open("../../bytecup2016data/invited_info_train_PROC.txt","r") as f:
training_data = f.readline().strip().split("\t")
while training_data and len(training_data) >= 2 :
user_id = training_data[1]
label = training_data[2]
if user_id in users_va_te_dict:
users_order.append((user_id,label) )
else:
del_rows.append(count)
count += 1
training_data = f.readline().strip().split("\t")
f.close()
print "users_order created ", len(users_order), len(del_rows)
return transform_label(users_order), del_rows
features = pickle.load( open("../feature_engg/feature/all_features.p", "rb") )
labels, del_rows = get_transform_label()
# features = np.random.normal(size=(26796,3))
# labels, del_rows = get_transform_label()
print len(features),len(features[0])
print len(labels),len(labels[0])
features = np.array(features)
features = np.delete(features, del_rows, axis=0)
col_deleted = np.nonzero((features==0).sum(axis=0) > (len(features)-1000))
# col_deleted = col_deleted[0].tolist() + range(6,22) + range(28,44)
print col_deleted
features = np.delete(features, col_deleted, axis=1)
print len(features),len(features[0])
print len(labels),len(labels[0])
features, X_mu, X_sig = normalize(features)
save_res = {"col_deleted":col_deleted,"X_mu":X_mu,"X_sig":X_sig}
with open("model/train_config", 'wb') as pickle_file:
pickle.dump(save_res, pickle_file, protocol=2)
print "Dumped config"
momentum = 0.99
eStop = True
sgd_Nesterov = True
sgd_lr = 1e-5
batch_size=5000
nb_epoch=100
verbose=True
features_tr, features_te, labels_tr, labels_te = train_test_split(features, labels, train_size=0.85)
features, labels = [], []  # free the full copies only after the split
print "Using separate test data", len(features_tr), len(features_te)
def run_NN(arch, reg_coeff, sgd_decay, subsample_size=0, save=False):
# features_tr, labels_tr = balanced_subsample(features_tr, original_label(labels_tr), subsample_size = subsample_size)
# labels_tr = transform_label(labels_tr)
# print "Training data balanced-", features_tr.shape, len(labels_tr)
call_ES = EarlyStopping(monitor='val_acc', patience=3, verbose=1, mode='auto')
# Generate Model
model = genmodel(num_units=arch, reg_coeff=reg_coeff )
# Compile Model
sgd = SGD(lr=sgd_lr, decay=sgd_decay, momentum=momentum,
nesterov=sgd_Nesterov)
# sgd = RMSprop(lr=sgd_lr, rho=0.9, epsilon=1e-08, decay=sgd_decay)
model.compile(loss='MSE', optimizer=sgd,
metrics=['accuracy'])
# Train Model
if eStop:
model.fit(features_tr, labels_tr, nb_epoch=nb_epoch, batch_size=batch_size,
verbose=verbose, callbacks=[call_ES], validation_split=0.1,
validation_data=None, shuffle=True)
else:
model.fit(features_tr, labels_tr, nb_epoch=nb_epoch, batch_size=batch_size,
verbose=verbose)
labels_pred = model.predict_classes(features_te)
print len(labels_te[0]), labels_pred[0]
y_true, y_pred = original_label(labels_te), labels_pred
print y_true[0], y_pred[0]
print "arch, reg_coeff, sgd_decay, subsample_size", arch, reg_coeff, sgd_decay, subsample_size
macro_rep = f1_score(y_true, y_pred, average = 'macro')
print "macro", macro_rep
weighted_report = f1_score(y_true, y_pred, average = 'weighted')
print "weighted", weighted_report
with open("results_search_multi_deep.txt", "a") as f:
f.write("macro_rep- "+str(macro_rep))
f.write("\n")
f.write("weighted_report- "+str(weighted_report))
f.write("\n")
f.write(" ".join([str(s) for s in ["arch, reg_coeff, sgd_decay, subsample_size", arch, reg_coeff, sgd_decay, subsample_size]]))
f.write("\n")
if save:
# Save model
model.save("model/model_deep.h5")
print("Saved model to disk")
arch_range = [[len(features_tr[0]),1024,len(labels_tr[0])], [len(features_tr[0]),1024,512,len(labels_tr[0])], [len(features_tr[0]),1024,1024,len(labels_tr[0])],[len(features_tr[0]),1024,512,256,len(labels_tr[0])]]
reg_coeffs_range = [1e-6, 5e-6, 1e-5, 5e-5, 5e-4 ]
sgd_decays_range = [1e-6, 1e-5, 5e-5, 1e-4, 5e-4 ]
class_weight_0_range = [1]
# subsample_size_range = [2,2.5,3]
#GRID SEARCH ON BEST PARAM
for arch in arch_range:
for reg_coeff in reg_coeffs_range:
for sgd_decay in sgd_decays_range:
# for subsample_size in subsample_size_range:
run_NN(arch, reg_coeff, sgd_decay)
# arch = [len(features[0]),1024,512,2]
# reg_coeff = 1e-05
# sgd_decay = 1e-05
# class_weight_0 = 0.5
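# Illustrative sketch (not part of the original script): exercises genmodel() and normalize()
# on made-up shapes, independent of the real feature files loaded above.
if __name__ == '__main__':
    X_demo = np.random.normal(size=(100, 20))
    X_demo, mu_demo, sig_demo = normalize(X_demo)
    demo_model = genmodel(num_units=[20, 16, 4], reg_coeff=1e-5)
    demo_model.compile(loss='MSE', optimizer=SGD(lr=1e-3), metrics=['accuracy'])
    demo_model.summary()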
| '''
Uses flattened features in feature directory and run a SVM on it
'''
from keras.layers import Dense
from keras.models import Sequential
import keras.regularizers as Reg
from keras.optimizers import SGD, RMSprop
from keras.callbacks import EarlyStopping
import cPickle as pickle
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score
import theano
from models.down_sampling import balanced_subsample
theano.config.openmp = True
OMP_NUM_THREADS=16
users_va_te_dict = dict([ (v,idx) for (idx,v) in enumerate(pickle.load(open("../../bytecup2016data/users_va_te.p"))) ])
print "users_va_te_dict created ", len(users_va_te_dict)
def normalize(X_tr):
''' Normalize training and test data features
Args:
X_tr: Unnormalized training features
Output:
X_tr: Normalized training features
'''
X_mu = np.mean(X_tr, axis=0)
X_tr = X_tr - X_mu
X_sig = np.std(X_tr, axis=0)
X_tr = X_tr/X_sig
return X_tr, X_mu, X_sig
def genmodel(num_units, actfn='relu', reg_coeff=0.0, last_act='softmax'):
''' Generate a neural network model of approporiate architecture
Args:
num_units: architecture of network in the format [n1, n2, ... , nL]
actfn: activation function for hidden layers ('relu'/'sigmoid'/'linear'/'softmax')
reg_coeff: L2-regularization coefficient
last_act: activation function for final layer ('relu'/'sigmoid'/'linear'/'softmax')
Output:
model: Keras sequential model with appropriate fully-connected architecture
'''
model = Sequential()
for i in range(1, len(num_units)):
if i == 1 and i < len(num_units) - 1:
model.add(Dense(input_dim=num_units[0], output_dim=num_units[i], activation=actfn,
W_regularizer=Reg.l2(l=reg_coeff), init='glorot_normal'))
elif i == 1 and i == len(num_units) - 1:
model.add(Dense(input_dim=num_units[0], output_dim=num_units[i], activation=last_act,
W_regularizer=Reg.l2(l=reg_coeff), init='glorot_normal'))
elif i < len(num_units) - 1:
model.add(Dense(output_dim=num_units[i], activation=actfn,
W_regularizer=Reg.l2(l=reg_coeff), init='glorot_normal'))
elif i == len(num_units) - 1:
model.add(Dense(output_dim=num_units[i], activation=last_act,
W_regularizer=Reg.l2(l=reg_coeff), init='glorot_normal'))
return model
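# Hedged example (illustrative only; the layer sizes are placeholders): an
# architecture list runs from the input width down to the number of output
# units, matching the arch_range grid defined further below.
def _example_genmodel(input_dim=100, n_out=2):
    return genmodel(num_units=[input_dim, 1024, 512, n_out], reg_coeff=1e-5)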
def transform_label(labels):
labels_new_arr = []
for idx,label in enumerate(labels):
label_new = [0] * len(users_va_te_dict) * 2
if label[1] == '0' :
label_new[ users_va_te_dict[label[0]] ] = 1
else :
label_new[ users_va_te_dict[label[0]] + 1 ] = 1
labels_new_arr.append(label_new)
# if (idx+1) % 1000 == 0:
# break
print "labels_new_arr created" , len(labels_new_arr)
return labels_new_arr
def original_label(label):
return [ l.index(1) for l in label]
def get_transform_label():
    '''
    Builds label vectors of length 2*len(users_va_te_dict): a single 1 is
    placed at an index determined by the user id and the binary label string
    (see transform_label above).  Also returns del_rows, the indices of
    training rows whose user id is not in users_va_te_dict; those feature
    rows are dropped later.
    '''
count = 0
users_order = []
    ##features to be deleted
del_rows = []
with open("../../bytecup2016data/invited_info_train_PROC.txt","r") as f:
training_data = f.readline().strip().split("\t")
        while training_data and len(training_data) >= 3 :  # need both the user_id and label fields
user_id = training_data[1]
label = training_data[2]
if user_id in users_va_te_dict:
users_order.append((user_id,label) )
else:
del_rows.append(count)
count += 1
training_data = f.readline().strip().split("\t")
f.close()
print "users_order created ", len(users_order), len(del_rows)
return transform_label(users_order), del_rows
features = pickle.load( open("../feature_engg/feature/all_features.p", "rb") )
labels, del_rows = get_transform_label()
# features = np.random.normal(size=(26796,3))
# labels, del_rows = get_transform_label()
print len(features),len(features[0])
print len(labels),len(labels[0])
features = np.array(features)
features = np.delete(features, del_rows, axis=0)
col_deleted = np.nonzero((features==0).sum(axis=0) > (len(features)-1000))
# col_deleted = col_deleted[0].tolist() + range(6,22) + range(28,44)
print col_deleted
features = np.delete(features, col_deleted, axis=1)
print len(features),len(features[0])
print len(labels),len(labels[0])
features, X_mu, X_sig = normalize(features)
save_res = {"col_deleted":col_deleted,"X_mu":X_mu,"X_sig":X_sig}
with open("model/train_config", 'wb') as pickle_file:
pickle.dump(save_res, pickle_file, protocol=2)
print "Dumped config"
momentum = 0.99
eStop = True
sgd_Nesterov = True
sgd_lr = 1e-5
batch_size=5000
nb_epoch=100
verbose=True
features_tr, features_te,labels_tr, labels_te = train_test_split(features,labels, train_size = 0.85)
features,labels = [] , []  # free the full feature/label lists only after the split, not before it
print "Using separate test data", len(features_tr), len(features_te)
def run_NN(arch, reg_coeff, sgd_decay, subsample_size=0, save=False):
# features_tr, labels_tr = balanced_subsample(features_tr, original_label(labels_tr), subsample_size = subsample_size)
# labels_tr = transform_label(labels_tr)
# print "Training data balanced-", features_tr.shape, len(labels_tr)
call_ES = EarlyStopping(monitor='val_acc', patience=3, verbose=1, mode='auto')
# Generate Model
model = genmodel(num_units=arch, reg_coeff=reg_coeff )
# Compile Model
sgd = SGD(lr=sgd_lr, decay=sgd_decay, momentum=momentum,
nesterov=sgd_Nesterov)
# sgd = RMSprop(lr=sgd_lr, rho=0.9, epsilon=1e-08, decay=sgd_decay)
model.compile(loss='MSE', optimizer=sgd,
metrics=['accuracy'])
# Train Model
if eStop:
model.fit(features_tr, labels_tr, nb_epoch=nb_epoch, batch_size=batch_size,
verbose=verbose, callbacks=[call_ES], validation_split=0.1,
validation_data=None, shuffle=True)
else:
model.fit(features_tr, labels_tr, nb_epoch=nb_epoch, batch_size=batch_size,
verbose=verbose)
labels_pred = model.predict_classes(features_te)
print len(labels_te[0]), labels_pred[0]
y_true, y_pred = original_label(labels_te), labels_pred
print y_true[0], y_pred[0]
print "arch, reg_coeff, sgd_decay, subsample_size", arch, reg_coeff, sgd_decay, subsample_size
macro_rep = f1_score(y_true, y_pred, average = 'macro')
print "macro", macro_rep
weighted_report = f1_score(y_true, y_pred, average = 'weighted')
print "weighted", weighted_report
with open("results_search_multi_deep.txt", "a") as f:
f.write("macro_rep- "+str(macro_rep))
f.write("\n")
f.write("weighted_report- "+str(weighted_report))
f.write("\n")
f.write(" ".join([str(s) for s in ["arch, reg_coeff, sgd_decay, subsample_size", arch, reg_coeff, sgd_decay, subsample_size]]))
f.write("\n")
if save:
# Save model
model.save("model/model_deep.h5")
print("Saved model to disk")
arch_range = [[len(features_tr[0]),1024,len(labels_tr[0])], [len(features_tr[0]),1024,512,len(labels_tr[0])], [len(features_tr[0]),1024,1024,len(labels_tr[0])],[len(features_tr[0]),1024,512,256,len(labels_tr[0])]]
reg_coeffs_range = [1e-6, 5e-6, 1e-5, 5e-5, 5e-4 ]
sgd_decays_range = [1e-6, 1e-5, 5e-5, 1e-4, 5e-4 ]
class_weight_0_range = [1]
# subsample_size_range = [2,2.5,3]
#GRID SEARCH ON BEST PARAM
for arch in arch_range:
for reg_coeff in reg_coeffs_range:
for sgd_decay in sgd_decays_range:
# for subsample_size in subsample_size_range:
run_NN(arch, reg_coeff, sgd_decay)
# arch = [len(features[0]),1024,512,2]
# reg_coeff = 1e-05
# sgd_decay = 1e-05
# class_weight_0 = 0.5
| en | 0.655489 | Uses flattened features in feature directory and run a SVM on it Normalize training and test data features Args: X_tr: Unnormalized training features Output: X_tr: Normalized training features Generate a neural network model of approporiate architecture Args: num_units: architecture of network in the format [n1, n2, ... , nL] actfn: activation function for hidden layers ('relu'/'sigmoid'/'linear'/'softmax') reg_coeff: L2-regularization coefficient last_act: activation function for final layer ('relu'/'sigmoid'/'linear'/'softmax') Output: model: Keras sequential model with appropriate fully-connected architecture # if (idx+1) % 1000 == 0: # break Returns list of labels as list of [0/1 , 1/0] if label = 1 [0, 1] if label = 0 [1, 0] ##features to be deletd # features = np.random.normal(size=(26796,3)) # labels, del_rows = get_transform_label() # col_deleted = col_deleted[0].tolist() + range(6,22) + range(28,44) # features_tr, labels_tr = balanced_subsample(features_tr, original_label(labels_tr), subsample_size = subsample_size) # labels_tr = transform_label(labels_tr) # print "Training data balanced-", features_tr.shape, len(labels_tr) # Generate Model # Compile Model # sgd = RMSprop(lr=sgd_lr, rho=0.9, epsilon=1e-08, decay=sgd_decay) # Train Model # Save model # subsample_size_range = [2,2.5,3] #GRID SEARCH ON BEST PARAM # for subsample_size in subsample_size_range: # arch = [len(features[0]),1024,512,2] # reg_coeff = 1e-05 # sgd_decay = 1e-05 # class_weight_0 = 0.5 | 2.91148 | 3 |
formation.py | graham-kim/pygremlin-graph-visualiser | 0 | 7606 | <filename>formation.py
import sys
import os
sys.path.append( os.path.dirname(__file__) )
import numpy as np
import typing as tp
import angles
from model import Node, Link, Label
from spec import ArrowDraw, NodeSpec
class FormationManager:
def __init__(self):
self._nodes = {}
self._links = []
self._labels = []
@property
def nodes(self) -> tp.List[Node]:
return [n for n in self._nodes.values()]
@property
def links(self) -> tp.List[Link]:
return self._links
@property
    def labels(self) -> tp.List[Label]:
return self._labels
def _id_if_str(self, node: tp.Tuple[str, int]) -> int:
if isinstance(node, int):
return node
else:
return self.id_of(node)
def text_of(self, node_id: int) -> str:
if not isinstance(node_id, int):
raise TypeError("Expected node_id to be int: {}".format(node_id))
return self._nodes[node_id].text
def pos_of(self, node_id: tp.Tuple[str, int]) -> np.array:
node_id = self._id_if_str(node_id)
return np.array(self._nodes[node_id].pos)
def pos_perp_to(self, from_id: int, to_id: int, shift_breadth: int, to_left: bool) -> np.array:
from_vec2 = np.array(self._nodes[from_id].pos)
to_vec2 = np.array(self._nodes[to_id].pos)
rel_vec2 = to_vec2 - from_vec2
flipped_y_unit_rel = angles.flip_y( angles.unit(rel_vec2) )
if to_left:
rotated_dir = angles.flip_y( \
angles.rotate_vector_to_left_by_90_deg( flipped_y_unit_rel ) )
else:
rotated_dir = angles.flip_y( \
angles.rotate_vector_to_right_by_90_deg( flipped_y_unit_rel ) )
return (from_vec2 + rel_vec2 / 2 + rotated_dir * shift_breadth).astype(int)
def id_of(self, text: str) -> int:
if not isinstance(text, str):
raise TypeError("{} should be a string".format(text))
ans = []
for key in self._nodes.keys():
if text == self._nodes[key].text:
ans.append(key)
if len(ans) == 0:
raise ValueError("No node has this text: {}".format(text))
elif len(ans) == 1:
return ans[0]
else:
raise ValueError("More than one node has the text {}: {}".format(text, ans))
def add_node(self, text: str, pos: tp.Tuple[int, int], colour: str="green", multibox: bool = False) -> int:
new_node = Node(text, pos, colour, multibox)
new_id = id(new_node)
self._nodes[new_id] = new_node
return new_id
def add_label(self, text: str, pos: tp.Tuple[int, int], colour: str="red"):
self._labels.append( Label(text, pos, colour) )
def add_link(self, from_id: tp.Tuple[str, int], to_id: tp.Tuple[str, int], colour: str="black", \
arrow_draw: ArrowDraw = ArrowDraw.FWD_ARROW, link_2_col: tp.Optional[str] = None):
self._links.append( Link(self._id_if_str(from_id), self._id_if_str(to_id), colour, arrow_draw, link_2_col) )
def add_dual_link(self, from_id: tp.Tuple[str, int], to_id: tp.Tuple[str, int], colour: str="black", \
second_colour: str="black"):
self.add_link(from_id, to_id, colour, ArrowDraw.DUAL_LINK, second_colour)
def add_linked_node(self, from_id: tp.Tuple[str, int], pos: tp.Tuple[int, int], spec: NodeSpec) -> int:
new_id = self.add_node(spec.text, pos, spec.node_col, spec.multibox)
self.add_link(from_id, new_id, spec.link_col, spec.link_draw, spec.link_2_col)
return new_id
def add_daisy_chain_links(self, nodes: tp.List[tp.Tuple[str, int]], arrow_draw: ArrowDraw = ArrowDraw.FWD_ARROW, \
link_col: str="black", link_2_col: tp.Optional[str] = None):
if not isinstance(nodes, list):
raise TypeError("Expected a list for nodes: {}".format(nodes))
if len(nodes) < 2:
raise ValueError("Expected at least 2 nodes, got {}".format(len(nodes)))
for i, item in enumerate(nodes[1:]):
prev_node = self._id_if_str(nodes[i]) # i is already the previous index
this_node = self._id_if_str(item)
self.add_link(prev_node, this_node, link_col, arrow_draw, link_2_col)
def add_depth_line_of_linked_nodes(self, start_id: tp.Tuple[str, int], dir: tp.Tuple[int, int], \
link_length: int, \
node_specs: tp.List[tp.Optional[NodeSpec]] \
) -> tp.List[int]:
added_ids = []
start_id = self._id_if_str(start_id)
start_pos = angles.vec2(self._nodes[start_id].pos)
unit_dir = angles.unit( dir )
count = 1
from_id = start_id
for spec in node_specs:
if spec is not None:
pos = start_pos + unit_dir * link_length * count
new_id = self.add_node(spec.text, pos, spec.node_col, spec.multibox)
if spec.link_draw == ArrowDraw.BACK_ARROW:
self.add_link(new_id, from_id, spec.link_col, ArrowDraw.FWD_ARROW, None)
elif spec.link_draw != ArrowDraw.NO_LINK:
self.add_link(from_id, new_id, spec.link_col, spec.link_draw, spec.link_2_col)
added_ids.append(new_id)
from_id = new_id
count += 1
return added_ids
def add_rail_of_nodes(self, start_coord: tp.Tuple[int, int], dir: tp.Tuple[int, int], \
link_length: int, \
node_specs: tp.List[tp.Optional[NodeSpec]] \
) -> tp.List[int]:
num_specs = len(node_specs)
if num_specs < 2:
raise ValueError("node_specs must have at least 2 elements")
if node_specs[0] is None or node_specs[-1] is None:
raise ValueError("The first and last item of node_specs must not be None")
first_id = self.add_node(node_specs[0].text, start_coord, \
node_specs[0].node_col, node_specs[0].multibox)
added_ids = [first_id]
new_ids = self.add_depth_line_of_linked_nodes(first_id, dir, link_length, node_specs[1:])
added_ids.extend(new_ids)
return added_ids
def add_breadth_line_of_sibling_nodes(self, parent_id: tp.Tuple[str, int], start_coord: tp.Tuple[int, int], \
end_coord: tp.Tuple[int, int], \
node_specs: tp.List[tp.Optional[NodeSpec]] \
) -> tp.List[int]:
num_specs = len(node_specs)
parent_id = self._id_if_str(parent_id)
if num_specs < 2:
raise ValueError("node_specs must have at least 2 elements")
if node_specs[0] is None or node_specs[-1] is None:
raise ValueError("The first and last item of node_specs must not be None")
added_ids = []
start_vec2 = angles.vec2(start_coord)
end_vec2 = angles.vec2(end_coord)
rel_vec2 = end_vec2 - start_vec2
count = 0
for spec in node_specs:
if spec is not None:
pos = start_vec2 + rel_vec2 * count / (num_specs - 1)
new_id = self.add_node(spec.text, pos, spec.node_col, spec.multibox)
if spec.link_draw == ArrowDraw.BACK_ARROW:
self.add_link(new_id, parent_id, spec.link_col, ArrowDraw.FWD_ARROW, None)
elif spec.link_draw != ArrowDraw.NO_LINK:
self.add_link(parent_id, new_id, spec.link_col, spec.link_draw, spec.link_2_col)
added_ids.append(new_id)
count += 1
return added_ids
def add_breadth_line_centered_on(self, parent_id: tp.Tuple[str, int], center_coord: tp.Tuple[int, int], \
link_length: int, node_specs: tp.List[tp.Optional[NodeSpec]] \
) -> tp.List[int]:
num_specs = len(node_specs)
if num_specs < 2:
raise ValueError("node_specs must have at least 2 elements")
parent_pos = self.pos_of(parent_id)
rel_vec2 = angles.vec2(center_coord) - parent_pos
rotated_vec2 = angles.flip_y( \
angles.rotate_vector_to_left_by_90_deg( \
angles.flip_y( angles.unit(rel_vec2) )))
half_total_length = link_length * float(num_specs-1) / 2.0
start_coord = center_coord + rotated_vec2 * half_total_length
end_coord = center_coord - rotated_vec2 * half_total_length
return self.add_breadth_line_of_sibling_nodes(parent_id, start_coord, end_coord, node_specs)
def add_arc_of_sibling_nodes(self, parent_id: tp.Tuple[str, int], radius: int, start_dir_coord: tp.Tuple[int, int], \
end_dir_coord: tp.Tuple[int, int], clockwise: bool, \
node_specs: tp.List[tp.Optional[NodeSpec]] \
) -> tp.List[int]:
parent_id = self._id_if_str(parent_id)
num_specs = len(node_specs)
if num_specs < 2:
raise ValueError("node_specs must have at least 2 elements")
if node_specs[0] is None or node_specs[-1] is None:
raise ValueError("The first and last item of node_specs must not be None")
added_ids = []
parent_pos = self._nodes[parent_id].pos
parent_vec2 = angles.vec2(parent_pos)
start_vec2 = angles.vec2(start_dir_coord) - parent_vec2
end_vec2 = angles.vec2(end_dir_coord) - parent_vec2
start_bear_rad = angles.get_bearing_rad_of( angles.flip_y(start_vec2) )
end_bear_rad = angles.get_bearing_rad_of( angles.flip_y(end_vec2) )
bear_diff_rad = angles.normalise_angle(end_bear_rad - start_bear_rad)
if clockwise:
bear_diff_rad = angles.flip_angle(bear_diff_rad)
count = 0
for spec in node_specs:
if spec is not None:
rotate_anticlockwise_by = bear_diff_rad * count / (num_specs - 1)
if clockwise:
rotate_anticlockwise_by *= -1
dir_vec = angles.flip_y( \
angles.get_unit_vector_after_rotating( \
angles.flip_y(start_vec2), rotate_anticlockwise_by ))
pos = parent_pos + dir_vec * radius
new_id = self.add_node(spec.text, pos, spec.node_col, spec.multibox)
if spec.link_draw == ArrowDraw.BACK_ARROW:
self.add_link(new_id, parent_id, spec.link_col, ArrowDraw.FWD_ARROW, None)
elif spec.link_draw != ArrowDraw.NO_LINK:
self.add_link(parent_id, new_id, spec.link_col, spec.link_draw, spec.link_2_col)
added_ids.append(new_id)
count += 1
return added_ids
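# Minimal usage sketch (illustrative; the coordinates and labels are
# placeholders).  It only uses add_node / add_link, whose signatures are
# defined above; the NodeSpec-based helpers follow the same pattern.
def _example_formation():
    fm = FormationManager()
    a = fm.add_node("start", (0, 0))
    b = fm.add_node("end", (100, 0), colour="blue")
    fm.add_link(a, b, arrow_draw=ArrowDraw.FWD_ARROW)
    return fm.nodes, fm.links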
| <filename>formation.py
import sys
import os
sys.path.append( os.path.dirname(__file__) )
import numpy as np
import typing as tp
import angles
from model import Node, Link, Label
from spec import ArrowDraw, NodeSpec
class FormationManager:
def __init__(self):
self._nodes = {}
self._links = []
self._labels = []
@property
def nodes(self) -> tp.List[Node]:
return [n for n in self._nodes.values()]
@property
def links(self) -> tp.List[Link]:
return self._links
@property
    def labels(self) -> tp.List[Label]:
return self._labels
def _id_if_str(self, node: tp.Tuple[str, int]) -> int:
if isinstance(node, int):
return node
else:
return self.id_of(node)
def text_of(self, node_id: int) -> str:
if not isinstance(node_id, int):
raise TypeError("Expected node_id to be int: {}".format(node_id))
return self._nodes[node_id].text
def pos_of(self, node_id: tp.Tuple[str, int]) -> np.array:
node_id = self._id_if_str(node_id)
return np.array(self._nodes[node_id].pos)
def pos_perp_to(self, from_id: int, to_id: int, shift_breadth: int, to_left: bool) -> np.array:
from_vec2 = np.array(self._nodes[from_id].pos)
to_vec2 = np.array(self._nodes[to_id].pos)
rel_vec2 = to_vec2 - from_vec2
flipped_y_unit_rel = angles.flip_y( angles.unit(rel_vec2) )
if to_left:
rotated_dir = angles.flip_y( \
angles.rotate_vector_to_left_by_90_deg( flipped_y_unit_rel ) )
else:
rotated_dir = angles.flip_y( \
angles.rotate_vector_to_right_by_90_deg( flipped_y_unit_rel ) )
return (from_vec2 + rel_vec2 / 2 + rotated_dir * shift_breadth).astype(int)
def id_of(self, text: str) -> int:
if not isinstance(text, str):
raise TypeError("{} should be a string".format(text))
ans = []
for key in self._nodes.keys():
if text == self._nodes[key].text:
ans.append(key)
if len(ans) == 0:
raise ValueError("No node has this text: {}".format(text))
elif len(ans) == 1:
return ans[0]
else:
raise ValueError("More than one node has the text {}: {}".format(text, ans))
def add_node(self, text: str, pos: tp.Tuple[int, int], colour: str="green", multibox: bool = False) -> int:
new_node = Node(text, pos, colour, multibox)
new_id = id(new_node)
self._nodes[new_id] = new_node
return new_id
def add_label(self, text: str, pos: tp.Tuple[int, int], colour: str="red"):
self._labels.append( Label(text, pos, colour) )
def add_link(self, from_id: tp.Tuple[str, int], to_id: tp.Tuple[str, int], colour: str="black", \
arrow_draw: ArrowDraw = ArrowDraw.FWD_ARROW, link_2_col: tp.Optional[str] = None):
self._links.append( Link(self._id_if_str(from_id), self._id_if_str(to_id), colour, arrow_draw, link_2_col) )
def add_dual_link(self, from_id: tp.Tuple[str, int], to_id: tp.Tuple[str, int], colour: str="black", \
second_colour: str="black"):
self.add_link(from_id, to_id, colour, ArrowDraw.DUAL_LINK, second_colour)
def add_linked_node(self, from_id: tp.Tuple[str, int], pos: tp.Tuple[int, int], spec: NodeSpec) -> int:
new_id = self.add_node(spec.text, pos, spec.node_col, spec.multibox)
self.add_link(from_id, new_id, spec.link_col, spec.link_draw, spec.link_2_col)
return new_id
def add_daisy_chain_links(self, nodes: tp.List[tp.Tuple[str, int]], arrow_draw: ArrowDraw = ArrowDraw.FWD_ARROW, \
link_col: str="black", link_2_col: tp.Optional[str] = None):
if not isinstance(nodes, list):
raise TypeError("Expected a list for nodes: {}".format(nodes))
if len(nodes) < 2:
raise ValueError("Expected at least 2 nodes, got {}".format(len(nodes)))
for i, item in enumerate(nodes[1:]):
prev_node = self._id_if_str(nodes[i]) # i is already the previous index
this_node = self._id_if_str(item)
self.add_link(prev_node, this_node, link_col, arrow_draw, link_2_col)
def add_depth_line_of_linked_nodes(self, start_id: tp.Tuple[str, int], dir: tp.Tuple[int, int], \
link_length: int, \
node_specs: tp.List[tp.Optional[NodeSpec]] \
) -> tp.List[int]:
added_ids = []
start_id = self._id_if_str(start_id)
start_pos = angles.vec2(self._nodes[start_id].pos)
unit_dir = angles.unit( dir )
count = 1
from_id = start_id
for spec in node_specs:
if spec is not None:
pos = start_pos + unit_dir * link_length * count
new_id = self.add_node(spec.text, pos, spec.node_col, spec.multibox)
if spec.link_draw == ArrowDraw.BACK_ARROW:
self.add_link(new_id, from_id, spec.link_col, ArrowDraw.FWD_ARROW, None)
elif spec.link_draw != ArrowDraw.NO_LINK:
self.add_link(from_id, new_id, spec.link_col, spec.link_draw, spec.link_2_col)
added_ids.append(new_id)
from_id = new_id
count += 1
return added_ids
def add_rail_of_nodes(self, start_coord: tp.Tuple[int, int], dir: tp.Tuple[int, int], \
link_length: int, \
node_specs: tp.List[tp.Optional[NodeSpec]] \
) -> tp.List[int]:
num_specs = len(node_specs)
if num_specs < 2:
raise ValueError("node_specs must have at least 2 elements")
if node_specs[0] is None or node_specs[-1] is None:
raise ValueError("The first and last item of node_specs must not be None")
first_id = self.add_node(node_specs[0].text, start_coord, \
node_specs[0].node_col, node_specs[0].multibox)
added_ids = [first_id]
new_ids = self.add_depth_line_of_linked_nodes(first_id, dir, link_length, node_specs[1:])
added_ids.extend(new_ids)
return added_ids
def add_breadth_line_of_sibling_nodes(self, parent_id: tp.Tuple[str, int], start_coord: tp.Tuple[int, int], \
end_coord: tp.Tuple[int, int], \
node_specs: tp.List[tp.Optional[NodeSpec]] \
) -> tp.List[int]:
num_specs = len(node_specs)
parent_id = self._id_if_str(parent_id)
if num_specs < 2:
raise ValueError("node_specs must have at least 2 elements")
if node_specs[0] is None or node_specs[-1] is None:
raise ValueError("The first and last item of node_specs must not be None")
added_ids = []
start_vec2 = angles.vec2(start_coord)
end_vec2 = angles.vec2(end_coord)
rel_vec2 = end_vec2 - start_vec2
count = 0
for spec in node_specs:
if spec is not None:
pos = start_vec2 + rel_vec2 * count / (num_specs - 1)
new_id = self.add_node(spec.text, pos, spec.node_col, spec.multibox)
if spec.link_draw == ArrowDraw.BACK_ARROW:
self.add_link(new_id, parent_id, spec.link_col, ArrowDraw.FWD_ARROW, None)
elif spec.link_draw != ArrowDraw.NO_LINK:
self.add_link(parent_id, new_id, spec.link_col, spec.link_draw, spec.link_2_col)
added_ids.append(new_id)
count += 1
return added_ids
def add_breadth_line_centered_on(self, parent_id: tp.Tuple[str, int], center_coord: tp.Tuple[int, int], \
link_length: int, node_specs: tp.List[tp.Optional[NodeSpec]] \
) -> tp.List[int]:
num_specs = len(node_specs)
if num_specs < 2:
raise ValueError("node_specs must have at least 2 elements")
parent_pos = self.pos_of(parent_id)
rel_vec2 = angles.vec2(center_coord) - parent_pos
rotated_vec2 = angles.flip_y( \
angles.rotate_vector_to_left_by_90_deg( \
angles.flip_y( angles.unit(rel_vec2) )))
half_total_length = link_length * float(num_specs-1) / 2.0
start_coord = center_coord + rotated_vec2 * half_total_length
end_coord = center_coord - rotated_vec2 * half_total_length
return self.add_breadth_line_of_sibling_nodes(parent_id, start_coord, end_coord, node_specs)
def add_arc_of_sibling_nodes(self, parent_id: tp.Tuple[str, int], radius: int, start_dir_coord: tp.Tuple[int, int], \
end_dir_coord: tp.Tuple[int, int], clockwise: bool, \
node_specs: tp.List[tp.Optional[NodeSpec]] \
) -> tp.List[int]:
parent_id = self._id_if_str(parent_id)
num_specs = len(node_specs)
if num_specs < 2:
raise ValueError("node_specs must have at least 2 elements")
if node_specs[0] is None or node_specs[-1] is None:
raise ValueError("The first and last item of node_specs must not be None")
added_ids = []
parent_pos = self._nodes[parent_id].pos
parent_vec2 = angles.vec2(parent_pos)
start_vec2 = angles.vec2(start_dir_coord) - parent_vec2
end_vec2 = angles.vec2(end_dir_coord) - parent_vec2
start_bear_rad = angles.get_bearing_rad_of( angles.flip_y(start_vec2) )
end_bear_rad = angles.get_bearing_rad_of( angles.flip_y(end_vec2) )
bear_diff_rad = angles.normalise_angle(end_bear_rad - start_bear_rad)
if clockwise:
bear_diff_rad = angles.flip_angle(bear_diff_rad)
count = 0
for spec in node_specs:
if spec is not None:
rotate_anticlockwise_by = bear_diff_rad * count / (num_specs - 1)
if clockwise:
rotate_anticlockwise_by *= -1
dir_vec = angles.flip_y( \
angles.get_unit_vector_after_rotating( \
angles.flip_y(start_vec2), rotate_anticlockwise_by ))
pos = parent_pos + dir_vec * radius
new_id = self.add_node(spec.text, pos, spec.node_col, spec.multibox)
if spec.link_draw == ArrowDraw.BACK_ARROW:
self.add_link(new_id, parent_id, spec.link_col, ArrowDraw.FWD_ARROW, None)
elif spec.link_draw != ArrowDraw.NO_LINK:
self.add_link(parent_id, new_id, spec.link_col, spec.link_draw, spec.link_2_col)
added_ids.append(new_id)
count += 1
return added_ids
| en | 0.943289 | # i is already the previous index | 2.646873 | 3 |
opencv_camera/parameters/utils.py | MomsFriendlyRobotCompany/opencv_camera | 6 | 7607 | ##############################################
# The MIT License (MIT)
# Copyright (c) 2014 <NAME>
# see LICENSE for full details
##############################################
# -*- coding: utf-8 -*-
from math import atan, pi
def fov(w,f):
"""
    Returns the FOV in degrees, given:
w: image width (or height) in pixels
f: focalLength (fx or fy) in pixels
"""
return 2*atan(w/2/f) * 180/pi
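# Illustrative usage (the numbers are placeholders, not from a real
# calibration): a 640 px wide image with fx = 525 px gives a horizontal FOV
# of roughly 63 degrees.
#
#   hfov = fov(640, 525)   # ~62.7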
| ##############################################
# The MIT License (MIT)
# Copyright (c) 2014 <NAME>
# see LICENSE for full details
##############################################
# -*- coding: utf-8 -*-
from math import atan, pi
def fov(w,f):
"""
    Returns the FOV in degrees, given:
w: image width (or height) in pixels
f: focalLength (fx or fy) in pixels
"""
return 2*atan(w/2/f) * 180/pi
| en | 0.363945 | ############################################## # The MIT License (MIT) # Copyright (c) 2014 <NAME> # see LICENSE for full details ############################################## # -*- coding: utf-8 -* Returns the FOV as in degrees, given: w: image width (or height) in pixels f: focalLength (fx or fy) in pixels | 2.796296 | 3 |
Code_Hybrid_SLIMBPR_CBF_RP3Beta.py | SamanFekri/BookRecommendation | 0 | 7608 | # This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy.sparse as sps
import time
RM_train=pd.read_csv('./input/data_train.csv')
R_test=pd.read_csv('./input/data_target_users_test.csv')
URM=pd.read_csv('./input/data_train.csv')
ICM = pd.read_csv('./input/data_ICM_title_abstract.csv')
##### URM
URM_tuples = [tuple(x) for x in URM.to_numpy()]
userList, itemList, ratingList = zip(*URM_tuples)
userList = list(userList)
userList=np.array(userList,dtype=np.int64)
itemList = list(itemList)
itemList=np.array(itemList,dtype=np.int64)
ratingList = list(ratingList) #not needed
ratingList=np.array(ratingList,dtype=np.int64) #not needed
URM_all = sps.coo_matrix((ratingList, (userList, itemList)))
URM_all = URM_all.tocsr()
#### ICM
ICM_tuples = [tuple(x) for x in ICM.to_numpy()]
itemList_icm, featureList_icm, scoreList_icm = zip(*ICM_tuples)
itemList_icm = list(itemList_icm)
itemList_icm = np.array(itemList_icm,dtype=np.int64)
featureList_icm = list(featureList_icm)
featureList_icm = np.array(featureList_icm,dtype=np.int64)
scoreList_icm = list(scoreList_icm)
scoreList_icm = np.array(scoreList_icm,dtype=np.float64)
ICM_all = sps.coo_matrix((scoreList_icm, (itemList_icm, featureList_icm)))
#### Test
userTestList = [x for x in R_test.to_numpy()]
userTestList = zip(*userTestList)
userTestList = [list(a) for a in userTestList][0]
#### make validation and test
from Base.Evaluation.Evaluator import EvaluatorHoldout
from Data_manager.split_functions.split_train_validation_random_holdout import split_train_in_two_percentage_global_sample
URM_train, URM_test = split_train_in_two_percentage_global_sample(URM_all, train_percentage = 0.80)
URM_train, URM_validation = split_train_in_two_percentage_global_sample(URM_train, train_percentage = 0.80)
evaluator_validation = EvaluatorHoldout(URM_validation, cutoff_list=[10])
evaluator_test = EvaluatorHoldout(URM_test, cutoff_list=[10])
### hybrid recommender
### Usinng TF IDF
ICM_all = ICM_all.tocsr()
num_tot_items = ICM_all.shape[0]
# let's count how many items have a certain feature
items_per_feature = np.ediff1d(ICM_all.indptr) + 1
# print(items_per_feature)
IDF = np.array(np.log(num_tot_items / items_per_feature))
from scipy.sparse import diags
diags(IDF)
ICM_idf = ICM_all.copy()
ICM_idf = diags(IDF)*ICM_idf
############## top pop
item_popularity = np.ediff1d(URM_all.tocsc().indptr)
popular_items = np.argsort(item_popularity)
popular_items = np.flip(popular_items, axis=0)
popular_items = popular_items[0:10]
###########
from HybridRecommender import HybridRecommender
recommender = HybridRecommender(URM_all)
recommender.fit([0.2, 0.3, 0.2], ICM_idf)
recoms = recommender.recommend(userTestList, cutoff=10)
recomList = []
for i in range(len(recoms)):
user_id = userTestList[i]
start_pos = URM_train.indptr[user_id]
end_pos = URM_train.indptr[user_id + 1]
if start_pos == end_pos:
recomList.append(' '.join(str(e) for e in popular_items))
else:
recomList.append(' '.join(str(e) for e in recoms[i]))
# print(recomList)
res = {"user_id": userTestList, "item_list": recomList}
result = pd.DataFrame(res, columns= ['user_id', 'item_list'])
result.to_csv('outputs/hybrid_slim_cbf_rp3v1.csv', index = False, header=True)
| # This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy.sparse as sps
import time
RM_train=pd.read_csv('./input/data_train.csv')
R_test=pd.read_csv('./input/data_target_users_test.csv')
URM=pd.read_csv('./input/data_train.csv')
ICM = pd.read_csv('./input/data_ICM_title_abstract.csv')
##### URM
URM_tuples = [tuple(x) for x in URM.to_numpy()]
userList, itemList, ratingList = zip(*URM_tuples)
userList = list(userList)
userList=np.array(userList,dtype=np.int64)
itemList = list(itemList)
itemList=np.array(itemList,dtype=np.int64)
ratingList = list(ratingList) #not needed
ratingList=np.array(ratingList,dtype=np.int64) #not needed
URM_all = sps.coo_matrix((ratingList, (userList, itemList)))
URM_all = URM_all.tocsr()
#### ICM
ICM_tuples = [tuple(x) for x in ICM.to_numpy()]
itemList_icm, featureList_icm, scoreList_icm = zip(*ICM_tuples)
itemList_icm = list(itemList_icm)
itemList_icm = np.array(itemList_icm,dtype=np.int64)
featureList_icm = list(featureList_icm)
featureList_icm = np.array(featureList_icm,dtype=np.int64)
scoreList_icm = list(scoreList_icm)
scoreList_icm = np.array(scoreList_icm,dtype=np.float64)
ICM_all = sps.coo_matrix((scoreList_icm, (itemList_icm, featureList_icm)))
#### Test
userTestList = [x for x in R_test.to_numpy()]
userTestList = zip(*userTestList)
userTestList = [list(a) for a in userTestList][0]
#### make validation and test
from Base.Evaluation.Evaluator import EvaluatorHoldout
from Data_manager.split_functions.split_train_validation_random_holdout import split_train_in_two_percentage_global_sample
URM_train, URM_test = split_train_in_two_percentage_global_sample(URM_all, train_percentage = 0.80)
URM_train, URM_validation = split_train_in_two_percentage_global_sample(URM_train, train_percentage = 0.80)
evaluator_validation = EvaluatorHoldout(URM_validation, cutoff_list=[10])
evaluator_test = EvaluatorHoldout(URM_test, cutoff_list=[10])
### hybrid recommender
### Usinng TF IDF
ICM_all = ICM_all.tocsr()
num_tot_items = ICM_all.shape[0]
# let's count how many items have a certain feature
items_per_feature = np.ediff1d(ICM_all.indptr) + 1
# print(items_per_feature)
IDF = np.array(np.log(num_tot_items / items_per_feature))
from scipy.sparse import diags
diags(IDF)
ICM_idf = ICM_all.copy()
ICM_idf = diags(IDF)*ICM_idf
############## top pop
item_popularity = np.ediff1d(URM_all.tocsc().indptr)
popular_items = np.argsort(item_popularity)
popular_items = np.flip(popular_items, axis=0)
popular_items = popular_items[0:10]
###########
from HybridRecommender import HybridRecommender
recommender = HybridRecommender(URM_all)
recommender.fit([0.2, 0.3, 0.2], ICM_idf)
recoms = recommender.recommend(userTestList, cutoff=10)
recomList = []
for i in range(len(recoms)):
user_id = userTestList[i]
start_pos = URM_train.indptr[user_id]
end_pos = URM_train.indptr[user_id + 1]
if start_pos == end_pos:
recomList.append(' '.join(str(e) for e in popular_items))
else:
recomList.append(' '.join(str(e) for e in recoms[i]))
# print(recomList)
res = {"user_id": userTestList, "item_list": recomList}
result = pd.DataFrame(res, columns= ['user_id', 'item_list'])
result.to_csv('outputs/hybrid_slim_cbf_rp3v1.csv', index = False, header=True)
| en | 0.72643 | # This Python 3 environment comes with many helpful analytics libraries installed # It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python # For example, here's several helpful packages to load # linear algebra # data processing, CSV file I/O (e.g. pd.read_csv) ##### URM #not needed #not needed #### ICM #### Test #### make validation and test ### hybrid recommender ### Usinng TF IDF # let's count how many items have a certain feature # print(items_per_feature) ############## top pop ########### # print(recomList) | 2.333224 | 2 |
dodge/config.py | MoyTW/7DRL2016_Rewrite | 2 | 7609 | <gh_stars>1-10
import json
class Config(object):
def __init__(self, file_location):
with open(file_location, 'r') as f:
config = json.load(f)
self.SCREEN_WIDTH = int(config["SCREEN_WIDTH"])
self.SCREEN_HEIGHT = int(config["SCREEN_HEIGHT"])
self.MAP_WIDTH = int(config["MAP_WIDTH"])
self.MAP_HEIGHT = int(config["MAP_HEIGHT"])
self.PANEL_HEIGHT = int(config["PANEL_HEIGHT"])
self.FULL_SCREEN = bool(config["FULL_SCREEN"])
self.CAMERA_WIDTH = int(config["CAMERA_WIDTH"])
self.CAMERA_HEIGHT = int(config["CAMERA_HEIGHT"])
self.VISION_RADIUS = int(config["VISION_RADIUS"])
self.FOV_ALGO = int(config["FOV_ALGO"])
self.FOV_LIGHT_WALLS = bool(config["FOV_LIGHT_WALLS"])
self.HP_BAR_WIDTH = int(config["HP_BAR_WIDTH"])
# Derived values
self.PANEL_Y = self.SCREEN_HEIGHT - self.PANEL_HEIGHT
# etc etc etc
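# Hedged usage sketch (the path and JSON contents are placeholders): the file
# is expected to supply the keys read in __init__ above.
#
# cfg = Config("config.json")
# print(cfg.SCREEN_WIDTH, cfg.PANEL_Y)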
| import json
class Config(object):
def __init__(self, file_location):
with open(file_location, 'r') as f:
config = json.load(f)
self.SCREEN_WIDTH = int(config["SCREEN_WIDTH"])
self.SCREEN_HEIGHT = int(config["SCREEN_HEIGHT"])
self.MAP_WIDTH = int(config["MAP_WIDTH"])
self.MAP_HEIGHT = int(config["MAP_HEIGHT"])
self.PANEL_HEIGHT = int(config["PANEL_HEIGHT"])
self.FULL_SCREEN = bool(config["FULL_SCREEN"])
self.CAMERA_WIDTH = int(config["CAMERA_WIDTH"])
self.CAMERA_HEIGHT = int(config["CAMERA_HEIGHT"])
self.VISION_RADIUS = int(config["VISION_RADIUS"])
self.FOV_ALGO = int(config["FOV_ALGO"])
self.FOV_LIGHT_WALLS = bool(config["FOV_LIGHT_WALLS"])
self.HP_BAR_WIDTH = int(config["HP_BAR_WIDTH"])
# Derived values
self.PANEL_Y = self.SCREEN_HEIGHT - self.PANEL_HEIGHT
# etc etc etc | en | 0.508681 | # Derived values # etc etc etc | 2.793381 | 3 |
incal_lib/create_dataframe.py | barel-mishal/InCal_lib | 0 | 7610 | <reponame>barel-mishal/InCal_lib
import pandas as pd
import numpy as np
def create_calr_example_df(n_rows, start_date):
    '''
    Build an example CalR-style DataFrame: a random feature and its cumulative
    sum for two subjects, indexed by n_rows one-minute timestamps starting at
    start_date.
    '''
np.random.seed(20)
array = np.random.rand(n_rows)
cumulative = np.cumsum(array)
d = {
'feature1_subject_1': array,
'feature1_subject_2': array,
'feature2_subject_1': cumulative,
'feature2_subject_2': cumulative*2
}
idx = pd.date_range(start_date, periods=n_rows,
freq="MIN", name='Date_Time_1')
return pd.DataFrame(data=d, index=idx)
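# Illustrative usage (the start date is a placeholder): sixty per-minute rows.
#
# df = create_calr_example_df(60, "2021-01-01")
# print(df.head())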
| import pandas as pd
import numpy as np
def create_calr_example_df(n_rows, start_date):
    '''
    Build an example CalR-style DataFrame: a random feature and its cumulative
    sum for two subjects, indexed by n_rows one-minute timestamps starting at
    start_date.
    '''
np.random.seed(20)
array = np.random.rand(n_rows)
cumulative = np.cumsum(array)
d = {
'feature1_subject_1': array,
'feature1_subject_2': array,
'feature2_subject_1': cumulative,
'feature2_subject_2': cumulative*2
}
idx = pd.date_range(start_date, periods=n_rows,
freq="MIN", name='Date_Time_1')
return pd.DataFrame(data=d, index=idx) | none | 1 | 2.985364 | 3 |
|
HybridSN/DataLoadAndOperate.py | lms-07/HybridSN | 0 | 7611 | import os
import numpy as np
import scipy.io as sio
import tifffile
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
#Load dataset
def loadData(name,data_path):
if name == 'IP':
data = sio.loadmat(os.path.join(data_path, 'Indian_pines_corrected.mat'))['indian_pines_corrected']
labels = sio.loadmat(os.path.join(data_path, 'Indian_pines_gt.mat'))['indian_pines_gt']
elif name == 'SA':
data = sio.loadmat(os.path.join(data_path, 'Salinas_corrected.mat'))['salinas_corrected']
labels = sio.loadmat(os.path.join(data_path, 'Salinas_gt.mat'))['salinas_gt']
elif name == 'PU':
data = sio.loadmat(os.path.join(data_path, 'PaviaU.mat'))['paviaU']
labels = sio.loadmat(os.path.join(data_path, 'PaviaU_gt.mat'))['paviaU_gt']
elif name == 'HU13':
# dict_keys(['__header__', '__version__', '__globals__', 'Houston'])
#dict_values([b'MATLAB 5.0 MAT-file, Platform: PCWIN64, Created on: Wed Jul 17 16:45:01 2019', '1.0', [], array()])
#data = sio.loadmat(os.path.join(data_path, 'Houston.mat'))
#labels = sio.loadmat(os.path.join(data_path,'Houston_gt.mat'))
data = sio.loadmat(os.path.join(data_path, 'Houston.mat'))['Houston']
labels = sio.loadmat(os.path.join(data_path,'Houston_gt.mat'))['Houston_gt']
elif name == 'KSC':
data = sio.loadmat(os.path.join(data_path, 'KSC.mat'))['KSC']
labels = sio.loadmat(os.path.join(data_path,'KSC_gt.mat'))['KSC_gt']
return data, labels
# Use tifffile pkg read the hyperspectral img.
# Load .tiff data set and converted to .mat data
def loadTifDataTomat(data_path,save_DataPath,name):
if name=='HU13':
totalTif=tifffile.imread(os.path.join(data_path,'2013_IEEE_GRSS_DF_Contest_CASI.tif'))
trainTif=tifffile.imread(os.path.join(data_path,'train_roi.tif'))
valTif=tifffile.imread(os.path.join(data_path,'val_roi.tif'))
print(totalTif.shape,trainTif.shape,valTif.shape)
#spectral.imshow(totalTif)
#spectral.imshow(trainTif)
sio.savemat(os.path.join(save_DataPath,"totalTifHouston13.mat"),{'totalTifHouston13':totalTif})
sio.savemat(os.path.join(save_DataPath,"trainTifHouston13.mat"),{'trainTifHouston13':trainTif})
sio.savemat(os.path.join(save_DataPath,"valTifHouston13.mat"),{'valTifHouston13':valTif})
def loadTifMat(data_path,name):
if name=='HU13':
data=sio.loadmat(os.path.join(data_path, 'totalTifHouston13.mat'))['totalTifHouston13']
train=sio.loadmat(os.path.join(data_path, 'trainTifHouston13.mat'))['trainTifHouston13']
val=sio.loadmat(os.path.join(data_path, 'valTifHouston13.mat'))['valTifHouston13']
return data,train,val
### Using PCA for removing the spectral redundancy
### Reduce the spectral dimension, from high-dimensional to low-dimensional.
def applyPCA(X, numComponents=75):
newX = np.reshape(X, (-1, X.shape[2]))
pca = PCA(n_components=numComponents, whiten=True)
newX = pca.fit_transform(newX)
newX = np.reshape(newX, (X.shape[0],X.shape[1], numComponents))
return newX, pca
### Padding zeros
def padWithZeros(X, margin=2):
newX = np.zeros((X.shape[0] + 2 * margin, X.shape[1] + 2* margin, X.shape[2]))
x_offset = margin
y_offset = margin
newX[x_offset:X.shape[0] + x_offset, y_offset:X.shape[1] + y_offset, :] = X
return newX
### Create data cube,3D-patch.
def createImageCubes(X, y, windowSize=5, removeZeroLabels = True):
margin = int((windowSize - 1) / 2)
zeroPaddedX = padWithZeros(X, margin=margin)
# split patches
patchesData = np.zeros((X.shape[0] * X.shape[1], windowSize, windowSize, X.shape[2]))
patchesLabels = np.zeros((X.shape[0] * X.shape[1]))
patchIndex = 0
for r in range(margin, zeroPaddedX.shape[0] - margin):
for c in range(margin, zeroPaddedX.shape[1] - margin):
patch = zeroPaddedX[r - margin:r + margin + 1, c - margin:c + margin + 1]
patchesData[patchIndex, :, :, :] = patch
patchesLabels[patchIndex] = y[r-margin, c-margin]
patchIndex = patchIndex + 1
if removeZeroLabels:
patchesData = patchesData[patchesLabels>0,:,:,:]
patchesLabels = patchesLabels[patchesLabels>0]
patchesLabels -= 1
return patchesData, patchesLabels
# Dataset split.
def splitTrainTestSet(X, y, testRatio, randomState=345):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=testRatio, random_state=randomState,
stratify=y)
return X_train, X_test, y_train, y_test | import os
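# Hedged end-to-end sketch (illustrative only: 'IP', the data path, the window
# size and the number of PCA components are placeholders for whatever data is
# actually available locally).
def _example_pipeline(data_path="./data", name="IP", window=25, K=30):
    X, y = loadData(name, data_path)
    X, _ = applyPCA(X, numComponents=K)
    Xc, yc = createImageCubes(X, y, windowSize=window)
    return splitTrainTestSet(Xc, yc, testRatio=0.3)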
import numpy as np
import scipy.io as sio
import tifffile
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
#Load dataset
def loadData(name,data_path):
if name == 'IP':
data = sio.loadmat(os.path.join(data_path, 'Indian_pines_corrected.mat'))['indian_pines_corrected']
labels = sio.loadmat(os.path.join(data_path, 'Indian_pines_gt.mat'))['indian_pines_gt']
elif name == 'SA':
data = sio.loadmat(os.path.join(data_path, 'Salinas_corrected.mat'))['salinas_corrected']
labels = sio.loadmat(os.path.join(data_path, 'Salinas_gt.mat'))['salinas_gt']
elif name == 'PU':
data = sio.loadmat(os.path.join(data_path, 'PaviaU.mat'))['paviaU']
labels = sio.loadmat(os.path.join(data_path, 'PaviaU_gt.mat'))['paviaU_gt']
elif name == 'HU13':
# dict_keys(['__header__', '__version__', '__globals__', 'Houston'])
#dict_values([b'MATLAB 5.0 MAT-file, Platform: PCWIN64, Created on: Wed Jul 17 16:45:01 2019', '1.0', [], array()])
#data = sio.loadmat(os.path.join(data_path, 'Houston.mat'))
#labels = sio.loadmat(os.path.join(data_path,'Houston_gt.mat'))
data = sio.loadmat(os.path.join(data_path, 'Houston.mat'))['Houston']
labels = sio.loadmat(os.path.join(data_path,'Houston_gt.mat'))['Houston_gt']
elif name == 'KSC':
data = sio.loadmat(os.path.join(data_path, 'KSC.mat'))['KSC']
labels = sio.loadmat(os.path.join(data_path,'KSC_gt.mat'))['KSC_gt']
return data, labels
# Use tifffile pkg read the hyperspectral img.
# Load .tiff data set and converted to .mat data
def loadTifDataTomat(data_path,save_DataPath,name):
if name=='HU13':
totalTif=tifffile.imread(os.path.join(data_path,'2013_IEEE_GRSS_DF_Contest_CASI.tif'))
trainTif=tifffile.imread(os.path.join(data_path,'train_roi.tif'))
valTif=tifffile.imread(os.path.join(data_path,'val_roi.tif'))
print(totalTif.shape,trainTif.shape,valTif.shape)
#spectral.imshow(totalTif)
#spectral.imshow(trainTif)
sio.savemat(os.path.join(save_DataPath,"totalTifHouston13.mat"),{'totalTifHouston13':totalTif})
sio.savemat(os.path.join(save_DataPath,"trainTifHouston13.mat"),{'trainTifHouston13':trainTif})
sio.savemat(os.path.join(save_DataPath,"valTifHouston13.mat"),{'valTifHouston13':valTif})
def loadTifMat(data_path,name):
if name=='HU13':
data=sio.loadmat(os.path.join(data_path, 'totalTifHouston13.mat'))['totalTifHouston13']
train=sio.loadmat(os.path.join(data_path, 'trainTifHouston13.mat'))['trainTifHouston13']
val=sio.loadmat(os.path.join(data_path, 'valTifHouston13.mat'))['valTifHouston13']
return data,train,val
### Using PCA for removing the spectral redundancy
### Reduce the spectral dimension, from high-dimensional to low-dimensional.
def applyPCA(X, numComponents=75):
newX = np.reshape(X, (-1, X.shape[2]))
pca = PCA(n_components=numComponents, whiten=True)
newX = pca.fit_transform(newX)
newX = np.reshape(newX, (X.shape[0],X.shape[1], numComponents))
return newX, pca
### Padding zeros
def padWithZeros(X, margin=2):
newX = np.zeros((X.shape[0] + 2 * margin, X.shape[1] + 2* margin, X.shape[2]))
x_offset = margin
y_offset = margin
newX[x_offset:X.shape[0] + x_offset, y_offset:X.shape[1] + y_offset, :] = X
return newX
### Create data cube,3D-patch.
def createImageCubes(X, y, windowSize=5, removeZeroLabels = True):
margin = int((windowSize - 1) / 2)
zeroPaddedX = padWithZeros(X, margin=margin)
# split patches
patchesData = np.zeros((X.shape[0] * X.shape[1], windowSize, windowSize, X.shape[2]))
patchesLabels = np.zeros((X.shape[0] * X.shape[1]))
patchIndex = 0
for r in range(margin, zeroPaddedX.shape[0] - margin):
for c in range(margin, zeroPaddedX.shape[1] - margin):
patch = zeroPaddedX[r - margin:r + margin + 1, c - margin:c + margin + 1]
patchesData[patchIndex, :, :, :] = patch
patchesLabels[patchIndex] = y[r-margin, c-margin]
patchIndex = patchIndex + 1
if removeZeroLabels:
patchesData = patchesData[patchesLabels>0,:,:,:]
patchesLabels = patchesLabels[patchesLabels>0]
patchesLabels -= 1
return patchesData, patchesLabels
# Dataset split.
def splitTrainTestSet(X, y, testRatio, randomState=345):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=testRatio, random_state=randomState,
stratify=y)
return X_train, X_test, y_train, y_test | en | 0.44998 | #Load dataset # dict_keys(['__header__', '__version__', '__globals__', 'Houston']) #dict_values([b'MATLAB 5.0 MAT-file, Platform: PCWIN64, Created on: Wed Jul 17 16:45:01 2019', '1.0', [], array()]) #data = sio.loadmat(os.path.join(data_path, 'Houston.mat')) #labels = sio.loadmat(os.path.join(data_path,'Houston_gt.mat')) # Use tifffile pkg read the hyperspectral img. # Load .tiff data set and converted to .mat data #spectral.imshow(totalTif) #spectral.imshow(trainTif) ### Using PCA for removing the spectral redundancy(冗余) ### Reduce the spectral dimension, from high-dimensional to low-dimensional. ### Padding zeros ### Create data cube,3D-patch. # split patches # Dataset split. | 2.486353 | 2 |
alipay/aop/api/domain/AlipayOpenIotmbsDooropenresultSyncModel.py | antopen/alipay-sdk-python-all | 0 | 7612 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenIotmbsDooropenresultSyncModel(object):
def __init__(self):
self._dev_id = None
self._door_state = None
self._project_id = None
@property
def dev_id(self):
return self._dev_id
@dev_id.setter
def dev_id(self, value):
self._dev_id = value
@property
def door_state(self):
return self._door_state
@door_state.setter
def door_state(self, value):
self._door_state = value
@property
def project_id(self):
return self._project_id
@project_id.setter
def project_id(self, value):
self._project_id = value
def to_alipay_dict(self):
params = dict()
if self.dev_id:
if hasattr(self.dev_id, 'to_alipay_dict'):
params['dev_id'] = self.dev_id.to_alipay_dict()
else:
params['dev_id'] = self.dev_id
if self.door_state:
if hasattr(self.door_state, 'to_alipay_dict'):
params['door_state'] = self.door_state.to_alipay_dict()
else:
params['door_state'] = self.door_state
if self.project_id:
if hasattr(self.project_id, 'to_alipay_dict'):
params['project_id'] = self.project_id.to_alipay_dict()
else:
params['project_id'] = self.project_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayOpenIotmbsDooropenresultSyncModel()
if 'dev_id' in d:
o.dev_id = d['dev_id']
if 'door_state' in d:
o.door_state = d['door_state']
if 'project_id' in d:
o.project_id = d['project_id']
return o
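# Illustrative round trip (the field values are placeholders): from_alipay_dict
# and to_alipay_dict convert between plain dicts and the model object.
#
# m = AlipayOpenIotmbsDooropenresultSyncModel.from_alipay_dict(
#     {"dev_id": "dev-1", "door_state": "OPEN", "project_id": "p-1"})
# assert m.to_alipay_dict()["door_state"] == "OPEN"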
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenIotmbsDooropenresultSyncModel(object):
def __init__(self):
self._dev_id = None
self._door_state = None
self._project_id = None
@property
def dev_id(self):
return self._dev_id
@dev_id.setter
def dev_id(self, value):
self._dev_id = value
@property
def door_state(self):
return self._door_state
@door_state.setter
def door_state(self, value):
self._door_state = value
@property
def project_id(self):
return self._project_id
@project_id.setter
def project_id(self, value):
self._project_id = value
def to_alipay_dict(self):
params = dict()
if self.dev_id:
if hasattr(self.dev_id, 'to_alipay_dict'):
params['dev_id'] = self.dev_id.to_alipay_dict()
else:
params['dev_id'] = self.dev_id
if self.door_state:
if hasattr(self.door_state, 'to_alipay_dict'):
params['door_state'] = self.door_state.to_alipay_dict()
else:
params['door_state'] = self.door_state
if self.project_id:
if hasattr(self.project_id, 'to_alipay_dict'):
params['project_id'] = self.project_id.to_alipay_dict()
else:
params['project_id'] = self.project_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayOpenIotmbsDooropenresultSyncModel()
if 'dev_id' in d:
o.dev_id = d['dev_id']
if 'door_state' in d:
o.door_state = d['door_state']
if 'project_id' in d:
o.project_id = d['project_id']
return o
| en | 0.352855 | #!/usr/bin/env python # -*- coding: utf-8 -*- | 1.989218 | 2 |
setup.py | ghost58400/marlin-binary-protocol | 0 | 7613 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="marlin_binary_protocol",
version="0.0.7",
author="<NAME>",
author_email="<EMAIL>",
description="Transfer files with Marlin 2.0 firmware using Marlin Binary Protocol Mark II",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/charleswillis3/marlin-binary-protocol",
packages=setuptools.find_packages(),
install_requires=["heatshrink2>=0.9", "pyserial>=3.4", "backports.time_perf_counter; python_version < '3.3'"],
classifiers=[
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=2.6, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4',
)
| import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="marlin_binary_protocol",
version="0.0.7",
author="<NAME>",
author_email="<EMAIL>",
description="Transfer files with Marlin 2.0 firmware using Marlin Binary Protocol Mark II",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/charleswillis3/marlin-binary-protocol",
packages=setuptools.find_packages(),
install_requires=["heatshrink2>=0.9", "pyserial>=3.4", "backports.time_perf_counter; python_version < '3.3'"],
classifiers=[
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=2.6, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4',
)
| none | 1 | 1.418212 | 1 |
|
taut_euler_class.py | henryseg/Veering | 2 | 7614 | <reponame>henryseg/Veering
#
# taut_euler_class.py
#
from file_io import parse_data_file, write_data_file
from taut import liberal, isosig_to_tri_angle
from transverse_taut import is_transverse_taut
from sage.matrix.constructor import Matrix
from sage.modules.free_module_element import vector
from sage.arith.misc import gcd
from sage.arith.functions import lcm
#
# Goal - given a transverse taut triangulation, decide if the
# associated "absolute" euler class is torsion or not. If it is
# torsion, determine its order.
#
# Contents and overview:
# 1. References.
#
# 2. Background.
#
# 3. Helper functions.
#
# 4. Truncate. We build the correct "truncated" cell structure \calT'
# from (M, \calT) and give generators for the cochain groups
# C^k(\calT', \ZZ) (k = 1, 2).
#
# 5. Representative. We find a two-cocycle E \in Z^2(\calT', \ZZ)
# that represents E(\calT) \in H^2(M, \ZZ).
#
# 6. Coboundary. We find the matrix for the coboundary operator
# \delta^1.
#
# 7. Linear algebra. We solve the linear problem to decide if E is a
# coboundary - that is, if E lies in B^2(\calT', \ZZ) - that is, if E
# is in the image of \delta^1.
#
# 8. Remarks.
#
# 9. Calling code
#
# 1. References.
#
# <NAME> - Orderability and Dehn filling
# Ghys - Groups acting on the circle
# Thurston - A norm for the homology of three-manifolds
# <NAME> - Foliations, chapter four
# 2. Background:
# Suppose that (M, \calT) is a transverse taut triangulation. Then
# \calT^{2} is the "horizontal branched surface". This caries various
# laminations, which extend to foliations on M. All of these have the
# same Euler class, which we will denote E(\calT) \in H^2(M, \ZZ).
# Suppose that \calF is a carried foliation and let UT\calF be the
# unit tangent bundle over \calF. The Euler class E vanishes exactly
# when UT\calF has a section; that is, when the unit tangent bundle is
# trivialisable.
# Recall:
# Suppose that X is an F-bundle over B. We have
#
# i
# F -------> X <--.
# | |
# | |
# p| |s
# | |
# v |
# B ---'
#
# So s \from B \to X is a \emph{section} if p \circ s = Id_B
# 3. Helper functions
def diagonal(D):
return [D[i][i] for i in range(min(D.dimensions()))]
# 4. Truncate.
# Suppose that M is a connected, cusped, oriented three-manifold. Let
# C = C(M) \geq 1 be the number of cusps of M. Suppose that \calT is a
# transverse taut ideal triangulation of M. Let T = T(\calT) \geq 1
# be the number of tetrahedra of \calT.
# We use Regina to number and orient the edges \{e_i\}_{i = 0}^{T-1},
# the faces \{f_i\}_{i = 0}^{2T-1}, and the tetrahedra \{t_i\}_{i =
# 0}^{T-1} of \calT. We call all of these \emph{ideal} cells. Note
# that the transverse structure also gives us co-orientations of the
# e_i and the f_i, called "upwards"
# We remove a small open neighbourbood of all ideal vertices of all
# model tetrahedra. This gives the \emph{truncated} cell structure
# \calT'. The remains of the ideal cells are called \emph{truncated}
# cells; we abuse and reuse the notations e_i and f_i for these. The
# truncated cells inherit orientations and co-orientations. The new
# cells are called \emph{peripheral} cells. We number these as
# follows:
# e_{ij} is the peripheral edge cutting vertex v_j off of ideal face f_i
# f_{ij} is the peripheral face cutting vertex v_j off of ideal tetrahedron t_i
# Note that every truncated face is combinatorially a hexagon. The
# boundary of this hexagon contains three truncated edges alternating
# with three peripheral edges. We orient each peripheral edge e_{ij}
# so that the orientation of e_{ij} agrees with the orientation
# induced by \bdy f_i. We orient each peripheral face f_{ij}
# anti-clockwise, as viewed from infinity (that is, from outside of
# M). Also, we equip e_{ij} and f_{ij} with co-orientations pointing
# out of M, called "outward".
# e_{i0}
# ---
# / \
# e_2 / \ e_1
# / \
# / f_i \
# \ /
# e_{i1} --------- e_{i2}
# e_0
# For an edge e or a face f we use e^* and f^* to denote the dual in
# C^1(\calT', \ZZ) or C^2(\calT', \ZZ). Thus \{e^*_i\} \cup
# \{e^*_{ij}\} generates C^1(\calT', \ZZ) while \{f^*_i\} \cup
# \{f^*_{ij}\} generates C^2(\calT', \ZZ).
# For more pictures, see
# /Veering_code/NotesPictures/euler_notes_from_nathan.jpg
# 5. Representative
# We now construct a two-cocycle E \in Z^2(\calT', \ZZ). For every
# peripheral face f we take
# E(f) = 0.
# \begin{remark}
# To see that this is correct, let \calF be any foliation of M,
# transverse to the boundary. Suppose that f is the given peripheral
# triangle. We have a section of the restriction of UT\calF to \bdy
# f; namely the outward field. This extends over f to give a section
# of UT\calF restricted to f. So there is no obstruction to the
# extension. See below for a more precise discussion in terms of
# "Poincar\'e-Hopf index".
# \end{remark}
# Now suppose that f is a truncated face. Suppose that e_0, e_1, e_2
# are its three truncated edges. Recall that these are all oriented.
# Let AC(f) be the number of the edges e_0, e_1, e_2 that are
# oriented anti-clockwise (that is, agree with their induced
# orientation coming from f). We take
# E(f) = AC(f) - 2
# If we flip the transverse direction: AC(f') = 3 - AC(f),
# so E(f') = AC(f') - 2 = 1 - AC(f) = 2 - AC(f) - 1 = -E(f) - 1
# \begin{remark}
# Here is one way to remember (and explain!) this rule. Suppose that
# f is the given truncated face. Suppose that s is a section of UTf |
# \bdy f. Then index(s) is the total rotation of s with respect to
# the tangent field, _plus_ one. This can be rephrased in terms of
# the index of tangent vector fields extending s over all of f.
# Our choices of orientations of edges determine a section of UTf |
# \bdy f. Since all of the boundary edges e_{ij} of f are oriented
# the same way, we choose a standard framing there; Nathan tells us to
# just use the outward pointing section on all of the e_{ij}. Our
# choice of section on e_0 (say) has to (a) depend only on the
# orientation of e_0 and (b) has to be outward at the endpoints of
# e_0. The simplest choice is the section that rotates by +\pi with
# respect to the tangent along \bdy f_i, as we move forward along e_0.
# So s points _back_ at the beginning of e_0, points _right_ in the
# middle of e_0, and points _forwards_ at the end of e_0. The total
# rotation of the resulting field (with respect to the tangent field)
# is AC(f) - 3. Thus E(f) = AC(f) - 2 is the index. You can check
# this works by drawing the four possible pictures and computing the index
# of any extension of s over f.
# \end{remark}
# Claim: \delta^2 E = 0.
# That is, E is a cocycle.
# Proof of claim: Fix a truncated tetrahedron t and fix some orientation
# of its truncated edges. A direct calculation shows that
# \delta E (t) = E \bdy t = 0.
# Likewise, a direct computation shows that switching the orientation
# of a single edge leaves E \bdy t unchanged. QED.
### It would be nice to have a less computational proof!
def euler_cocycle(tri, angle):
"""
Given a regina triangulation "tri", with oriented edges, and a
transverse taut angle structure "angle", returns the associated
two-cocycle E representing the Euler class E(tri).
"""
assert is_transverse_taut(tri, angle)
face_coorientations = is_transverse_taut(tri, angle, return_type = "face_coorientations")
# E will be a _row_ vector, because it eats column vectors.
E = []
# First deal with the truncated faces
for face in tri.faces(2): # 2 = dimension
# First we compute the number of Regina oriented edges that agree with the Regina orientation on face
AC = 0
for i in range(3):
perm = face.faceMapping(1, i)
# print perm[0], perm[1]
if perm[1] == ((perm[0] + 1) % 3): # the edge and face orientations agree so,
AC = AC + 1
# print "AC", AC
# Now we condition on whether or not Regina and angle agree on the (co-)orientation of the face.
if face_coorientations[face.index()] == 1:
E.append(AC - 2)
else:
E.append(1 - AC)
# Now deal with the peripheral faces
for tet in tri.tetrahedra():
for j in range(4):
E.append(0)
return E
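# A hedged usage sketch (the variable "sig" and the Regina call below are
# assumptions for illustration, not taken from this file):
#
# tri, angle = isosig_to_tri_angle(sig) # any taut isoSig string
# E = euler_cocycle(tri, angle)
# # one entry per truncated face (2T of them) and one per peripheral
# # face (4T of them), so len(E) == 6 * tri.countTetrahedra()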
# 6. Coboundary
# Suppose that e is a truncated edge. Let LF be the set of truncated
# faces to the left of e and let RF be the set of faces to the right. Then
# \delta e^* = \sum_{f \in LF} f^* - \sum_{f \in RF} f^*.
# Suppose that e is a peripheral edge. So there is a unique truncated
# face f meeting e. Note that f is to the left of e. There are
# also a pair of boundary faces meeting e: say f' _above_ e and f''
# _below_ e. Then
# \delta e^* = f^* + (f')^* - (f'')^*.
def coboundary(tri, angle):
"""
Given a triangulation "tri" (T), with oriented edges, and a
transverse taut angle structure "angle", returns the co-boundary
operator delta^1 \from C^1(T', ZZ) \to C^2(T', ZZ), as a matrix,
for the truncated triangulation T'. Note that, strictly speaking,
we don't need to use "angle" for this, but we use it to determine
orientation on faces for the Euler class, so we might as well use
it again here.
"""
# \delta^1 takes row vectors (functions on edges) and spits out
# row vectors (functions on faces). So, if c is a one-cochain
# then c \cdot \delta is a two-cochain.
delta = []
assert is_transverse_taut(tri, angle)
tet_vert_coorientations = is_transverse_taut(tri, angle, return_type = "tet_vert_coorientations")
face_coorientations = is_transverse_taut(tri, angle, return_type = "face_coorientations")
for edge in tri.edges():
# A row for every truncated edge
row = []
for face in tri.triangles():
# A row entry for every truncated face
count = 0
for i in range(3):
if face.edge(i) == edge:
perm = face.faceMapping(1, i)
if perm[1] == ((perm[0] + 1) % 3):
# the edge and face orientations agree so,
count += 1
else:
count -= 1
row.append(count * face_coorientations[face.index()])
# +1 if face is to the left of the edge, -1 if face is to
# the right of the edge, using Regina's edge orientation
# when viewed from above (using the transverse taut notion
# of up)
# ,'|
# ,' |
# ,' |
# ,' CCW | gets a +1
# `. ^
# `. |
# `. |
# `.|
for tet in tri.simplices():
for i in range(4):
row.append(0)
delta.append(row)
for face in tri.triangles():
face_embeddings = []
for j in range(2):
face_embeddings.append( face.embedding(j) )
for i in range(3): # vertices of the face
# A row for every peripheral edge
row = []
for face2 in tri.triangles():
# A row entry for every truncated face
if face2 == face:
row.append(1)
else:
row.append(0)
for tet in tri.simplices():
for k in range(4):
# A row entry for every peripheral face
count = 0
for j in range(2):
if (tet == face_embeddings[j].simplex()) and (face_embeddings[j].vertices()[i] == k):
# the tetrahedron is on the jth side of the
# face and the ith vertex of face is the kth
# vertex of tet
face_num_in_tet = face_embeddings[j].vertices()[3]
count -= tet_vert_coorientations[tet.index()][face_num_in_tet]
# tet_vert_coorientations is +1 if
# coorientation on face points out of the
# tetrahedron, and we want count += 1 if
# the peripheral face is above the
# peripheral edge
row.append(count)
delta.append(row)
return delta
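# (Shape bookkeeping, as a hedged aside: for an ideal triangulation of a
# cusped M the number of ideal edges equals the number T of tetrahedra, so
# delta has T + 3*(2T) = 7T rows (one per truncated edge and one per
# peripheral edge) and 2T + 4T = 6T columns, matching the length of the
# Euler cocycle E above.)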
# 7. Linear algebra
# We ask: is there a one-cocycle C \in C^1(\calT', \ZZ) so that
# \delta C = E? If so, then [E] = E(\calT) is zero in H^2, as
# desired.
# This is a linear algebra problem, so can be solved by, say, sage.
def order_of_euler_class(delta, E):
"""
Given the coboundary operator delta and an Euler two-cocycle E,
returns k if [E] is k--torsion. By convention, returns zero if
[E] is non-torsion. Note that the trivial element is 1--torsion.
"""
delta = Matrix(delta)
E = vector(E)
# Note that E is a coboundary if there is a one-cocycle C solving
#
# E = C*delta
#
# We can find C (if it exists at all) using Smith normal form.
D, U, V = delta.smith_form()
assert D == U*delta*V
# So we are trying to solve
#
# C*delta = C*U.inverse()*D*V.inverse() = E
#
# for a one-cochain C. Multiply by V to get
#
# C*delta*V = C*U.inverse()*D = E*V
#
# Now set
#
# B = C*U.inverse(), and so B*U = C
#
# and rewrite to get
#
# B*U*delta*V = B*D = E*V
#
# So define E' by:
Ep = E*V
# Finally we attempt to solve B * D = Ep. Note that D is
# diagonal: so if we can solve all of the equations
# B[i] * D[i][i] == Ep[i]
# with B[i] integers, then [E] = 0 in cohomology.
diag = diagonal(D)
if any( (diag[i] == 0 and Ep[i] != 0) for i in range(len(Ep)) ):
return 0
# All zeros are at the end in Smith normal form. Since we've
# passed the above we can now remove them.
first_zero = diag.index(0)
diag = diag[:first_zero]
Ep = Ep[:first_zero]
# Since diag[i] is (now) never zero we can divide to get the
# fractions Ep[i]/diag[i] and then find the scaling that makes
# them simultaneously integral.
denoms = [ diag[i] / gcd(Ep[i], diag[i]) for i in range(len(Ep)) ]
return lcm(denoms)
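# A toy sanity check (hypothetical numbers, not from any triangulation): if
# delta = [[2, 0], [0, 0]] and E = [1, 0], then E is not an integral
# combination of the rows of delta but 2*E is, so [E] should be 2--torsion:
#
# order_of_euler_class([[2, 0], [0, 0]], [1, 0]) # expected to return 2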
# 8. Remarks
# a) Here is a nice trick that proves [E] = 0 in some cases. Suppose
# that \gamma is an oriented path in \bdy M. Suppose that \gamma is
# transverse to the one-skeleton of \calT'. We form a one-cocycle
# D_\gamma by adding up the boundary edges that \gamma crosses, with
# sign. The sign is positive if \gamma crosses from below to above,
# and negative otherwise. Note that \delta D_\gamma vanishes on all
# boundary faces.
# b) <NAME> says that we should take the paths that go up
# through the centres of tetrahedra and take the Poincare dual. BUT I
# think this is not what we want... Marc is thinking of the relative
# Euler class as discussed on page 390 of his paper "Taut ideal
# triangulations of three-manifolds". The relative Euler class lives
# in H^2(M, \bdy M), so is Poincare dual to an element of H_1(M),
# represented by a collection of loops.
# c) [2019-03-31] It seems that, for transverse veering triangulations
# in the 16 census, the Euler class is always zero or two-torsion.
# Note that there are manifolds M in the census where H^2(M, \ZZ) has
# positive rank... What about odd torsion?
# Question: If the veering triangulation is edge-orientable, does the
# Euler class vanish?
# Answer: Yes. Here is a version of a discussion with Nathan
# [2020-04-03] - he says the following:
# Suppose that F is a foliation carried by the horizontal branched
# surface. Let UTF be the unit tangent bundle to F. We think of
# e(UTF) as being the obstruction to UTF having a section. Let G be
# the foliation carried by the upper (aka green) branched surface. If
# G is transversely orientable (aka edge-orientability of the veering
# triangulation) then G \cap F gives the desired section, and e(UTF) =
# 0. Note that G \cap F gives, for every point, a pair of points in
# the unit tangent circle. So let PUTF be the projective unit tangent
# bundle to F. This definitely has a section, so e(PUTF) = 0. Now,
# the bundle UTF is a double cover of the bundle PUTF.
# Claim: The euler class is multiplicative with respect to covers (in
# both senses).
# With the claim in hand, we have
# 2 * e(UTF) = e(PUTF) = 0
# We deduce that e(UTF) is either zero or two-torsion.
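# (If the claim holds, order_of_euler_class_wrapper below should therefore
# only ever return 1 or 2 on an edge-orientable veering triangulation, which
# gives a cheap consistency check against the census observation in (c).)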
# 9. Calling code
@liberal
def order_of_euler_class_wrapper(tri, angle):
"""
Returns the order of the euler class.
"""
return order_of_euler_class(coboundary(tri, angle), euler_cocycle(tri, angle))
def compute_order_of_euler_classes(file_in, number=None, file_out=None):
data_in = parse_data_file(file_in)
data_in = [line.split(" ") for line in data_in]
if number != None:
data_in = data_in[:number]
data_out = []
evil = []
for i, line in enumerate(data_in):
if i % 50 == 0:
print( ((1.0*i)/(1.0*len(data_in)), len(data_out)) )
sig = line[0]
tri, angle = isosig_to_tri_angle(sig)
# angle = [int(letter) for letter in angle_s]
curr_euler = order_of_euler_class(coboundary(tri, angle), euler_cocycle(tri, angle))
        if curr_euler == 0:  # order_of_euler_class returns 0 for non-torsion classes
evil.append(sig)
print(sig + " has non-torsion Euler class!!!!")
elif curr_euler == 1: # order is one so [E] = 0. Boring.
pass
else:
line_out = [sig, str(curr_euler)]
line_out.extend(line[1:])
data_out.append(line_out)
if file_out != None:
write_data_file(data_out, file_out)
print( ("list of evil:", evil) )
return data_out
| #
# taut_euler_class.py
#
from file_io import parse_data_file, write_data_file
from taut import liberal, isosig_to_tri_angle
from transverse_taut import is_transverse_taut
from sage.matrix.constructor import Matrix
from sage.modules.free_module_element import vector
from sage.arith.misc import gcd
from sage.arith.functions import lcm
#
# Goal - given a transverse taut triangulation, decide if the
# associated "absolute" euler class is torsion or not. If it is
# torsion, determine its order.
#
# Contents and overview:
# 1. References.
#
# 2. Background.
#
# 3. Helper functions.
#
# 4. Truncate. We build the correct "truncated" cell structure \calT'
# from (M, \calT) and give generators for the cochain groups
# C^k(\calT', \ZZ) (k = 1, 2).
#
# 5. Representative. We find a two-cocycle E \in Z^2(\calT', \ZZ)
# that represents E(\calT) \in H^2(M, \ZZ).
#
# 6. Coboundary. We find the matrix for the coboundary operator
# \delta^1.
#
# 7. Linear algebra. We solve the linear problem to decide if E is a
# coboundary - that is, if E lies in B^2(\calT', \ZZ) - that is, if E
# is in the image of \delta^1.
#
# 8. Remarks.
#
# 9. Calling code
#
# 1. References.
#
# <NAME> - Orderability and Dehn filling
# Ghys - Groups acting on the circle
# Thurston - A norm for the homology of three-manifolds
# <NAME> - Foliations, chapter four
# 2. Background:
# Suppose that (M, \calT) is a transverse taut triangulation. Then
# \calT^{2} is the "horizontal branched surface". This carries various
# laminations, which extend to foliations on M. All of these have the
# same Euler class, which we will denote E(\calT) \in H^2(M, \ZZ).
# Suppose that \calF is a carried foliation and let UT\calF be the
# unit tangent bundle over \calF. The Euler class E vanishes exactly
# when UT\calF has a section; that is, when the unit tangent bundle is
# trivialisable.
# Recall:
# Suppose that X is an F-bundle over B. We have
#
# i
# F -------> X <--.
# | |
# | |
# p| |s
# | |
# v |
# B ---'
#
# So s \from B \to X is a \emph{section} if p \circ s = Id_B
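# (A toy example of the definition: for a trivial bundle X = B x F, any
# basepoint f_0 in F gives a section s(b) = (b, f_0); for UT\calF such a
# section need not exist, and the Euler class measures exactly this
# obstruction.)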
# 3. Helper functions
def diagonal(D):
return [D[i][i] for i in range(min(D.dimensions()))]
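# For example, diagonal(Matrix([[2, 0, 0], [0, 3, 0]])) should give [2, 3].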
# 4. Truncate.
# Suppose that M is a connected, cusped, oriented three-manifold. Let
# C = C(M) \geq 1 be the number of cusps of M. Suppose that \calT is a
# transverse taut ideal triangulation of M. Let T = T(\calT) \geq 1
# be the number of tetrahedra of \calT.
# We use Regina to number and orient the edges \{e_i\}_{i = 0}^{T-1},
# the faces \{f_i\}_{i = 0}^{2T-1}, and the tetrahedra \{t_i\}_{i =
# 0}^{T-1} of \calT. We call all of these \emph{ideal} cells. Note
# that the transverse structure also gives us co-orientations of the
# e_i and the f_i, called "upwards"
# We remove a small open neighbourhood of all ideal vertices of all
# model tetrahedra. This gives the \emph{truncated} cell structure
# \calT'. The remains of the ideal cells are called \emph{truncated}
# cells; we abuse and reuse the notations e_i and f_i for these. The
# truncated cells inherit orientations and co-orientations. The new
# cells are called \emph{peripheral} cells. We number these as
# follows:
# e_{ij} is the peripheral edge cutting vertex v_j off of ideal face f_i
# f_{ij} is the peripheral face cutting vertex v_j off of ideal tetrahedron t_i
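# (So, with T tetrahedra and 2T ideal faces, there are 3*(2T) = 6T
# peripheral edges and 4T peripheral faces in total; this is the
# bookkeeping behind the cochain vectors constructed below.)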
# Note that every truncated face is combinatorially a hexagon. The
# boundary of this hexagon contains three truncated edges alternating
# with three peripheral edges. We orient each peripheral edge e_{ij}
# so that the orientation of e_{ij} agrees with the orientation
# induced by \bdy f_i. We orient each peripheral face f_{ij}
# anti-clockwise, as viewed from infinity (that is, from outside of
# M). Also, we equip e_{ij} and f_{ij} with co-orientations pointing
# out of M, called "outward".
# e_{i0}
# ---
# / \
# e_2 / \ e_1
# / \
# / f_i \
# \ /
# e_{i1} --------- e_{i2}
# e_0
# For an edge e or a face f we use e^* and f^* to denote the dual in
# C^1(\calT', \ZZ) or C^2(\calT', \ZZ). Thus \{e^*_i\} \cup
# \{e^*_{ij}\} generates C^1(\calT', \ZZ) while \{f^*_i\} \cup
# \{f^*_{ij}\} generates C^2(\calT', \ZZ).
# For more pictures, see
# /Veering_code/NotesPictures/euler_notes_from_nathan.jpg
# 5. Representative
# We now construct a two-cocycle E \in Z^2(\calT', \ZZ). For every
# peripheral face f we take
# E(f) = 0.
# \begin{remark}
# To see that this is correct, let \calF be any foliation of M,
# transverse to the boundary. Suppose that f is the given peripheral
# triangle. We have a section of the restriction of UT\calF to \bdy
# f; namely the outward field. This extends over f to give a section
# of UT\calF restricted to f. So there is no obstruction to the
# extension. See below for a more precise discussion in terms of
# "Poincar\'e-Hopf index".
# \end{remark}
# Now suppose that f is a truncated face. Suppose that e_0, e_1, e_2
# are its three truncated edges. Recall that these are all oriented.
# Let AC(f) be the number of the edges e_0, e_1, e_2 that are
# oriented anti-clockwise (that is, agree with their induced
# orientation coming from f). We take
# E(f) = AC(f) - 2
# If we flip the transverse direction: AC(f') = 3 - AC(f),
# so E(f') = AC(f') - 2 = 1 - AC(f) = 2 - AC(f) - 1 = -E(f) - 1
# \begin{remark}
# Here is one way to remember (and explain!) this rule. Suppose that
# f is the given truncated face. Suppose that s is a section of UTf |
# \bdy f. Then index(s) is the total rotation of s with respect to
# the tangent field, _plus_ one. This can be rephrased in terms of
# the index of tangent vector fields extending s over all of f.
# Our choices of orientations of edges determine a section of UTf |
# \bdy f. Since all of the boundary edges e_{ij} of f are oriented
# the same way, we choose a standard framing there; Nathan tells us to
# just use the outward pointing section on all of the e_{ij}. Our
# choice of section on e_0 (say) has to (a) depend only on the
# orientation of e_0 and (b) has to be outward at the endpoints of
# e_0. The simplest choice is the section that rotates by +\pi with
# respect to the tangent along \bdy f_i, as we move forward along e_0.
# So s points _back_ at the beginning of e_0, points _right_ in the
# middle of e_0, and points _forwards_ at the end of e_0. The total
# rotation of the resulting field (with respect to the tangent field)
# is AC(f) - 3. Thus E(f) = AC(f) - 2 is the index. You can check
# this works by drawing the four possible pictures and computing the index
# of any extension of s over f.
# \end{remark}
# Claim: \delta^2 E = 0.
# That is, E is a cocycle.
# Proof of claim: Fix a truncated tetrahedron t and fix some orientation
# of its truncated edges. A direct calculation shows that
# \delta E (t) = E \bdy t = 0.
# Likewise, a direct computation shows that switching the orientation
# of a single edge leaves E \bdy t unchanged. QED.
### It would be nice to have a less computational proof!
def euler_cocycle(tri, angle):
"""
Given a regina triangulation "tri", with oriented edges, and a
transverse taut angle structure "angle", returns the associated
two-cocycle E representing the Euler class E(tri).
"""
assert is_transverse_taut(tri, angle)
face_coorientations = is_transverse_taut(tri, angle, return_type = "face_coorientations")
# E will be a _row_ vector, because it eats column vectors.
E = []
# First deal with the truncated faces
for face in tri.faces(2): # 2 = dimension
# First we compute the number of Regina oriented edges that agree with the Regina orientation on face
AC = 0
for i in range(3):
perm = face.faceMapping(1, i)
# print perm[0], perm[1]
if perm[1] == ((perm[0] + 1) % 3): # the edge and face orientations agree so,
AC = AC + 1
# print "AC", AC
# Now we condition on whether or not Regina and angle agree on the (co-)orientation of the face.
if face_coorientations[face.index()] == 1:
E.append(AC - 2)
else:
E.append(1 - AC)
# Now deal with the peripheral faces
for tet in tri.tetrahedra():
for j in range(4):
E.append(0)
return E
# 6. Coboundary
# Suppose that e is a truncated edge. Let LF be the set of truncated
# faces to the left of e and let RF be the set of faces to the right. Then
# \delta e^* = \sum_{f \in LF} f^* - \sum_{f \in RF} f^*.
# Suppose that e is a peripheral edge. So there is a unique truncated
# face f meeting e. Note that f is to the left of e. There are
# also a pair of boundary faces meeting e: say f' _above_ e and f''
# _below_ e. Then
# \delta e^* = f^* + (f')^* - (f'')^*.
def coboundary(tri, angle):
"""
Given a triangulation "tri" (T), with oriented edges, and a
transverse taut angle structure "angle", returns the co-boundary
operator delta^1 \from C^1(T', ZZ) \to C^2(T', ZZ), as a matrix,
for the truncated triangulation T'. Note that, strictly speaking,
we don't need to use "angle" for this, but we use it to determine
orientation on faces for the Euler class, so we might as well use
it again here.
"""
# \delta^1 takes row vectors (functions on edges) and spits out
# row vectors (functions on faces). So, if c is a one-cochain
# then c \cdot \delta is a two-cochain.
delta = []
assert is_transverse_taut(tri, angle)
tet_vert_coorientations = is_transverse_taut(tri, angle, return_type = "tet_vert_coorientations")
face_coorientations = is_transverse_taut(tri, angle, return_type = "face_coorientations")
for edge in tri.edges():
# A row for every truncated edge
row = []
for face in tri.triangles():
# A row entry for every truncated face
count = 0
for i in range(3):
if face.edge(i) == edge:
perm = face.faceMapping(1, i)
if perm[1] == ((perm[0] + 1) % 3):
# the edge and face orientations agree so,
count += 1
else:
count -= 1
row.append(count * face_coorientations[face.index()])
# +1 if face is to the left of the edge, -1 if face is to
# the right of the edge, using Regina's edge orientation
# when viewed from above (using the transverse taut notion
# of up)
# ,'|
# ,' |
# ,' |
# ,' CCW | gets a +1
# `. ^
# `. |
# `. |
# `.|
for tet in tri.simplices():
for i in range(4):
row.append(0)
delta.append(row)
for face in tri.triangles():
face_embeddings = []
for j in range(2):
face_embeddings.append( face.embedding(j) )
for i in range(3): # vertices of the face
# A row for every peripheral edge
row = []
for face2 in tri.triangles():
# A row entry for every truncated face
if face2 == face:
row.append(1)
else:
row.append(0)
for tet in tri.simplices():
for k in range(4):
# A row entry for every peripheral face
count = 0
for j in range(2):
if (tet == face_embeddings[j].simplex()) and (face_embeddings[j].vertices()[i] == k):
# the tetrahedron is on the jth side of the
# face and the ith vertex of face is the kth
# vertex of tet
face_num_in_tet = face_embeddings[j].vertices()[3]
count -= tet_vert_coorientations[tet.index()][face_num_in_tet]
# tet_vert_coorientations is +1 if
# coorientation on face points out of the
# tetrahedron, and we want count += 1 if
# the peripheral face is above the
# peripheral edge
row.append(count)
delta.append(row)
return delta
# 7. Linear algebra
# We ask: is there a one-cocycle C \in C^1(\calT', \ZZ) so that
# \delta C = E? If so, then [E] = E(\calT) is zero in H^2, as
# desired.
# This is a linear algebra problem, so can be solved by, say, sage.
def order_of_euler_class(delta, E):
"""
Given the coboundary operator delta and an Euler two-cocycle E,
returns k if [E] is k--torsion. By convention, returns zero if
[E] is non-torsion. Note that the trivial element is 1--torsion.
"""
delta = Matrix(delta)
E = vector(E)
# Note that E is a coboundary if there is a one-cocycle C solving
#
# E = C*delta
#
# We can find C (if it exists at all) using Smith normal form.
D, U, V = delta.smith_form()
assert D == U*delta*V
# So we are trying to solve
#
# C*delta = C*U.inverse()*D*V.inverse() = E
#
# for a one-cochain C. Multiply by V to get
#
# C*delta*V = C*U.inverse()*D = E*V
#
# Now set
#
# B = C*U.inverse(), and so B*U = C
#
# and rewrite to get
#
# B*U*delta*V = B*D = E*V
#
# So define E' by:
Ep = E*V
# Finally we attempt to solve B * D = Ep. Note that D is
# diagonal: so if we can solve all of the equations
# B[i] * D[i][i] == Ep[i]
# with B[i] integers, then [E] = 0 in cohomology.
diag = diagonal(D)
if any( (diag[i] == 0 and Ep[i] != 0) for i in range(len(Ep)) ):
return 0
# All zeros are at the end in Smith normal form. Since we've
# passed the above we can now remove them.
first_zero = diag.index(0)
diag = diag[:first_zero]
Ep = Ep[:first_zero]
# Since diag[i] is (now) never zero we can divide to get the
# fractions Ep[i]/diag[i] and then find the scaling that makes
# them simultaneously integral.
denoms = [ diag[i] / gcd(Ep[i], diag[i]) for i in range(len(Ep)) ]
return lcm(denoms)
# 8. Remarks
# a) Here is a nice trick that proves [E] = 0 in some cases. Suppose
# that \gamma is an oriented path in \bdy M. Suppose that \gamma is
# transverse to the one-skeleton of \calT'. We form a one-cocycle
# D_\gamma by adding up the boundary edges that \gamma crosses, with
# sign. The sign is positive if \gamma crosses from below to above,
# and negative otherwise. Note that \delta D_\gamma vanishes on all
# boundary faces.
# b) <NAME> says that we should take the paths that go up
# through the centres of tetrahedra and take the Poincare dual. BUT I
# think this is not what we want... Marc is thinking of the relative
# Euler class as discussed on page 390 of his paper "Taut ideal
# triangulations of three-manifolds". The relative Euler class lives
# in H^2(M, \bdy M), so is Poincare dual to an element of H_1(M),
# represented by a collection of loops.
# c) [2019-03-31] It seems that, for transverse veering triangulations
# in the 16 census, the Euler class is always zero or two-torsion.
# Note that there are manifolds M in the census where H^2(M, \ZZ) has
# positive rank... What about odd torsion?
# Question: If the veering triangulation is edge-orientable, does the
# Euler class vanish?
# Answer: Yes. Here is a version of a discussion with Nathan
# [2020-04-03] - he says the following:
# Suppose that F is a foliation carried by the horizontal branched
# surface. Let UTF be the unit tangent bundle to F. We think of
# e(UTF) as being the obstruction to UTF having a section. Let G be
# the foliation carried by the upper (aka green) branched surface. If
# G is transversely orientable (aka edge-orientability of the veering
# triangulation) then G \cap F gives the desired section, and e(UTF) =
# 0. Note that G \cap F gives, for every point, a pair of points in
# the unit tangent circle. So let PUTF be the projective unit tangent
# bundle to F. This definitely has a section, so e(PUTF) = 0. Now,
# the bundle UTF is a double cover of the bundle PUTF.
# Claim: The euler class is multiplicative with respect to covers (in
# both senses).
# With the claim in hand, we have
# 2 * e(UTF) = e(PUTF) = 0
# We deduce that e(UTF) is either zero or two-torsion.
# 9. Calling code
@liberal
def order_of_euler_class_wrapper(tri, angle):
"""
Returns the order of the euler class.
"""
return order_of_euler_class(coboundary(tri, angle), euler_cocycle(tri, angle))
def compute_order_of_euler_classes(file_in, number=None, file_out=None):
data_in = parse_data_file(file_in)
data_in = [line.split(" ") for line in data_in]
if number != None:
data_in = data_in[:number]
data_out = []
evil = []
for i, line in enumerate(data_in):
if i % 50 == 0:
print( ((1.0*i)/(1.0*len(data_in)), len(data_out)) )
sig = line[0]
tri, angle = isosig_to_tri_angle(sig)
# angle = [int(letter) for letter in angle_s]
curr_euler = order_of_euler_class(coboundary(tri, angle), euler_cocycle(tri, angle))
        if curr_euler == 0:  # order_of_euler_class returns 0 for non-torsion classes
evil.append(sig)
print(sig + " has non-torsion Euler class!!!!")
elif curr_euler == 1: # order is one so [E] = 0. Boring.
pass
else:
line_out = [sig, str(curr_euler)]
line_out.extend(line[1:])
data_out.append(line_out)
if file_out != None:
write_data_file(data_out, file_out)
print( ("list of evil:", evil) )
return data_out | en | 0.881353 | # # taut_euler_class.py # # # Goal - given a transverse taut triangulation, decide if the # associated "absolute" euler class is torsion or not. If it is # torsion, determine its order. # # Contents and overview: # 1. References. # # 2. Background. # # 3. Helper functions. # # 4. Truncate. We build the correct "truncated" cell structure \calT' # from (M, \calT) and give generators for the cochain groups # C^k(\calT', \ZZ) (k = 1, 2). # # 5. Representative. We find a two-cocycle E \in Z^2(\calT', \ZZ) # that represents E(\calT) \in H^2(M, \ZZ). # # 6. Coboundary. We find the matrix for the coboundary operator # \delta^1. # # 7. Linear algebra. We solve the linear problem to decide if E is a # coboundary - that is, if E lies in B^2(\calT', \ZZ) - that is, if E # is in the image of \delta^1. # # 8. Remarks. # # 9. Calling code # # 1. References. # # <NAME> - Orderability and Dehn filling # Ghys - Groups acting on the circle # Thurston - A norm for the homology of three-manifolds # <NAME> - Foliations, chapter four # 2. Background: # Suppose that (M, \calT) is a transverse taut triangulation. Then # \calT^{2} is the "horizontal branched surface". This caries various # laminations, which extend to foliations on M. All of these have the # same Euler class, which we will denote E(\calT) \in H^2(M, \ZZ). # Suppose that \calF is a carried foliation and let UT\calF be the # unit tangent bundle over \calF. The Euler class E vanishes exactly # when UT\calF has a section; that is, when the unit tangent bundle is # trivialisable. # Recall: # Suppose that X is an F-bundle over B. We have # # i # F -------> X <--. # | | # | | # p| |s # | | # v | # B ---' # # So s \from B \to X is a \emph{section} if p \circ s = Id_B # 3. Helper functions # 4. Truncate. # Suppose that M is a connected, cusped, oriented three-manifold. Let # C = C(M) \geq 1 be the number of cusps of M. Suppose that \calT is a # transverse taut ideal triangulation of M. Let T = T(\calT) \geq 1 # be the number of tetrahedra of \calT. # We use Regina to number and orient the edges \{e_i\}_{i = 0}^{T-1}, # the faces \{f_i\}_{i = 0}^{2T-1}, and the tetrahedra \{t_i\}_{i = # 0}^{T-1} of \calT. We call all of these \emph{ideal} cells. Note # that the transverse structure also gives us co-orientations of the # e_i and the f_i, called "upwards" # We remove a small open neighbourbood of all ideal vertices of all # model tetrahedra. This gives the \emph{truncated} cell structure # \calT'. The remains of the ideal cells are called \emph{truncated} # cells; we abuse and reuse the notations e_i and f_i for these. The # truncated cells inherit orientations and co-orientations. The new # cells are called \emph{peripheral} cells. We number these as # follows: # e_{ij} is the peripheral edge cutting vertex v_j off of ideal face f_i # f_{ij} is the peripheral face cutting vertex v_j off of ideal tetrahedron t_i # Note that every truncated face is combinatorially a hexagon. The # boundary of this hexagon contains three truncated edges alternating # with three peripheral edges. We orient each peripheral edge e_{ij} # so that the orientation of e_{ij} agrees with the orientation # induced by \bdy f_i. We orient each peripheral face f_{ij} # anti-clockwise, as viewed from infinity (that is, from outside of # M). Also, we equip e_{ij} and f_{ij} with co-orientations pointing # out of M, called "outward". 
# e_{i0} # --- # / \ # e_2 / \ e_1 # / \ # / f_i \ # \ / # e_{i1} --------- e_{i2} # e_0 # For an edge e or a face f we use e^* and f^* to denote the dual in # C^1(\calT', \ZZ) or C^2(\calT', \ZZ). Thus \{e^*_i\} \cup # \{e^*_{ij}\} generates C^1(\calT', \ZZ) while \{f^*_i\} \cup # \{f^*_{ij}\} generates C^2(\calT', \ZZ). # For more pictures, see # /Veering_code/NotesPictures/euler_notes_from_nathan.jpg # 5. Representative # We now construct a two-cocycle E \in Z^2(\calT', \ZZ). For every # peripheral face f we take # E(f) = 0. # \begin{remark} # To see that this is correct, let \calF be any foliation of M, # transverse to the boundary. Suppose that f is the given peripheral # triangle. We have a section of the restriction of UT\calF to \bdy # f; namely the outward field. This extends over f to give a section # of UT\calF restricted to f. So there is no obstruction to the # extension. See below for a more precise discussion in terms of # "Poincar\'e-Hopf index". # \end{remark} # Now suppose that f is a truncated face. Suppose that e_0, e_1, e_2 # are its three truncated edges. Recall that these are all oriented. # Let AC(f) be the number of the edges e_0, e_1, e_2 that are # oriented anti-clockwise (that is, agree with their induced # orientation coming from f). We take # E(f) = AC(f) - 2 # If we flip the transverse direction: AC(f') = 3 - AC(f), # so E(f') = AC(f') - 2 = 1 - AC(f) = 2 - AC(f) - 1 = -E(f) - 1 # \begin{remark} # Here is one way to remember (and explain!) this rule. Suppose that # f is the given truncated face. Suppose that s is a section of UTf | # \bdy f. Then index(s) is the total rotation of s with respect to # the tangent field, _plus_ one. This can be rephrased in terms of # the index of tangent vector fields extending s over all of f. # Our choices of orientations of edges determine a section of UTf | # \bdy f. Since all of the boundary edges e_{ij} of f are oriented # the same way, we choose a standard framing there; Nathan tells us to # just use the outward pointing section on all of the e_{ij}. Our # choice of section on e_0 (say) has to (a) depend only on the # orientation of e_0 and (b) has to be outward at the endpoints of # e_0. The simplest choice is the section that rotates by +\pi with # respect to the tangent along \bdy f_i, as we move forward along e_0. # So s points _back_ at the beginning of e_0, points _right_ in the # middle of e_0, and points _forwards_ at the end of e_0. The total # rotation of the resulting field (with respect to the tangent field) # is AC(f) - 3. Thus E(f) = AC(f) - 2 is the index. You can check # this works by drawing the four possible pictures and computing the index # of any extension of s over f. # \end{remark} # Claim: \delta^2 E = 0. # That is, E is a cocycle. # Proof of claim: Fix a truncated tetrahedron t and fix some oriention # of its truncated edges. A direct calculation shows that # \delta E (t) = E \bdy t = 0. # Likewise, a direct computation shows that switching the orientation # of a single edge leaves E \bdy t unchanged. QED. ### It would be nice to have a less computational proof! Given a regina triangulation "tri", with oriented edges, and a transverse taut angle structure "angle", returns the associated two-cocycle E representing the Euler class E(tri). # E will be a _row_ vector, because it eats column vectors. 
# First deal with the truncated faces # 2 = dimension # First we compute the number of Regina oriented edges that agree with the Regina orientation on face # print perm[0], perm[1] # the edge and face orientations agree so, # print "AC", AC # Now we condition on whether or not Regina and angle agree on the (co-)orientation of the face. # Now deal with the peripheral faces # 6. Coboundary # Suppose that e is a truncated edge. Let LF be the set of truncated # faces to the left of e and let RF be the set of faces to the right. Then # \delta e^* = \sum_{f \in LF} f^* - \sum_{f \in RF} f^*. # Suppose that e is a peripheral edge. So there is a unique truncated # face f meeting e. Note that f is to the left of e. There are # also a pair of boundary faces meeting e: say f' _above_ e and f'' # _below_ e. Then # \delta e^* = f^* + (f')^* - (f'')^*. Given a triangulation "tri" (T), with oriented edges, and a transverse taut angle structure "angle", returns the co-boundary operator delta^1 \from C^1(T', ZZ) \to C^2(T', ZZ), as a matrix, for the truncated triangulation T'. Note that, strictly speaking, we don't need to use "angle" for this, but we use it to determine orientation on faces for the Euler class, so we might as well use it again here. # \delta^1 takes row vectors (functions on edges) and spits out # row vectors (functions on faces). So, if c is a one-cochain # then c \cdot \delta is a two-cochain. # A row for every truncated edge # A row entry for every truncated face # the edge and face orientations agree so, # +1 if face is to the left of the edge, -1 if face is to # the right of the edge, using Regina's edge orientation # when viewed from above (using the transverse taut notion # of up) # ,'| # ,' | # ,' | # ,' CCW | gets a +1 # `. ^ # `. | # `. | # `.| # vertices of the face # A row for every peripheral edge # A row entry for every truncated face # A row entry for every peripheral face # the tetrahedron is on the jth side of the # face and the ith vertex of face is the kth # vertex of tet # tet_vert_coorientations is +1 if # coorientation on face points out of the # tetrahedron, and we want count += 1 if # the peripheral face is above the # peripheral edge # 7. Linear algebra # We ask: is there a one-cocycle C \in C^1(\calT', \ZZ) so that # \delta C = E? If so, then [E] = E(\calT) is zero in H^2, as # desired. # This is a linear algebra problem, so can be solved by, say, sage. Given the coboundary operator delta and an Euler two-cocycle E, returns k if [E] is k--torsion. By convention, returns zero if [E] is non-torsion. Note that the trivial element is 1--torsion. # Note that E is a coboundary if there is a one-cocycle C solving # # E = C*delta # # We can find C (if it exists at all) using Smith normal form. # So we are trying to solve # # C*delta = C*U.inverse()*D*V.inverse() = E # # for a one-cochain C. Multiply by V to get # # C*delta*V = C*U.inverse()*D = E*V # # Now set # # B = C*U.inverse(), and so B*U = C # # and rewrite to get # # B*U*delta*V = B*D = E*V # # So define E' by: # Finally we attempt to solve B * D = Ep. Note that D is # diagonal: so if we can solve all of the equations # B[i] * D[i][i] == Ep[i] # with B[i] integers, then [E] = 0 in cohomology. # All zeros are at the end in Smith normal form. Since we've # passed the above we can now remove them. # Since diag[i] is (now) never zero we can divide to get the # fractions Ep[i]/diag[i] and then find the scaling that makes # them simultaneously integral. # 8. 
Remarks # a) Here is a nice trick that proves [E] = 0 in some cases. Suppose # that \gamma is an oriented path in \bdy M. Suppose that \gamma is # transverse to the one-skeleton of \calT'. We form a one-cocycle # D_\gamma by adding up the boundary edges that \gamma crosses, with # sign. The sign is positive if \gamma crosses from below to above, # and negative otherwise. Note that \delta D_\gamma vanishes on all # boundary faces. # b) <NAME> says that we should take the paths that go up # through the centres of tetrahedra and take the Poincare dual. BUT I # think this is not what we want... Marc is thinking of the relative # Euler class as discussed on page 390 of his paper "Taut ideal # triangulations of three-manifolds". The relative Euler class lives # in H^2(M, \bdy M), so is Poincare dual to an element of H_1(M), # represented by a collection of loops. # c) [2019-03-31] It seems that, for transverse veering triangulations # in the 16 census, the Euler class is always zero or two-torsion. # Note that there are manifolds M in the census where H^2(M, \ZZ) has # positive rank... What about odd torsion? # Question: If the veering triangulation is edge-orientable, does the # Euler class vanish? # Answer: Yes. Here is a version of a discussion with Nathan # [2020-04-03] - he says the following: # Suppose that F is a foliation carried by the horizontal branched # surface. Let UTF be the unit tangent bundle to F. We think of # e(UTF) as being the obstruction to UTF having a section. Let G be # the foliation carried by the upper (aka green) branched surface. If # G is transversely orientable (aka edge-orientability of the veering # triangulation) then G \cap F gives the desired section, and e(UTF) = # 0. Note that G \cap F gives, for every point, a pair of points in # the unit tangent circle. So let PUTF be the projective unit tangent # bundle to F. This definitely has a section, so e(PUTF) = 0. Now, # the bundle UTF is a double cover of the bundle PUTF. # Claim: The euler class is multiplicative with respect to covers (in # both senses). # With the claim in hand, we have # 2 * e(UTF) = e(PUTF) = 0 # We deduce that e(UTF) is either zero or two-torsion. # 9. Calling code Returns the order of the euler class. # angle = [int(letter) for letter in angle_s] # order is one so [E] = 0. Boring. | 2.214289 | 2 |
mailing/urls.py | ananyamalik/Railway-Concession-Portal | 0 | 7615 | <filename>mailing/urls.py
from django.urls import path
from .views import ( student_list, student_add, student_profile,student_delete )
| <filename>mailing/urls.py
from django.urls import path
from .views import ( student_list, student_add, student_profile,student_delete )
| none | 1 | 1.518483 | 2 |
|
transformers/tests/tokenization_xlnet_test.py | deepbluesea/transformers | 270 | 7616 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import unittest
from transformers.tokenization_xlnet import (XLNetTokenizer, SPIECE_UNDERLINE)
from .tokenization_tests_commons import CommonTestCases
SAMPLE_VOCAB = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'fixtures/test_sentencepiece.model')
class XLNetTokenizationTest(CommonTestCases.CommonTokenizerTester):
tokenizer_class = XLNetTokenizer
def setUp(self):
super(XLNetTokenizationTest, self).setUp()
# We have a SentencePiece fixture for testing
tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname)
def get_tokenizer(self, **kwargs):
return XLNetTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def get_input_output_texts(self):
input_text = u"This is a test"
output_text = u"This is a test"
return input_text, output_text
def test_full_tokenizer(self):
tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokens = tokenizer.tokenize(u'This is a test')
self.assertListEqual(tokens, [u'▁This', u'▁is', u'▁a', u'▁t', u'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])
tokens = tokenizer.tokenize(u"I was born in 92000, and this is falsé.")
self.assertListEqual(tokens, [SPIECE_UNDERLINE + u'I', SPIECE_UNDERLINE + u'was', SPIECE_UNDERLINE + u'b',
u'or', u'n', SPIECE_UNDERLINE + u'in', SPIECE_UNDERLINE + u'',
u'9', u'2', u'0', u'0', u'0', u',', SPIECE_UNDERLINE + u'and', SPIECE_UNDERLINE + u'this',
SPIECE_UNDERLINE + u'is', SPIECE_UNDERLINE + u'f', u'al', u's', u'é', u'.'])
ids = tokenizer.convert_tokens_to_ids(tokens)
self.assertListEqual(
ids, [8, 21, 84, 55, 24, 19, 7, 0,
602, 347, 347, 347, 3, 12, 66,
46, 72, 80, 6, 0, 4])
back_tokens = tokenizer.convert_ids_to_tokens(ids)
self.assertListEqual(back_tokens, [SPIECE_UNDERLINE + u'I', SPIECE_UNDERLINE + u'was', SPIECE_UNDERLINE + u'b',
u'or', u'n', SPIECE_UNDERLINE + u'in',
SPIECE_UNDERLINE + u'', u'<unk>', u'2', u'0', u'0', u'0', u',',
SPIECE_UNDERLINE + u'and', SPIECE_UNDERLINE + u'this',
SPIECE_UNDERLINE + u'is', SPIECE_UNDERLINE + u'f', u'al', u's',
u'<unk>', u'.'])
def test_tokenizer_lower(self):
tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
tokens = tokenizer.tokenize(u"I was born in 92000, and this is falsé.")
self.assertListEqual(tokens, [SPIECE_UNDERLINE + u'', u'i', SPIECE_UNDERLINE + u'was', SPIECE_UNDERLINE + u'b',
u'or', u'n', SPIECE_UNDERLINE + u'in', SPIECE_UNDERLINE + u'',
u'9', u'2', u'0', u'0', u'0', u',', SPIECE_UNDERLINE + u'and', SPIECE_UNDERLINE + u'this',
SPIECE_UNDERLINE + u'is', SPIECE_UNDERLINE + u'f', u'al', u'se', u'.'])
self.assertListEqual(tokenizer.tokenize(u"H\u00E9llo"), [u"▁he", u"ll", u"o"])
def test_tokenizer_no_lower(self):
tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
tokens = tokenizer.tokenize(u"I was born in 92000, and this is falsé.")
self.assertListEqual(tokens, [SPIECE_UNDERLINE + u'I', SPIECE_UNDERLINE + u'was', SPIECE_UNDERLINE + u'b', u'or',
u'n', SPIECE_UNDERLINE + u'in', SPIECE_UNDERLINE + u'',
u'9', u'2', u'0', u'0', u'0', u',', SPIECE_UNDERLINE + u'and', SPIECE_UNDERLINE + u'this',
SPIECE_UNDERLINE + u'is', SPIECE_UNDERLINE + u'f', u'al', u'se', u'.'])
def test_sequence_builders(self):
tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
text = tokenizer.encode("sequence builders")
text_2 = tokenizer.encode("multi-sequence build")
encoded_sentence = tokenizer.add_special_tokens_single_sequence(text)
encoded_pair = tokenizer.add_special_tokens_sequence_pair(text, text_2)
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_2 + [4, 3]
if __name__ == '__main__':
unittest.main()
| # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import unittest
from transformers.tokenization_xlnet import (XLNetTokenizer, SPIECE_UNDERLINE)
from .tokenization_tests_commons import CommonTestCases
SAMPLE_VOCAB = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'fixtures/test_sentencepiece.model')
class XLNetTokenizationTest(CommonTestCases.CommonTokenizerTester):
tokenizer_class = XLNetTokenizer
def setUp(self):
super(XLNetTokenizationTest, self).setUp()
# We have a SentencePiece fixture for testing
tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname)
def get_tokenizer(self, **kwargs):
return XLNetTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def get_input_output_texts(self):
input_text = u"This is a test"
output_text = u"This is a test"
return input_text, output_text
def test_full_tokenizer(self):
tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokens = tokenizer.tokenize(u'This is a test')
self.assertListEqual(tokens, [u'▁This', u'▁is', u'▁a', u'▁t', u'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])
tokens = tokenizer.tokenize(u"I was born in 92000, and this is falsé.")
self.assertListEqual(tokens, [SPIECE_UNDERLINE + u'I', SPIECE_UNDERLINE + u'was', SPIECE_UNDERLINE + u'b',
u'or', u'n', SPIECE_UNDERLINE + u'in', SPIECE_UNDERLINE + u'',
u'9', u'2', u'0', u'0', u'0', u',', SPIECE_UNDERLINE + u'and', SPIECE_UNDERLINE + u'this',
SPIECE_UNDERLINE + u'is', SPIECE_UNDERLINE + u'f', u'al', u's', u'é', u'.'])
ids = tokenizer.convert_tokens_to_ids(tokens)
self.assertListEqual(
ids, [8, 21, 84, 55, 24, 19, 7, 0,
602, 347, 347, 347, 3, 12, 66,
46, 72, 80, 6, 0, 4])
back_tokens = tokenizer.convert_ids_to_tokens(ids)
self.assertListEqual(back_tokens, [SPIECE_UNDERLINE + u'I', SPIECE_UNDERLINE + u'was', SPIECE_UNDERLINE + u'b',
u'or', u'n', SPIECE_UNDERLINE + u'in',
SPIECE_UNDERLINE + u'', u'<unk>', u'2', u'0', u'0', u'0', u',',
SPIECE_UNDERLINE + u'and', SPIECE_UNDERLINE + u'this',
SPIECE_UNDERLINE + u'is', SPIECE_UNDERLINE + u'f', u'al', u's',
u'<unk>', u'.'])
def test_tokenizer_lower(self):
tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
tokens = tokenizer.tokenize(u"I was born in 92000, and this is falsé.")
self.assertListEqual(tokens, [SPIECE_UNDERLINE + u'', u'i', SPIECE_UNDERLINE + u'was', SPIECE_UNDERLINE + u'b',
u'or', u'n', SPIECE_UNDERLINE + u'in', SPIECE_UNDERLINE + u'',
u'9', u'2', u'0', u'0', u'0', u',', SPIECE_UNDERLINE + u'and', SPIECE_UNDERLINE + u'this',
SPIECE_UNDERLINE + u'is', SPIECE_UNDERLINE + u'f', u'al', u'se', u'.'])
self.assertListEqual(tokenizer.tokenize(u"H\u00E9llo"), [u"▁he", u"ll", u"o"])
def test_tokenizer_no_lower(self):
tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
tokens = tokenizer.tokenize(u"I was born in 92000, and this is falsé.")
self.assertListEqual(tokens, [SPIECE_UNDERLINE + u'I', SPIECE_UNDERLINE + u'was', SPIECE_UNDERLINE + u'b', u'or',
u'n', SPIECE_UNDERLINE + u'in', SPIECE_UNDERLINE + u'',
u'9', u'2', u'0', u'0', u'0', u',', SPIECE_UNDERLINE + u'and', SPIECE_UNDERLINE + u'this',
SPIECE_UNDERLINE + u'is', SPIECE_UNDERLINE + u'f', u'al', u'se', u'.'])
def test_sequence_builders(self):
tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
text = tokenizer.encode("sequence builders")
text_2 = tokenizer.encode("multi-sequence build")
encoded_sentence = tokenizer.add_special_tokens_single_sequence(text)
encoded_pair = tokenizer.add_special_tokens_sequence_pair(text, text_2)
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_2 + [4, 3]
if __name__ == '__main__':
unittest.main()
| en | 0.853889 | # coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # We have a SentencePiece fixture for testing | 2.256918 | 2 |
preprocess/utils/liftOver_vcf.py | Rongtingting/xcltk | 0 | 7617 | # forked from https://github.com/single-cell-genetics/cellSNP
## A python wrap of UCSC liftOver function for vcf file
## UCSC liftOver binary and hg19 to hg38 chain file:
## https://genome.ucsc.edu/cgi-bin/hgLiftOver
## http://hgdownload.cse.ucsc.edu/admin/exe/linux.x86_64/liftOver
## http://hgdownload.soe.ucsc.edu/goldenPath/hg19/liftOver/hg19ToHg38.over.chain.gz
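## Example invocation (file names and paths here are only placeholders):
## python liftOver_vcf.py -c hg19ToHg38.over.chain.gz -i in_hg19.vcf.gz \
## -o out_hg38.vcf.gz -P /path/to/liftOver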
import sys
import gzip
import subprocess
from optparse import OptionParser
LIFTOVER_INFO = '##INFO=<ID=OLD,Number=1,Type=Integer,'
LIFTOVER_INFO += 'Description="position before liftover">\n'
def vcf_to_bed(vcf_file, out_file, chr_in=True):
if vcf_file[-3:] == ".gz":
is_gzip = True
fid_in = gzip.open(vcf_file, "r")
else:
is_gzip = False
fid_in = open(vcf_file, "r")
fid_out = open(out_file, "w")
for line in fid_in:
if is_gzip:
line = line.decode('utf-8')
if line.startswith("#") == False:
line_val = line.rstrip().split("\t")[:8]
if chr_in and line_val[0].startswith("chr") == False:
line_val[0] = "chr" + line_val[0]
line_val[2] = str(int(line_val[1]) + 1)
fid_out.writelines("\t".join(line_val[:3]) + "\n")
fid_in.close()
fid_out.close()
return None
def update_vcf(vcf_file, bed_new, bed_unmap, out_file):
## unmapped lines
unmap_pos = []
_fid = open(bed_unmap, "r")
for line in _fid:
if not line.startswith("#"):
_pos_id = "_".join(line.rstrip().split("\t")[:2])
unmap_pos.append(_pos_id)
_fid.close()
if vcf_file[-3:] == ".gz":
is_gzip = True
fid_in = gzip.open(vcf_file, "r")
else:
is_gzip = False
fid_in = open(vcf_file, "r")
cnt1 = 0
idx_unmap = 0
fid_bed = open(bed_new, "r")
fid_out = open(out_file, "w")
for line in fid_in:
if is_gzip:
line = line.decode('utf-8')
if line.startswith("#"):
if line.startswith("#CHROM"):
fid_out.writelines(LIFTOVER_INFO)
fid_out.writelines(line)
else:
line_val = line.rstrip().split("\t")
if idx_unmap < len(unmap_pos):
_pos_id = "_".join(line_val[:2])
if line_val[0].startswith("chr") == False:
_pos_id = "chr" + _pos_id
if _pos_id == unmap_pos[idx_unmap]:
idx_unmap += 1
continue
cnt1 += 1
bed_line = fid_bed.readline()
line_val[7] = "OLD=" + line_val[1] + ";" + line_val[7]
line_val[1] = bed_line.rstrip().split("\t")[1]
fid_out.writelines("\t".join(line_val) + "\n")
print(cnt1, idx_unmap)
fid_in.close()
fid_bed.close()
fid_out.close()
return None
def main():
import warnings
warnings.filterwarnings('error')
# parse command line options
parser = OptionParser()
parser.add_option("--chainFile", "-c", dest="chain_file", default=None,
help=("Chain file, full path."))
parser.add_option("--inFile", "-i", dest="in_file", default=None,
help=("Input vcf file, full path."))
parser.add_option("--outFile", "-o", dest="out_file", default=None,
help=("Output VCF file, full path."))
parser.add_option("--liftOverPath", "-P", dest="liftOver_path", default=None,
help=("liftOver_path if it is not in PATH variable."))
(options, args) = parser.parse_args()
if len(sys.argv[1:]) == 0:
print("liftOver-vcf: a wrap of UCSC liftOver for VCF file.\n")
print("use -h or --help for help on argument.")
sys.exit(1)
in_file = options.in_file
bed_file = options.in_file.split(".vcf")[0] + ".bed"
new_bed_file = options.out_file.split(".vcf")[0] + ".bed"
unmap_bed_file = options.out_file.split(".vcf")[0] + ".unmap.bed"
## generate bed file
print("converting vcf to bed file ... ")
vcf_to_bed(in_file, bed_file)
## UCSC liftOver on bed file
chain_file = options.chain_file
if options.liftOver_path is None:
liftOver = "liftOver"
else:
# check if path exists
liftOver = options.liftOver_path
print("liftOver bed file ... ")
bashCommand = "%s %s %s %s %s" %(liftOver, bed_file, chain_file,
new_bed_file, unmap_bed_file)
#print(bashCommand)
pro = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
pro.communicate()[0]
## update vcf file
out_file = options.out_file
if out_file[-3:] == ".gz":
out_file = out_file[:-3]
print("updating vcf file ... ")
update_vcf(in_file, new_bed_file, unmap_bed_file, out_file)
print("gzip vcf file ... ")
import shutil
if shutil.which("bgzip") is not None:
bashCommand = "bgzip -f %s" %(out_file)
else:
bashCommand = "gzip -f %s" %(out_file)
pro = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
pro.communicate()[0]
return None
if __name__ == "__main__":
main()
| # forked from https://github.com/single-cell-genetics/cellSNP
## A python wrap of UCSC liftOver function for vcf file
## UCSC liftOver binary and hg19 to hg38 chain file:
## https://genome.ucsc.edu/cgi-bin/hgLiftOver
## http://hgdownload.cse.ucsc.edu/admin/exe/linux.x86_64/liftOver
## http://hgdownload.soe.ucsc.edu/goldenPath/hg19/liftOver/hg19ToHg38.over.chain.gz
import sys
import gzip
import subprocess
from optparse import OptionParser
LIFTOVER_INFO = '##INFO=<ID=OLD,Number=1,Type=Integer,'
LIFTOVER_INFO += 'Description="position before liftover">\n'
def vcf_to_bed(vcf_file, out_file, chr_in=True):
if vcf_file[-3:] == ".gz":
is_gzip = True
fid_in = gzip.open(vcf_file, "r")
else:
is_gzip = False
fid_in = open(vcf_file, "r")
fid_out = open(out_file, "w")
for line in fid_in:
if is_gzip:
line = line.decode('utf-8')
if line.startswith("#") == False:
line_val = line.rstrip().split("\t")[:8]
if chr_in and line_val[0].startswith("chr") == False:
line_val[0] = "chr" + line_val[0]
line_val[2] = str(int(line_val[1]) + 1)
fid_out.writelines("\t".join(line_val[:3]) + "\n")
fid_in.close()
fid_out.close()
return None
def update_vcf(vcf_file, bed_new, bed_unmap, out_file):
## unmapped lines
unmap_pos = []
_fid = open(bed_unmap, "r")
for line in _fid:
if not line.startswith("#"):
_pos_id = "_".join(line.rstrip().split("\t")[:2])
unmap_pos.append(_pos_id)
_fid.close()
if vcf_file[-3:] == ".gz":
is_gzip = True
fid_in = gzip.open(vcf_file, "r")
else:
is_gzip = False
fid_in = open(vcf_file, "r")
cnt1 = 0
idx_unmap = 0
fid_bed = open(bed_new, "r")
fid_out = open(out_file, "w")
for line in fid_in:
if is_gzip:
line = line.decode('utf-8')
if line.startswith("#"):
if line.startswith("#CHROM"):
fid_out.writelines(LIFTOVER_INFO)
fid_out.writelines(line)
else:
line_val = line.rstrip().split("\t")
if idx_unmap < len(unmap_pos):
_pos_id = "_".join(line_val[:2])
if line_val[0].startswith("chr") == False:
_pos_id = "chr" + _pos_id
if _pos_id == unmap_pos[idx_unmap]:
idx_unmap += 1
continue
cnt1 += 1
bed_line = fid_bed.readline()
line_val[7] = "OLD=" + line_val[1] + ";" + line_val[7]
line_val[1] = bed_line.rstrip().split("\t")[1]
fid_out.writelines("\t".join(line_val) + "\n")
print(cnt1, idx_unmap)
fid_in.close()
fid_bed.close()
fid_out.close()
return None
def main():
import warnings
warnings.filterwarnings('error')
# parse command line options
parser = OptionParser()
parser.add_option("--chainFile", "-c", dest="chain_file", default=None,
help=("Chain file, full path."))
parser.add_option("--inFile", "-i", dest="in_file", default=None,
help=("Input vcf file, full path."))
parser.add_option("--outFile", "-o", dest="out_file", default=None,
help=("Output VCF file, full path."))
parser.add_option("--liftOverPath", "-P", dest="liftOver_path", default=None,
help=("liftOver_path if it is not in PATH variable."))
(options, args) = parser.parse_args()
if len(sys.argv[1:]) == 0:
print("liftOver-vcf: a wrap of UCSC liftOver for VCF file.\n")
print("use -h or --help for help on argument.")
sys.exit(1)
in_file = options.in_file
bed_file = options.in_file.split(".vcf")[0] + ".bed"
new_bed_file = options.out_file.split(".vcf")[0] + ".bed"
unmap_bed_file = options.out_file.split(".vcf")[0] + ".unmap.bed"
## generate bed file
print("converting vcf to bed file ... ")
vcf_to_bed(in_file, bed_file)
## UCSC liftOver on bed file
chain_file = options.chain_file
if options.liftOver_path is None:
liftOver = "liftOver"
else:
# check if path exists
liftOver = options.liftOver_path
print("liftOver bed file ... ")
bashCommand = "%s %s %s %s %s" %(liftOver, bed_file, chain_file,
new_bed_file, unmap_bed_file)
#print(bashCommand)
pro = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
pro.communicate()[0]
## update vcf file
out_file = options.out_file
if out_file[-3:] == ".gz":
out_file = out_file[:-3]
print("updating vcf file ... ")
update_vcf(in_file, new_bed_file, unmap_bed_file, out_file)
print("gzip vcf file ... ")
import shutil
if shutil.which("bgzip") is not None:
bashCommand = "bgzip -f %s" %(out_file)
else:
bashCommand = "gzip -f %s" %(out_file)
pro = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
pro.communicate()[0]
return None
if __name__ == "__main__":
main()
| en | 0.579815 | # forked from https://github.com/single-cell-genetics/cellSNP ## A python wrap of UCSC liftOver function for vcf file ## UCSC liftOver binary and hg19 to hg38 chain file: ## https://genome.ucsc.edu/cgi-bin/hgLiftOver ## http://hgdownload.cse.ucsc.edu/admin/exe/linux.x86_64/liftOver ## http://hgdownload.soe.ucsc.edu/goldenPath/hg19/liftOver/hg19ToHg38.over.chain.gz #INFO=<ID=OLD,Number=1,Type=Integer,' ## unmapped lines # parse command line options ## generate bed file ## UCSC liftOver on bed file # check if path exists #print(bashCommand) ## update vcf file | 2.365941 | 2 |
pomodorr/frames/tests/test_consumers.py | kamil559/pomodorr | 0 | 7618 | <filename>pomodorr/frames/tests/test_consumers.py
import json
import pytest
from channels.db import database_sync_to_async
from channels.testing import WebsocketCommunicator
from pytest_lazyfixture import lazy_fixture
from pomodorr.frames import statuses
from pomodorr.frames.models import DateFrame
from pomodorr.frames.routing import frames_application
from pomodorr.frames.selectors.date_frame_selector import get_finished_date_frames_for_task
pytestmark = [pytest.mark.django_db(transaction=True), pytest.mark.asyncio]
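# Protocol exercised below: the client sends JSON messages of type
# "frame_start" (with a "frame_type") or "frame_finish" (with a
# "date_frame_id"); responses carry "level", "code", "action" and "data".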
async def test_connect_websocket(task_instance, active_user):
communicator = WebsocketCommunicator(frames_application, f'date_frames/{task_instance.id}/')
communicator.scope['user'] = active_user
connected, _ = await communicator.connect()
assert connected
await communicator.disconnect()
@pytest.mark.parametrize(
'tested_frame_type',
[DateFrame.pomodoro_type, DateFrame.break_type, DateFrame.pause_type]
)
async def test_start_and_finish_date_frame(tested_frame_type, task_instance, active_user):
communicator = WebsocketCommunicator(frames_application, f'date_frames/{task_instance.id}/')
communicator.scope['user'] = active_user
await communicator.connect()
assert await database_sync_to_async(task_instance.frames.exists)() is False
await communicator.send_json_to({
'type': 'frame_start',
'frame_type': tested_frame_type
})
response = await communicator.receive_json_from()
assert response['level'] == statuses.MESSAGE_LEVEL_CHOICES[statuses.LEVEL_TYPE_SUCCESS]
assert response['code'] == statuses.LEVEL_TYPE_SUCCESS
assert response['action'] == statuses.MESSAGE_FRAME_ACTION_CHOICES[statuses.FRAME_ACTION_STARTED]
started_date_frame_id = response['data']['date_frame_id']
assert started_date_frame_id is not None
assert await database_sync_to_async(task_instance.frames.exists)()
await communicator.send_json_to({
'type': 'frame_finish',
'date_frame_id': started_date_frame_id
})
response = await communicator.receive_json_from()
assert response['level'] == statuses.MESSAGE_LEVEL_CHOICES[statuses.LEVEL_TYPE_SUCCESS]
assert response['code'] == statuses.LEVEL_TYPE_SUCCESS
assert response['action'] == statuses.MESSAGE_FRAME_ACTION_CHOICES[statuses.FRAME_ACTION_FINISHED]
assert await database_sync_to_async(get_finished_date_frames_for_task(task=task_instance).exists)()
await communicator.disconnect()
async def test_start_and_finish_pomodoro_with_pause_inside(task_instance, active_user):
communicator = WebsocketCommunicator(frames_application, f'date_frames/{task_instance.id}/')
communicator.scope['user'] = active_user
await communicator.connect()
await communicator.send_json_to({
'type': 'frame_start',
'frame_type': DateFrame.pomodoro_type
})
pomodoro_started_response = await communicator.receive_json_from()
assert pomodoro_started_response['action'] == statuses.MESSAGE_FRAME_ACTION_CHOICES[statuses.FRAME_ACTION_STARTED]
started_pomodoro_id = pomodoro_started_response['data']['date_frame_id']
await communicator.send_json_to({
'type': 'frame_start',
'frame_type': DateFrame.pause_type
})
pause_started_response = await communicator.receive_json_from()
assert pause_started_response['action'] == statuses.MESSAGE_FRAME_ACTION_CHOICES[statuses.FRAME_ACTION_STARTED]
pomodoro = await database_sync_to_async(DateFrame.objects.get)(id=started_pomodoro_id)
assert pomodoro.end is None # check if pomodoro hasn't been stopped by starting a pause date frame
started_pause_id = pause_started_response['data']['date_frame_id']
pause = await database_sync_to_async(DateFrame.objects.get)(id=started_pause_id)
assert pause.end is None
await communicator.send_json_to({
'type': 'frame_finish',
'date_frame_id': started_pause_id
})
pause_finished_response = await communicator.receive_json_from()
assert pause_finished_response['action'] == statuses.MESSAGE_FRAME_ACTION_CHOICES[statuses.FRAME_ACTION_FINISHED]
await database_sync_to_async(pause.refresh_from_db)()
assert pause.end is not None # pause should be finished here
await database_sync_to_async(pomodoro.refresh_from_db)()
assert pomodoro.end is None
await communicator.send_json_to({
'type': 'frame_finish',
'date_frame_id': started_pomodoro_id
})
pomodoro_finished_response = await communicator.receive_json_from()
await database_sync_to_async(pomodoro.refresh_from_db)()
assert pomodoro.end is not None # Only now the pomodoro is expected to be finished
assert pomodoro_finished_response['action'] == statuses.MESSAGE_FRAME_ACTION_CHOICES[statuses.FRAME_ACTION_FINISHED]
assert await database_sync_to_async(get_finished_date_frames_for_task(task=task_instance).count)() == 2
await communicator.disconnect()
@pytest.mark.parametrize(
'tested_frame_type',
[DateFrame.pomodoro_type, DateFrame.break_type, DateFrame.pause_type]
)
async def test_channel_group_separation(tested_frame_type, active_user, task_instance,
task_instance_in_second_project):
communicator_1 = WebsocketCommunicator(frames_application, f'date_frames/{task_instance.id}/')
communicator_2 = WebsocketCommunicator(frames_application, f'date_frames/{task_instance_in_second_project.id}/')
communicator_1.scope['user'] = active_user
communicator_2.scope['user'] = active_user
communicator_1_connected, _ = await communicator_1.connect()
communicator_2_connected, _ = await communicator_2.connect()
assert communicator_1_connected
assert communicator_2_connected
assert await communicator_1.receive_nothing()
assert await communicator_2.receive_nothing()
await communicator_1.send_json_to({
'type': 'frame_start',
'frame_type': tested_frame_type
})
assert await communicator_1.receive_nothing() is False
assert await communicator_2.receive_nothing()
await communicator_1.disconnect()
await communicator_2.disconnect()
@pytest.mark.parametrize(
'tested_frame_type',
[DateFrame.pomodoro_type, DateFrame.break_type, DateFrame.pause_type]
)
async def test_connection_discarded_before_second_connection_established(tested_frame_type, active_user, task_instance):
communicator_1 = WebsocketCommunicator(frames_application, f'date_frames/{task_instance.id}/')
communicator_2 = WebsocketCommunicator(frames_application, f'date_frames/{task_instance.id}/')
communicator_1.scope['user'] = active_user
communicator_2.scope['user'] = active_user
communicator_1_connected, _ = await communicator_1.connect()
assert communicator_1_connected
communicator_2_connected, _ = await communicator_2.connect()
assert communicator_2_connected
connection_close_response = await communicator_1.receive_output()
assert connection_close_response['type'] == 'websocket.close'
assert await communicator_1.receive_nothing()
assert await communicator_2.receive_nothing()
await communicator_2.send_json_to({
'type': 'frame_start',
'frame_type': tested_frame_type
})
assert await communicator_1.receive_nothing()
assert await communicator_2.receive_nothing() is False
await communicator_2.disconnect()
@pytest.mark.parametrize(
'tested_frame_type',
[
lazy_fixture('pomodoro_in_progress'),
lazy_fixture('pause_in_progress')
]
)
async def test_date_frame_force_finished_and_client_notified(tested_frame_type, active_user, task_instance):
communicator_1 = WebsocketCommunicator(frames_application, f'date_frames/{task_instance.id}/')
communicator_2 = WebsocketCommunicator(frames_application, f'date_frames/{task_instance.id}/')
communicator_1.scope['user'] = active_user
communicator_2.scope['user'] = active_user
await communicator_1.connect()
await communicator_2.connect()
notification_message = await communicator_1.receive_output()
assert notification_message['type'] == 'websocket.send'
assert json.loads(notification_message['text'])['action'] == statuses.MESSAGE_FRAME_ACTION_CHOICES[
statuses.FRAME_ACTION_FORCE_TERMINATED]
connection_close_response = await communicator_1.receive_output()
assert connection_close_response['type'] == 'websocket.close'
await communicator_1.disconnect()
await communicator_2.disconnect()
async def test_channel_group_permission(task_instance_for_random_project, active_user):
communicator = WebsocketCommunicator(frames_application, f'date_frames/{task_instance_for_random_project.id}/')
communicator.scope['user'] = active_user
connected, _ = await communicator.connect()
assert connected is False
| <filename>pomodorr/frames/tests/test_consumers.py
import json
import pytest
from channels.db import database_sync_to_async
from channels.testing import WebsocketCommunicator
from pytest_lazyfixture import lazy_fixture
from pomodorr.frames import statuses
from pomodorr.frames.models import DateFrame
from pomodorr.frames.routing import frames_application
from pomodorr.frames.selectors.date_frame_selector import get_finished_date_frames_for_task
pytestmark = [pytest.mark.django_db(transaction=True), pytest.mark.asyncio]
async def test_connect_websocket(task_instance, active_user):
communicator = WebsocketCommunicator(frames_application, f'date_frames/{task_instance.id}/')
communicator.scope['user'] = active_user
connected, _ = await communicator.connect()
assert connected
await communicator.disconnect()
@pytest.mark.parametrize(
'tested_frame_type',
[DateFrame.pomodoro_type, DateFrame.break_type, DateFrame.pause_type]
)
async def test_start_and_finish_date_frame(tested_frame_type, task_instance, active_user):
communicator = WebsocketCommunicator(frames_application, f'date_frames/{task_instance.id}/')
communicator.scope['user'] = active_user
await communicator.connect()
assert await database_sync_to_async(task_instance.frames.exists)() is False
await communicator.send_json_to({
'type': 'frame_start',
'frame_type': tested_frame_type
})
response = await communicator.receive_json_from()
assert response['level'] == statuses.MESSAGE_LEVEL_CHOICES[statuses.LEVEL_TYPE_SUCCESS]
assert response['code'] == statuses.LEVEL_TYPE_SUCCESS
assert response['action'] == statuses.MESSAGE_FRAME_ACTION_CHOICES[statuses.FRAME_ACTION_STARTED]
started_date_frame_id = response['data']['date_frame_id']
assert started_date_frame_id is not None
assert await database_sync_to_async(task_instance.frames.exists)()
await communicator.send_json_to({
'type': 'frame_finish',
'date_frame_id': started_date_frame_id
})
response = await communicator.receive_json_from()
assert response['level'] == statuses.MESSAGE_LEVEL_CHOICES[statuses.LEVEL_TYPE_SUCCESS]
assert response['code'] == statuses.LEVEL_TYPE_SUCCESS
assert response['action'] == statuses.MESSAGE_FRAME_ACTION_CHOICES[statuses.FRAME_ACTION_FINISHED]
assert await database_sync_to_async(get_finished_date_frames_for_task(task=task_instance).exists)()
await communicator.disconnect()
async def test_start_and_finish_pomodoro_with_pause_inside(task_instance, active_user):
communicator = WebsocketCommunicator(frames_application, f'date_frames/{task_instance.id}/')
communicator.scope['user'] = active_user
await communicator.connect()
await communicator.send_json_to({
'type': 'frame_start',
'frame_type': DateFrame.pomodoro_type
})
pomodoro_started_response = await communicator.receive_json_from()
assert pomodoro_started_response['action'] == statuses.MESSAGE_FRAME_ACTION_CHOICES[statuses.FRAME_ACTION_STARTED]
started_pomodoro_id = pomodoro_started_response['data']['date_frame_id']
await communicator.send_json_to({
'type': 'frame_start',
'frame_type': DateFrame.pause_type
})
pause_started_response = await communicator.receive_json_from()
assert pause_started_response['action'] == statuses.MESSAGE_FRAME_ACTION_CHOICES[statuses.FRAME_ACTION_STARTED]
pomodoro = await database_sync_to_async(DateFrame.objects.get)(id=started_pomodoro_id)
assert pomodoro.end is None # check if pomodoro hasn't been stopped by starting a pause date frame
started_pause_id = pause_started_response['data']['date_frame_id']
pause = await database_sync_to_async(DateFrame.objects.get)(id=started_pause_id)
assert pause.end is None
await communicator.send_json_to({
'type': 'frame_finish',
'date_frame_id': started_pause_id
})
pause_finished_response = await communicator.receive_json_from()
assert pause_finished_response['action'] == statuses.MESSAGE_FRAME_ACTION_CHOICES[statuses.FRAME_ACTION_FINISHED]
await database_sync_to_async(pause.refresh_from_db)()
assert pause.end is not None # pause should be finished here
await database_sync_to_async(pomodoro.refresh_from_db)()
assert pomodoro.end is None
await communicator.send_json_to({
'type': 'frame_finish',
'date_frame_id': started_pomodoro_id
})
pomodoro_finished_response = await communicator.receive_json_from()
await database_sync_to_async(pomodoro.refresh_from_db)()
assert pomodoro.end is not None # Only now the pomodoro is expected to be finished
assert pomodoro_finished_response['action'] == statuses.MESSAGE_FRAME_ACTION_CHOICES[statuses.FRAME_ACTION_FINISHED]
assert await database_sync_to_async(get_finished_date_frames_for_task(task=task_instance).count)() == 2
await communicator.disconnect()
@pytest.mark.parametrize(
'tested_frame_type',
[DateFrame.pomodoro_type, DateFrame.break_type, DateFrame.pause_type]
)
async def test_channel_group_separation(tested_frame_type, active_user, task_instance,
task_instance_in_second_project):
communicator_1 = WebsocketCommunicator(frames_application, f'date_frames/{task_instance.id}/')
communicator_2 = WebsocketCommunicator(frames_application, f'date_frames/{task_instance_in_second_project.id}/')
communicator_1.scope['user'] = active_user
communicator_2.scope['user'] = active_user
communicator_1_connected, _ = await communicator_1.connect()
communicator_2_connected, _ = await communicator_2.connect()
assert communicator_1_connected
assert communicator_2_connected
assert await communicator_1.receive_nothing()
assert await communicator_2.receive_nothing()
await communicator_1.send_json_to({
'type': 'frame_start',
'frame_type': tested_frame_type
})
assert await communicator_1.receive_nothing() is False
assert await communicator_2.receive_nothing()
await communicator_1.disconnect()
await communicator_2.disconnect()
@pytest.mark.parametrize(
'tested_frame_type',
[DateFrame.pomodoro_type, DateFrame.break_type, DateFrame.pause_type]
)
async def test_connection_discarded_before_second_connection_established(tested_frame_type, active_user, task_instance):
communicator_1 = WebsocketCommunicator(frames_application, f'date_frames/{task_instance.id}/')
communicator_2 = WebsocketCommunicator(frames_application, f'date_frames/{task_instance.id}/')
communicator_1.scope['user'] = active_user
communicator_2.scope['user'] = active_user
communicator_1_connected, _ = await communicator_1.connect()
assert communicator_1_connected
communicator_2_connected, _ = await communicator_2.connect()
assert communicator_2_connected
connection_close_response = await communicator_1.receive_output()
assert connection_close_response['type'] == 'websocket.close'
assert await communicator_1.receive_nothing()
assert await communicator_2.receive_nothing()
await communicator_2.send_json_to({
'type': 'frame_start',
'frame_type': tested_frame_type
})
assert await communicator_1.receive_nothing()
assert await communicator_2.receive_nothing() is False
await communicator_2.disconnect()
@pytest.mark.parametrize(
'tested_frame_type',
[
lazy_fixture('pomodoro_in_progress'),
lazy_fixture('pause_in_progress')
]
)
async def test_date_frame_force_finished_and_client_notified(tested_frame_type, active_user, task_instance):
communicator_1 = WebsocketCommunicator(frames_application, f'date_frames/{task_instance.id}/')
communicator_2 = WebsocketCommunicator(frames_application, f'date_frames/{task_instance.id}/')
communicator_1.scope['user'] = active_user
communicator_2.scope['user'] = active_user
await communicator_1.connect()
await communicator_2.connect()
notification_message = await communicator_1.receive_output()
assert notification_message['type'] == 'websocket.send'
assert json.loads(notification_message['text'])['action'] == statuses.MESSAGE_FRAME_ACTION_CHOICES[
statuses.FRAME_ACTION_FORCE_TERMINATED]
connection_close_response = await communicator_1.receive_output()
assert connection_close_response['type'] == 'websocket.close'
await communicator_1.disconnect()
await communicator_2.disconnect()
async def test_channel_group_permission(task_instance_for_random_project, active_user):
communicator = WebsocketCommunicator(frames_application, f'date_frames/{task_instance_for_random_project.id}/')
communicator.scope['user'] = active_user
connected, _ = await communicator.connect()
assert connected is False
| en | 0.916656 | # check if pomodoro hasn't been stopped by starting a pause date frame # pause should be finished here # Only now the pomodoro is expected to be finished | 2.082868 | 2 |
Bot/db_aps.py | FaHoLo/Fish_shop | 0 | 7619 | import logging
import os
import redis
import moltin_aps
_database = None
db_logger = logging.getLogger('db_logger')
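# lazily create a single shared Redis connection and reuse it across calls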
async def get_database_connection():
global _database
if _database is None:
database_password = os.getenv('DB_PASSWORD')
database_host = os.getenv('DB_HOST')
database_port = os.getenv('DB_PORT')
_database = redis.Redis(host=database_host, port=database_port, password=database_password)
db_logger.debug('Got new db connection')
return _database
async def get_moltin_customer_id(customer_key):
db = await get_database_connection()
customer_id = db.get(customer_key)
if customer_id:
customer_id = customer_id.decode('utf-8')
db_logger.debug(f'Got moltin customer id «{customer_id}» from db')
return customer_id
async def update_customer_info(customer_key, customer_info):
db = await get_database_connection()
customer_id = db.get(customer_key).decode('utf-8')
moltin_aps.update_customer_info(customer_id, customer_info)
db_logger.debug(f'Customer «{customer_id}» info was updated')
async def create_customer(customer_key, customer_info):
db = await get_database_connection()
customer_id = moltin_aps.create_customer(customer_info)['data']['id']
db.set(customer_key, customer_id)
db_logger.debug(f'New customer «{customer_key}» was created')
| import logging
import os
import redis
import moltin_aps
_database = None
db_logger = logging.getLogger('db_logger')
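# lazily create a single shared Redis connection and reuse it across calls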
async def get_database_connection():
global _database
if _database is None:
database_password = os.getenv('DB_PASSWORD')
database_host = os.getenv('DB_HOST')
database_port = os.getenv('DB_PORT')
_database = redis.Redis(host=database_host, port=database_port, password=database_password)
db_logger.debug('Got new db connection')
return _database
async def get_moltin_customer_id(customer_key):
db = await get_database_connection()
customer_id = db.get(customer_key)
if customer_id:
customer_id = customer_id.decode('utf-8')
db_logger.debug(f'Got moltin customer id «{customer_id}» from db')
return customer_id
async def update_customer_info(customer_key, customer_info):
db = await get_database_connection()
customer_id = db.get(customer_key).decode('utf-8')
moltin_aps.update_customer_info(customer_id, customer_info)
db_logger.debug(f'Customer «{customer_id}» info was updated')
async def create_customer(customer_key, customer_info):
db = await get_database_connection()
customer_id = moltin_aps.create_customer(customer_info)['data']['id']
db.set(customer_key, customer_id)
db_logger.debug(f'New customer «{customer_key}» was created')
| none | 1 | 2.170927 | 2 |
|
backend/server/tables/__init__.py | shiv12095/realtimeviz | 1 | 7620 | from .lime_bike_feed import LimeBikeFeed
from .lime_bike_trips import LimeBikeTrips
from .lime_bike_trips_analyze import LimeBikeTripsAnalyze
| from .lime_bike_feed import LimeBikeFeed
from .lime_bike_trips import LimeBikeTrips
from .lime_bike_trips_analyze import LimeBikeTripsAnalyze
| none | 1 | 1.040724 | 1 |
|
sapmon/payload/provider/sapnetweaver.py | gummadirajesh/AzureMonitorForSAPSolutions | 0 | 7621 | # Python modules
import json
import logging
from datetime import datetime, timedelta, timezone
from time import time
from typing import Any, Callable
import re
import requests
from requests import Session
from threading import Lock
# SOAP Client modules
from zeep import Client
from zeep import helpers
from zeep.transports import Transport
from zeep.exceptions import Fault
# Payload modules
from const import *
from helper.azure import AzureStorageAccount
from helper.context import *
from helper.tools import *
from provider.base import ProviderInstance, ProviderCheck
from netweaver.metricclientfactory import NetWeaverMetricClient, MetricClientFactory
from netweaver.rfcsdkinstaller import PATH_RFC_SDK_INSTALL, SapRfcSdkInstaller
from typing import Dict
# Suppress SSLError warning due to missing SAP server certificate
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# wait time in between attempts to re-download and install RFC SDK package if we have a download blob
# URL defined and previous install attempt was not successful
MINIMUM_RFC_INSTALL_RETRY_INTERVAL = timedelta(minutes=30)
# timeout to use for all SOAP WSDL fetch and other API calls
SOAP_API_TIMEOUT_SECS = 5
# soap client cache expiration, after which amount of time both successful + failed soap client instantiation attempts will be refreshed
SOAP_CLIENT_CACHE_EXPIRATIION = timedelta(minutes=10)
class sapNetweaverProviderInstance(ProviderInstance):
# static / class variables to enforce singleton behavior around rfc sdk installation attempts across all
# instances of SAP Netweaver provider
_isRfcInstalled = None
_rfcInstallerLock = Lock()
def __init__(self,
tracer: logging.Logger,
ctx: Context,
providerInstance: Dict[str, str],
skipContent: bool = False,
**kwargs) -> None:
self.sapSid = None
self.sapHostName = None
self.sapInstanceNr = None
self.sapSubdomain = None
# RFC SDK call settings
self.sapUsername = None
self.sapPassword = None
self.sapClientId = None
self.sapRfcSdkBlobUrl = None
self.sapLogonGroup = None
# provider instance flag for whether RFC calls should be enabled for this specific Netweaver provider instance
self._areRfcCallsEnabled = None
# cache WSDL SOAP clients so we can re-use them across checks for the same provider and cut down off-box calls
self._soapClientCache = {}
# the RFC SDK does not allow client to specify a timeout and in fact appears to have a connection timeout of 60 secs.
# In cases where RFC calls timeout due to some misconfiguration, multiple retries can lead to metric gaps of several minutes.
# We are limiting retries here because it is extremely rare for SOAP or RFC call to fail on first attempt and succeed on retry,
# as most of these failures are due to persistent issues. Better to not waste limited time budget.
retrySettings = {
"retries": 1,
"delayInSeconds": 1,
"backoffMultiplier": 2
}
super().__init__(tracer,
ctx,
providerInstance,
retrySettings,
skipContent,
**kwargs)
"""
parse provider properties and get sid, host name and instance number
"""
def parseProperties(self) -> bool:
self.sapSid = self.metadata.get("sapSid", "")
if not self.sapSid:
self.tracer.error("%s sapSid cannot be empty", self.fullName)
return False
# provider level common logging prefix
self.logTag = "[%s][%s]" % (self.fullName, self.sapSid)
self.sapHostName = self.providerProperties.get("sapHostName", None)
if not self.sapHostName:
self.tracer.error("%s sapHostName cannot be empty", self.logTag)
return False
instanceNr = self.providerProperties.get("sapInstanceNr", None)
if instanceNr is None: # 0 is an acceptable value for Instance Number
self.tracer.error("%s sapInstanceNr cannot be empty", self.logTag)
return False
if not type(instanceNr) is int or instanceNr < 0 or instanceNr > 98:
self.tracer.error("%s sapInstanceNr can only be between 00 and 98 but %s was passed", self.logTag, str(instanceNr))
return False
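        # store instance number as a zero-padded two character string (e.g. 7 -> "07") to match SAP conventions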
self.sapInstanceNr = str(instanceNr).zfill(2)
self.sapSubdomain = self.providerProperties.get("sapSubdomain", "")
self.sapUsername = self.providerProperties.get('sapUsername', None)
self.sapPassword = self.providerProperties.get('sapPassword', None)
self.sapClientId = self.providerProperties.get('sapClientId', None)
self.sapLogonGroup = self.providerProperties.get('sapLogonGroup',None)
self.sapRfcSdkBlobUrl = self.providerProperties.get('sapRfcSdkBlobUrl', None)
# if user did not specify password directly via UI, check to see if they instead
# provided link to Key Vault secret
if not self.sapPassword:
sapPasswordKeyVaultUrl = self.providerProperties.get("sapPasswordKeyVaultUrl", None)
if sapPasswordKeyVaultUrl:
self.tracer.info("%s sapPassword key vault URL specified, attempting to fetch from %s", self.logTag, sapPasswordKeyVaultUrl)
try:
keyVaultUrlPatternMatch = re.match(REGEX_EXTERNAL_KEYVAULT_URL,
sapPasswordKeyVaultUrl,
re.IGNORECASE)
keyVaultName = keyVaultUrlPatternMatch.group(1)
secretName = keyVaultUrlPatternMatch.group(2)
except Exception as e:
self.tracer.error("%s invalid sapPassword Key Vault secret url format: %s", self.logTag, sapPasswordKeyVaultUrl)
return False
try:
kv = AzureKeyVault(self.tracer, keyVaultName, self.ctx.msiClientId)
self.sapPassword = kv.getSecret(secretName, None).value
if not self.sapPassword:
raise Exception("failed to read sapPassword secret")
except Exception as e:
self.tracer.error("%s error fetching sapPassword secret from keyVault url: %s, %s",
self.logTag,
sapPasswordKeyVaultUrl,
e)
return False
return True
def _getHttpPortFromInstanceNr(self, instanceNr: str) -> str:
return '5%s13' % instanceNr # As per SAP documentation, default http port is of the form 5<NR>13
def _getHttpsPortFromInstanceNr(self, instanceNr: str) -> str:
return '5%s14' % instanceNr # As per SAP documentation, default https port is of the form 5<NR>14
def getMessageServerPortFromInstanceNr(self, instanceNr: str) -> str:
return '81%s' % instanceNr # As per SAP documentation, default http port is of the form 81<NR>
def getFullyQualifiedDomainName(self, hostname: str) -> str:
if self.sapSubdomain:
return hostname + "." + self.sapSubdomain
else:
return hostname
"""
will first attempt to create SOAP client for hostname using the HTTPS port derived from the SAP instance number,
and if that does not succeed will then try to create client using the derived HTTP port
(if neither hostname or instance are specified, will default to the primary hostname/instance that the
provider was initialized with from properties)
"""
def getDefaultClient(self,
hostname: str = None,
instance: str = None) -> Client:
if not hostname:
hostname = self.sapHostName
if not instance:
instance = self.sapInstanceNr
httpsPort = self._getHttpsPortFromInstanceNr(instance)
httpPort = self._getHttpPortFromInstanceNr(instance)
portList = [(httpsPort,"https"),(httpPort,"http")]
exceptionDetails = None
startTime = time()
for port,protocol in portList:
startTime = time()
self.tracer.info("%s attempting to fetch default client for hostname=%s on %s port %s",
self.logTag, hostname, protocol, port)
try:
client = self.getClient(hostname, httpProtocol=protocol, port=port)
return client
except Exception as e:
exceptionDetails = e
self.tracer.info("%s error fetching default client hostname=%s on %s port %s: %s [%d ms]",
self.logTag, self.sapHostName, protocol, port, e, TimeUtils.getElapsedMilliseconds(startTime))
self.tracer.error("[%s] error fetching default client hostname=%s on port %s : %s [%d ms]",
self.logTag, self.sapHostName, portList, exceptionDetails, TimeUtils.getElapsedMilliseconds(startTime), exc_info=True)
raise exceptionDetails
"""
attempt to create a SOAP client for the specified hostname using specific protocol and port
(for when we already have a known hostconfig for this hostname, and already know whether HTTPS or HTTP should be used)
Store successful clients in cache so we don't make unnecessary WSDL fetchs for future API calls to the same instance
"""
def getClient(self,
hostname: str,
httpProtocol: str,
port: str,
useCache: bool = True) -> Client:
if not hostname or not httpProtocol or not port:
raise Exception("%s cannot create client with empty httpProtocol, hostname or port (%s:%s:%s)" % \
(self.logTag, httpProtocol, hostname, port))
if httpProtocol != "http" and httpProtocol != "https":
raise Exception("%s httpProtocol %s is not valid for hostname: %s, port: %s" % \
(self.logTag, httpProtocol, hostname, port))
hostname = self.getFullyQualifiedDomainName(hostname)
url = '%s://%s:%s/?wsdl' % (httpProtocol, hostname, port)
if (useCache and url in self._soapClientCache):
cacheEntry = self._soapClientCache[url]
# respect cache expiration; if cache is expired allow client to be refreshed below
if (cacheEntry['expirationDateTime'] > datetime.utcnow()):
if (cacheEntry['client']):
# self.tracer.info("%s using cached SOAP client for wsdl: %s", self.logTag, url)
return cacheEntry['client']
else:
# previously cached soap client attempt was failure
raise Exception("%s cached SOAP client failure for wsdl: %s" % (self.logTag, url))
self.tracer.info("%s connecting to wsdl url: %s", self.logTag, url)
startTime = time()
client = None
try:
session = Session()
session.verify = False
client = Client(url, transport=Transport(session=session, timeout=SOAP_API_TIMEOUT_SECS, operation_timeout=SOAP_API_TIMEOUT_SECS))
self.tracer.info("%s initialized SOAP client url: %s [%d ms]",
self.logTag, url, TimeUtils.getElapsedMilliseconds(startTime))
return client
except Exception as e:
self.tracer.error("%s error fetching wsdl url: %s: %s [%d ms]",
self.logTag, url, e, TimeUtils.getElapsedMilliseconds(startTime), exc_info=True)
raise e
finally:
# cache successsful and failed soap client attempts to reduce future API calls
self._soapClientCache[url] = { 'client': client, 'expirationDateTime': datetime.utcnow() + SOAP_CLIENT_CACHE_EXPIRATIION }
def callSoapApi(self, client: Client, apiName: str) -> str:
self.tracer.info("%s executing SOAP API: %s for wsdl: %s", self.logTag, apiName, client.wsdl.location)
startTime = time()
try:
method = getattr(client.service, apiName)
result = method()
self.tracer.info("%s successful SOAP API: %s for wsdl: %s [%d ms]",
self.logTag, apiName, client.wsdl.location, TimeUtils.getElapsedMilliseconds(startTime))
return result
except Exception as e:
self.tracer.error("%s error while calling SOAP API: %s for wsdl: %s: %s [%d ms]",
self.logTag, apiName, client.wsdl.location, e, TimeUtils.getElapsedMilliseconds(startTime), exc_info=True)
raise e
"""
return a netweaver RFC client initialized with "MESSAGESERVER" instance we find
for this SID.
"""
def getRfcClient(self, logTag: str) -> NetWeaverMetricClient:
# RFC connections against application server instances can be made through 'MESSAGESERVER' instances
dispatcherInstance = self.getMessageServerInstance()
return MetricClientFactory.getMetricClient(tracer=self.tracer,
logTag=logTag,
sapHostName=dispatcherInstance['hostname'],
sapSysNr=str(dispatcherInstance['instanceNr']),
sapSubdomain=self.sapSubdomain,
sapSid=self.sapSid,
sapClient=str(self.sapClientId),
sapLogonGroup = self.sapLogonGroup,
sapUsername=self.sapUsername,
                                                   sapPassword=self.sapPassword)
def validate(self) -> bool:
logTag = "[%s][%s][validation]" % (self.fullName, self.sapSid)
# HACK: Load content json to fetch the list of APIs in the checks
self.initContent()
try:
self._validateSoapClient()
except Exception as e:
self.tracer.error("%s SOAP API validation failure: %s", logTag, e, exc_info=True)
return False
try:
self._validateRfcClient()
except Exception as e:
self.tracer.error("%s RFC client validation failure: %s", logTag, e, exc_info=True)
return False
return True
"""
iterate through all SOAP API calls and attempt to validate that SOAP API client can be instantiated
and expected APIs are callable
"""
def _validateSoapClient(self) -> None:
###
        # TODO: this entire function needs to be rethought to be more precise in terms of which instances
# are called for which APIs, as some APIs will not work for some function types.
###
logTag = "[%s][%s][validation]" % (self.fullName, self.sapSid)
# hard-coded list of checks that correspond to SOAP API calls to validate
soapApiChecks = ['GetSystemInstanceList',
'GetProcessList',
'ABAPGetWPTable',
'GetQueueStatistic',
'EnqGetStatistic']
self.tracer.info("%s connecting to sap to validate SOAP API connectivity", logTag)
try:
client = self.getDefaultClient(hostname=self.sapHostName, instance=self.sapInstanceNr)
except Exception as e:
self.tracer.error("%s error occured while initializing SOAP client to SAP server: %s|%s, %s",
logTag,
self.sapHostName,
self.sapInstanceNr,
e,
exc_info=True)
raise
# Ensure that all APIs in the checks are valid and are marked as unprotected.
# Some APIs are compatible with only specific instance types and throw a Fault if run against
# an incompatible one.
# However, here we suppress all errors except Unauthorized since the Monitor phase takes
# care of calling the API against the right instance type. As long as we don't get an
# Unauthorized error, we know we can safely call them during the Monitor phase.
isValid = True
for check in self.checks:
apiName = check.name
if (apiName not in soapApiChecks):
# this is not a SOAP API check
continue
method = getattr(client.service, apiName, None) # Returning None when API not found
if method is None:
self.tracer.error("%s SOAP client failure: api %s does not exist for %s", logTag, apiName, client.wsdl.location)
isValid = False
else:
try:
self.callSoapApi(client, apiName)
self.tracer.info("%s validated SOAP api %s for %s", logTag, apiName, client.wsdl.location)
except Fault as e:
if (e.code == "SOAP-ENV:Client" and e.message == "HTTP Error: 'Unauthorized'"):
isValid = False
self.tracer.error("%s SOAP api %s is protected for %s, %s ", logTag, apiName, client.wsdl.location, e, exc_info=True)
else:
self.tracer.error("%s suppressing error during validation of SOAP api %s for %s, %s", logTag, apiName, client.wsdl.location, e, exc_info=True)
except Exception as e:
self.tracer.error("%s suppressing error during validation of SOAP api %s for %s, %s ", logTag, apiName, client.wsdl.location, e, exc_info=True)
if (not isValid):
raise Exception("%s one or more SOAP APIs failed validation" % (logTag))
"""
if customer provided RFC SDK configuration, then validate that all required properties are specified
and validate we can establish RFC client connections to APIs we need to call
"""
def _validateRfcClient(self) -> None:
logTag = "[%s][%s][validation]" % (self.fullName, self.sapSid)
        # if NONE of the RFC SDK config properties are populated, customer has not opted in to RFC metrics
        if (not self.sapUsername and
            not self.sapPassword and
            not self.sapClientId and
            not self.sapRfcSdkBlobUrl):
            # customer has not chosen to enable RFC SDK, nothing to validate
            return
        # if only SOME (but not all) of the RFC SDK config properties are populated, fail validation
        if (not self.sapUsername or
            not self.sapPassword or
            not self.sapClientId or
            not self.sapRfcSdkBlobUrl):
            # customer specified only partial set of config properties needed to enable RFC, so fail validation
            raise Exception("must specify all properties to enable RFC metric collection: Username, Password, ClientId, and RfcSdkBlobUrl")
if (not self.areRfcMetricsEnabled()):
raise Exception("RFC SDK failed to install and is not usable")
        # initialize an RFC client (connections are made through the MESSAGESERVER instance for this SID)
client = self.getRfcClient(logTag=logTag)
# update logging prefix with the specific instance details of the client
sapHostnameStr = "%s|%s" % (client.Hostname, client.InstanceNr)
        # get metric query window that looks back 10 minutes to see if any results are available. If not, that probably
        # indicates the customer has not enabled SMON on their SAP system
self.tracer.info("%s attempting to fetch server timestamp from %s", logTag, sapHostnameStr)
(startTime, endTime) = client.getQueryWindow(lastRunServerTime=None,
minimumRunIntervalSecs=600,
logTag=logTag)
self.tracer.info("%s attempting to fetch SMON metrics from %s", logTag, sapHostnameStr)
result = client.getSmonMetrics(startDateTime=startTime, endDateTime=endTime, logTag=logTag)
self.tracer.info("%s successfully queried SMON metrics from %s", logTag, sapHostnameStr)
self.tracer.info("%s attempting to fetch SWNC workload metrics from %s", logTag, sapHostnameStr)
result = client.getSwncWorkloadMetrics(startDateTime=startTime, endDateTime=endTime, logTag=logTag)
self.tracer.info("%s successfully queried SWNC workload metrics from %s", logTag, sapHostnameStr)
self.tracer.info("%s attempting to fetch Short Dump metrics from %s", logTag, sapHostnameStr)
result = client.getShortDumpsMetrics(startDateTime=startTime, endDateTime=endTime, logTag=logTag)
self.tracer.info("%s successfully queried Short Dump metrics from %s", logTag, sapHostnameStr)
self.tracer.info("%s attempting to fetch Sys Log metrics from %s", logTag, sapHostnameStr)
result = client.getSysLogMetrics(startDateTime=startTime, endDateTime=endTime, logTag=logTag)
self.tracer.info("%s successfully queried Sys Log metrics from %s", logTag, sapHostnameStr)
self.tracer.info("%s attempting to fetch Failed Updates metrics from %s", logTag, sapHostnameStr)
result = client.getFailedUpdatesMetrics(logTag=logTag)
self.tracer.info("%s successfully queried Failed Updates metrics from %s", logTag, sapHostnameStr)
self.tracer.info("%s attempting to fetch Batch Job metrics from %s", logTag, sapHostnameStr)
result = client.getBatchJobMetrics(startDateTime=startTime, endDateTime=endTime, logTag=logTag)
self.tracer.info("%s successfully queried Batch Job metrics from %s", logTag, sapHostnameStr)
self.tracer.info("%s attempting to fetch inbound queue metrics from %s", logTag, sapHostnameStr)
result = client.getInboundQueuesMetrics(logTag=logTag)
self.tracer.info("%s successfully queried inbound queue metrics from %s", logTag, sapHostnameStr)
self.tracer.info("%s attempting to fetch outbound queue metrics from %s", logTag, sapHostnameStr)
result = client.getOutboundQueuesMetrics(logTag=logTag)
self.tracer.info("%s successfully queried outbound queue metrics from %s", logTag, sapHostnameStr)
self.tracer.info("%s attempting to fetch lock entries metrics from %s", logTag, sapHostnameStr)
result = client.getEnqueueReadMetrics(logTag=logTag)
self.tracer.info("%s successfully queried lock entries metrics from %s", logTag, sapHostnameStr)
self.tracer.info("%s successfully validated all known RFC SDK calls", logTag)
"""
query SAP SOAP API to return list of all instances in the SID, but if caller specifies that cached results are okay
and we have cached instance list with the provider instance, then just return the cached results
"""
def getInstances(self,
filterFeatures: list = None ,
filterType: str = None,
useCache: bool = True) -> list:
# Use cached list of instances if available since they should not change within a single monitor run;
# but if cache is not available or if caller explicitly asks to skip cache then make the SOAP call
if ('hostConfig' in self.state and useCache):
# self.tracer.debug("%s using cached list of system instances", self.logTag)
return self.filterInstancesByFeature(self.state['hostConfig'], filterFeatures=filterFeatures, filterType=filterType)
self.tracer.info("%s getting list of system instances", self.logTag)
startTime = time()
instanceList = []
hosts = self._getHosts()
# Use last known hosts to fetch the updated list of hosts
# Walk through the known hostnames and stop whenever any of them returns the list of all instances
isSuccess = False
for host in hosts:
hostname, instanceNum, httpProtocol, port = host[0], host[1], host[2], host[3]
try:
apiName = 'GetSystemInstanceList'
# if we have a cached host config with already defined protocol and port, then we can initialize
# client directly from that, otherwise we have to instantiate client using ports derived from the instance number
# which will try the derived HTTPS port first and then fallback to derived HTTP port
if (not httpProtocol or not port):
client = self.getDefaultClient(hostname=hostname, instance=instanceNum)
else:
client = self.getClient(hostname, httpProtocol, port)
result = self.callSoapApi(client, apiName)
instanceList = self._parseResults(result)
# cache latest results in provider state
self.state['hostConfig'] = instanceList
isSuccess = True
break
except Exception as e:
self.tracer.error("%s could not connect to SAP with hostname: %s and port: %s", self.logTag, hostname, port, exc_info=True)
if not isSuccess:
raise Exception("%s could not connect to any SAP instances with hosts %s [%d ms]" % \
(self.logTag, hosts, TimeUtils.getElapsedMilliseconds(startTime)))
self.tracer.info("%s finished getting all system instances [%d ms]", self.logTag, TimeUtils.getElapsedMilliseconds(startTime))
return self.filterInstancesByFeature(instanceList, filterFeatures=filterFeatures, filterType=filterType)
"""
    fetch cached instance list for this provider and filter down to the instances with the 'ABAP' feature
that are healthy (ie. have dispstatus attribute of 'SAPControl-GREEN'). Just return first in the list.
"""
def getActiveDispatcherInstance(self):
# Use cached list of instances if available since they don't change that frequently,
# and filter down to only healthy dispatcher instances since RFC direct application server connection
        # only works against dispatchers
dispatcherInstances = self.getInstances(filterFeatures=['ABAP'], filterType='include', useCache=True)
healthyInstances = [instance for instance in dispatcherInstances if 'GREEN' in instance['dispstatus']]
if (len(healthyInstances) == 0):
raise Exception("No healthy ABAP/dispatcher instance found for %s" % self.sapSid)
# return first healthy instance in list
return healthyInstances[0]
"""
    fetch cached instance list for this provider, filter down to instances with the 'MESSAGESERVER' feature,
    and return the first available message server
"""
def getMessageServerInstance(self):
# Use cached list of instances if available since they don't change that frequently,
        # and filter down to only MESSAGESERVER instances, since RFC logon group connections
        # are made through the message server
dispatcherInstances = self.getInstances(filterFeatures=['MESSAGESERVER'], filterType='include', useCache=True)
if (len(dispatcherInstances) == 0):
raise Exception("No MESSAGESERVER instance found for %s" % self.sapSid)
# return first healthy instance in list
return dispatcherInstances[0]
"""
given a list of sap instances and a set of instance features (ie. functions) to include or exclude,
apply filtering logic and return only those instances that match the filter conditions:
'include' filter type will include any instance that matches any of the feature filters
'exclude' filter type will exclude any instance that matches any of the feature filters
"""
def filterInstancesByFeature(self,
sapInstances: list,
filterFeatures: list = None,
filterType: str = None) -> list:
if (not filterFeatures or len(filterFeatures) == 0 or not sapInstances):
return sapInstances
self.tracer.info("%s filtering list of system instances based on features: %s", self.logTag, filterFeatures)
instances = [(instance, instance['features'].split('|')) for instance in sapInstances]
if filterType == "include":
# Inclusion filter
# Only include instances that match at least one of the filter features
filtered_instances = [instance for (instance, instance_features) in instances \
if not set(filterFeatures).isdisjoint(set(instance_features))]
elif filterType == "exclude":
# Exclusion filter
# Only include instance that match none of the filter features
filtered_instances = [instance for (instance, instance_features) in instances \
if set(filterFeatures).isdisjoint(set(instance_features))]
else:
raise Exception("%s filterType '%s' is not supported filter type" % (self.logTag, filterType))
return filtered_instances
"""
helper method to deserialize result and return as list of dictionary objects
"""
def _parseResults(self, results: list) -> list:
return helpers.serialize_object(results, dict)
"""
    private method to return default provider hostname config (what the customer provided at the time the netweaver provider was added)
or a fully fleshed out list of <hostname / instance # / https:Port> tuples based on a previous cached call to getInstances()
"""
def _getHosts(self) -> list:
# Fetch last known list from storage. If storage does not have list, use provided
# hostname and instanceNr
if 'hostConfig' not in self.state:
self.tracer.info("%s no host config persisted yet, using user-provided host name and instance nr", self.logTag)
hosts = [(self.sapHostName,
self.sapInstanceNr,
None,
None)]
else:
self.tracer.info("%s fetching last known host config", self.logTag)
currentHostConfig = self.state['hostConfig']
hosts = [(hostConfig['hostname'],
hostConfig['instanceNr'],
"https" if (hostConfig['httpsPort'] and hostConfig['httpsPort'] != "0") else "http",
hostConfig['httpsPort'] if (hostConfig['httpsPort'] and hostConfig['httpsPort'] != "0") else hostConfig['httpPort']) for hostConfig in currentHostConfig]
return hosts
"""
returns flag to indicate whether provider checks should attempt to use RFC SDK client calls to fetch certain metrics.
    First time may perform fairly expensive checks to validate if RFC SDK is installed and configured, and may attempt
to download user provided blob to install to local system. We only want to attempt this at most once per process,
so first caller to this function will pay that cost and the resulting success/failure flag will be cached.
"""
def areRfcMetricsEnabled(self) -> bool:
if self._areRfcCallsEnabled != None:
            # the flag for whether RFC is usable has already been initialized, so return
return self._areRfcCallsEnabled
# there may be 1..N sapNetWeaverProviderInstance instances per sapmon process, and each instance
# may choose to enable/disable RFC calls individually, but we should only attempt to install the
        # RFC SDK at most once per process. Use a static/class variable to determine if installation
        # has already been attempted and whether it succeeded, and do all this inside of
# a lock and cache flag for future checks
try:
# class singleton lock
sapNetweaverProviderInstance._rfcInstallerLock.acquire(blocking=True)
# check -> lock -> check
if (self._areRfcCallsEnabled != None):
# flag was initialized prior to obtaining the lock
return self._areRfcCallsEnabled
# ensure this provider instance has necessary config settings to enable RFC SDK calls
if (not self.sapUsername or
not self.sapPassword or
not self.sapClientId or
not self.sapRfcSdkBlobUrl or
not self.sapLogonGroup):
self.tracer.info("%s Netweaver RFC calls disabled for because missing one or more required " +
"config properties: sapUsername, sapPassword, sapClientId, sapLogonGroup and sapRfcSdkBlobUrl",
self.logTag)
self._areRfcCallsEnabled = False
return False
# only attempt to install RFC SDK once per process execution
if (sapNetweaverProviderInstance._isRfcInstalled == None):
sapNetweaverProviderInstance._isRfcInstalled = self._trySetupRfcSdk()
self._areRfcCallsEnabled = sapNetweaverProviderInstance._isRfcInstalled
return self._areRfcCallsEnabled
except Exception as e:
self.tracer.error("%s Exception trying to check if rfc sdk metrics are enabled, %s", self.logTag, e, exc_info=True)
sapNetweaverProviderInstance._isRfcInstalled = False
self._areRfcCallsEnabled = False
finally:
sapNetweaverProviderInstance._rfcInstallerLock.release()
return False
"""
validate that RFC SDK package has been installed and configured correctly and is usable by pyrfc module.
If pyrfc module cannot be imported, then potentially attempt to download RFC SDK blob, install to local system,
and configure necessary environment variables and system settings so that the libraries can be
successfully loaded by the pyrfc module.
    Returns flag indicating whether pyrfc module can be imported (ie. whether RFC calls can be enabled)
Pre-requisites for RFC SDK installation attempt:
1.) Customer provided config property sapRfcSdkBlobUrl must be non-empty.
2.) python module for "pynwrfc" must be installed
3.) was the last failed SDK installation attempt more than N minutes ago (defined by MINIMUM_RFC_INSTALL_RETRY_INTERVAL)
4.) does the sapRfcSdkBlobUrl provided by customer actually exist in the storage account
5.) was the last_modified timestamp on the sapRfcSdkBlobUrl blob modified since the last failed installation attempt
"""
def _trySetupRfcSdk(self) -> bool:
try:
# if no RFC SDK download blob url specified, treat as kill switch to disable any RFC calls
if (not self.sapRfcSdkBlobUrl):
self.tracer.info("%s No user provided RFC SDK blob url, will not leverage RFC SDK. quitting...", self.logTag)
return False
installer = SapRfcSdkInstaller(tracer=self.tracer, installPath=PATH_RFC_SDK_INSTALL)
# environment variables must be initialized before RFC and pyrfc installation can be validated
self.tracer.info("%s initializing RFC SDK environment...", self.logTag)
if (not installer.initRfcSdkEnvironment()):
self.tracer.error("%s failed to initialize rfc sdk environment pre-requisites", self.logTag)
return False
# if we are able to successfully import the pyrfc connector module, that means RFC SDK
# libraries must be installed and were able to be found by pyrfc package initialization,
# so no need to do any further checks.
if (installer.isPyrfcModuleUsable()):
# pyrfc package is usable, which means RFC SDK is already installed and environment configured correctly
self.tracer.info("%s Pyrfc module is usable, RFC calls will be enabled", self.logTag)
return True
# if pyrfc module cannot be imported, check to see if it is even installed. Assumption is that
            # pyrfc module is installed as part of the container image, so if it is missing something is wrong
            # and there is no need to even try to install the RFC SDK
if (not installer.isPyrfcModuleInstalled()):
self.tracer.error("%s Pyrfc module is not installed, RFC calls will be disabled", self.logTag)
return False
# check last sdk install attempt time so we can limit how often we retry
# to download and install SDK on persistent failures (eg. no more than once every 30 mins)
lastSdkInstallAttemptTime = installer.getLastSdkInstallAttemptTime()
if (lastSdkInstallAttemptTime > (datetime.now(timezone.utc) - MINIMUM_RFC_INSTALL_RETRY_INTERVAL)):
self.tracer.info("%s last RFC SDK install attempt was %s, minimum attempt retry %s, skipping...",
self.logTag,
lastSdkInstallAttemptTime,
MINIMUM_RFC_INSTALL_RETRY_INTERVAL)
return False
self.tracer.info("%s RFC SDK is not installed, so attempt installation now...", self.logTag)
blobStorageAccount = AzureStorageAccount(tracer=self.tracer,
sapmonId=self.ctx.sapmonId,
msiClientId=self.ctx.msiClientId,
subscriptionId=self.ctx.vmInstance["subscriptionId"],
resourceGroup=self.ctx.vmInstance["resourceGroupName"])
# first check that rfc sdk download blob exists in Azure Storage account, and if it
            # exists also fetch the last_modified timestamp metadata
doesPackageExist, packageLastModifiedTime = installer.isRfcSdkAvailableForDownload(
blobUrl=self.sapRfcSdkBlobUrl,
storageAccount=blobStorageAccount)
if (not doesPackageExist):
self.tracer.error("%s User provided RFC SDK blob does not exist %s, skipping...", self.logTag, self.sapRfcSdkBlobUrl)
return False
self.tracer.info("%s user provided RFC SDK blob exists for download %s, lastModified=%s",
self.logTag, self.sapRfcSdkBlobUrl, packageLastModifiedTime)
# the user provided sdk blob exists, so before we download compare the last_modified timestamp
# with the last modified time of the last download attempt. If nothing has changed,
# then no need to try and download the package again
# TODO: confirm, should we go ahead and try to re-download previously failed packages
# once every 30 minutes anyway? just in case failure was something external?
lastInstallPackageModifiedTime = installer.getLastSdkInstallPackageModifiedTime()
if (packageLastModifiedTime == lastInstallPackageModifiedTime):
self.tracer.info("%s rfc sdk download package has not been modified since last download " +
"attempt (last_modified=%s), will not download again",
self.logTag,
lastInstallPackageModifiedTime)
return False
self.tracer.info("%s user provided rfc sdk package last_modified (%s) has changed " +
"since last install attempt (%s), attempting to re-download and install",
self.logTag,
packageLastModifiedTime,
lastInstallPackageModifiedTime)
# try to download user provided RFC SDK blob, install to local system and configure necessary
# environment variables and system settings so that it can be usable by pyrfc module
if (not installer.downloadAndInstallRfcSdk(blobUrl=self.sapRfcSdkBlobUrl, storageAccount=blobStorageAccount)):
self.tracer.error("%s failed to download and install rfc sdk package, RFC calls will not be enabled...", self.logTag)
return False
            # on Linux the pyrfc module may not be usable upon the first install attempt, since libraries unpacked
            # to a directory added to LD_LIBRARY_PATH after the python process has started may not be picked up.
# The module should be usable on the next sapmon process run.
if (not installer.isPyrfcModuleUsable()):
self.tracer.error("%s pyrfc module still not usable after RFC SDK install (might require process restart), " +
"RFC calls will not be enabled...",
self.logTag)
return False
self.tracer.info("%s pyrfc module is usable after RFC SDK install, RFC calls will be enabled...", self.logTag)
return True
except Exception as e:
self.tracer.error("%s exception trying to setup and validate RFC SDK, RFC calls will be disabled: %s", self.logTag, e, exc_info=True)
return False
###########################
class sapNetweaverProviderCheck(ProviderCheck):
lastResult = []
# hard-coded set of action names that require RFC SDK to be usable
# and can override runtime isEnabled() check if RFC is not usable
rfcCheckNames = {'SMON_Metrics', 'SWNC_Workload_Metrics', 'SDF_Short_Dumps_Metrics', 'Sys_Log_Metrics',
'Failed_Updates_Metrics', 'Batch_Jobs_Metrics', 'Inbound_Queues_Metrics', 'Outbound_Queues_Metrics',
'Enqueue_Read_Metrics'}
def __init__(self,
provider: ProviderInstance,
**kwargs
):
super().__init__(provider, **kwargs)
self.lastRunLocal = None
self.lastRunServer = None
# provider check common logging prefix
self.logTag = "[%s][%s]" % (self.fullName, self.providerInstance.sapSid)
"""
return flag indicating whether this check instances requires the SAP RFC SDK to be installed and usable
"""
def doesCheckRequireRfcSdk(self) -> bool:
return self.name in sapNetweaverProviderCheck.rfcCheckNames
"""
    override base ProviderCheck implementation so that RFC metric collection methods that are enabled in
    the default Provider JSON configuration are treated as disabled at runtime if the RFC SDK
    is not configured (to reduce log spam)
"""
def isEnabled(self) -> bool:
if not self.state["isEnabled"]:
return False
# if this check requires RFC and RFC is not installed, then treat as disabled
if (self.doesCheckRequireRfcSdk()):
if (not self.providerInstance.areRfcMetricsEnabled()):
return False
return True
def _getFormattedTimestamp(self) -> str:
return datetime.utcnow().isoformat()
def _parseResult(self, result: object) -> list:
return [helpers.serialize_object(result, dict)]
def _parseResults(self, results: list) -> list:
return helpers.serialize_object(results, dict)
def _getServerTimestamp(self) -> datetime:
self.tracer.info("%s fetching current timestamp from message server", self.logTag)
message_server_instances = self.providerInstance.getInstances(filterFeatures=['MESSAGESERVER'], filterType='include', useCache=True)
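        # default to the local collector UTC time; replaced below if any message server returns a valid 'date' header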
date = datetime.fromisoformat(self._getFormattedTimestamp())
# Get timestamp from the first message server that returns a valid date
for instance in message_server_instances:
hostname = instance['hostname']
instanceNr = str(instance['instanceNr']).zfill(2)
port = self.providerInstance.getMessageServerPortFromInstanceNr(instanceNr)
hostname = self.providerInstance.getFullyQualifiedDomainName(hostname)
message_server_endpoint = "http://%s:%s/" % (hostname, port)
try:
                # We only care about the date in the response header, so we ignore the response body
# 'Thu, 04 Mar 2021 05:02:12 GMT'
# NOTE: we don't need to follow redirects because the redirect response itself 300-3XX
# will have the 'date' header as well. In some cases we were following a chain
# of redirects that would terminate in a 404, which would not have the 'date' header
response = requests.get(message_server_endpoint, allow_redirects=False)
if ('date' not in response.headers):
raise Exception("no 'date' response header found for response status:%s/%s from:%s"
% (response.status_code, response.reason, message_server_endpoint))
date = datetime.strptime(response.headers['date'], '%a, %d %b %Y %H:%M:%S %Z')
self.tracer.info("%s received message server %s header: %s, parsed time: %s",
self.logTag,
message_server_endpoint,
response.headers['date'],
date)
break
except Exception as e:
self.tracer.info("%s suppressing expected error while fetching server time during HTTP GET request to url %s: %s ",
self.logTag, message_server_endpoint, e)
return date
def _actionGetSystemInstanceList(self) -> None:
self.tracer.info("%s refreshing list of system instances", self.logTag)
self.lastRunLocal = datetime.utcnow()
        # when performing the actual provider check action, always fetch fresh instance list snapshot and refresh the cache
instanceList = self.providerInstance.getInstances(useCache=False)
self.lastRunServer = self._getServerTimestamp()
# Update host config, if new list is fetched
# Parse dictionary and add current timestamp and SID to data and log it
if len(instanceList) != 0:
currentTimestamp = self._getFormattedTimestamp()
for instance in instanceList:
instance['timestamp'] = currentTimestamp
instance['serverTimestamp'] = self.lastRunServer.isoformat()
instance['SID'] = self.providerInstance.sapSid
instance['subdomain'] = self.providerInstance.sapSubdomain
self.lastResult = instanceList
# Update internal state
if not self.updateState():
raise Exception("%s failed to update state" % self.logTag)
self.tracer.info("%s successfully fetched system instance list", self.logTag)
def _executeWebServiceRequest(self, apiName: str, filterFeatures: list, filterType: str, parser: Callable[[Any], list] = None) -> None:
self.tracer.info("[%s] executing web service request: %s" % (self.fullName, apiName))
self.lastRunLocal = datetime.utcnow()
        # track latency of entire method execution with dependencies
startTime = time()
if parser is None:
parser = self._parseResults
# Use cached list of instances if available since they don't change that frequently; else fetch afresh.
# filter down to just the instances we need for this SOAP API type
sapInstances = self.providerInstance.getInstances(useCache=True, filterFeatures=filterFeatures, filterType=filterType)
self.lastRunServer = self._getServerTimestamp()
if len(sapInstances) == 0:
self.tracer.info("%s no instances found that support this API: %s", self.logTag, apiName)
# Call web service
all_results = []
currentTimestamp = self._getFormattedTimestamp()
for instance in sapInstances:
# default to https unless the httpsPort was not defined, in which case fallback to http
httpProtocol = "https"
port = instance['httpsPort']
if ((not port) or port == "0"):
# fallback to http port instead
httpProtocol = "http"
port = instance['httpPort']
results = []
try:
client = self.providerInstance.getClient(instance['hostname'], httpProtocol, port)
results = self.providerInstance.callSoapApi(client, apiName)
if(apiName == "GetProcessList"):
results = self._sanitizeGetProcessList(results)
elif(apiName == "ABAPGetWPTable"):
results = self._sanitizeABAPGetWPTable(results)
except Exception as e:
self.tracer.error("%s unable to call the Soap Api %s - %s://%s:%s, %s", self.logTag, apiName, httpProtocol, instance['hostname'], port, e, exc_info=True)
continue
if len(results) != 0:
parsed_results = parser(results)
for result in parsed_results:
result['hostname'] = instance['hostname']
result['instanceNr'] = instance['instanceNr']
result['subdomain'] = self.providerInstance.sapSubdomain
result['timestamp'] = currentTimestamp
result['serverTimestamp'] = self.lastRunServer.isoformat()
result['SID'] = self.providerInstance.sapSid
all_results.extend(parsed_results)
if len(all_results) == 0:
self.tracer.info("%s no results found for: %s", self.logTag, apiName)
self.lastResult = all_results
# Update internal state
if not self.updateState():
raise Exception("[%s] failed to update state for web service request: %s [%d ms]" % \
(self.logTag, apiName, TimeUtils.getElapsedMilliseconds(startTime)))
self.tracer.info("%s successfully processed web service request: %s [%d ms]",
self.logTag, apiName, TimeUtils.getElapsedMilliseconds(startTime))
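    # Sketch of how each parsed result row is decorated above (field values are hypothetical):
    #   {**parsed_row, 'hostname': 'sapapp01', 'instanceNr': '00', 'subdomain': 'contoso.corp',
    #    'timestamp': '<local ISO timestamp>', 'serverTimestamp': '<message server ISO timestamp>',
    #    'SID': 'X01'}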
def _actionExecuteGenericWebServiceRequest(self, apiName: str, filterFeatures: list, filterType: str) -> None:
self._executeWebServiceRequest(apiName, filterFeatures, filterType, self._parseResults)
def _actionExecuteEnqGetStatistic(self, apiName: str, filterFeatures: list, filterType: str) -> None:
self._executeWebServiceRequest(apiName, filterFeatures, filterType, self._parseResult)
"""
Method to parse the value based on the key provided and set the values with None value to empty string ''
"""
def _getKeyValue(self, dictionary, key, apiName):
if key not in dictionary:
raise ValueError("Result received for api %s does not contain key: %s"% (apiName, key))
if(dictionary[key] == None):
dictionary[key] = ""
return dictionary[key]
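    # Usage sketch for _getKeyValue (hypothetical record, for illustration only):
    #   record = {'Pid': None, 'Status': 'Run'}
    #   self._getKeyValue(record, 'Status', 'ABAPGetWPTable')  -> 'Run'
    #   self._getKeyValue(record, 'Pid', 'ABAPGetWPTable')     -> '' (None coerced to empty string)
    #   self._getKeyValue(record, 'Typ', 'ABAPGetWPTable')     -> raises ValueError (missing key)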
"""
Method to parse the results from ABAPGetWPTable and set the strings with None value to empty string ''
"""
def _sanitizeABAPGetWPTable(self, records: list) -> list:
apiName = "ABAPGetWPTable"
processed_results = list()
for record in records:
processed_result = {
"Action": self._getKeyValue(record, 'Action', apiName),
"Client": self._getKeyValue(record, 'Client', apiName),
"Cpu": self._getKeyValue(record, 'Cpu', apiName),
"Err": self._getKeyValue(record, 'Err', apiName),
"No": self._getKeyValue(record, 'No', apiName),
"Pid": self._getKeyValue(record, 'Pid', apiName),
"Program": self._getKeyValue(record, 'Program', apiName),
"Reason": self._getKeyValue(record, 'Reason', apiName),
"Sem": self._getKeyValue(record, 'Sem', apiName),
"Start": self._getKeyValue(record, 'Start', apiName),
"Status": self._getKeyValue(record, 'Status', apiName),
"Table": self._getKeyValue(record, 'Table', apiName),
"Time": self._getKeyValue(record, 'Time', apiName),
"Typ": self._getKeyValue(record, 'Typ', apiName),
"User": self._getKeyValue(record, 'User', apiName)
}
processed_results.append(processed_result)
return processed_results
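    # Illustrative before/after for the sanitizer above (hypothetical work process record):
    #   in:  {'Action': None, 'Client': '000', ..., 'Typ': 'DIA', 'User': None}
    #   out: {'Action': '',   'Client': '000', ..., 'Typ': 'DIA', 'User': ''}
    # Only the whitelisted keys are kept, and None values become empty strings.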
"""
Method to parse the results from GetProcessList and set the strings with None value to empty string ''
"""
def _sanitizeGetProcessList(self, records: list) -> list:
apiName = "GetProcessList"
processed_results = list()
for record in records:
processed_result = {
"description": self._getKeyValue(record, 'description', apiName),
"dispstatus": self._getKeyValue(record, 'dispstatus', apiName),
"elapsedtime": self._getKeyValue(record, 'elapsedtime', apiName),
"name": self._getKeyValue(record, 'name', apiName),
"pid": self._getKeyValue(record, 'pid', apiName),
"starttime": self._getKeyValue(record, 'starttime', apiName),
"textstatus": self._getKeyValue(record, 'textstatus', apiName)
}
processed_results.append(processed_result)
return processed_results
"""
netweaver provider check action to query for SDF/SMON Analysis Run metrics
"""
def _actionGetSmonAnalysisMetrics(self) -> None:
# base class will always call generateJsonString(), so we must always be sure to set the lastResult
# regardless of success or failure
self.lastResult = []
try:
# initialize hostname log string here to default of SID in case we cannot identify a specific dispatcher host
sapHostnameStr = self.providerInstance.sapSid
if (not self.providerInstance.areRfcMetricsEnabled()):
self.tracer.info("%s Skipping SMON metrics because RFC SDK metrics not enabled...", self.logTag)
return
            # track latency of entire method execution with dependencies
latencyStartTime = time()
# initialize a client for the first healthy MessageServer instance we find
client = self.providerInstance.getRfcClient(logTag=self.logTag)
# update logging prefix with the specific instance details of the client
sapHostnameStr = "%s|%s" % (client.Hostname, client.InstanceNr)
# get metric query window based on our last successful query where results were returned
(startTime, endTime) = client.getQueryWindow(lastRunServerTime=self.lastRunServer,
minimumRunIntervalSecs=self.frequencySecs,
logTag=self.logTag)
self.lastResult = client.getSmonMetrics(startDateTime=startTime, endDateTime=endTime, logTag=self.logTag)
self.tracer.info("%s successfully queried SMON metrics for %s [%d ms]",
self.logTag, sapHostnameStr, TimeUtils.getElapsedMilliseconds(latencyStartTime))
self.lastRunLocal = datetime.now(timezone.utc)
self.lastRunServer = endTime
# only update state on successful query attempt
self.updateState()
except Exception as e:
self.tracer.error("%s exception trying to fetch SMON Analysis Run metrics for %s [%d ms], error: %s",
self.logTag,
sapHostnameStr,
TimeUtils.getElapsedMilliseconds(latencyStartTime),
e,
exc_info=True)
raise
"""
netweaver provider check action to query for SWNC workload statistics and decorate with ST03 metric calculations
"""
def _actionGetSwncWorkloadMetrics(self) -> None:
# base class will always call generateJsonString(), so we must always be sure to set the lastResult
# regardless of success or failure
self.lastResult = []
try:
# initialize hostname log string here to default of SID in case we cannot identify a specific dispatcher host
sapHostnameStr = self.providerInstance.sapSid
if (not self.providerInstance.areRfcMetricsEnabled()):
self.tracer.info("%s Skipping SWNC metrics because RFC SDK metrics not enabled...", self.logTag)
return
            # track latency of entire method execution with dependencies
latencyStartTime = time()
# initialize a client for the first healthy MessageServer instance we find
client = self.providerInstance.getRfcClient(logTag=self.logTag)
# update logging prefix with the specific instance details of the client
sapHostnameStr = "%s|%s" % (client.Hostname, client.InstanceNr)
# get metric query window based on our last successful query where results were returned
(startTime, endTime) = client.getQueryWindow(lastRunServerTime=self.lastRunServer,
minimumRunIntervalSecs=self.frequencySecs,
logTag=self.logTag)
self.lastResult = client.getSwncWorkloadMetrics(startDateTime=startTime, endDateTime=endTime, logTag=self.logTag)
self.tracer.info("%s successfully queried SWNC workload metrics for %s [%d ms]",
self.logTag, sapHostnameStr, TimeUtils.getElapsedMilliseconds(latencyStartTime))
self.lastRunLocal = datetime.now(timezone.utc)
self.lastRunServer = endTime
# only update state on successful query attempt
self.updateState()
except Exception as e:
self.tracer.error("%s exception trying to fetch SWNC workload metrics for %s [%d ms], error: %s",
self.logTag,
sapHostnameStr,
TimeUtils.getElapsedMilliseconds(latencyStartTime),
e,
exc_info=True)
raise
"""
netweaver provider check action to query for short dumps
"""
def _actionGetShortDumpsMetrics(self) -> None:
# base class will always call generateJsonString(), so we must always be sure to set the lastResult
# regardless of success or failure
self.lastResult = []
try:
# initialize hostname log string here to default of SID in case we cannot identify a specific dispatcher host
sapHostnameStr = self.providerInstance.sapSid
if (not self.providerInstance.areRfcMetricsEnabled()):
self.tracer.info("%s Skipping short dumps metrics because RFC SDK metrics not enabled...", self.logTag)
return
            # track latency of entire method execution with dependencies
latencyStartTime = time()
# initialize a client for the first healthy MessageServer instance we find
client = self.providerInstance.getRfcClient(logTag=self.logTag)
# update logging prefix with the specific instance details of the client
sapHostnameStr = "%s|%s" % (client.Hostname, client.InstanceNr)
# get metric query window based on our last successful query where results were returned
(startTime, endTime) = client.getQueryWindow(lastRunServerTime=self.lastRunServer,
minimumRunIntervalSecs=self.frequencySecs,
logTag=self.logTag)
self.lastResult = client.getShortDumpsMetrics(startDateTime=startTime, endDateTime=endTime, logTag=self.logTag)
self.tracer.info("%s successfully queried short dumps metrics for %s [%d ms]",
self.logTag, sapHostnameStr, TimeUtils.getElapsedMilliseconds(latencyStartTime))
self.lastRunLocal = datetime.now(timezone.utc)
self.lastRunServer = endTime
# only update state on successful query attempt
self.updateState()
except Exception as e:
self.tracer.error("%s exception trying to fetch short dumps metrics for %s [%d ms], error: %s",
self.logTag,
sapHostnameStr,
TimeUtils.getElapsedMilliseconds(latencyStartTime),
e,
exc_info=True)
raise
"""
netweaver provider check action to query for sys logs
"""
def _actionGetSysLogMetrics(self) -> None:
# base class will always call generateJsonString(), so we must always be sure to set the lastResult
# regardless of success or failure
self.lastResult = []
try:
# initialize hostname log string here to default of SID in case we cannot identify a specific dispatcher host
sapHostnameStr = self.providerInstance.sapSid
if (not self.providerInstance.areRfcMetricsEnabled()):
self.tracer.info("%s Skipping sys logs metrics because RFC SDK metrics not enabled...", self.logTag)
return
            # track latency of entire method execution with dependencies
latencyStartTime = time()
# initialize a client for the first healthy MessageServer instance we find
client = self.providerInstance.getRfcClient(logTag=self.logTag)
# update logging prefix with the specific instance details of the client
sapHostnameStr = "%s|%s" % (client.Hostname, client.InstanceNr)
# get metric query window based on our last successful query where results were returned
(startTime, endTime) = client.getQueryWindow(lastRunServerTime=self.lastRunServer,
minimumRunIntervalSecs=self.frequencySecs,
logTag=self.logTag)
self.lastResult = client.getSysLogMetrics(startDateTime=startTime, endDateTime=endTime, logTag=self.logTag)
self.tracer.info("%s successfully queried sys log metrics for %s [%d ms]",
self.logTag, sapHostnameStr, TimeUtils.getElapsedMilliseconds(latencyStartTime))
self.lastRunLocal = datetime.now(timezone.utc)
self.lastRunServer = endTime
# only update state on successful query attempt
self.updateState()
except Exception as e:
self.tracer.error("%s exception trying to fetch sys logs metrics for %s [%d ms], error: %s",
self.logTag,
sapHostnameStr,
TimeUtils.getElapsedMilliseconds(latencyStartTime),
e,
exc_info=True)
raise
"""
netweaver provider check action to query for failed updates metrics
"""
def _actionGetFailedUpdatesMetrics(self) -> None:
# base class will always call generateJsonString(), so we must always be sure to set the lastResult
# regardless of success or failure
self.lastResult = []
try:
# initialize hostname log string here to default of SID in case we cannot identify a specific dispatcher host
sapHostnameStr = self.providerInstance.sapSid
if (not self.providerInstance.areRfcMetricsEnabled()):
self.tracer.info("%s Skipping sys logs metrics because RFC SDK metrics not enabled...", self.logTag)
return
            # track latency of entire method execution with dependencies
latencyStartTime = time()
# initialize a client for the first healthy MessageServer instance we find
client = self.providerInstance.getRfcClient(logTag=self.logTag)
# update logging prefix with the specific instance details of the client
sapHostnameStr = "%s|%s" % (client.Hostname, client.InstanceNr)
# get metric query window based on our last successful query where results were returned
(startTime, endTime) = client.getQueryWindow(lastRunServerTime=self.lastRunServer,
minimumRunIntervalSecs=self.frequencySecs,
logTag=self.logTag)
self.lastResult = client.getFailedUpdatesMetrics(logTag=self.logTag)
self.tracer.info("%s successfully queried failed updates metrics for %s [%d ms]",
self.logTag, sapHostnameStr, TimeUtils.getElapsedMilliseconds(latencyStartTime))
self.lastRunLocal = datetime.now(timezone.utc)
self.lastRunServer = endTime
# only update state on successful query attempt
self.updateState()
except Exception as e:
self.tracer.error("%s exception trying to fetch failed updates metrics for %s [%d ms], error: %s",
self.logTag,
sapHostnameStr,
TimeUtils.getElapsedMilliseconds(latencyStartTime),
e,
exc_info=True)
raise
"""
netweaver provider check action to query for batch job metrics
"""
def _actionGetBatchJobMetrics(self) -> None:
# base class will always call generateJsonString(), so we must always be sure to set the lastResult
# regardless of success or failure
self.lastResult = []
try:
# initialize hostname log string here to default of SID in case we cannot identify a specific dispatcher host
sapHostnameStr = self.providerInstance.sapSid
if (not self.providerInstance.areRfcMetricsEnabled()):
self.tracer.info("%s Skipping batch jobs metrics because RFC SDK metrics not enabled...", self.logTag)
return
            # track latency of entire method execution with dependencies
latencyStartTime = time()
# initialize a client for the first healthy MessageServer instance we find
client = self.providerInstance.getRfcClient(logTag=self.logTag)
# update logging prefix with the specific instance details of the client
sapHostnameStr = "%s|%s" % (client.Hostname, client.InstanceNr)
# get metric query window based on our last successful query where results were returned
(startTime, endTime) = client.getQueryWindow(lastRunServerTime=self.lastRunServer,
minimumRunIntervalSecs=self.frequencySecs,
logTag=self.logTag)
self.lastResult = client.getBatchJobMetrics(startDateTime=startTime, endDateTime=endTime, logTag=self.logTag)
self.tracer.info("%s successfully queried batch job metrics for %s [%d ms]",
self.logTag, sapHostnameStr, TimeUtils.getElapsedMilliseconds(latencyStartTime))
self.lastRunLocal = datetime.now(timezone.utc)
self.lastRunServer = endTime
# only update state on successful query attempt
self.updateState()
except Exception as e:
self.tracer.error("%s exception trying to fetch failed updates metrics for %s [%d ms], error: %s",
self.logTag,
sapHostnameStr,
TimeUtils.getElapsedMilliseconds(latencyStartTime),
e,
exc_info=True)
raise
"""
netweaver provider check action to query for inbound queues statistics
"""
def _actionGetInboundQueuesMetrics(self) -> None:
# base class will always call generateJsonString(), so we must always be sure to set the lastResult
# regardless of success or failure
self.lastResult = []
try:
# initialize hostname log string here to default of SID in case we cannot identify a specific dispatcher host
sapHostnameStr = self.providerInstance.sapSid
if (not self.providerInstance.areRfcMetricsEnabled()):
self.tracer.info("%s Skipping Current Inbound Queues metrics because RFC SDK metrics not enabled...", self.logTag)
return
            # track latency of entire method execution with dependencies
latencyStartTime = time()
# initialize a client for the first healthy MessageServer instance we find
client = self.providerInstance.getRfcClient(logTag=self.logTag)
# update logging prefix with the specific instance details of the client
sapHostnameStr = "%s|%s" % (client.Hostname, client.InstanceNr)
self.lastResult = client.getInboundQueuesMetrics(logTag=self.logTag)
self.tracer.info("%s successfully queried Current Inbound Queues metrics for %s [%d ms]",
self.logTag, sapHostnameStr, TimeUtils.getElapsedMilliseconds(latencyStartTime))
self.lastRunLocal = datetime.now(timezone.utc)
# only update state on successful query attempt
self.updateState()
except Exception as e:
self.tracer.error("%s exception trying to fetch Current Inbound Queues metrics for %s [%d ms], error: %s",
self.logTag,
sapHostnameStr,
TimeUtils.getElapsedMilliseconds(latencyStartTime),
e,
exc_info=True)
raise
"""
netweaver provider check action to query for outbound queues statistics
"""
def _actionGetOutboundQueuesMetrics(self) -> None:
# base class will always call generateJsonString(), so we must always be sure to set the lastResult
# regardless of success or failure
self.lastResult = []
try:
# initialize hostname log string here to default of SID in case we cannot identify a specific dispatcher host
sapHostnameStr = self.providerInstance.sapSid
if (not self.providerInstance.areRfcMetricsEnabled()):
self.tracer.info("%s Skipping Current Outbound Queues metrics because RFC SDK metrics not enabled...", self.logTag)
return
            # track latency of entire method execution with dependencies
latencyStartTime = time()
# initialize a client for the first healthy MessageServer instance we find
client = self.providerInstance.getRfcClient(logTag=self.logTag)
# update logging prefix with the specific instance details of the client
sapHostnameStr = "%s|%s" % (client.Hostname, client.InstanceNr)
self.lastResult = client.getOutboundQueuesMetrics(logTag=self.logTag)
self.tracer.info("%s successfully queried Current Outbound Queues metrics for %s [%d ms]",
self.logTag, sapHostnameStr, TimeUtils.getElapsedMilliseconds(latencyStartTime))
self.lastRunLocal = datetime.now(timezone.utc)
# only update state on successful query attempt
self.updateState()
except Exception as e:
self.tracer.error("%s exception trying to fetch Current Outbound Queues metrics for %s [%d ms], error: %s",
self.logTag,
sapHostnameStr,
TimeUtils.getElapsedMilliseconds(latencyStartTime),
e,
exc_info=True)
raise
"""
netweaver provider check action to query for object lock entries by connecting to ENQUEUE_READ RFC
"""
def _actionGetEnqueueReadMetrics(self) -> None:
# base class will always call generateJsonString(), so we must always be sure to set the lastResult
# regardless of success or failure
self.lastResult = []
try:
# initialize hostname log string here to default of SID in case we cannot identify a specific dispatcher host
sapHostnameStr = self.providerInstance.sapSid
if (not self.providerInstance.areRfcMetricsEnabled()):
self.tracer.info("%s Skipping ENQUEUE_READ metrics because RFC SDK metrics not enabled...", self.logTag)
return
            # track latency of entire method execution with dependencies
latencyStartTime = time()
# initialize a client for the first healthy MessageServer instance we find
client = self.providerInstance.getRfcClient(logTag=self.logTag)
# update logging prefix with the specific instance details of the client
sapHostnameStr = "%s|%s" % (client.Hostname, client.InstanceNr)
self.lastResult = client.getEnqueueReadMetrics(logTag=self.logTag)
self.tracer.info("%s successfully queried ENQUEUE_READ metrics for %s [%d ms]",
self.logTag, sapHostnameStr, TimeUtils.getElapsedMilliseconds(latencyStartTime))
self.lastRunLocal = datetime.now(timezone.utc)
# only update state on successful query attempt
self.updateState()
except Exception as e:
self.tracer.error("%s exception trying to fetch ENQUEUE_READ metrics for %s [%d ms], error: %s",
self.logTag,
sapHostnameStr,
TimeUtils.getElapsedMilliseconds(latencyStartTime),
e,
exc_info=True)
raise
def generateJsonString(self) -> str:
self.tracer.info("%s converting result to json string", self.logTag)
if self.lastResult is not None and len(self.lastResult) != 0:
for result in self.lastResult:
result['SAPMON_VERSION'] = PAYLOAD_VERSION
result['PROVIDER_INSTANCE'] = self.providerInstance.name
result['METADATA'] = self.providerInstance.metadata
resultJsonString = json.dumps(self.lastResult, sort_keys=True, indent=4, cls=JsonEncoder)
self.tracer.debug("%s resultJson=%s", self.logTag, str(resultJsonString))
return resultJsonString
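    # Sketch of the decorated payload produced above (values are placeholders, not real output):
    #   [{
    #       "SAPMON_VERSION": <PAYLOAD_VERSION>,
    #       "PROVIDER_INSTANCE": "<provider instance name>",
    #       "METADATA": {...},
    #       ...check-specific fields such as hostname, instanceNr, SID, timestamp...
    #   }]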
def updateState(self) -> bool:
self.tracer.info("%s updating internal state", self.logTag)
self.state['lastRunLocal'] = self.lastRunLocal
self.state['lastRunServer'] = self.lastRunServer
self.tracer.info("%s internal state successfully updated", self.logTag)
return True
| # Python modules
import json
import logging
from datetime import datetime, timedelta, timezone
from time import time
from typing import Any, Callable
import re
import requests
from requests import Session
from threading import Lock
# SOAP Client modules
from zeep import Client
from zeep import helpers
from zeep.transports import Transport
from zeep.exceptions import Fault
# Payload modules
from const import *
from helper.azure import AzureStorageAccount
from helper.context import *
from helper.tools import *
from provider.base import ProviderInstance, ProviderCheck
from netweaver.metricclientfactory import NetWeaverMetricClient, MetricClientFactory
from netweaver.rfcsdkinstaller import PATH_RFC_SDK_INSTALL, SapRfcSdkInstaller
from typing import Dict
# Suppress SSLError warning due to missing SAP server certificate
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# wait time in between attempts to re-download and install RFC SDK package if we have a download blob
# URL defined and previous install attempt was not successful
MINIMUM_RFC_INSTALL_RETRY_INTERVAL = timedelta(minutes=30)
# timeout to use for all SOAP WSDL fetch and other API calls
SOAP_API_TIMEOUT_SECS = 5
# SOAP client cache expiration; after this interval both successful and failed SOAP client entries are refreshed
SOAP_CLIENT_CACHE_EXPIRATION = timedelta(minutes=10)
class sapNetweaverProviderInstance(ProviderInstance):
# static / class variables to enforce singleton behavior around rfc sdk installation attempts across all
# instances of SAP Netweaver provider
_isRfcInstalled = None
_rfcInstallerLock = Lock()
def __init__(self,
tracer: logging.Logger,
ctx: Context,
providerInstance: Dict[str, str],
skipContent: bool = False,
**kwargs) -> None:
self.sapSid = None
self.sapHostName = None
self.sapInstanceNr = None
self.sapSubdomain = None
# RFC SDK call settings
self.sapUsername = None
self.sapPassword = None
self.sapClientId = None
self.sapRfcSdkBlobUrl = None
self.sapLogonGroup = None
# provider instance flag for whether RFC calls should be enabled for this specific Netweaver provider instance
self._areRfcCallsEnabled = None
# cache WSDL SOAP clients so we can re-use them across checks for the same provider and cut down off-box calls
self._soapClientCache = {}
# the RFC SDK does not allow client to specify a timeout and in fact appears to have a connection timeout of 60 secs.
# In cases where RFC calls timeout due to some misconfiguration, multiple retries can lead to metric gaps of several minutes.
        # We are limiting retries here because it is extremely rare for a SOAP or RFC call to fail on first attempt and succeed on retry,
# as most of these failures are due to persistent issues. Better to not waste limited time budget.
retrySettings = {
"retries": 1,
"delayInSeconds": 1,
"backoffMultiplier": 2
}
super().__init__(tracer,
ctx,
providerInstance,
retrySettings,
skipContent,
**kwargs)
"""
parse provider properties and get sid, host name and instance number
"""
def parseProperties(self) -> bool:
self.sapSid = self.metadata.get("sapSid", "")
if not self.sapSid:
self.tracer.error("%s sapSid cannot be empty", self.fullName)
return False
# provider level common logging prefix
self.logTag = "[%s][%s]" % (self.fullName, self.sapSid)
self.sapHostName = self.providerProperties.get("sapHostName", None)
if not self.sapHostName:
self.tracer.error("%s sapHostName cannot be empty", self.logTag)
return False
instanceNr = self.providerProperties.get("sapInstanceNr", None)
if instanceNr is None: # 0 is an acceptable value for Instance Number
self.tracer.error("%s sapInstanceNr cannot be empty", self.logTag)
return False
if not type(instanceNr) is int or instanceNr < 0 or instanceNr > 98:
self.tracer.error("%s sapInstanceNr can only be between 00 and 98 but %s was passed", self.logTag, str(instanceNr))
return False
self.sapInstanceNr = str(instanceNr).zfill(2)
self.sapSubdomain = self.providerProperties.get("sapSubdomain", "")
self.sapUsername = self.providerProperties.get('sapUsername', None)
self.sapPassword = self.providerProperties.get('sapPassword', None)
self.sapClientId = self.providerProperties.get('sapClientId', None)
self.sapLogonGroup = self.providerProperties.get('sapLogonGroup',None)
self.sapRfcSdkBlobUrl = self.providerProperties.get('sapRfcSdkBlobUrl', None)
# if user did not specify password directly via UI, check to see if they instead
# provided link to Key Vault secret
if not self.sapPassword:
sapPasswordKeyVaultUrl = self.providerProperties.get("sapPasswordKeyVaultUrl", None)
if sapPasswordKeyVaultUrl:
self.tracer.info("%s sapPassword key vault URL specified, attempting to fetch from %s", self.logTag, sapPasswordKeyVaultUrl)
try:
keyVaultUrlPatternMatch = re.match(REGEX_EXTERNAL_KEYVAULT_URL,
sapPasswordKeyVaultUrl,
re.IGNORECASE)
keyVaultName = keyVaultUrlPatternMatch.group(1)
secretName = keyVaultUrlPatternMatch.group(2)
except Exception as e:
self.tracer.error("%s invalid sapPassword Key Vault secret url format: %s", self.logTag, sapPasswordKeyVaultUrl)
return False
try:
kv = AzureKeyVault(self.tracer, keyVaultName, self.ctx.msiClientId)
self.sapPassword = kv.getSecret(secretName, None).value
if not self.sapPassword:
raise Exception("failed to read sapPassword secret")
except Exception as e:
self.tracer.error("%s error fetching sapPassword secret from keyVault url: %s, %s",
self.logTag,
sapPasswordKeyVaultUrl,
e)
return False
return True
def _getHttpPortFromInstanceNr(self, instanceNr: str) -> str:
return '5%s13' % instanceNr # As per SAP documentation, default http port is of the form 5<NR>13
def _getHttpsPortFromInstanceNr(self, instanceNr: str) -> str:
return '5%s14' % instanceNr # As per SAP documentation, default https port is of the form 5<NR>14
def getMessageServerPortFromInstanceNr(self, instanceNr: str) -> str:
return '81%s' % instanceNr # As per SAP documentation, default http port is of the form 81<NR>
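    # Illustrative port derivation for a hypothetical instance number "00":
    #   _getHttpPortFromInstanceNr("00")          -> "50013"
    #   _getHttpsPortFromInstanceNr("00")         -> "50014"
    #   getMessageServerPortFromInstanceNr("00")  -> "8100"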
def getFullyQualifiedDomainName(self, hostname: str) -> str:
if self.sapSubdomain:
return hostname + "." + self.sapSubdomain
else:
return hostname
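    # Example (hypothetical values): hostname "sapapp01" with sapSubdomain "contoso.corp"
    # yields "sapapp01.contoso.corp"; with no subdomain configured the hostname is returned as-is.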
"""
will first attempt to create SOAP client for hostname using the HTTPS port derived from the SAP instance number,
and if that does not succeed will then try to create client using the derived HTTP port
    (if neither hostname nor instance is specified, will default to the primary hostname/instance that the
provider was initialized with from properties)
"""
def getDefaultClient(self,
hostname: str = None,
instance: str = None) -> Client:
if not hostname:
hostname = self.sapHostName
if not instance:
instance = self.sapInstanceNr
httpsPort = self._getHttpsPortFromInstanceNr(instance)
httpPort = self._getHttpPortFromInstanceNr(instance)
portList = [(httpsPort,"https"),(httpPort,"http")]
exceptionDetails = None
startTime = time()
for port,protocol in portList:
startTime = time()
self.tracer.info("%s attempting to fetch default client for hostname=%s on %s port %s",
self.logTag, hostname, protocol, port)
try:
client = self.getClient(hostname, httpProtocol=protocol, port=port)
return client
except Exception as e:
exceptionDetails = e
self.tracer.info("%s error fetching default client hostname=%s on %s port %s: %s [%d ms]",
self.logTag, self.sapHostName, protocol, port, e, TimeUtils.getElapsedMilliseconds(startTime))
self.tracer.error("[%s] error fetching default client hostname=%s on port %s : %s [%d ms]",
self.logTag, self.sapHostName, portList, exceptionDetails, TimeUtils.getElapsedMilliseconds(startTime), exc_info=True)
raise exceptionDetails
"""
attempt to create a SOAP client for the specified hostname using specific protocol and port
(for when we already have a known hostconfig for this hostname, and already know whether HTTPS or HTTP should be used)
    Store successful clients in cache so we don't make unnecessary WSDL fetches for future API calls to the same instance
"""
def getClient(self,
hostname: str,
httpProtocol: str,
port: str,
useCache: bool = True) -> Client:
if not hostname or not httpProtocol or not port:
raise Exception("%s cannot create client with empty httpProtocol, hostname or port (%s:%s:%s)" % \
(self.logTag, httpProtocol, hostname, port))
if httpProtocol != "http" and httpProtocol != "https":
raise Exception("%s httpProtocol %s is not valid for hostname: %s, port: %s" % \
(self.logTag, httpProtocol, hostname, port))
hostname = self.getFullyQualifiedDomainName(hostname)
url = '%s://%s:%s/?wsdl' % (httpProtocol, hostname, port)
if (useCache and url in self._soapClientCache):
cacheEntry = self._soapClientCache[url]
# respect cache expiration; if cache is expired allow client to be refreshed below
if (cacheEntry['expirationDateTime'] > datetime.utcnow()):
if (cacheEntry['client']):
# self.tracer.info("%s using cached SOAP client for wsdl: %s", self.logTag, url)
return cacheEntry['client']
else:
# previously cached soap client attempt was failure
raise Exception("%s cached SOAP client failure for wsdl: %s" % (self.logTag, url))
self.tracer.info("%s connecting to wsdl url: %s", self.logTag, url)
startTime = time()
client = None
try:
session = Session()
session.verify = False
client = Client(url, transport=Transport(session=session, timeout=SOAP_API_TIMEOUT_SECS, operation_timeout=SOAP_API_TIMEOUT_SECS))
self.tracer.info("%s initialized SOAP client url: %s [%d ms]",
self.logTag, url, TimeUtils.getElapsedMilliseconds(startTime))
return client
except Exception as e:
self.tracer.error("%s error fetching wsdl url: %s: %s [%d ms]",
self.logTag, url, e, TimeUtils.getElapsedMilliseconds(startTime), exc_info=True)
raise e
finally:
            # cache successful and failed soap client attempts to reduce future API calls
            self._soapClientCache[url] = { 'client': client, 'expirationDateTime': datetime.utcnow() + SOAP_CLIENT_CACHE_EXPIRATION }
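    # Sketch of the SOAP client cache entries populated above (URL and values are illustrative):
    #   self._soapClientCache["https://sapapp01.contoso.corp:50014/?wsdl"] = {
    #       'client': <zeep Client, or None if instantiation failed>,
    #       'expirationDateTime': datetime.utcnow() + SOAP_CLIENT_CACHE_EXPIRATION
    #   }
    # A None client is cached as well, so repeated failures are not retried until the entry expires.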
def callSoapApi(self, client: Client, apiName: str) -> str:
self.tracer.info("%s executing SOAP API: %s for wsdl: %s", self.logTag, apiName, client.wsdl.location)
startTime = time()
try:
method = getattr(client.service, apiName)
result = method()
self.tracer.info("%s successful SOAP API: %s for wsdl: %s [%d ms]",
self.logTag, apiName, client.wsdl.location, TimeUtils.getElapsedMilliseconds(startTime))
return result
except Exception as e:
self.tracer.error("%s error while calling SOAP API: %s for wsdl: %s: %s [%d ms]",
self.logTag, apiName, client.wsdl.location, e, TimeUtils.getElapsedMilliseconds(startTime), exc_info=True)
raise e
"""
return a netweaver RFC client initialized with "MESSAGESERVER" instance we find
for this SID.
"""
def getRfcClient(self, logTag: str) -> NetWeaverMetricClient:
# RFC connections against application server instances can be made through 'MESSAGESERVER' instances
dispatcherInstance = self.getMessageServerInstance()
return MetricClientFactory.getMetricClient(tracer=self.tracer,
logTag=logTag,
sapHostName=dispatcherInstance['hostname'],
sapSysNr=str(dispatcherInstance['instanceNr']),
sapSubdomain=self.sapSubdomain,
sapSid=self.sapSid,
sapClient=str(self.sapClientId),
sapLogonGroup = self.sapLogonGroup,
sapUsername=self.sapUsername,
                                                   sapPassword=self.sapPassword)
def validate(self) -> bool:
logTag = "[%s][%s][validation]" % (self.fullName, self.sapSid)
# HACK: Load content json to fetch the list of APIs in the checks
self.initContent()
try:
self._validateSoapClient()
except Exception as e:
self.tracer.error("%s SOAP API validation failure: %s", logTag, e, exc_info=True)
return False
try:
self._validateRfcClient()
except Exception as e:
self.tracer.error("%s RFC client validation failure: %s", logTag, e, exc_info=True)
return False
return True
"""
iterate through all SOAP API calls and attempt to validate that SOAP API client can be instantiated
and expected APIs are callable
"""
def _validateSoapClient(self) -> None:
###
        # TODO: this entire function needs to be rethought to be more precise in terms of which instances
# are called for which APIs, as some APIs will not work for some function types.
###
logTag = "[%s][%s][validation]" % (self.fullName, self.sapSid)
# hard-coded list of checks that correspond to SOAP API calls to validate
soapApiChecks = ['GetSystemInstanceList',
'GetProcessList',
'ABAPGetWPTable',
'GetQueueStatistic',
'EnqGetStatistic']
self.tracer.info("%s connecting to sap to validate SOAP API connectivity", logTag)
try:
client = self.getDefaultClient(hostname=self.sapHostName, instance=self.sapInstanceNr)
except Exception as e:
self.tracer.error("%s error occured while initializing SOAP client to SAP server: %s|%s, %s",
logTag,
self.sapHostName,
self.sapInstanceNr,
e,
exc_info=True)
raise
# Ensure that all APIs in the checks are valid and are marked as unprotected.
# Some APIs are compatible with only specific instance types and throw a Fault if run against
# an incompatible one.
# However, here we suppress all errors except Unauthorized since the Monitor phase takes
# care of calling the API against the right instance type. As long as we don't get an
# Unauthorized error, we know we can safely call them during the Monitor phase.
isValid = True
for check in self.checks:
apiName = check.name
if (apiName not in soapApiChecks):
# this is not a SOAP API check
continue
method = getattr(client.service, apiName, None) # Returning None when API not found
if method is None:
self.tracer.error("%s SOAP client failure: api %s does not exist for %s", logTag, apiName, client.wsdl.location)
isValid = False
else:
try:
self.callSoapApi(client, apiName)
self.tracer.info("%s validated SOAP api %s for %s", logTag, apiName, client.wsdl.location)
except Fault as e:
if (e.code == "SOAP-ENV:Client" and e.message == "HTTP Error: 'Unauthorized'"):
isValid = False
self.tracer.error("%s SOAP api %s is protected for %s, %s ", logTag, apiName, client.wsdl.location, e, exc_info=True)
else:
self.tracer.error("%s suppressing error during validation of SOAP api %s for %s, %s", logTag, apiName, client.wsdl.location, e, exc_info=True)
except Exception as e:
self.tracer.error("%s suppressing error during validation of SOAP api %s for %s, %s ", logTag, apiName, client.wsdl.location, e, exc_info=True)
if (not isValid):
raise Exception("%s one or more SOAP APIs failed validation" % (logTag))
"""
if customer provided RFC SDK configuration, then validate that all required properties are specified
and validate we can establish RFC client connections to APIs we need to call
"""
def _validateRfcClient(self) -> None:
logTag = "[%s][%s][validation]" % (self.fullName, self.sapSid)
# are any RFC SDK config properties populated?
        if (not self.sapUsername and
            not self.sapPassword and
            not self.sapClientId and
            not self.sapRfcSdkBlobUrl):
# customer has not chosen to enable RFC SDK, nothing to validate
return
# are ALL RFC SDK config properties populated?
        if (not self.sapUsername or
            not self.sapPassword or
            not self.sapClientId or
            not self.sapRfcSdkBlobUrl):
# customer specified only partial set of config properties needed to enable RFC, so fail validation
raise Exception("must specify all properties to enable RFC metric collection: Username, Password, ClientId, and RfcSdkBlobUrl")
if (not self.areRfcMetricsEnabled()):
raise Exception("RFC SDK failed to install and is not usable")
        # initialize a client for the first MESSAGESERVER instance we find
client = self.getRfcClient(logTag=logTag)
# update logging prefix with the specific instance details of the client
sapHostnameStr = "%s|%s" % (client.Hostname, client.InstanceNr)
# get metric query window to lookback 10 minutes to see if any results are available. If not that probably
# indicates customer has not enabled SMON on their SAP system
self.tracer.info("%s attempting to fetch server timestamp from %s", logTag, sapHostnameStr)
(startTime, endTime) = client.getQueryWindow(lastRunServerTime=None,
minimumRunIntervalSecs=600,
logTag=logTag)
self.tracer.info("%s attempting to fetch SMON metrics from %s", logTag, sapHostnameStr)
result = client.getSmonMetrics(startDateTime=startTime, endDateTime=endTime, logTag=logTag)
self.tracer.info("%s successfully queried SMON metrics from %s", logTag, sapHostnameStr)
self.tracer.info("%s attempting to fetch SWNC workload metrics from %s", logTag, sapHostnameStr)
result = client.getSwncWorkloadMetrics(startDateTime=startTime, endDateTime=endTime, logTag=logTag)
self.tracer.info("%s successfully queried SWNC workload metrics from %s", logTag, sapHostnameStr)
self.tracer.info("%s attempting to fetch Short Dump metrics from %s", logTag, sapHostnameStr)
result = client.getShortDumpsMetrics(startDateTime=startTime, endDateTime=endTime, logTag=logTag)
self.tracer.info("%s successfully queried Short Dump metrics from %s", logTag, sapHostnameStr)
self.tracer.info("%s attempting to fetch Sys Log metrics from %s", logTag, sapHostnameStr)
result = client.getSysLogMetrics(startDateTime=startTime, endDateTime=endTime, logTag=logTag)
self.tracer.info("%s successfully queried Sys Log metrics from %s", logTag, sapHostnameStr)
self.tracer.info("%s attempting to fetch Failed Updates metrics from %s", logTag, sapHostnameStr)
result = client.getFailedUpdatesMetrics(logTag=logTag)
self.tracer.info("%s successfully queried Failed Updates metrics from %s", logTag, sapHostnameStr)
self.tracer.info("%s attempting to fetch Batch Job metrics from %s", logTag, sapHostnameStr)
result = client.getBatchJobMetrics(startDateTime=startTime, endDateTime=endTime, logTag=logTag)
self.tracer.info("%s successfully queried Batch Job metrics from %s", logTag, sapHostnameStr)
self.tracer.info("%s attempting to fetch inbound queue metrics from %s", logTag, sapHostnameStr)
result = client.getInboundQueuesMetrics(logTag=logTag)
self.tracer.info("%s successfully queried inbound queue metrics from %s", logTag, sapHostnameStr)
self.tracer.info("%s attempting to fetch outbound queue metrics from %s", logTag, sapHostnameStr)
result = client.getOutboundQueuesMetrics(logTag=logTag)
self.tracer.info("%s successfully queried outbound queue metrics from %s", logTag, sapHostnameStr)
self.tracer.info("%s attempting to fetch lock entries metrics from %s", logTag, sapHostnameStr)
result = client.getEnqueueReadMetrics(logTag=logTag)
self.tracer.info("%s successfully queried lock entries metrics from %s", logTag, sapHostnameStr)
self.tracer.info("%s successfully validated all known RFC SDK calls", logTag)
"""
query SAP SOAP API to return list of all instances in the SID, but if caller specifies that cached results are okay
and we have cached instance list with the provider instance, then just return the cached results
"""
def getInstances(self,
filterFeatures: list = None ,
filterType: str = None,
useCache: bool = True) -> list:
# Use cached list of instances if available since they should not change within a single monitor run;
# but if cache is not available or if caller explicitly asks to skip cache then make the SOAP call
if ('hostConfig' in self.state and useCache):
# self.tracer.debug("%s using cached list of system instances", self.logTag)
return self.filterInstancesByFeature(self.state['hostConfig'], filterFeatures=filterFeatures, filterType=filterType)
self.tracer.info("%s getting list of system instances", self.logTag)
startTime = time()
instanceList = []
hosts = self._getHosts()
# Use last known hosts to fetch the updated list of hosts
# Walk through the known hostnames and stop whenever any of them returns the list of all instances
isSuccess = False
for host in hosts:
hostname, instanceNum, httpProtocol, port = host[0], host[1], host[2], host[3]
try:
apiName = 'GetSystemInstanceList'
# if we have a cached host config with already defined protocol and port, then we can initialize
# client directly from that, otherwise we have to instantiate client using ports derived from the instance number
# which will try the derived HTTPS port first and then fallback to derived HTTP port
if (not httpProtocol or not port):
client = self.getDefaultClient(hostname=hostname, instance=instanceNum)
else:
client = self.getClient(hostname, httpProtocol, port)
result = self.callSoapApi(client, apiName)
instanceList = self._parseResults(result)
# cache latest results in provider state
self.state['hostConfig'] = instanceList
isSuccess = True
break
except Exception as e:
self.tracer.error("%s could not connect to SAP with hostname: %s and port: %s", self.logTag, hostname, port, exc_info=True)
if not isSuccess:
raise Exception("%s could not connect to any SAP instances with hosts %s [%d ms]" % \
(self.logTag, hosts, TimeUtils.getElapsedMilliseconds(startTime)))
self.tracer.info("%s finished getting all system instances [%d ms]", self.logTag, TimeUtils.getElapsedMilliseconds(startTime))
return self.filterInstancesByFeature(instanceList, filterFeatures=filterFeatures, filterType=filterType)
"""
    fetch cached instance list for this provider and filter down to instances with the 'ABAP' feature
    that are healthy (ie. have dispstatus attribute of 'SAPControl-GREEN'). Just return the first in the list.
"""
def getActiveDispatcherInstance(self):
# Use cached list of instances if available since they don't change that frequently,
# and filter down to only healthy dispatcher instances since RFC direct application server connection
        # only works against dispatchers
dispatcherInstances = self.getInstances(filterFeatures=['ABAP'], filterType='include', useCache=True)
healthyInstances = [instance for instance in dispatcherInstances if 'GREEN' in instance['dispstatus']]
if (len(healthyInstances) == 0):
raise Exception("No healthy ABAP/dispatcher instance found for %s" % self.sapSid)
# return first healthy instance in list
return healthyInstances[0]
"""
    fetch cached instance list for this provider and filter down to instances with the 'MESSAGESERVER' feature,
    then return the first available message server
"""
def getMessageServerInstance(self):
        # Use cached list of instances if available since they don't change that frequently,
        # and filter down to 'MESSAGESERVER' feature instances since RFC logon group connections
        # are established through the message server
dispatcherInstances = self.getInstances(filterFeatures=['MESSAGESERVER'], filterType='include', useCache=True)
if (len(dispatcherInstances) == 0):
raise Exception("No MESSAGESERVER instance found for %s" % self.sapSid)
        # return the first message server instance in the list
return dispatcherInstances[0]
"""
given a list of sap instances and a set of instance features (ie. functions) to include or exclude,
apply filtering logic and return only those instances that match the filter conditions:
'include' filter type will include any instance that matches any of the feature filters
'exclude' filter type will exclude any instance that matches any of the feature filters
"""
def filterInstancesByFeature(self,
sapInstances: list,
filterFeatures: list = None,
filterType: str = None) -> list:
if (not filterFeatures or len(filterFeatures) == 0 or not sapInstances):
return sapInstances
self.tracer.info("%s filtering list of system instances based on features: %s", self.logTag, filterFeatures)
instances = [(instance, instance['features'].split('|')) for instance in sapInstances]
if filterType == "include":
# Inclusion filter
# Only include instances that match at least one of the filter features
filtered_instances = [instance for (instance, instance_features) in instances \
if not set(filterFeatures).isdisjoint(set(instance_features))]
elif filterType == "exclude":
# Exclusion filter
# Only include instance that match none of the filter features
filtered_instances = [instance for (instance, instance_features) in instances \
if set(filterFeatures).isdisjoint(set(instance_features))]
else:
raise Exception("%s filterType '%s' is not supported filter type" % (self.logTag, filterType))
return filtered_instances
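    # Illustrative filtering (hypothetical instances): given instances whose 'features' are
    # "MESSAGESERVER|ENQUE" and "ABAP|GATEWAY|ICMAN",
    #   filterFeatures=['ABAP'], filterType='include' -> only the "ABAP|GATEWAY|ICMAN" instance
    #   filterFeatures=['ABAP'], filterType='exclude' -> only the "MESSAGESERVER|ENQUE" instance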
"""
helper method to deserialize result and return as list of dictionary objects
"""
def _parseResults(self, results: list) -> list:
return helpers.serialize_object(results, dict)
"""
    private method to return the default provider hostname config (what the customer provided when the netweaver provider was added)
    or a fully fleshed out list of (hostname / instance # / protocol / port) tuples based on a previous cached call to getInstances()
"""
def _getHosts(self) -> list:
# Fetch last known list from storage. If storage does not have list, use provided
# hostname and instanceNr
if 'hostConfig' not in self.state:
self.tracer.info("%s no host config persisted yet, using user-provided host name and instance nr", self.logTag)
hosts = [(self.sapHostName,
self.sapInstanceNr,
None,
None)]
else:
self.tracer.info("%s fetching last known host config", self.logTag)
currentHostConfig = self.state['hostConfig']
hosts = [(hostConfig['hostname'],
hostConfig['instanceNr'],
"https" if (hostConfig['httpsPort'] and hostConfig['httpsPort'] != "0") else "http",
hostConfig['httpsPort'] if (hostConfig['httpsPort'] and hostConfig['httpsPort'] != "0") else hostConfig['httpPort']) for hostConfig in currentHostConfig]
return hosts
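    # Sketch of the tuples returned above (hostnames/ports are illustrative):
    #   no cached host config:   [("sapapp01", "00", None, None)]
    #   with cached host config: [("sapapp01", "00", "https", "50014"), ("sapapp02", "01", "http", "50113")]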
"""
returns flag to indicate whether provider checks should attempt to use RFC SDK client calls to fetch certain metrics.
    First time may perform fairly expensive checks to validate if RFC SDK is installed and configured, and may attempt
to download user provided blob to install to local system. We only want to attempt this at most once per process,
so first caller to this function will pay that cost and the resulting success/failure flag will be cached.
"""
def areRfcMetricsEnabled(self) -> bool:
if self._areRfcCallsEnabled != None:
            # the flag for whether RFC is usable has already been initialized, so return
return self._areRfcCallsEnabled
# there may be 1..N sapNetWeaverProviderInstance instances per sapmon process, and each instance
# may choose to enable/disable RFC calls individually, but we should only attempt to install the
# RFC SDK at most once per process. Use a static/class variable to determine if installation
# attempt has already been attempted and was success/failure, and do all this inside of
# a lock and cache flag for future checks
try:
# class singleton lock
sapNetweaverProviderInstance._rfcInstallerLock.acquire(blocking=True)
# check -> lock -> check
if (self._areRfcCallsEnabled != None):
# flag was initialized prior to obtaining the lock
return self._areRfcCallsEnabled
# ensure this provider instance has necessary config settings to enable RFC SDK calls
if (not self.sapUsername or
not self.sapPassword or
not self.sapClientId or
not self.sapRfcSdkBlobUrl or
not self.sapLogonGroup):
self.tracer.info("%s Netweaver RFC calls disabled for because missing one or more required " +
"config properties: sapUsername, sapPassword, sapClientId, sapLogonGroup and sapRfcSdkBlobUrl",
self.logTag)
self._areRfcCallsEnabled = False
return False
# only attempt to install RFC SDK once per process execution
if (sapNetweaverProviderInstance._isRfcInstalled == None):
sapNetweaverProviderInstance._isRfcInstalled = self._trySetupRfcSdk()
self._areRfcCallsEnabled = sapNetweaverProviderInstance._isRfcInstalled
return self._areRfcCallsEnabled
except Exception as e:
self.tracer.error("%s Exception trying to check if rfc sdk metrics are enabled, %s", self.logTag, e, exc_info=True)
sapNetweaverProviderInstance._isRfcInstalled = False
self._areRfcCallsEnabled = False
finally:
sapNetweaverProviderInstance._rfcInstallerLock.release()
return False
"""
validate that RFC SDK package has been installed and configured correctly and is usable by pyrfc module.
If pyrfc module cannot be imported, then potentially attempt to download RFC SDK blob, install to local system,
and configure necessary environment variables and system settings so that the libraries can be
successfully loaded by the pyrfc module.
    Returns flag indicating whether pyrfc module can be imported (ie. whether RFC calls can be enabled)
Pre-requisites for RFC SDK installation attempt:
1.) Customer provided config property sapRfcSdkBlobUrl must be non-empty.
2.) python module for "pynwrfc" must be installed
3.) was the last failed SDK installation attempt more than N minutes ago (defined by MINIMUM_RFC_INSTALL_RETRY_INTERVAL)
4.) does the sapRfcSdkBlobUrl provided by customer actually exist in the storage account
5.) was the last_modified timestamp on the sapRfcSdkBlobUrl blob modified since the last failed installation attempt
"""
def _trySetupRfcSdk(self) -> bool:
try:
# if no RFC SDK download blob url specified, treat as kill switch to disable any RFC calls
if (not self.sapRfcSdkBlobUrl):
self.tracer.info("%s No user provided RFC SDK blob url, will not leverage RFC SDK. quitting...", self.logTag)
return False
installer = SapRfcSdkInstaller(tracer=self.tracer, installPath=PATH_RFC_SDK_INSTALL)
# environment variables must be initialized before RFC and pyrfc installation can be validated
self.tracer.info("%s initializing RFC SDK environment...", self.logTag)
if (not installer.initRfcSdkEnvironment()):
self.tracer.error("%s failed to initialize rfc sdk environment pre-requisites", self.logTag)
return False
# if we are able to successfully import the pyrfc connector module, that means RFC SDK
# libraries must be installed and were able to be found by pyrfc package initialization,
# so no need to do any further checks.
if (installer.isPyrfcModuleUsable()):
# pyrfc package is usable, which means RFC SDK is already installed and environment configured correctly
self.tracer.info("%s Pyrfc module is usable, RFC calls will be enabled", self.logTag)
return True
# if pyrfc module cannot be imported, check to see if it is even installed. Assumption is that
# pyrfc module is installed as part of container image, so if it is missing something is wrong
# there is no need to even try to install the RFC SDK
if (not installer.isPyrfcModuleInstalled()):
self.tracer.error("%s Pyrfc module is not installed, RFC calls will be disabled", self.logTag)
return False
# check last sdk install attempt time so we can limit how often we retry
# to download and install SDK on persistent failures (eg. no more than once every 30 mins)
lastSdkInstallAttemptTime = installer.getLastSdkInstallAttemptTime()
if (lastSdkInstallAttemptTime > (datetime.now(timezone.utc) - MINIMUM_RFC_INSTALL_RETRY_INTERVAL)):
self.tracer.info("%s last RFC SDK install attempt was %s, minimum attempt retry %s, skipping...",
self.logTag,
lastSdkInstallAttemptTime,
MINIMUM_RFC_INSTALL_RETRY_INTERVAL)
return False
self.tracer.info("%s RFC SDK is not installed, so attempt installation now...", self.logTag)
blobStorageAccount = AzureStorageAccount(tracer=self.tracer,
sapmonId=self.ctx.sapmonId,
msiClientId=self.ctx.msiClientId,
subscriptionId=self.ctx.vmInstance["subscriptionId"],
resourceGroup=self.ctx.vmInstance["resourceGroupName"])
# first check that rfc sdk download blob exists in Azure Storage account, and if it
            # exists, also fetch the last_modified timestamp metadata
doesPackageExist, packageLastModifiedTime = installer.isRfcSdkAvailableForDownload(
blobUrl=self.sapRfcSdkBlobUrl,
storageAccount=blobStorageAccount)
if (not doesPackageExist):
self.tracer.error("%s User provided RFC SDK blob does not exist %s, skipping...", self.logTag, self.sapRfcSdkBlobUrl)
return False
self.tracer.info("%s user provided RFC SDK blob exists for download %s, lastModified=%s",
self.logTag, self.sapRfcSdkBlobUrl, packageLastModifiedTime)
# the user provided sdk blob exists, so before we download compare the last_modified timestamp
# with the last modified time of the last download attempt. If nothing has changed,
# then no need to try and download the package again
# TODO: confirm, should we go ahead and try to re-download previously failed packages
# once every 30 minutes anyway? just in case failure was something external?
lastInstallPackageModifiedTime = installer.getLastSdkInstallPackageModifiedTime()
if (packageLastModifiedTime == lastInstallPackageModifiedTime):
self.tracer.info("%s rfc sdk download package has not been modified since last download " +
"attempt (last_modified=%s), will not download again",
self.logTag,
lastInstallPackageModifiedTime)
return False
self.tracer.info("%s user provided rfc sdk package last_modified (%s) has changed " +
"since last install attempt (%s), attempting to re-download and install",
self.logTag,
packageLastModifiedTime,
lastInstallPackageModifiedTime)
# try to download user provided RFC SDK blob, install to local system and configure necessary
# environment variables and system settings so that it can be usable by pyrfc module
if (not installer.downloadAndInstallRfcSdk(blobUrl=self.sapRfcSdkBlobUrl, storageAccount=blobStorageAccount)):
self.tracer.error("%s failed to download and install rfc sdk package, RFC calls will not be enabled...", self.logTag)
return False
            # on Linux the pyrfc module may not be usable upon the first install attempt, since changes made to
            # the LD_LIBRARY_PATH environment variable after the python process has started may not be picked up.
# The module should be usable on the next sapmon process run.
if (not installer.isPyrfcModuleUsable()):
self.tracer.error("%s pyrfc module still not usable after RFC SDK install (might require process restart), " +
"RFC calls will not be enabled...",
self.logTag)
return False
self.tracer.info("%s pyrfc module is usable after RFC SDK install, RFC calls will be enabled...", self.logTag)
return True
except Exception as e:
self.tracer.error("%s exception trying to setup and validate RFC SDK, RFC calls will be disabled: %s", self.logTag, e, exc_info=True)
return False
###########################
class sapNetweaverProviderCheck(ProviderCheck):
lastResult = []
# hard-coded set of action names that require RFC SDK to be usable
# and can override runtime isEnabled() check if RFC is not usable
rfcCheckNames = {'SMON_Metrics', 'SWNC_Workload_Metrics', 'SDF_Short_Dumps_Metrics', 'Sys_Log_Metrics',
'Failed_Updates_Metrics', 'Batch_Jobs_Metrics', 'Inbound_Queues_Metrics', 'Outbound_Queues_Metrics',
'Enqueue_Read_Metrics'}
def __init__(self,
provider: ProviderInstance,
**kwargs
):
super().__init__(provider, **kwargs)
self.lastRunLocal = None
self.lastRunServer = None
# provider check common logging prefix
self.logTag = "[%s][%s]" % (self.fullName, self.providerInstance.sapSid)
"""
    return flag indicating whether this check instance requires the SAP RFC SDK to be installed and usable
"""
def doesCheckRequireRfcSdk(self) -> bool:
return self.name in sapNetweaverProviderCheck.rfcCheckNames
"""
    override base ProviderCheck implementation so that RFC metric collection methods that are enabled in
    the default Provider JSON configuration are treated as disabled at runtime if the RFC SDK
    is not configured (to reduce log spam)
"""
def isEnabled(self) -> bool:
if not self.state["isEnabled"]:
return False
# if this check requires RFC and RFC is not installed, then treat as disabled
if (self.doesCheckRequireRfcSdk()):
if (not self.providerInstance.areRfcMetricsEnabled()):
return False
return True
def _getFormattedTimestamp(self) -> str:
return datetime.utcnow().isoformat()
def _parseResult(self, result: object) -> list:
return [helpers.serialize_object(result, dict)]
def _parseResults(self, results: list) -> list:
return helpers.serialize_object(results, dict)
def _getServerTimestamp(self) -> datetime:
self.tracer.info("%s fetching current timestamp from message server", self.logTag)
message_server_instances = self.providerInstance.getInstances(filterFeatures=['MESSAGESERVER'], filterType='include', useCache=True)
date = datetime.fromisoformat(self._getFormattedTimestamp())
# Get timestamp from the first message server that returns a valid date
for instance in message_server_instances:
hostname = instance['hostname']
instanceNr = str(instance['instanceNr']).zfill(2)
port = self.providerInstance.getMessageServerPortFromInstanceNr(instanceNr)
hostname = self.providerInstance.getFullyQualifiedDomainName(hostname)
message_server_endpoint = "http://%s:%s/" % (hostname, port)
try:
# We only care about the date in the response header, so we ignore the response body
# 'Thu, 04 Mar 2021 05:02:12 GMT'
# NOTE: we don't need to follow redirects because the redirect response itself 300-3XX
# will have the 'date' header as well. In some cases we were following a chain
# of redirects that would terminate in a 404, which would not have the 'date' header
response = requests.get(message_server_endpoint, allow_redirects=False)
if ('date' not in response.headers):
raise Exception("no 'date' response header found for response status:%s/%s from:%s"
% (response.status_code, response.reason, message_server_endpoint))
date = datetime.strptime(response.headers['date'], '%a, %d %b %Y %H:%M:%S %Z')
self.tracer.info("%s received message server %s header: %s, parsed time: %s",
self.logTag,
message_server_endpoint,
response.headers['date'],
date)
break
except Exception as e:
self.tracer.info("%s suppressing expected error while fetching server time during HTTP GET request to url %s: %s ",
self.logTag, message_server_endpoint, e)
return date
def _actionGetSystemInstanceList(self) -> None:
self.tracer.info("%s refreshing list of system instances", self.logTag)
self.lastRunLocal = datetime.utcnow()
# when performing the actual provider check action, always fetch a fresh instance list snapshot and refresh the cache
instanceList = self.providerInstance.getInstances(useCache=False)
self.lastRunServer = self._getServerTimestamp()
# Update host config, if new list is fetched
# Parse dictionary and add current timestamp and SID to data and log it
if len(instanceList) != 0:
currentTimestamp = self._getFormattedTimestamp()
for instance in instanceList:
instance['timestamp'] = currentTimestamp
instance['serverTimestamp'] = self.lastRunServer.isoformat()
instance['SID'] = self.providerInstance.sapSid
instance['subdomain'] = self.providerInstance.sapSubdomain
self.lastResult = instanceList
# Update internal state
if not self.updateState():
raise Exception("%s failed to update state" % self.logTag)
self.tracer.info("%s successfully fetched system instance list", self.logTag)
def _executeWebServiceRequest(self, apiName: str, filterFeatures: list, filterType: str, parser: Callable[[Any], list] = None) -> None:
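# generic helper: pick the cached SAP instances matching the feature filter, invoke the given SOAP API on each, and collect the parsed, enriched results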
self.tracer.info("[%s] executing web service request: %s" % (self.fullName, apiName))
self.lastRunLocal = datetime.utcnow()
# track latency of entire method execution with dependencies
startTime = time()
if parser is None:
parser = self._parseResults
# Use cached list of instances if available since they don't change that frequently; else fetch afresh.
# filter down to just the instances we need for this SOAP API type
sapInstances = self.providerInstance.getInstances(useCache=True, filterFeatures=filterFeatures, filterType=filterType)
self.lastRunServer = self._getServerTimestamp()
if len(sapInstances) == 0:
self.tracer.info("%s no instances found that support this API: %s", self.logTag, apiName)
# Call web service
all_results = []
currentTimestamp = self._getFormattedTimestamp()
for instance in sapInstances:
# default to https unless the httpsPort was not defined, in which case fallback to http
httpProtocol = "https"
port = instance['httpsPort']
if ((not port) or port == "0"):
# fallback to http port instead
httpProtocol = "http"
port = instance['httpPort']
results = []
try:
client = self.providerInstance.getClient(instance['hostname'], httpProtocol, port)
results = self.providerInstance.callSoapApi(client, apiName)
if(apiName == "GetProcessList"):
results = self._sanitizeGetProcessList(results)
elif(apiName == "ABAPGetWPTable"):
results = self._sanitizeABAPGetWPTable(results)
except Exception as e:
self.tracer.error("%s unable to call the Soap Api %s - %s://%s:%s, %s", self.logTag, apiName, httpProtocol, instance['hostname'], port, e, exc_info=True)
continue
if len(results) != 0:
parsed_results = parser(results)
for result in parsed_results:
result['hostname'] = instance['hostname']
result['instanceNr'] = instance['instanceNr']
result['subdomain'] = self.providerInstance.sapSubdomain
result['timestamp'] = currentTimestamp
result['serverTimestamp'] = self.lastRunServer.isoformat()
result['SID'] = self.providerInstance.sapSid
all_results.extend(parsed_results)
if len(all_results) == 0:
self.tracer.info("%s no results found for: %s", self.logTag, apiName)
self.lastResult = all_results
# Update internal state
if not self.updateState():
raise Exception("[%s] failed to update state for web service request: %s [%d ms]" % \
(self.logTag, apiName, TimeUtils.getElapsedMilliseconds(startTime)))
self.tracer.info("%s successfully processed web service request: %s [%d ms]",
self.logTag, apiName, TimeUtils.getElapsedMilliseconds(startTime))
def _actionExecuteGenericWebServiceRequest(self, apiName: str, filterFeatures: list, filterType: str) -> None:
self._executeWebServiceRequest(apiName, filterFeatures, filterType, self._parseResults)
def _actionExecuteEnqGetStatistic(self, apiName: str, filterFeatures: list, filterType: str) -> None:
self._executeWebServiceRequest(apiName, filterFeatures, filterType, self._parseResult)
"""
Return the value for the given key from the dictionary, raising ValueError if the key is missing and converting a None value to empty string ''
"""
def _getKeyValue(self, dictionary, key, apiName):
if key not in dictionary:
raise ValueError("Result received for api %s does not contain key: %s"% (apiName, key))
if(dictionary[key] == None):
dictionary[key] = ""
return dictionary[key]
"""
Method to parse the results from ABAPGetWPTable and set the strings with None value to empty string ''
"""
def _sanitizeABAPGetWPTable(self, records: list) -> list:
apiName = "ABAPGetWPTable"
processed_results = list()
for record in records:
processed_result = {
"Action": self._getKeyValue(record, 'Action', apiName),
"Client": self._getKeyValue(record, 'Client', apiName),
"Cpu": self._getKeyValue(record, 'Cpu', apiName),
"Err": self._getKeyValue(record, 'Err', apiName),
"No": self._getKeyValue(record, 'No', apiName),
"Pid": self._getKeyValue(record, 'Pid', apiName),
"Program": self._getKeyValue(record, 'Program', apiName),
"Reason": self._getKeyValue(record, 'Reason', apiName),
"Sem": self._getKeyValue(record, 'Sem', apiName),
"Start": self._getKeyValue(record, 'Start', apiName),
"Status": self._getKeyValue(record, 'Status', apiName),
"Table": self._getKeyValue(record, 'Table', apiName),
"Time": self._getKeyValue(record, 'Time', apiName),
"Typ": self._getKeyValue(record, 'Typ', apiName),
"User": self._getKeyValue(record, 'User', apiName)
}
processed_results.append(processed_result)
return processed_results
"""
Method to parse the results from GetProcessList and set the strings with None value to empty string ''
"""
def _sanitizeGetProcessList(self, records: list) -> list:
apiName = "GetProcessList"
processed_results = list()
for record in records:
processed_result = {
"description": self._getKeyValue(record, 'description', apiName),
"dispstatus": self._getKeyValue(record, 'dispstatus', apiName),
"elapsedtime": self._getKeyValue(record, 'elapsedtime', apiName),
"name": self._getKeyValue(record, 'name', apiName),
"pid": self._getKeyValue(record, 'pid', apiName),
"starttime": self._getKeyValue(record, 'starttime', apiName),
"textstatus": self._getKeyValue(record, 'textstatus', apiName)
}
processed_results.append(processed_result)
return processed_results
"""
netweaver provider check action to query for SDF/SMON Analysis Run metrics
"""
def _actionGetSmonAnalysisMetrics(self) -> None:
# base class will always call generateJsonString(), so we must always be sure to set the lastResult
# regardless of success or failure
self.lastResult = []
try:
# initialize hostname log string here to default of SID in case we cannot identify a specific dispatcher host
sapHostnameStr = self.providerInstance.sapSid
if (not self.providerInstance.areRfcMetricsEnabled()):
self.tracer.info("%s Skipping SMON metrics because RFC SDK metrics not enabled...", self.logTag)
return
# track latency of entire method execution with dependencies
latencyStartTime = time()
# initialize a client for the first healthy MessageServer instance we find
client = self.providerInstance.getRfcClient(logTag=self.logTag)
# update logging prefix with the specific instance details of the client
sapHostnameStr = "%s|%s" % (client.Hostname, client.InstanceNr)
# get metric query window based on our last successful query where results were returned
(startTime, endTime) = client.getQueryWindow(lastRunServerTime=self.lastRunServer,
minimumRunIntervalSecs=self.frequencySecs,
logTag=self.logTag)
self.lastResult = client.getSmonMetrics(startDateTime=startTime, endDateTime=endTime, logTag=self.logTag)
self.tracer.info("%s successfully queried SMON metrics for %s [%d ms]",
self.logTag, sapHostnameStr, TimeUtils.getElapsedMilliseconds(latencyStartTime))
self.lastRunLocal = datetime.now(timezone.utc)
self.lastRunServer = endTime
# only update state on successful query attempt
self.updateState()
except Exception as e:
self.tracer.error("%s exception trying to fetch SMON Analysis Run metrics for %s [%d ms], error: %s",
self.logTag,
sapHostnameStr,
TimeUtils.getElapsedMilliseconds(latencyStartTime),
e,
exc_info=True)
raise
"""
netweaver provider check action to query for SWNC workload statistics and decorate with ST03 metric calculations
"""
def _actionGetSwncWorkloadMetrics(self) -> None:
# base class will always call generateJsonString(), so we must always be sure to set the lastResult
# regardless of success or failure
self.lastResult = []
try:
# initialize hostname log string here to default of SID in case we cannot identify a specific dispatcher host
sapHostnameStr = self.providerInstance.sapSid
if (not self.providerInstance.areRfcMetricsEnabled()):
self.tracer.info("%s Skipping SWNC metrics because RFC SDK metrics not enabled...", self.logTag)
return
# track latency of entire method execution with dependencies
latencyStartTime = time()
# initialize a client for the first healthy MessageServer instance we find
client = self.providerInstance.getRfcClient(logTag=self.logTag)
# update logging prefix with the specific instance details of the client
sapHostnameStr = "%s|%s" % (client.Hostname, client.InstanceNr)
# get metric query window based on our last successful query where results were returned
(startTime, endTime) = client.getQueryWindow(lastRunServerTime=self.lastRunServer,
minimumRunIntervalSecs=self.frequencySecs,
logTag=self.logTag)
self.lastResult = client.getSwncWorkloadMetrics(startDateTime=startTime, endDateTime=endTime, logTag=self.logTag)
self.tracer.info("%s successfully queried SWNC workload metrics for %s [%d ms]",
self.logTag, sapHostnameStr, TimeUtils.getElapsedMilliseconds(latencyStartTime))
self.lastRunLocal = datetime.now(timezone.utc)
self.lastRunServer = endTime
# only update state on successful query attempt
self.updateState()
except Exception as e:
self.tracer.error("%s exception trying to fetch SWNC workload metrics for %s [%d ms], error: %s",
self.logTag,
sapHostnameStr,
TimeUtils.getElapsedMilliseconds(latencyStartTime),
e,
exc_info=True)
raise
"""
netweaver provider check action to query for short dumps
"""
def _actionGetShortDumpsMetrics(self) -> None:
# base class will always call generateJsonString(), so we must always be sure to set the lastResult
# regardless of success or failure
self.lastResult = []
try:
# initialize hostname log string here to default of SID in case we cannot identify a specific dispatcher host
sapHostnameStr = self.providerInstance.sapSid
if (not self.providerInstance.areRfcMetricsEnabled()):
self.tracer.info("%s Skipping short dumps metrics because RFC SDK metrics not enabled...", self.logTag)
return
# track latency of entire method execution with dependencies
latencyStartTime = time()
# initialize a client for the first healthy MessageServer instance we find
client = self.providerInstance.getRfcClient(logTag=self.logTag)
# update logging prefix with the specific instance details of the client
sapHostnameStr = "%s|%s" % (client.Hostname, client.InstanceNr)
# get metric query window based on our last successful query where results were returned
(startTime, endTime) = client.getQueryWindow(lastRunServerTime=self.lastRunServer,
minimumRunIntervalSecs=self.frequencySecs,
logTag=self.logTag)
self.lastResult = client.getShortDumpsMetrics(startDateTime=startTime, endDateTime=endTime, logTag=self.logTag)
self.tracer.info("%s successfully queried short dumps metrics for %s [%d ms]",
self.logTag, sapHostnameStr, TimeUtils.getElapsedMilliseconds(latencyStartTime))
self.lastRunLocal = datetime.now(timezone.utc)
self.lastRunServer = endTime
# only update state on successful query attempt
self.updateState()
except Exception as e:
self.tracer.error("%s exception trying to fetch short dumps metrics for %s [%d ms], error: %s",
self.logTag,
sapHostnameStr,
TimeUtils.getElapsedMilliseconds(latencyStartTime),
e,
exc_info=True)
raise
"""
netweaver provider check action to query for sys logs
"""
def _actionGetSysLogMetrics(self) -> None:
# base class will always call generateJsonString(), so we must always be sure to set the lastResult
# regardless of success or failure
self.lastResult = []
try:
# initialize hostname log string here to default of SID in case we cannot identify a specific dispatcher host
sapHostnameStr = self.providerInstance.sapSid
if (not self.providerInstance.areRfcMetricsEnabled()):
self.tracer.info("%s Skipping sys logs metrics because RFC SDK metrics not enabled...", self.logTag)
return
# track latency of entire method execution with dependencies
latencyStartTime = time()
# initialize a client for the first healthy MessageServer instance we find
client = self.providerInstance.getRfcClient(logTag=self.logTag)
# update logging prefix with the specific instance details of the client
sapHostnameStr = "%s|%s" % (client.Hostname, client.InstanceNr)
# get metric query window based on our last successful query where results were returned
(startTime, endTime) = client.getQueryWindow(lastRunServerTime=self.lastRunServer,
minimumRunIntervalSecs=self.frequencySecs,
logTag=self.logTag)
self.lastResult = client.getSysLogMetrics(startDateTime=startTime, endDateTime=endTime, logTag=self.logTag)
self.tracer.info("%s successfully queried sys log metrics for %s [%d ms]",
self.logTag, sapHostnameStr, TimeUtils.getElapsedMilliseconds(latencyStartTime))
self.lastRunLocal = datetime.now(timezone.utc)
self.lastRunServer = endTime
# only update state on successful query attempt
self.updateState()
except Exception as e:
self.tracer.error("%s exception trying to fetch sys logs metrics for %s [%d ms], error: %s",
self.logTag,
sapHostnameStr,
TimeUtils.getElapsedMilliseconds(latencyStartTime),
e,
exc_info=True)
raise
"""
netweaver provider check action to query for failed updates metrics
"""
def _actionGetFailedUpdatesMetrics(self) -> None:
# base class will always call generateJsonString(), so we must always be sure to set the lastResult
# regardless of success or failure
self.lastResult = []
try:
# initialize hostname log string here to default of SID in case we cannot identify a specific dispatcher host
sapHostnameStr = self.providerInstance.sapSid
if (not self.providerInstance.areRfcMetricsEnabled()):
self.tracer.info("%s Skipping sys logs metrics because RFC SDK metrics not enabled...", self.logTag)
return
# track latency of entire method execution with dependencies
latencyStartTime = time()
# initialize a client for the first healthy MessageServer instance we find
client = self.providerInstance.getRfcClient(logTag=self.logTag)
# update logging prefix with the specific instance details of the client
sapHostnameStr = "%s|%s" % (client.Hostname, client.InstanceNr)
# get metric query window based on our last successful query where results were returned
(startTime, endTime) = client.getQueryWindow(lastRunServerTime=self.lastRunServer,
minimumRunIntervalSecs=self.frequencySecs,
logTag=self.logTag)
self.lastResult = client.getFailedUpdatesMetrics(logTag=self.logTag)
self.tracer.info("%s successfully queried failed updates metrics for %s [%d ms]",
self.logTag, sapHostnameStr, TimeUtils.getElapsedMilliseconds(latencyStartTime))
self.lastRunLocal = datetime.now(timezone.utc)
self.lastRunServer = endTime
# only update state on successful query attempt
self.updateState()
except Exception as e:
self.tracer.error("%s exception trying to fetch failed updates metrics for %s [%d ms], error: %s",
self.logTag,
sapHostnameStr,
TimeUtils.getElapsedMilliseconds(latencyStartTime),
e,
exc_info=True)
raise
"""
netweaver provider check action to query for batch job metrics
"""
def _actionGetBatchJobMetrics(self) -> None:
# base class will always call generateJsonString(), so we must always be sure to set the lastResult
# regardless of success or failure
self.lastResult = []
try:
# initialize hostname log string here to default of SID in case we cannot identify a specific dispatcher host
sapHostnameStr = self.providerInstance.sapSid
if (not self.providerInstance.areRfcMetricsEnabled()):
self.tracer.info("%s Skipping batch jobs metrics because RFC SDK metrics not enabled...", self.logTag)
return
# track latency of entire method execution with dependencies
latencyStartTime = time()
# initialize a client for the first healthy MessageServer instance we find
client = self.providerInstance.getRfcClient(logTag=self.logTag)
# update logging prefix with the specific instance details of the client
sapHostnameStr = "%s|%s" % (client.Hostname, client.InstanceNr)
# get metric query window based on our last successful query where results were returned
(startTime, endTime) = client.getQueryWindow(lastRunServerTime=self.lastRunServer,
minimumRunIntervalSecs=self.frequencySecs,
logTag=self.logTag)
self.lastResult = client.getBatchJobMetrics(startDateTime=startTime, endDateTime=endTime, logTag=self.logTag)
self.tracer.info("%s successfully queried batch job metrics for %s [%d ms]",
self.logTag, sapHostnameStr, TimeUtils.getElapsedMilliseconds(latencyStartTime))
self.lastRunLocal = datetime.now(timezone.utc)
self.lastRunServer = endTime
# only update state on successful query attempt
self.updateState()
except Exception as e:
self.tracer.error("%s exception trying to fetch failed updates metrics for %s [%d ms], error: %s",
self.logTag,
sapHostnameStr,
TimeUtils.getElapsedMilliseconds(latencyStartTime),
e,
exc_info=True)
raise
"""
netweaver provider check action to query for inbound queues statistics
"""
def _actionGetInboundQueuesMetrics(self) -> None:
# base class will always call generateJsonString(), so we must always be sure to set the lastResult
# regardless of success or failure
self.lastResult = []
try:
# initialize hostname log string here to default of SID in case we cannot identify a specific dispatcher host
sapHostnameStr = self.providerInstance.sapSid
if (not self.providerInstance.areRfcMetricsEnabled()):
self.tracer.info("%s Skipping Current Inbound Queues metrics because RFC SDK metrics not enabled...", self.logTag)
return
# track latency of entire method execution with dependencies
latencyStartTime = time()
# initialize a client for the first healthy MessageServer instance we find
client = self.providerInstance.getRfcClient(logTag=self.logTag)
# update logging prefix with the specific instance details of the client
sapHostnameStr = "%s|%s" % (client.Hostname, client.InstanceNr)
self.lastResult = client.getInboundQueuesMetrics(logTag=self.logTag)
self.tracer.info("%s successfully queried Current Inbound Queues metrics for %s [%d ms]",
self.logTag, sapHostnameStr, TimeUtils.getElapsedMilliseconds(latencyStartTime))
self.lastRunLocal = datetime.now(timezone.utc)
# only update state on successful query attempt
self.updateState()
except Exception as e:
self.tracer.error("%s exception trying to fetch Current Inbound Queues metrics for %s [%d ms], error: %s",
self.logTag,
sapHostnameStr,
TimeUtils.getElapsedMilliseconds(latencyStartTime),
e,
exc_info=True)
raise
"""
netweaver provider check action to query for outbound queues statistics
"""
def _actionGetOutboundQueuesMetrics(self) -> None:
# base class will always call generateJsonString(), so we must always be sure to set the lastResult
# regardless of success or failure
self.lastResult = []
try:
# initialize hostname log string here to default of SID in case we cannot identify a specific dispatcher host
sapHostnameStr = self.providerInstance.sapSid
if (not self.providerInstance.areRfcMetricsEnabled()):
self.tracer.info("%s Skipping Current Outbound Queues metrics because RFC SDK metrics not enabled...", self.logTag)
return
# track latency of entire method execution with dependencies
latencyStartTime = time()
# initialize a client for the first healthy MessageServer instance we find
client = self.providerInstance.getRfcClient(logTag=self.logTag)
# update logging prefix with the specific instance details of the client
sapHostnameStr = "%s|%s" % (client.Hostname, client.InstanceNr)
self.lastResult = client.getOutboundQueuesMetrics(logTag=self.logTag)
self.tracer.info("%s successfully queried Current Outbound Queues metrics for %s [%d ms]",
self.logTag, sapHostnameStr, TimeUtils.getElapsedMilliseconds(latencyStartTime))
self.lastRunLocal = datetime.now(timezone.utc)
# only update state on successful query attempt
self.updateState()
except Exception as e:
self.tracer.error("%s exception trying to fetch Current Outbound Queues metrics for %s [%d ms], error: %s",
self.logTag,
sapHostnameStr,
TimeUtils.getElapsedMilliseconds(latencyStartTime),
e,
exc_info=True)
raise
"""
netweaver provider check action to query for object lock entries by connecting to ENQUEUE_READ RFC
"""
def _actionGetEnqueueReadMetrics(self) -> None:
# base class will always call generateJsonString(), so we must always be sure to set the lastResult
# regardless of success or failure
self.lastResult = []
try:
# initialize hostname log string here to default of SID in case we cannot identify a specific dispatcher host
sapHostnameStr = self.providerInstance.sapSid
if (not self.providerInstance.areRfcMetricsEnabled()):
self.tracer.info("%s Skipping ENQUEUE_READ metrics because RFC SDK metrics not enabled...", self.logTag)
return
# track latency of entire method execution with dependencies
latencyStartTime = time()
# initialize a client for the first healthy MessageServer instance we find
client = self.providerInstance.getRfcClient(logTag=self.logTag)
# update logging prefix with the specific instance details of the client
sapHostnameStr = "%s|%s" % (client.Hostname, client.InstanceNr)
self.lastResult = client.getEnqueueReadMetrics(logTag=self.logTag)
self.tracer.info("%s successfully queried ENQUEUE_READ metrics for %s [%d ms]",
self.logTag, sapHostnameStr, TimeUtils.getElapsedMilliseconds(latencyStartTime))
self.lastRunLocal = datetime.now(timezone.utc)
# only update state on successful query attempt
self.updateState()
except Exception as e:
self.tracer.error("%s exception trying to fetch ENQUEUE_READ metrics for %s [%d ms], error: %s",
self.logTag,
sapHostnameStr,
TimeUtils.getElapsedMilliseconds(latencyStartTime),
e,
exc_info=True)
raise
def generateJsonString(self) -> str:
self.tracer.info("%s converting result to json string", self.logTag)
if self.lastResult is not None and len(self.lastResult) != 0:
for result in self.lastResult:
result['SAPMON_VERSION'] = PAYLOAD_VERSION
result['PROVIDER_INSTANCE'] = self.providerInstance.name
result['METADATA'] = self.providerInstance.metadata
resultJsonString = json.dumps(self.lastResult, sort_keys=True, indent=4, cls=JsonEncoder)
self.tracer.debug("%s resultJson=%s", self.logTag, str(resultJsonString))
return resultJsonString
def updateState(self) -> bool:
self.tracer.info("%s updating internal state", self.logTag)
self.state['lastRunLocal'] = self.lastRunLocal
self.state['lastRunServer'] = self.lastRunServer
self.tracer.info("%s internal state successfully updated", self.logTag)
return True
| en | 0.864491 | # Python modules # SOAP Client modules # Payload modules # Suppress SSLError warning due to missing SAP server certificate # wait time in between attempts to re-download and install RFC SDK package if we have a download blob # URL defined and previous install attempt was not successful # timeout to use for all SOAP WSDL fetch and other API calls # soap client cache expiration, after which amount of time both successful + failed soap client instantiation attempts will be refreshed # static / class variables to enforce singleton behavior around rfc sdk installation attempts across all # instances of SAP Netweaver provider # RFC SDK call settings # provider instance flag for whether RFC calls should be enabled for this specific Netweaver provider instance # cache WSDL SOAP clients so we can re-use them across checks for the same provider and cut down off-box calls # the RFC SDK does not allow client to specify a timeout and in fact appears to have a connection timeout of 60 secs. # In cases where RFC calls timeout due to some misconfiguration, multiple retries can lead to metric gaps of several minutes. # We are limiting retries here because it is extremely rare for SOAP or RFC call to fail on first attempt and succeed on retry, # as most of these failures are due to persistent issues. Better to not waste limited time budget. parse provider properties and get sid, host name and instance number # provider level common logging prefix # 0 is an acceptable value for Instance Number # if user did not specify password directly via UI, check to see if they instead # provided link to Key Vault secret # As per SAP documentation, default http port is of the form 5<NR>13 # As per SAP documentation, default https port is of the form 5<NR>14 # As per SAP documentation, default http port is of the form 81<NR> will first attempt to create SOAP client for hostname using the HTTPS port derived from the SAP instance number, and if that does not succeed will then try to create client using the derived HTTP port (if neither hostname or instance are specified, will default to the primary hostname/instance that the provider was initialized with from properties) attempt to create a SOAP client for the specified hostname using specific protocol and port (for when we already have a known hostconfig for this hostname, and already know whether HTTPS or HTTP should be used) Store successful clients in cache so we don't make unnecessary WSDL fetchs for future API calls to the same instance # respect cache expiration; if cache is expired allow client to be refreshed below # self.tracer.info("%s using cached SOAP client for wsdl: %s", self.logTag, url) # previously cached soap client attempt was failure # cache successsful and failed soap client attempts to reduce future API calls return a netweaver RFC client initialized with "MESSAGESERVER" instance we find for this SID. # RFC connections against application server instances can be made through 'MESSAGESERVER' instances # HACK: Load content json to fetch the list of APIs in the checks iterate through all SOAP API calls and attempt to validate that SOAP API client can be instantiated and expected APIs are callable ### # TODO: this entire function needs to be rethought to me more precise in terms of which instances # are called for which APIs, as some APIs will not work for some function types. ### # hard-coded list of checks that correspond to SOAP API calls to validate # Ensure that all APIs in the checks are valid and are marked as unprotected. 
# Some APIs are compatible with only specific instance types and throw a Fault if run against # an incompatible one. # However, here we suppress all errors except Unauthorized since the Monitor phase takes # care of calling the API against the right instance type. As long as we don't get an # Unauthorized error, we know we can safely call them during the Monitor phase. # this is not a SOAP API check # Returning None when API not found if customer provided RFC SDK configuration, then validate that all required properties are specified and validate we can establish RFC client connections to APIs we need to call # are any RFC SDK config properties populated? # customer has not chosen to enable RFC SDK, nothing to validate # are ALL RFC SDK config properties populated? # customer specified only partial set of config properties needed to enable RFC, so fail validation # initialize a client for the first healthy ABAP/Dispatcher instance we find # update logging prefix with the specific instance details of the client # get metric query window to lookback 10 minutes to see if any results are available. If not that probably # indicates customer has not enabled SMON on their SAP system query SAP SOAP API to return list of all instances in the SID, but if caller specifies that cached results are okay and we have cached instance list with the provider instance, then just return the cached results # Use cached list of instances if available since they should not change within a single monitor run; # but if cache is not available or if caller explicitly asks to skip cache then make the SOAP call # self.tracer.debug("%s using cached list of system instances", self.logTag) # Use last known hosts to fetch the updated list of hosts # Walk through the known hostnames and stop whenever any of them returns the list of all instances # if we have a cached host config with already defined protocol and port, then we can initialize # client directly from that, otherwise we have to instantiate client using ports derived from the instance number # which will try the derived HTTPS port first and then fallback to derived HTTP port # cache latest results in provider state fetch cached instance list for this provider and filter down to the list 'ABAP' feature functions that are healthy (ie. have dispstatus attribute of 'SAPControl-GREEN'). Just return first in the list. # Use cached list of instances if available since they don't change that frequently, # and filter down to only healthy dispatcher instances since RFC direct application server connection # only works against dispatchera # return first healthy instance in list fetch cached instance list for this provider and filter down to the list 'MESSAGESERVER' feature functions return the available message server # Use cached list of instances if available since they don't change that frequently, # and filter down to only healthy dispatcher instances since RFC direct application server connection # only works against dispatchera # return first healthy instance in list given a list of sap instances and a set of instance features (ie. 
functions) to include or exclude, apply filtering logic and return only those instances that match the filter conditions: 'include' filter type will include any instance that matches any of the feature filters 'exclude' filter type will exclude any instance that matches any of the feature filters # Inclusion filter # Only include instances that match at least one of the filter features # Exclusion filter # Only include instance that match none of the filter features helper method to deserialize result and return as list of dictionary objects private method to return default provider hostname config (what customer provided at time netweaver provided was added) or a fully fleshed out list of <hostname / instance # / https:Port> tuples based on a previous cached call to getInstances() # Fetch last known list from storage. If storage does not have list, use provided # hostname and instanceNr returns flag to indicate whether provider checks should attempt to use RFC SDK client calls to fetch certain metrics. First time may perform fairly expensive checks to validate if RFC SDK is installed anc configured, and may attempt to download user provided blob to install to local system. We only want to attempt this at most once per process, so first caller to this function will pay that cost and the resulting success/failure flag will be cached. # the flag for whether RFC is usable has already been initialzed, so return # there may be 1..N sapNetWeaverProviderInstance instances per sapmon process, and each instance # may choose to enable/disable RFC calls individually, but we should only attempt to install the # RFC SDK at most once per process. Use a static/class variable to determine if installation # attempt has already been attempted and was success/failure, and do all this inside of # a lock and cache flag for future checks # class singleton lock # check -> lock -> check # flag was initialized prior to obtaining the lock # ensure this provider instance has necessary config settings to enable RFC SDK calls # only attempt to install RFC SDK once per process execution validate that RFC SDK package has been installed and configured correctly and is usable by pyrfc module. If pyrfc module cannot be imported, then potentially attempt to download RFC SDK blob, install to local system, and configure necessary environment variables and system settings so that the libraries can be successfully loaded by the pyrfc module. Returns flag indicating whether pyrfc module can be imnported (ie. whether RFC calls can be enabled) Pre-requisites for RFC SDK installation attempt: 1.) Customer provided config property sapRfcSdkBlobUrl must be non-empty. 2.) python module for "pynwrfc" must be installed 3.) was the last failed SDK installation attempt more than N minutes ago (defined by MINIMUM_RFC_INSTALL_RETRY_INTERVAL) 4.) does the sapRfcSdkBlobUrl provided by customer actually exist in the storage account 5.) was the last_modified timestamp on the sapRfcSdkBlobUrl blob modified since the last failed installation attempt # if no RFC SDK download blob url specified, treat as kill switch to disable any RFC calls # environment variables must be initialized before RFC and pyrfc installation can be validated # if we are able to successfully import the pyrfc connector module, that means RFC SDK # libraries must be installed and were able to be found by pyrfc package initialization, # so no need to do any further checks. 
# pyrfc package is usable, which means RFC SDK is already installed and environment configured correctly # if pyrfc module cannot be imported, check to see if it is even installed. Assumption is that # pyrfc module is installed as part of container image, so if it is missing something is wrong # there is no need to even try to install the RFC SDK # check last sdk install attempt time so we can limit how often we retry # to download and install SDK on persistent failures (eg. no more than once every 30 mins) # first check that rfc sdk download blob exists in Azure Storage account, and if it # exixts also fetch the last_modified timestamp metadata # the user provided sdk blob exists, so before we download compare the last_modified timestamp # with the last modified time of the last download attempt. If nothing has changed, # then no need to try and download the package again # TODO: confirm, should we go ahead and try to re-download previously failed packages # once every 30 minutes anyway? just in case failure was something external? # try to download user provided RFC SDK blob, install to local system and configure necessary # environment variables and system settings so that it can be usable by pyrfc module # on Linux pyrfc module may not be usable upon first install attempt, as it appears that unpacking # libraries to the LD_LIBRARY_PATH env variable after the python process starts may not pick up the change. # The module should be usable on the next sapmon process run. ########################### # hard-coded set of action names that require RFC SDK to be usable # and can override runtime isEnabled() check if RFC is not usable # provider check common logging prefix return flag indicating whether this check instances requires the SAP RFC SDK to be installed and usable override base ProviderCheck implementation to allow RFC metric collection methods enabled in the default Provider JSON configuration yet treated as disabled at runtime if RFC SDK is not configured (to reduce log spam) # if this check requires RFC and RFC is not installed, then treat as disabled # Get timestamp from the first message server that returns a valid date # We only care about the date in the response header. so we ignore the response body # 'Thu, 04 Mar 2021 05:02:12 GMT' # NOTE: we don't need to follow redirects because the redirect response itself 300-3XX # will have the 'date' header as well. In some cases we were following a chain # of redirects that would terminate in a 404, which would not have the 'date' header # when performing the actual provider check action, always fetch fressh instance list snapshot and refresh the cache # Update host config, if new list is fetched # Parse dictionary and add current timestamp and SID to data and log it # Update internal state # track latency of entire method excecution with dependencies # Use cached list of instances if available since they don't change that frequently; else fetch afresh. 
# filter down to just the instances we need for this SOAP API type # Call web service # default to https unless the httpsPort was not defined, in which case fallback to http # fallback to http port instead # Update internal state Method to parse the value based on the key provided and set the values with None value to empty string '' Method to parse the results from ABAPGetWPTable and set the strings with None value to empty string '' Method to parse the results from GetProcessList and set the strings with None value to empty string '' netweaver provider check action to query for SDF/SMON Analysis Run metrics # base class will always call generateJsonString(), so we must always be sure to set the lastResult # regardless of success or failure # initialize hostname log string here to default of SID in case we cannot identify a specific dispatcher host # track latency of entire method excecution with dependencies # initialize a client for the first healthy MessageServer instance we find # update logging prefix with the specific instance details of the client # get metric query window based on our last successful query where results were returned # only update state on successful query attempt netweaver provider check action to query for SWNC workload statistics and decorate with ST03 metric calculations # base class will always call generateJsonString(), so we must always be sure to set the lastResult # regardless of success or failure # initialize hostname log string here to default of SID in case we cannot identify a specific dispatcher host # track latency of entire method excecution with dependencies # initialize a client for the first healthy MessageServer instance we find # update logging prefix with the specific instance details of the client # get metric query window based on our last successful query where results were returned # only update state on successful query attempt netweaver provider check action to query for short dumps # base class will always call generateJsonString(), so we must always be sure to set the lastResult # regardless of success or failure # initialize hostname log string here to default of SID in case we cannot identify a specific dispatcher host # track latency of entire method excecution with dependencies # initialize a client for the first healthy MessageServer instance we find # update logging prefix with the specific instance details of the client # get metric query window based on our last successful query where results were returned # only update state on successful query attempt netweaver provider check action to query for sys logs # base class will always call generateJsonString(), so we must always be sure to set the lastResult # regardless of success or failure # initialize hostname log string here to default of SID in case we cannot identify a specific dispatcher host # track latency of entire method excecution with dependencies # initialize a client for the first healthy MessageServer instance we find # update logging prefix with the specific instance details of the client # get metric query window based on our last successful query where results were returned # only update state on successful query attempt netweaver provider check action to query for failed updates metrics # base class will always call generateJsonString(), so we must always be sure to set the lastResult # regardless of success or failure # initialize hostname log string here to default of SID in case we cannot identify a specific dispatcher host # track latency of entire method 
excecution with dependencies # initialize a client for the first healthy MessageServer instance we find # update logging prefix with the specific instance details of the client # get metric query window based on our last successful query where results were returned # only update state on successful query attempt netweaver provider check action to query for batch job metrics # base class will always call generateJsonString(), so we must always be sure to set the lastResult # regardless of success or failure # initialize hostname log string here to default of SID in case we cannot identify a specific dispatcher host # track latency of entire method excecution with dependencies # initialize a client for the first healthy MessageServer instance we find # update logging prefix with the specific instance details of the client # get metric query window based on our last successful query where results were returned # only update state on successful query attempt netweaver provider check action to query for inbound queues statistics # base class will always call generateJsonString(), so we must always be sure to set the lastResult # regardless of success or failure # initialize hostname log string here to default of SID in case we cannot identify a specific dispatcher host # track latency of entire method excecution with dependencies # initialize a client for the first healthy MessageServer instance we find # update logging prefix with the specific instance details of the client # only update state on successful query attempt netweaver provider check action to query for outbound queues statistics # base class will always call generateJsonString(), so we must always be sure to set the lastResult # regardless of success or failure # initialize hostname log string here to default of SID in case we cannot identify a specific dispatcher host # track latency of entire method excecution with dependencies # initialize a client for the first healthy MessageServer instance we find # update logging prefix with the specific instance details of the client # only update state on successful query attempt netweaver provider check action to query for object lock entries by connecting to ENQUEUE_READ RFC # base class will always call generateJsonString(), so we must always be sure to set the lastResult # regardless of success or failure # initialize hostname log string here to default of SID in case we cannot identify a specific dispatcher host # track latency of entire method excecution with dependencies # initialize a client for the first healthy MessageServer instance we find # update logging prefix with the specific instance details of the client # only update state on successful query attempt | 1.723837 | 2 |
docker_squash/version.py | pombredanne/docker-scripts | 513 | 7622 | <reponame>pombredanne/docker-scripts
version = "1.0.10.dev0"
| version = "1.0.10.dev0" | none | 1 | 1.133893 | 1 |
|
example_usage/example_list_errors.py | oceanprotocol/plecos | 1 | 7623 | from pathlib import Path
import plecos
import json
print(plecos.__version__)
#%%
path_to_json_local = Path("~/ocn/plecos/plecos/samples/sample_metadata_local.json").expanduser()
path_to_json_remote = Path("~/ocn/plecos/plecos/samples/sample_metadata_remote.json").expanduser()
path_to_broken_json = Path("~/ocn/plecos/plecos/samples/metadata_local_broken.json").expanduser()
path_to_schema_local = Path("~/ocn/plecos/plecos/schemas/metadata_local_v0_3.json").expanduser()
path_to_schema_remote = Path("~/ocn/plecos/plecos/schemas/metadata_remote_v0_3.json").expanduser()
# Select remote or local metadata
LOCAL=True
if LOCAL:
path_json_file = path_to_json_local
path_schema_file = path_to_schema_local
with open(path_to_json_local) as f:
json_dict = json.load(f)
else:
path_json_file = path_to_json_remote
path_schema_file = path_to_schema_remote
with open(path_to_json_remote) as f:
json_dict = json.load(f)
print("Json file:", path_json_file)
print("Schema file:", path_schema_file)
#%%
del json_dict['base']['files'][0]['index']
# del json_dict['base']['files'][0]['url']
# json_dict['base']['extra'] = 1
plecos.is_valid_dict(json_dict)
# json_dict['base']['files'][0]['url']
# json_dict['base']['EXTRA ATTRIB!'] = 0
# json_dict['base']['files'][0]['EXTRA_ATTR'] = "????"
# json_dict['base']['price'] = "A string is not allowed!"
errors = plecos.list_errors(json_dict, path_schema_file)
if errors:
print("ERRORS:")
for e in errors:
print(e)
else:
print("No errors")
raise
#%%
json_dict = {
"base": {
"name": "10 Monkey Species Small",
"author": "Mario",
"license": "CC0: Public Domain",
"contentType": "jpg/txt",
"price": 5,
"categories": [
"image"
],
"tags": [
"image data",
" animals"
],
"type": "dataset",
"description": "Example description",
"copyrightHolder": "",
"encoding": "",
"compression": "",
"workExample": "",
"inLanguage": "en",
"files": [
{
"url": "https://s3.amazonaws.com/datacommons-seeding-us-east/10_Monkey_Species_Small/assets/training.zip"
},
{
"url": "https://s3.amazonaws.com/datacommons-seeding-us-east/10_Monkey_Species_Small/assets/monkey_labels.txt"
},
{
"url": "https://s3.amazonaws.com/datacommons-seeding-us-east/10_Monkey_Species_Small/assets/validation.zip"
}
],
"links": [
{
"url": "https://s3.amazonaws.com/datacommons-seeding-us-east/10_Monkey_Species_Small/links/sample/sample.zip",
"name": "sample.zip",
"type": "sample"
},
{
"url": "https://github.com/slothkong/CNN_classification_10_monkey_species",
"name": "example code",
"type": "example code"
},
{
"url": "https://s3.amazonaws.com/datacommons-seeding-us-east/10_Monkey_Species_Small/links/discovery/n5151.jpg",
"name": "n5151.jpg",
"type": "discovery"
}
],
"checksum": "0",
},
}
#%%
path_to_schema_local = Path("~/ocn/Plecos/plecos/schemas/metadata_local_190305.json").expanduser()
errors = plecos.list_errors(json_dict, path_to_schema_local)
if errors:
print("ERRORS:")
for e in errors:
print(e)
else:
print("No errors") | from pathlib import Path
import plecos
import json
print(plecos.__version__)
#%%
path_to_json_local = Path("~/ocn/plecos/plecos/samples/sample_metadata_local.json").expanduser()
path_to_json_remote = Path("~/ocn/plecos/plecos/samples/sample_metadata_remote.json").expanduser()
path_to_broken_json = Path("~/ocn/plecos/plecos/samples/metadata_local_broken.json").expanduser()
path_to_schema_local = Path("~/ocn/plecos/plecos/schemas/metadata_local_v0_3.json").expanduser()
path_to_schema_remote = Path("~/ocn/plecos/plecos/schemas/metadata_remote_v0_3.json").expanduser()
# Select remote or local metadata
LOCAL=True
if LOCAL:
path_json_file = path_to_json_local
path_schema_file = path_to_schema_local
with open(path_to_json_local) as f:
json_dict = json.load(f)
else:
path_json_file = path_to_json_remote
path_schema_file = path_to_schema_remote
with open(path_to_json_remote) as f:
json_dict = json.load(f)
print("Json file:", path_json_file)
print("Schema file:", path_schema_file)
#%%
del json_dict['base']['files'][0]['index']
# del json_dict['base']['files'][0]['url']
# json_dict['base']['extra'] = 1
plecos.is_valid_dict(json_dict)
# json_dict['base']['files'][0]['url']
# json_dict['base']['EXTRA ATTRIB!'] = 0
# json_dict['base']['files'][0]['EXTRA_ATTR'] = "????"
# json_dict['base']['price'] = "A string is not allowed!"
errors = plecos.list_errors(json_dict, path_schema_file)
if errors:
print("ERRORS:")
for e in errors:
print(e)
else:
print("No errors")
raise
#%%
json_dict = {
"base": {
"name": "10 Monkey Species Small",
"author": "Mario",
"license": "CC0: Public Domain",
"contentType": "jpg/txt",
"price": 5,
"categories": [
"image"
],
"tags": [
"image data",
" animals"
],
"type": "dataset",
"description": "Example description",
"copyrightHolder": "",
"encoding": "",
"compression": "",
"workExample": "",
"inLanguage": "en",
"files": [
{
"url": "https://s3.amazonaws.com/datacommons-seeding-us-east/10_Monkey_Species_Small/assets/training.zip"
},
{
"url": "https://s3.amazonaws.com/datacommons-seeding-us-east/10_Monkey_Species_Small/assets/monkey_labels.txt"
},
{
"url": "https://s3.amazonaws.com/datacommons-seeding-us-east/10_Monkey_Species_Small/assets/validation.zip"
}
],
"links": [
{
"url": "https://s3.amazonaws.com/datacommons-seeding-us-east/10_Monkey_Species_Small/links/sample/sample.zip",
"name": "sample.zip",
"type": "sample"
},
{
"url": "https://github.com/slothkong/CNN_classification_10_monkey_species",
"name": "example code",
"type": "example code"
},
{
"url": "https://s3.amazonaws.com/datacommons-seeding-us-east/10_Monkey_Species_Small/links/discovery/n5151.jpg",
"name": "n5151.jpg",
"type": "discovery"
}
],
"checksum": "0",
},
}
#%%
path_to_schema_local = Path("~/ocn/Plecos/plecos/schemas/metadata_local_190305.json").expanduser()
errors = plecos.list_errors(json_dict, path_to_schema_local)
if errors:
print("ERRORS:")
for e in errors:
print(e)
else:
print("No errors") | en | 0.27759 | #%% # Select remote or local metadata #%% # del json_dict['base']['files'][0]['url'] # json_dict['base']['extra'] = 1 # json_dict['base']['files'][0]['url'] # json_dict['base']['EXTRA ATTRIB!'] = 0 # json_dict['base']['files'][0]['EXTRA_ATTR'] = "????" # json_dict['base']['price'] = "A string is not allowed!" #%% #%% | 2.221911 | 2 |
pangloss/backend.py | CLRafaelR/pangloss | 0 | 7624 | <gh_stars>0
import re
import panflute as pf
from functools import partial
from pangloss.util import smallcapify, break_plain
# regular expression for label formats
label_re = re.compile(r'\{#ex:(\w+)\}')
gb4e_fmt_labelled = """
\\ex\\label{{ex:{label}}}
\\gll {} \\\\
{} \\\\
\\trans {}
"""
gb4e_fmt = """
\\ex
\\gll {} \\\\
{} \\\\
\\trans {}
"""
def gb4e(lst):
"""
Convert an example list into a series of gb4e-formatted interlinear
glosses.
Because example list references are replaced at parsing by Pandoc, the
normal syntax of (@foo) cannot be used for labels; instead, a label syntax
similar to that used for headers (and tables and figures with
pandoc-crossref) is used, namely a {#ex:foo} inserted after the
translation, which will be stripped and replaced with a LaTeX label on the
relevant example.
"""
latex = "\\begin{exe}\n"
for li in lst.content:
lines = break_plain(li.content[0])
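# each example item is expected to break into exactly three lines (original, gloss, free translation); anything else is skipped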
if len(lines) != 3: continue
orig, gloss, trans = map(partial(pf.stringify, newlines=False), lines)
gloss = smallcapify(gloss)
label_match = label_re.search(trans)
if label_match:
label = label_match.group(1)
trans = trans[:label_match.start() - 1]
latex += gb4e_fmt_labelled.format(orig, gloss, trans, label=label)
else:
latex += gb4e_fmt.format(orig, gloss, trans)
latex += "\\end{exe}"
return pf.RawBlock(latex, format='latex')
leipzigjs_fmt = """
<div data-gloss>
<p>{}</p>
<p>{}</p>
<p>{}</p>
</div>
"""
def leipzigjs(lst):
"""
Convert an example list into a series of div's suitable for use with
Leipzig.js.
"""
html = ''
for li in lst.content:
lines = break_plain(li.content[0])
if len(lines) != 3: continue
orig, gloss, trans = map(partial(pf.stringify, newlines=False), lines)
html += leipzigjs_fmt.format(orig, gloss, trans)
return pf.RawBlock(html, format='html')
# available formats and backends
formats = {
'latex': {
'gb4e': gb4e
},
'html': {
'leipzigjs': leipzigjs
}
}
| import re
import panflute as pf
from functools import partial
from pangloss.util import smallcapify, break_plain
# regular expression for label formats
label_re = re.compile(r'\{#ex:(\w+)\}')
gb4e_fmt_labelled = """
\\ex\\label{{ex:{label}}}
\\gll {} \\\\
{} \\\\
\\trans {}
"""
gb4e_fmt = """
\\ex
\\gll {} \\\\
{} \\\\
\\trans {}
"""
def gb4e(lst):
"""
Convert an example list into a series of gb4e-formatted interlinear
glosses.
Because example list references are replaced at parsing by Pandoc, the
normal syntax of (@foo) cannot be used for labels; instead, a label syntax
similar to that used for headers (and tables and figures with
pandoc-crossref) is used, namely a {#ex:foo} inserted after the
translation, which will be stripped and replaced with a LaTeX label on the
relevant example.
"""
latex = "\\begin{exe}\n"
for li in lst.content:
lines = break_plain(li.content[0])
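# each example item is expected to break into exactly three lines (original, gloss, free translation); anything else is skipped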
if len(lines) != 3: continue
orig, gloss, trans = map(partial(pf.stringify, newlines=False), lines)
gloss = smallcapify(gloss)
label_match = label_re.search(trans)
if label_match:
label = label_match.group(1)
trans = trans[:label_match.start() - 1]
latex += gb4e_fmt_labelled.format(orig, gloss, trans, label=label)
else:
latex += gb4e_fmt.format(orig, gloss, trans)
latex += "\\end{exe}"
return pf.RawBlock(latex, format='latex')
leipzigjs_fmt = """
<div data-gloss>
<p>{}</p>
<p>{}</p>
<p>{}</p>
</div>
"""
def leipzigjs(lst):
"""
Convert an example list into a series of div's suitable for use with
Leipzig.js.
"""
html = ''
for li in lst.content:
lines = break_plain(li.content[0])
if len(lines) != 3: continue
orig, gloss, trans = map(partial(pf.stringify, newlines=False), lines)
html += leipzigjs_fmt.format(orig, gloss, trans)
return pf.RawBlock(html, format='html')
# available formats and backends
formats = {
'latex': {
'gb4e': gb4e
},
'html': {
'leipzigjs': leipzigjs
}
} | en | 0.687521 | # regular expression for label formats #ex:(\w+)\}') \\ex\\label{{ex:{label}}} \\gll {} \\\\ {} \\\\ \\trans {} \\ex \\gll {} \\\\ {} \\\\ \\trans {} Convert an example list into a series of gb4e-formatted interlinear glosses. Because example list references are replaced at parsing by Pandoc, the normal syntax of (@foo) cannot be used for labels; instead, a label syntax similar to that used for headers (and tables and figures with pandoc-crossref) is used, namely a {#ex:foo} inserted after the translation, which will be stripped and replaced with a LaTeX label on the relevant example. <div data-gloss> <p>{}</p> <p>{}</p> <p>{}</p> </div> Convert an example list into a series of div's suitable for use with Leipzig.js. # available formats and backends | 2.61622 | 3 |
tests/unit/discovery/test_py_spec.py | xavfernandez/virtualenv | 1 | 7625 | from __future__ import absolute_import, unicode_literals
import itertools
import os
import sys
from copy import copy
import pytest
from virtualenv.discovery.py_spec import PythonSpec
def test_bad_py_spec():
text = "python2.3.4.5"
spec = PythonSpec.from_string_spec(text)
assert text in repr(spec)
assert spec.str_spec == text
assert spec.path == os.path.abspath(text)
content = vars(spec)
del content[str("str_spec")]
del content[str("path")]
assert all(v is None for v in content.values())
def test_py_spec_first_digit_only_major():
spec = PythonSpec.from_string_spec("278")
assert spec.major == 2
assert spec.minor == 78
def test_spec_satisfies_path_ok():
spec = PythonSpec.from_string_spec(sys.executable)
assert spec.satisfies(spec) is True
def test_spec_satisfies_path_nok(tmp_path):
spec = PythonSpec.from_string_spec(sys.executable)
of = PythonSpec.from_string_spec(str(tmp_path))
assert spec.satisfies(of) is False
def test_spec_satisfies_arch():
spec_1 = PythonSpec.from_string_spec("python-32")
spec_2 = PythonSpec.from_string_spec("python-64")
assert spec_1.satisfies(spec_1) is True
assert spec_2.satisfies(spec_1) is False
@pytest.mark.parametrize(
"req, spec",
list(itertools.combinations(["py", "CPython", "python"], 2)) + [("jython", "jython")] + [("CPython", "cpython")],
)
def test_spec_satisfies_implementation_ok(req, spec):
spec_1 = PythonSpec.from_string_spec(req)
spec_2 = PythonSpec.from_string_spec(spec)
assert spec_1.satisfies(spec_1) is True
assert spec_2.satisfies(spec_1) is True
def test_spec_satisfies_implementation_nok():
spec_1 = PythonSpec.from_string_spec("python")
spec_2 = PythonSpec.from_string_spec("jython")
assert spec_2.satisfies(spec_1) is False
assert spec_1.satisfies(spec_2) is False
def _version_satisfies_pairs():
target = set()
version = tuple(str(i) for i in sys.version_info[0:3])
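# pair every prefix of the running interpreter's version (e.g. '', '3', '3.8', '3.8.1') with every shorter or equal prefix, in both directions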
for i in range(len(version) + 1):
req = ".".join(version[0:i])
for j in range(i + 1):
sat = ".".join(version[0:j])
# can be satisfied in both directions
target.add((req, sat))
target.add((sat, req))
return sorted(target)
@pytest.mark.parametrize("req, spec", _version_satisfies_pairs())
def test_version_satisfies_ok(req, spec):
req_spec = PythonSpec.from_string_spec("python{}".format(req))
sat_spec = PythonSpec.from_string_spec("python{}".format(spec))
assert sat_spec.satisfies(req_spec) is True
def _version_not_satisfies_pairs():
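# build (requirement, candidate) version pairs where one component of the candidate is perturbed by +/-1, so the candidate can never satisfy the requirement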
target = set()
version = tuple(str(i) for i in sys.version_info[0:3])
for i in range(len(version)):
req = ".".join(version[0 : i + 1])
for j in range(i + 1):
sat_ver = list(sys.version_info[0 : j + 1])
for l in range(j + 1):
for o in [1, -1]:
temp = copy(sat_ver)
temp[l] += o
sat = ".".join(str(i) for i in temp)
target.add((req, sat))
return sorted(target)
@pytest.mark.parametrize("req, spec", _version_not_satisfies_pairs())
def test_version_satisfies_nok(req, spec):
req_spec = PythonSpec.from_string_spec("python{}".format(req))
sat_spec = PythonSpec.from_string_spec("python{}".format(spec))
assert sat_spec.satisfies(req_spec) is False
def test_relative_spec(tmp_path, monkeypatch):
monkeypatch.chdir(tmp_path)
a_relative_path = str((tmp_path / "a" / "b").relative_to(tmp_path))
spec = PythonSpec.from_string_spec(a_relative_path)
assert spec.path == os.path.abspath(str(tmp_path / a_relative_path))
| from __future__ import absolute_import, unicode_literals
import itertools
import os
import sys
from copy import copy
import pytest
from virtualenv.discovery.py_spec import PythonSpec
def test_bad_py_spec():
text = "python2.3.4.5"
spec = PythonSpec.from_string_spec(text)
assert text in repr(spec)
assert spec.str_spec == text
assert spec.path == os.path.abspath(text)
content = vars(spec)
del content[str("str_spec")]
del content[str("path")]
assert all(v is None for v in content.values())
def test_py_spec_first_digit_only_major():
spec = PythonSpec.from_string_spec("278")
assert spec.major == 2
assert spec.minor == 78
def test_spec_satisfies_path_ok():
spec = PythonSpec.from_string_spec(sys.executable)
assert spec.satisfies(spec) is True
def test_spec_satisfies_path_nok(tmp_path):
spec = PythonSpec.from_string_spec(sys.executable)
of = PythonSpec.from_string_spec(str(tmp_path))
assert spec.satisfies(of) is False
def test_spec_satisfies_arch():
spec_1 = PythonSpec.from_string_spec("python-32")
spec_2 = PythonSpec.from_string_spec("python-64")
assert spec_1.satisfies(spec_1) is True
assert spec_2.satisfies(spec_1) is False
@pytest.mark.parametrize(
"req, spec",
list(itertools.combinations(["py", "CPython", "python"], 2)) + [("jython", "jython")] + [("CPython", "cpython")],
)
def test_spec_satisfies_implementation_ok(req, spec):
spec_1 = PythonSpec.from_string_spec(req)
spec_2 = PythonSpec.from_string_spec(spec)
assert spec_1.satisfies(spec_1) is True
assert spec_2.satisfies(spec_1) is True
def test_spec_satisfies_implementation_nok():
spec_1 = PythonSpec.from_string_spec("python")
spec_2 = PythonSpec.from_string_spec("jython")
assert spec_2.satisfies(spec_1) is False
assert spec_1.satisfies(spec_2) is False
def _version_satisfies_pairs():
target = set()
version = tuple(str(i) for i in sys.version_info[0:3])
for i in range(len(version) + 1):
req = ".".join(version[0:i])
for j in range(i + 1):
sat = ".".join(version[0:j])
# can be satisfied in both directions
target.add((req, sat))
target.add((sat, req))
return sorted(target)
@pytest.mark.parametrize("req, spec", _version_satisfies_pairs())
def test_version_satisfies_ok(req, spec):
req_spec = PythonSpec.from_string_spec("python{}".format(req))
sat_spec = PythonSpec.from_string_spec("python{}".format(spec))
assert sat_spec.satisfies(req_spec) is True
def _version_not_satisfies_pairs():
target = set()
version = tuple(str(i) for i in sys.version_info[0:3])
for i in range(len(version)):
req = ".".join(version[0 : i + 1])
for j in range(i + 1):
sat_ver = list(sys.version_info[0 : j + 1])
for l in range(j + 1):
for o in [1, -1]:
temp = copy(sat_ver)
temp[l] += o
sat = ".".join(str(i) for i in temp)
target.add((req, sat))
return sorted(target)
@pytest.mark.parametrize("req, spec", _version_not_satisfies_pairs())
def test_version_satisfies_nok(req, spec):
req_spec = PythonSpec.from_string_spec("python{}".format(req))
sat_spec = PythonSpec.from_string_spec("python{}".format(spec))
assert sat_spec.satisfies(req_spec) is False
def test_relative_spec(tmp_path, monkeypatch):
monkeypatch.chdir(tmp_path)
a_relative_path = str((tmp_path / "a" / "b").relative_to(tmp_path))
spec = PythonSpec.from_string_spec(a_relative_path)
assert spec.path == os.path.abspath(str(tmp_path / a_relative_path))
| en | 0.952971 | # can be satisfied in both directions | 2.174502 | 2 |
plugins/module_utils/definitions/trigger_image_activation.py | robertcsapo/dnacenter-ansible | 0 | 7626 | from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
module_definition = json.loads(
"""{
"family": "software_image_management_swim",
"name": "trigger_image_activation",
"operations": {
"post": [
"trigger_software_image_activation"
]
},
"parameters": {
"trigger_software_image_activation": [
{
"name": "schedule_validate",
"required": false,
"type": "boolean"
},
{
"array_type": "object",
"name": "payload",
"required": true,
"schema": [
{
"name": "activateLowerImageVersion",
"required": false,
"type": "boolean"
},
{
"name": "deviceUpgradeMode",
"required": false,
"type": "string"
},
{
"name": "deviceUuid",
"required": false,
"type": "string"
},
{
"name": "distributeIfNeeded",
"required": false,
"type": "boolean"
},
{
"array_type": "string",
"name": "imageUuidList",
"required": false,
"schema": [],
"type": "array"
},
{
"array_type": "string",
"name": "smuImageUuidList",
"required": false,
"schema": [],
"type": "array"
}
],
"type": "array"
}
]
},
"responses": {
"trigger_software_image_activation": {
"properties": [
"response",
"version"
],
"type": "object"
}
}
}"""
)
| from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
module_definition = json.loads(
"""{
"family": "software_image_management_swim",
"name": "trigger_image_activation",
"operations": {
"post": [
"trigger_software_image_activation"
]
},
"parameters": {
"trigger_software_image_activation": [
{
"name": "schedule_validate",
"required": false,
"type": "boolean"
},
{
"array_type": "object",
"name": "payload",
"required": true,
"schema": [
{
"name": "activateLowerImageVersion",
"required": false,
"type": "boolean"
},
{
"name": "deviceUpgradeMode",
"required": false,
"type": "string"
},
{
"name": "deviceUuid",
"required": false,
"type": "string"
},
{
"name": "distributeIfNeeded",
"required": false,
"type": "boolean"
},
{
"array_type": "string",
"name": "imageUuidList",
"required": false,
"schema": [],
"type": "array"
},
{
"array_type": "string",
"name": "smuImageUuidList",
"required": false,
"schema": [],
"type": "array"
}
],
"type": "array"
}
]
},
"responses": {
"trigger_software_image_activation": {
"properties": [
"response",
"version"
],
"type": "object"
}
}
}"""
)
| en | 0.248682 | { "family": "software_image_management_swim", "name": "trigger_image_activation", "operations": { "post": [ "trigger_software_image_activation" ] }, "parameters": { "trigger_software_image_activation": [ { "name": "schedule_validate", "required": false, "type": "boolean" }, { "array_type": "object", "name": "payload", "required": true, "schema": [ { "name": "activateLowerImageVersion", "required": false, "type": "boolean" }, { "name": "deviceUpgradeMode", "required": false, "type": "string" }, { "name": "deviceUuid", "required": false, "type": "string" }, { "name": "distributeIfNeeded", "required": false, "type": "boolean" }, { "array_type": "string", "name": "imageUuidList", "required": false, "schema": [], "type": "array" }, { "array_type": "string", "name": "smuImageUuidList", "required": false, "schema": [], "type": "array" } ], "type": "array" } ] }, "responses": { "trigger_software_image_activation": { "properties": [ "response", "version" ], "type": "object" } } } | 2.088445 | 2 |
minecraft_launcher_lib/fabric.py | bopchik/Simple-minecraft-mod-launcher | 1 | 7627 | <filename>minecraft_launcher_lib/fabric.py
from .helper import download_file, get_user_agent
from .install import install_minecraft_version
from typing import List, Dict, Union
from xml.dom import minidom
import subprocess
import requests
import tempfile
import random
import os
def get_all_minecraft_versions() -> List[Dict[str,Union[str,bool]]]:
"""
Returns all available Minecraft Versions for fabric
"""
    FABRIC_MINECRAFT_VERSIONS_URL = "https://meta.fabricmc.net/v2/versions/game"
    return requests.get(FABRIC_MINECRAFT_VERSIONS_URL, headers={"user-agent": get_user_agent()}).json()
def get_stable_minecraft_versions() -> List[str]:
"""
    Returns a list which only contains the stable Minecraft versions that support fabric
"""
minecraft_versions = get_all_minecraft_versions()
stable_versions = []
for i in minecraft_versions:
if i["stable"] == True:
stable_versions.append(i["version"])
return stable_versions
def get_latest_minecraft_version() -> str:
"""
    Returns the latest unstable Minecraft version that supports fabric. This could be a snapshot.
"""
minecraft_versions = get_all_minecraft_versions()
return minecraft_versions[0]["version"]
def get_latest_stable_minecraft_version() -> str:
"""
Returns the latest stable Minecraft version that supports fabric
"""
stable_versions = get_stable_minecraft_versions()
return stable_versions[0]
def is_minecraft_version_supported(version: str) -> bool:
"""
    Checks if a Minecraft version is supported by fabric
"""
minecraft_versions = get_all_minecraft_versions()
for i in minecraft_versions:
if i["version"] == version:
return True
return False
def get_all_loader_versions() -> List[Dict[str,Union[str,bool,int]]]:
"""
Returns all loader versions
"""
FABRIC_LOADER_VERSIONS_URL = "https://meta.fabricmc.net/v2/versions/loader"
return requests.get(FABRIC_LOADER_VERSIONS_URL,headers={"user-agent": get_user_agent()}).json()
def get_latest_loader_version() -> str:
"""
Get the latest loader version
"""
loader_versions = get_all_loader_versions()
return loader_versions[0]["version"]
def get_latest_installer_version() -> str:
"""
Returns the latest installer version
"""
FABRIC_INSTALLER_MAVEN_URL = "https://maven.fabricmc.net/net/fabricmc/fabric-installer/maven-metadata.xml"
r = requests.get(FABRIC_INSTALLER_MAVEN_URL,headers={"user-agent": get_user_agent()})
xml_data = minidom.parseString(r.text)
release = xml_data.getElementsByTagName("release")
return release.item(0).lastChild.data
def install_fabric(path: str, minecraft_version: str,loader_version: str=None):
"""
Install a fabric version
"""
#Get latest loader version if not given
if not loader_version:
loader_version = get_latest_loader_version()
#Make sure the Minecraft version is installed
install_minecraft_version(path,minecraft_version)
#Get installer version
installer_version = get_latest_installer_version()
installer_download_url = f"https://maven.fabricmc.net/net/fabricmc/fabric-installer/{installer_version}/fabric-installer-{installer_version}.jar"
#Generate a temporary path for downloading the installer
installer_path = os.path.join(tempfile.gettempdir(),f"fabric-installer-{random.randrange(100,10000)}.tmp")
#Download the installer
download_file(installer_download_url,installer_path)
#Run the installer see https://fabricmc.net/wiki/install#cli_installation
subprocess.run(["java","-jar",installer_path,"client","-dir",path,"-mcversion",minecraft_version,"-loader",loader_version,"-noprofile","-snapshot"])
    #Delete the installer, we don't need it anymore
os.remove(installer_path)
#Install all libs of fabric
fabric_minecraft_version = f"fabric-loader-{loader_version}-{minecraft_version}"
install_minecraft_version(path,fabric_minecraft_version)
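# Illustrative usage sketch (not part of the original module; the install path is an assumption):
# if __name__ == "__main__":
#     minecraft_dir = os.path.expanduser("~/.minecraft")
#     install_fabric(minecraft_dir, get_latest_stable_minecraft_version())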
| <filename>minecraft_launcher_lib/fabric.py
from .helper import download_file, get_user_agent
from .install import install_minecraft_version
from typing import List, Dict, Union
from xml.dom import minidom
import subprocess
import requests
import tempfile
import random
import os
def get_all_minecraft_versions() -> List[Dict[str,Union[str,bool]]]:
"""
Returns all available Minecraft Versions for fabric
"""
    FABRIC_MINECRAFT_VERSIONS_URL = "https://meta.fabricmc.net/v2/versions/game"
    return requests.get(FABRIC_MINECRAFT_VERSIONS_URL, headers={"user-agent": get_user_agent()}).json()
def get_stable_minecraft_versions() -> List[str]:
"""
    Returns a list which only contains the stable Minecraft versions that support fabric
"""
minecraft_versions = get_all_minecraft_versions()
stable_versions = []
for i in minecraft_versions:
if i["stable"] == True:
stable_versions.append(i["version"])
return stable_versions
def get_latest_minecraft_version() -> str:
"""
    Returns the latest unstable Minecraft version that supports fabric. This could be a snapshot.
"""
minecraft_versions = get_all_minecraft_versions()
return minecraft_versions[0]["version"]
def get_latest_stable_minecraft_version() -> str:
"""
Returns the latest stable Minecraft version that supports fabric
"""
stable_versions = get_stable_minecraft_versions()
return stable_versions[0]
def is_minecraft_version_supported(version: str) -> bool:
"""
    Checks if a Minecraft version is supported by fabric
"""
minecraft_versions = get_all_minecraft_versions()
for i in minecraft_versions:
if i["version"] == version:
return True
return False
def get_all_loader_versions() -> List[Dict[str,Union[str,bool,int]]]:
"""
Returns all loader versions
"""
FABRIC_LOADER_VERSIONS_URL = "https://meta.fabricmc.net/v2/versions/loader"
return requests.get(FABRIC_LOADER_VERSIONS_URL,headers={"user-agent": get_user_agent()}).json()
def get_latest_loader_version() -> str:
"""
Get the latest loader version
"""
loader_versions = get_all_loader_versions()
return loader_versions[0]["version"]
def get_latest_installer_version() -> str:
"""
Returns the latest installer version
"""
FABRIC_INSTALLER_MAVEN_URL = "https://maven.fabricmc.net/net/fabricmc/fabric-installer/maven-metadata.xml"
r = requests.get(FABRIC_INSTALLER_MAVEN_URL,headers={"user-agent": get_user_agent()})
xml_data = minidom.parseString(r.text)
release = xml_data.getElementsByTagName("release")
return release.item(0).lastChild.data
def install_fabric(path: str, minecraft_version: str,loader_version: str=None):
"""
Install a fabric version
"""
#Get latest loader version if not given
if not loader_version:
loader_version = get_latest_loader_version()
#Make sure the Minecraft version is installed
install_minecraft_version(path,minecraft_version)
#Get installer version
installer_version = get_latest_installer_version()
installer_download_url = f"https://maven.fabricmc.net/net/fabricmc/fabric-installer/{installer_version}/fabric-installer-{installer_version}.jar"
#Generate a temporary path for downloading the installer
installer_path = os.path.join(tempfile.gettempdir(),f"fabric-installer-{random.randrange(100,10000)}.tmp")
#Download the installer
download_file(installer_download_url,installer_path)
#Run the installer see https://fabricmc.net/wiki/install#cli_installation
subprocess.run(["java","-jar",installer_path,"client","-dir",path,"-mcversion",minecraft_version,"-loader",loader_version,"-noprofile","-snapshot"])
    #Delete the installer, we don't need it anymore
os.remove(installer_path)
#Install all libs of fabric
fabric_minecraft_version = f"fabric-loader-{loader_version}-{minecraft_version}"
install_minecraft_version(path,fabric_minecraft_version)
| en | 0.770355 | Returns all available Minecraft Versions for fabric Returns a list which only contains the stable Minecraft versions that supports fabric Returns the latest unstable Minecraft versions that supports fabric. This could be a snapshot. Returns the latest stable Minecraft version that supports fabric Checks if a Minecraft version supported by fabric Returns all loader versions Get the latest loader version Returns the latest installer version Install a fabric version #Get latest loader version if not given #Make sure the Minecraft version is installed #Get installer version #Generate a temporary path for downloading the installer #Download the installer #Run the installer see https://fabricmc.net/wiki/install#cli_installation #Delete the installer we don't need them anymore #Install all libs of fabric | 2.698596 | 3 |
Strand Sort.py | Nishkarsh-Tripathi/Sorting-algorithms- | 5 | 7628 | # STRAND SORT
# It is a recursive comparison based sorting technique which sorts in increasing order.
# It works by repeatedly pulling sorted sub-lists out of the list to be sorted and merging them
# with a result array.
# Algorithm:
# Create an empty strand (list) and append the first element to it, popping it from the input array
# Compare this element with the rest of the elements of the input array
# If a greater element is found, pop and append it to the strand; otherwise skip it
# Now merge this strand into the final output array
# Repeat for the remaining items in the input array.
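# Illustrative trace (added for clarity, using the driver input below):
#   input           -> [1, 6, 3, 8, 2, 0, 9]
#   first strand    -> [1, 6, 8, 9]   (greater elements pulled left to right)
#   remaining input -> [3, 2, 0]
#   later passes pull [3], [2], [0] and merge each one into the sorted output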
# Utility Function to merge two arrays
def merge(arr1, arr2):
# list to store merged output
merged_list = []
# while there are elements in both arrays
while len(arr1) and len(arr2):
# the array having smaller first elements gets appended as the resultant array must be sorted
if arr1[0] < arr2[0]:
merged_list.append(arr1.pop(0))
else:
merged_list.append(arr2.pop(0))
    # if either array is exhausted, merge the remaining part of the other
    # into the merged list
merged_list += arr1
merged_list += arr2
# return the merged list
return merged_list
# Function to return the strand (sorted sub-list)
def strand(arr):
# append the first element to the strand
s = [arr.pop(0)]
# initialise a pointer
i = 0
    # while the pointer is less than the length of the remaining input
    while i < len(arr):
# compare the input array elements to the last element of the strand
if arr[i] > s[-1]:
# if we found a greater element than s[-1] then pop it and append to the strand
s.append(arr.pop(i))
else:
# else increment
i += 1
# return the strand
return s
# Strand Sort Function
def strand_sort(arr):
# initialise the output array with the strand
output = strand(arr)
# while there are elements in the array
while len(arr):
# merge the strand and previous output list to create a new list
output = merge(output, strand(arr))
# return the sorted output
return output
# Driver Code
arr = [1, 6, 3, 8, 2, 0, 9]
print(strand_sort(arr))
# Time Complexity : O(n^2) [Worst]
# O(n*log(n)) [Average]
# Space Complexity : O(n)
# Stable : Yes
# Inplace : No
| # STRAND SORT
# It is a recursive comparison based sorting technique which sorts in increasing order.
# It works by repeatedly pulling sorted sub-lists out of the list to be sorted and merging them
# with a result array.
# Algorithm:
# Create an empty strand (list) and append the first element to it, popping it from the input array
# Compare this element with the rest of the elements of the input array
# If a greater element is found, pop and append it to the strand; otherwise skip it
# Now merge this strand into the final output array
# Repeat for the remaining items in the input array.
# Utility Function to merge two arrays
def merge(arr1, arr2):
# list to store merged output
merged_list = []
# while there are elements in both arrays
while len(arr1) and len(arr2):
# the array having smaller first elements gets appended as the resultant array must be sorted
if arr1[0] < arr2[0]:
merged_list.append(arr1.pop(0))
else:
merged_list.append(arr2.pop(0))
    # if either array is exhausted, merge the remaining part of the other
    # into the merged list
merged_list += arr1
merged_list += arr2
# return the merged list
return merged_list
# Function to return the strand (sorted sub-list)
def strand(arr):
# append the first element to the strand
s = [arr.pop(0)]
# initialise a pointer
i = 0
    # while the pointer is less than the length of the remaining input
    while i < len(arr):
# compare the input array elements to the last element of the strand
if arr[i] > s[-1]:
# if we found a greater element than s[-1] then pop it and append to the strand
s.append(arr.pop(i))
else:
# else increment
i += 1
# return the strand
return s
# Strand Sort Function
def strand_sort(arr):
# initialise the output array with the strand
output = strand(arr)
# while there are elements in the array
while len(arr):
# merge the strand and previous output list to create a new list
output = merge(output, strand(arr))
# return the sorted output
return output
# Driver Code
arr = [1, 6, 3, 8, 2, 0, 9]
print(strand_sort(arr))
# Time Complexity : O(n^2) [Worst]
# O(n*log(n)) [Average]
# Space Complexity : O(n)
# Stable : Yes
# Inplace : No
| en | 0.792996 | # STRAND SORT # It is a recursive comparison based sorting technique which sorts in increasing order. # It works by repeatedly pulling sorted sub-lists out of the list to be sorted and merging them # with a result array. # Algorithm: # Create a empty strand (list) and append the first element to it popping it from the input array # Compare this element with the rest of elements of the input array # if a greater element is found then pop and append it to strand otherwise skip # Now merge this array to the final output array # Recur for remaining items in strand and input array. # Utility Function to merge two arrays # list to store merged output # while there are elements in both arrays # the array having smaller first elements gets appended as the resultant array must be sorted # if the length of either of array is exhausted , merge the remaining part to # the merge sublist # return the merged list # Function to return the strand (sorted sub-list) # append the first element to the strand # initialise a pointer # while it is less then length # compare the input array elements to the last element of the strand # if we found a greater element than s[-1] then pop it and append to the strand # else increment # return the strand # Strand Sort Function # initialise the output array with the strand # while there are elements in the array # merge the strand and previous output list to create a new list # return the sorted output # Driver Code # Time Complexity : O(n^2) [Worst] # O(n*log(n)) [Average] # Space Complexity : O(n) # Stable : Yes # Inplace : No | 4.44916 | 4 |
gamestonk_terminal/cryptocurrency/overview/pycoingecko_model.py | minhhoang1023/GamestonkTerminal | 0 | 7629 | """CoinGecko model"""
__docformat__ = "numpy"
# pylint: disable=C0301, E1101
import logging
import re
from typing import Any, List
import numpy as np
import pandas as pd
from pycoingecko import CoinGeckoAPI
from gamestonk_terminal.cryptocurrency.dataframe_helpers import (
create_df_index,
long_number_format_with_type_check,
replace_underscores_in_column_names,
)
from gamestonk_terminal.cryptocurrency.discovery.pycoingecko_model import get_coins
from gamestonk_terminal.decorators import log_start_end
logger = logging.getLogger(__name__)
HOLD_COINS = ["ethereum", "bitcoin"]
NEWS_FILTERS = ["Index", "Title", "Author", "Posted"]
CATEGORIES_FILTERS = [
"Rank",
"Name",
"Change_1h",
"Change_24h",
"Change_7d",
"Market_Cap",
"Volume_24h",
"Coins",
]
STABLES_FILTERS = [
"Rank",
"Name",
"Symbol",
"Price",
"Change_24h",
"Exchanges",
"Market_Cap",
"Change_30d",
]
PRODUCTS_FILTERS = [
"Rank",
"Platform",
"Identifier",
"Supply_Rate",
"Borrow_Rate",
]
PLATFORMS_FILTERS = ["Rank", "Name", "Category", "Centralized"]
EXCHANGES_FILTERS = [
"Rank",
"Trust_Score",
"Id",
"Name",
"Country",
"Year Established",
"Trade_Volume_24h_BTC",
]
EXRATES_FILTERS = ["Index", "Name", "Unit", "Value", "Type"]
INDEXES_FILTERS = ["Rank", "Name", "Id", "Market", "Last", "MultiAsset"]
DERIVATIVES_FILTERS = [
"Rank",
"Market",
"Symbol",
"Price",
"Pct_Change_24h",
"Contract_Type",
"Basis",
"Spread",
"Funding_Rate",
"Volume_24h",
]
COINS_COLUMNS = [
"symbol",
"name",
"current_price",
"market_cap",
"market_cap_rank",
"price_change_percentage_7d_in_currency",
"price_change_percentage_24h_in_currency",
"total_volume",
]
@log_start_end(log=logger)
def get_holdings_overview(endpoint: str = "bitcoin") -> List[Any]:
"""Returns public companies that holds ethereum or bitcoin [Source: CoinGecko]
Parameters
----------
endpoint : str
"bitcoin" or "ethereum"
Returns
-------
List:
- str: Overall statistics
- pandas.DataFrame: Companies holding crypto
"""
cg = CoinGeckoAPI()
data = cg.get_companies_public_treasury_by_coin_id(coin_id=endpoint)
stats_str = f"""{len(data["companies"])} companies hold a total of {long_number_format_with_type_check(data["total_holdings"])} {endpoint} ({data["market_cap_dominance"]}% of market cap dominance) with the current value of {long_number_format_with_type_check(int(data["total_value_usd"]))} USD dollars""" # noqa
df = pd.json_normalize(data, record_path=["companies"])
df.columns = list(
map(
lambda x: replace_underscores_in_column_names(x)
if isinstance(x, str)
else x,
df.columns,
)
)
return [stats_str, df]
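# Illustrative usage (added for clarity; not part of the original module):
# stats_text, companies_df = get_holdings_overview("bitcoin")
# print(stats_text)
# print(companies_df.head())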
SORT_VALUES = [
"market_cap_desc",
"market_cap_asc",
"name_desc",
"name_asc",
"market_cap_change_24h_desc",
"market_cap_change_24h_asc",
]
@log_start_end(log=logger)
def coin_formatter(n):
# TODO: can be improved
coins = []
re_str = "small/(.*)(.jpg|.png|.JPG|.PNG)"
for coin in n:
if re.search(re_str, coin):
coin_stripped = re.search(re_str, coin).group(1)
coins.append(coin_stripped)
return ",".join(coins)
@log_start_end(log=logger)
def get_top_crypto_categories(sort_filter: str = SORT_VALUES[0]) -> pd.DataFrame:
"""Returns top crypto categories [Source: CoinGecko]
Returns
-------
pandas.DataFrame
Rank, Name, Change_1h, Change_7d, Market_Cap, Volume_24h,Coins, Url
"""
if sort_filter in SORT_VALUES:
client = CoinGeckoAPI()
data = client.get_coins_categories()
df = pd.DataFrame(data)
del df["id"]
del df["content"]
del df["updated_at"]
df["top_3_coins"] = df["top_3_coins"].apply(coin_formatter)
df.columns = [
replace_underscores_in_column_names(col) if isinstance(col, str) else col
for col in df.columns
]
return df
return pd.DataFrame()
# TODO: add string with overview
@log_start_end(log=logger)
def get_stable_coins(top: int = 20) -> pd.DataFrame:
"""Returns top stable coins [Source: CoinGecko]
Returns
-------
pandas.DataFrame
Rank, Name, Symbol, Price, Change_24h, Exchanges, Market_Cap, Change_30d, Url
"""
df = get_coins(top=top, category="stablecoins")
return df[COINS_COLUMNS]
@log_start_end(log=logger)
def get_exchanges() -> pd.DataFrame:
"""Get list of top exchanges from CoinGecko API [Source: CoinGecko]
Returns
-------
pandas.DataFrame
Trust_Score, Id, Name, Country, Year_Established, Trade_Volume_24h_BTC, Url
"""
client = CoinGeckoAPI()
df = pd.DataFrame(client.get_exchanges_list(per_page=250))
df.replace({float(np.NaN): None}, inplace=True)
df = df[
[
"trust_score",
"id",
"name",
"country",
"year_established",
"trade_volume_24h_btc",
"url",
]
]
df.columns = [
"Trust_Score",
"Id",
"Name",
"Country",
"Year_Established",
"Trade_Volume_24h_BTC",
"Url",
]
create_df_index(df, "Rank")
return df
@log_start_end(log=logger)
def get_financial_platforms() -> pd.DataFrame:
"""Get list of financial platforms from CoinGecko API [Source: CoinGecko]
Returns
-------
pandas.DataFrame
Rank, Name, Category, Centralized, Url
"""
client = CoinGeckoAPI()
df = pd.DataFrame(client.get_finance_platforms())
df.drop("facts", axis=1, inplace=True)
create_df_index(df, "rank")
df.columns = ["Rank", "Name", "Category", "Centralized", "Url"]
return df
@log_start_end(log=logger)
def get_finance_products() -> pd.DataFrame:
"""Get list of financial products from CoinGecko API
Returns
-------
pandas.DataFrame
Rank, Platform, Identifier, Supply_Rate, Borrow_Rate
"""
client = CoinGeckoAPI()
df = pd.DataFrame(
client.get_finance_products(per_page=250),
columns=[
"platform",
"identifier",
"supply_rate_percentage",
"borrow_rate_percentage",
],
)
df.columns = ["Platform", "Identifier", "Supply_Rate", "Borrow_Rate"]
create_df_index(df, "Rank")
return df
@log_start_end(log=logger)
def get_indexes() -> pd.DataFrame:
"""Get list of crypto indexes from CoinGecko API [Source: CoinGecko]
Returns
-------
pandas.DataFrame
Name, Id, Market, Last, MultiAsset
"""
client = CoinGeckoAPI()
df = pd.DataFrame(client.get_indexes(per_page=250))
df.columns = ["Name", "Id", "Market", "Last", "MultiAsset"]
create_df_index(df, "Rank")
return df
@log_start_end(log=logger)
def get_derivatives() -> pd.DataFrame:
"""Get list of crypto derivatives from CoinGecko API [Source: CoinGecko]
Returns
-------
pandas.DataFrame
Rank, Market, Symbol, Price, Pct_Change_24h, Contract_Type, Basis, Spread, Funding_Rate, Volume_24h,
"""
client = CoinGeckoAPI()
df = pd.DataFrame(client.get_derivatives(include_tickers="unexpired"))
df.drop(
["index", "last_traded_at", "expired_at", "index_id", "open_interest"],
axis=1,
inplace=True,
)
df.rename(columns={"price_percentage_change_24h": "pct_change_24h"}, inplace=True)
create_df_index(df, "rank")
df["price"] = df["price"].apply(
lambda x: "" if not x else float(x.strip("$").replace(",", ""))
)
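    # e.g. a quoted price string like '$1,234.56' becomes the float 1234.56 (illustrative)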
df.columns = [
"Rank",
"Market",
"Symbol",
"Price",
"Pct_Change_24h",
"Contract_Type",
"Basis",
"Spread",
"Funding_Rate",
"Volume_24h",
]
return df
@log_start_end(log=logger)
def get_exchange_rates() -> pd.DataFrame:
"""Get list of crypto, fiats, commodity exchange rates from CoinGecko API [Source: CoinGecko]
Returns
-------
pandas.DataFrame
Index, Name, Unit, Value, Type
"""
client = CoinGeckoAPI()
df = pd.DataFrame(client.get_exchange_rates()["rates"]).T.reset_index()
df.drop("index", axis=1, inplace=True)
create_df_index(df, "index")
df.columns = ["Index", "Name", "Unit", "Value", "Type"]
return df
@log_start_end(log=logger)
def get_global_info() -> pd.DataFrame:
"""Get global statistics about crypto from CoinGecko API like:
- market cap change
- number of markets
- icos
- number of active crypto
[Source: CoinGecko]
Returns
-------
pandas.DataFrame
Metric, Value
"""
client = CoinGeckoAPI()
results = client.get_global()
total_mcap = results.pop("market_cap_percentage")
btc, eth = total_mcap.get("btc"), total_mcap.get("eth")
for key in ["total_market_cap", "total_volume", "updated_at"]:
del results[key]
results["btc_market_cap_in_pct"] = btc
results["eth_market_cap_in_pct"] = eth
results["altcoin_market_cap_in_pct"] = 100 - (float(eth) + float(btc))
df = pd.Series(results).reset_index()
df.columns = ["Metric", "Value"]
df["Metric"] = df["Metric"].apply(
lambda x: replace_underscores_in_column_names(x) if isinstance(x, str) else x
)
return df
@log_start_end(log=logger)
def get_global_markets_info() -> pd.DataFrame:
"""Get global statistics about crypto markets from CoinGecko API like:
Market_Cap, Volume, Market_Cap_Percentage
[Source: CoinGecko]
Returns
-------
pandas.DataFrame
Market_Cap, Volume, Market_Cap_Percentage
"""
columns = [
"Market_Cap",
"Volume",
"Market_Cap_Percentage",
]
data = []
client = CoinGeckoAPI()
results = client.get_global()
for key in columns:
data.append(results.get(key))
df = pd.DataFrame(data).T
df.columns = columns
df.replace({float("nan"): None}, inplace=True)
return df.reset_index()
@log_start_end(log=logger)
def get_global_defi_info() -> pd.DataFrame:
"""Get global statistics about Decentralized Finances [Source: CoinGecko]
Returns
-------
pandas.DataFrame
Metric, Value
"""
client = CoinGeckoAPI()
results = client.get_global_decentralized_finance_defi()
for key, value in results.items():
try:
results[key] = round(float(value), 4)
except (ValueError, TypeError):
pass
df = pd.Series(results).reset_index()
df.columns = ["Metric", "Value"]
df["Metric"] = df["Metric"].apply(
lambda x: replace_underscores_in_column_names(x) if isinstance(x, str) else x
)
return df
| """CoinGecko model"""
__docformat__ = "numpy"
# pylint: disable=C0301, E1101
import logging
import re
from typing import Any, List
import numpy as np
import pandas as pd
from pycoingecko import CoinGeckoAPI
from gamestonk_terminal.cryptocurrency.dataframe_helpers import (
create_df_index,
long_number_format_with_type_check,
replace_underscores_in_column_names,
)
from gamestonk_terminal.cryptocurrency.discovery.pycoingecko_model import get_coins
from gamestonk_terminal.decorators import log_start_end
logger = logging.getLogger(__name__)
HOLD_COINS = ["ethereum", "bitcoin"]
NEWS_FILTERS = ["Index", "Title", "Author", "Posted"]
CATEGORIES_FILTERS = [
"Rank",
"Name",
"Change_1h",
"Change_24h",
"Change_7d",
"Market_Cap",
"Volume_24h",
"Coins",
]
STABLES_FILTERS = [
"Rank",
"Name",
"Symbol",
"Price",
"Change_24h",
"Exchanges",
"Market_Cap",
"Change_30d",
]
PRODUCTS_FILTERS = [
"Rank",
"Platform",
"Identifier",
"Supply_Rate",
"Borrow_Rate",
]
PLATFORMS_FILTERS = ["Rank", "Name", "Category", "Centralized"]
EXCHANGES_FILTERS = [
"Rank",
"Trust_Score",
"Id",
"Name",
"Country",
"Year Established",
"Trade_Volume_24h_BTC",
]
EXRATES_FILTERS = ["Index", "Name", "Unit", "Value", "Type"]
INDEXES_FILTERS = ["Rank", "Name", "Id", "Market", "Last", "MultiAsset"]
DERIVATIVES_FILTERS = [
"Rank",
"Market",
"Symbol",
"Price",
"Pct_Change_24h",
"Contract_Type",
"Basis",
"Spread",
"Funding_Rate",
"Volume_24h",
]
COINS_COLUMNS = [
"symbol",
"name",
"current_price",
"market_cap",
"market_cap_rank",
"price_change_percentage_7d_in_currency",
"price_change_percentage_24h_in_currency",
"total_volume",
]
@log_start_end(log=logger)
def get_holdings_overview(endpoint: str = "bitcoin") -> List[Any]:
"""Returns public companies that holds ethereum or bitcoin [Source: CoinGecko]
Parameters
----------
endpoint : str
"bitcoin" or "ethereum"
Returns
-------
List:
- str: Overall statistics
- pandas.DataFrame: Companies holding crypto
"""
cg = CoinGeckoAPI()
data = cg.get_companies_public_treasury_by_coin_id(coin_id=endpoint)
stats_str = f"""{len(data["companies"])} companies hold a total of {long_number_format_with_type_check(data["total_holdings"])} {endpoint} ({data["market_cap_dominance"]}% of market cap dominance) with the current value of {long_number_format_with_type_check(int(data["total_value_usd"]))} USD dollars""" # noqa
df = pd.json_normalize(data, record_path=["companies"])
df.columns = list(
map(
lambda x: replace_underscores_in_column_names(x)
if isinstance(x, str)
else x,
df.columns,
)
)
return [stats_str, df]
SORT_VALUES = [
"market_cap_desc",
"market_cap_asc",
"name_desc",
"name_asc",
"market_cap_change_24h_desc",
"market_cap_change_24h_asc",
]
@log_start_end(log=logger)
def coin_formatter(n):
# TODO: can be improved
coins = []
re_str = "small/(.*)(.jpg|.png|.JPG|.PNG)"
for coin in n:
if re.search(re_str, coin):
coin_stripped = re.search(re_str, coin).group(1)
coins.append(coin_stripped)
return ",".join(coins)
@log_start_end(log=logger)
def get_top_crypto_categories(sort_filter: str = SORT_VALUES[0]) -> pd.DataFrame:
"""Returns top crypto categories [Source: CoinGecko]
Returns
-------
pandas.DataFrame
Rank, Name, Change_1h, Change_7d, Market_Cap, Volume_24h,Coins, Url
"""
if sort_filter in SORT_VALUES:
client = CoinGeckoAPI()
data = client.get_coins_categories()
df = pd.DataFrame(data)
del df["id"]
del df["content"]
del df["updated_at"]
df["top_3_coins"] = df["top_3_coins"].apply(coin_formatter)
df.columns = [
replace_underscores_in_column_names(col) if isinstance(col, str) else col
for col in df.columns
]
return df
return pd.DataFrame()
# TODO: add string with overview
@log_start_end(log=logger)
def get_stable_coins(top: int = 20) -> pd.DataFrame:
"""Returns top stable coins [Source: CoinGecko]
Returns
-------
pandas.DataFrame
Rank, Name, Symbol, Price, Change_24h, Exchanges, Market_Cap, Change_30d, Url
"""
df = get_coins(top=top, category="stablecoins")
return df[COINS_COLUMNS]
@log_start_end(log=logger)
def get_exchanges() -> pd.DataFrame:
"""Get list of top exchanges from CoinGecko API [Source: CoinGecko]
Returns
-------
pandas.DataFrame
Trust_Score, Id, Name, Country, Year_Established, Trade_Volume_24h_BTC, Url
"""
client = CoinGeckoAPI()
df = pd.DataFrame(client.get_exchanges_list(per_page=250))
df.replace({float(np.NaN): None}, inplace=True)
df = df[
[
"trust_score",
"id",
"name",
"country",
"year_established",
"trade_volume_24h_btc",
"url",
]
]
df.columns = [
"Trust_Score",
"Id",
"Name",
"Country",
"Year_Established",
"Trade_Volume_24h_BTC",
"Url",
]
create_df_index(df, "Rank")
return df
@log_start_end(log=logger)
def get_financial_platforms() -> pd.DataFrame:
"""Get list of financial platforms from CoinGecko API [Source: CoinGecko]
Returns
-------
pandas.DataFrame
Rank, Name, Category, Centralized, Url
"""
client = CoinGeckoAPI()
df = pd.DataFrame(client.get_finance_platforms())
df.drop("facts", axis=1, inplace=True)
create_df_index(df, "rank")
df.columns = ["Rank", "Name", "Category", "Centralized", "Url"]
return df
@log_start_end(log=logger)
def get_finance_products() -> pd.DataFrame:
"""Get list of financial products from CoinGecko API
Returns
-------
pandas.DataFrame
Rank, Platform, Identifier, Supply_Rate, Borrow_Rate
"""
client = CoinGeckoAPI()
df = pd.DataFrame(
client.get_finance_products(per_page=250),
columns=[
"platform",
"identifier",
"supply_rate_percentage",
"borrow_rate_percentage",
],
)
df.columns = ["Platform", "Identifier", "Supply_Rate", "Borrow_Rate"]
create_df_index(df, "Rank")
return df
@log_start_end(log=logger)
def get_indexes() -> pd.DataFrame:
"""Get list of crypto indexes from CoinGecko API [Source: CoinGecko]
Returns
-------
pandas.DataFrame
Name, Id, Market, Last, MultiAsset
"""
client = CoinGeckoAPI()
df = pd.DataFrame(client.get_indexes(per_page=250))
df.columns = ["Name", "Id", "Market", "Last", "MultiAsset"]
create_df_index(df, "Rank")
return df
@log_start_end(log=logger)
def get_derivatives() -> pd.DataFrame:
"""Get list of crypto derivatives from CoinGecko API [Source: CoinGecko]
Returns
-------
pandas.DataFrame
Rank, Market, Symbol, Price, Pct_Change_24h, Contract_Type, Basis, Spread, Funding_Rate, Volume_24h,
"""
client = CoinGeckoAPI()
df = pd.DataFrame(client.get_derivatives(include_tickers="unexpired"))
df.drop(
["index", "last_traded_at", "expired_at", "index_id", "open_interest"],
axis=1,
inplace=True,
)
df.rename(columns={"price_percentage_change_24h": "pct_change_24h"}, inplace=True)
create_df_index(df, "rank")
df["price"] = df["price"].apply(
lambda x: "" if not x else float(x.strip("$").replace(",", ""))
)
df.columns = [
"Rank",
"Market",
"Symbol",
"Price",
"Pct_Change_24h",
"Contract_Type",
"Basis",
"Spread",
"Funding_Rate",
"Volume_24h",
]
return df
@log_start_end(log=logger)
def get_exchange_rates() -> pd.DataFrame:
"""Get list of crypto, fiats, commodity exchange rates from CoinGecko API [Source: CoinGecko]
Returns
-------
pandas.DataFrame
Index, Name, Unit, Value, Type
"""
client = CoinGeckoAPI()
df = pd.DataFrame(client.get_exchange_rates()["rates"]).T.reset_index()
df.drop("index", axis=1, inplace=True)
create_df_index(df, "index")
df.columns = ["Index", "Name", "Unit", "Value", "Type"]
return df
@log_start_end(log=logger)
def get_global_info() -> pd.DataFrame:
"""Get global statistics about crypto from CoinGecko API like:
- market cap change
- number of markets
- icos
- number of active crypto
[Source: CoinGecko]
Returns
-------
pandas.DataFrame
Metric, Value
"""
client = CoinGeckoAPI()
results = client.get_global()
total_mcap = results.pop("market_cap_percentage")
btc, eth = total_mcap.get("btc"), total_mcap.get("eth")
for key in ["total_market_cap", "total_volume", "updated_at"]:
del results[key]
results["btc_market_cap_in_pct"] = btc
results["eth_market_cap_in_pct"] = eth
results["altcoin_market_cap_in_pct"] = 100 - (float(eth) + float(btc))
df = pd.Series(results).reset_index()
df.columns = ["Metric", "Value"]
df["Metric"] = df["Metric"].apply(
lambda x: replace_underscores_in_column_names(x) if isinstance(x, str) else x
)
return df
@log_start_end(log=logger)
def get_global_markets_info() -> pd.DataFrame:
"""Get global statistics about crypto markets from CoinGecko API like:
Market_Cap, Volume, Market_Cap_Percentage
[Source: CoinGecko]
Returns
-------
pandas.DataFrame
Market_Cap, Volume, Market_Cap_Percentage
"""
columns = [
"Market_Cap",
"Volume",
"Market_Cap_Percentage",
]
data = []
client = CoinGeckoAPI()
results = client.get_global()
for key in columns:
data.append(results.get(key))
df = pd.DataFrame(data).T
df.columns = columns
df.replace({float("nan"): None}, inplace=True)
return df.reset_index()
@log_start_end(log=logger)
def get_global_defi_info() -> pd.DataFrame:
"""Get global statistics about Decentralized Finances [Source: CoinGecko]
Returns
-------
pandas.DataFrame
Metric, Value
"""
client = CoinGeckoAPI()
results = client.get_global_decentralized_finance_defi()
for key, value in results.items():
try:
results[key] = round(float(value), 4)
except (ValueError, TypeError):
pass
df = pd.Series(results).reset_index()
df.columns = ["Metric", "Value"]
df["Metric"] = df["Metric"].apply(
lambda x: replace_underscores_in_column_names(x) if isinstance(x, str) else x
)
return df
| en | 0.589894 | CoinGecko model # pylint: disable=C0301, E1101 Returns public companies that holds ethereum or bitcoin [Source: CoinGecko] Parameters ---------- endpoint : str "bitcoin" or "ethereum" Returns ------- List: - str: Overall statistics - pandas.DataFrame: Companies holding crypto {len(data["companies"])} companies hold a total of {long_number_format_with_type_check(data["total_holdings"])} {endpoint} ({data["market_cap_dominance"]}% of market cap dominance) with the current value of {long_number_format_with_type_check(int(data["total_value_usd"]))} USD dollars # noqa # TODO: can be improved Returns top crypto categories [Source: CoinGecko] Returns ------- pandas.DataFrame Rank, Name, Change_1h, Change_7d, Market_Cap, Volume_24h,Coins, Url # TODO: add string with overview Returns top stable coins [Source: CoinGecko] Returns ------- pandas.DataFrame Rank, Name, Symbol, Price, Change_24h, Exchanges, Market_Cap, Change_30d, Url Get list of top exchanges from CoinGecko API [Source: CoinGecko] Returns ------- pandas.DataFrame Trust_Score, Id, Name, Country, Year_Established, Trade_Volume_24h_BTC, Url Get list of financial platforms from CoinGecko API [Source: CoinGecko] Returns ------- pandas.DataFrame Rank, Name, Category, Centralized, Url Get list of financial products from CoinGecko API Returns ------- pandas.DataFrame Rank, Platform, Identifier, Supply_Rate, Borrow_Rate Get list of crypto indexes from CoinGecko API [Source: CoinGecko] Returns ------- pandas.DataFrame Name, Id, Market, Last, MultiAsset Get list of crypto derivatives from CoinGecko API [Source: CoinGecko] Returns ------- pandas.DataFrame Rank, Market, Symbol, Price, Pct_Change_24h, Contract_Type, Basis, Spread, Funding_Rate, Volume_24h, Get list of crypto, fiats, commodity exchange rates from CoinGecko API [Source: CoinGecko] Returns ------- pandas.DataFrame Index, Name, Unit, Value, Type Get global statistics about crypto from CoinGecko API like: - market cap change - number of markets - icos - number of active crypto [Source: CoinGecko] Returns ------- pandas.DataFrame Metric, Value Get global statistics about crypto markets from CoinGecko API like: Market_Cap, Volume, Market_Cap_Percentage [Source: CoinGecko] Returns ------- pandas.DataFrame Market_Cap, Volume, Market_Cap_Percentage Get global statistics about Decentralized Finances [Source: CoinGecko] Returns ------- pandas.DataFrame Metric, Value | 2.133292 | 2 |
docker/messein/board-import-app/app.py | sourceperl/tk-dashboard | 0 | 7630 | <filename>docker/messein/board-import-app/app.py
#!/usr/bin/env python3
from configparser import ConfigParser
from datetime import datetime
import urllib.parse
import hashlib
import io
import json
import logging
import os
import re
import time
from xml.dom import minidom
import feedparser
import requests
import schedule
import PIL.Image
import PIL.ImageDraw
import PIL.ImageFont
from metar.Metar import Metar
import pytz
import pdf2image
import PIL.Image
import PIL.ImageDraw
from board_lib import CustomRedis, catch_log_except, dt_utc_to_local
from webdav import WebDAV
# some const
USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1'
# some var
owc_doc_dir_last_sync = 0
owc_car_dir_last_sync = 0
# read config
cnf = ConfigParser()
cnf.read('/data/conf/board.conf')
# redis
main_redis_user = cnf.get('redis', 'user')
main_redis_pass = cnf.get('redis', 'pass')
# redis-loos for share
loos_redis_user = cnf.get('redis-loos', 'user')
loos_redis_pass = cnf.get('redis-loos', 'pass')
# gmap img traffic
gmap_img_url = cnf.get('gmap_img', 'img_url')
# gsheet
gsheet_url = cnf.get('gsheet', 'url')
# openweathermap
ow_app_id = cnf.get('openweathermap', 'app_id')
# webdav
webdav_url = cnf.get('owncloud_dashboard', 'webdav_url')
webdav_user = cnf.get('owncloud_dashboard', 'webdav_user')
webdav_pass = cnf.get('owncloud_dashboard', 'webdav_pass')
webdav_reglement_doc_dir = cnf.get('owncloud_dashboard', 'webdav_reglement_doc_dir')
webdav_carousel_img_dir = cnf.get('owncloud_dashboard', 'webdav_carousel_img_dir')
# some class
class DB:
# create connector
    main = CustomRedis(host='board-redis-srv', username=main_redis_user, password=main_redis_pass,
                       socket_timeout=4, socket_keepalive=True)
    loos = CustomRedis(host='board-redis-loos-tls-cli', username=loos_redis_user, password=loos_redis_pass,
                       socket_timeout=4, socket_keepalive=True)
# some function
@catch_log_except()
def air_quality_atmo_ge_job():
url = 'https://services3.arcgis.com/' + \
'Is0UwT37raQYl9Jj/arcgis/rest/services/ind_grandest_5j/FeatureServer/0/query' + \
'?where=%s' % urllib.parse.quote('code_zone IN (54395, 57463, 51454, 67482)') + \
'&outFields=date_ech, code_qual, lib_qual, lib_zone, code_zone' + \
'&returnGeometry=false&resultRecordCount=48' + \
'&orderByFields=%s&f=json' % urllib.parse.quote('date_ech ASC')
today_dt_date = datetime.today().date()
# https request
r = requests.get(url, timeout=5.0)
# check error
if r.status_code == 200:
# decode json message
atmo_raw_d = r.json()
# populate zones dict with receive values
zones_d = {}
for record in atmo_raw_d['features']:
# load record data
r_code_zone = record['attributes']['code_zone']
r_ts = int(record['attributes']['date_ech'])
r_dt = datetime.utcfromtimestamp(r_ts / 1000)
r_value = record['attributes']['code_qual']
# retain today value
if r_dt.date() == today_dt_date:
zones_d[r_code_zone] = r_value
# skip key publish if zones_d is empty
if not zones_d:
raise ValueError('dataset is empty')
# create and populate result dict
d_air_quality = {'nancy': zones_d.get(54395, 0),
'metz': zones_d.get(57463, 0),
'reims': zones_d.get(51454, 0),
'strasbourg': zones_d.get(67482, 0)}
# update redis
DB.main.set_as_json('json:atmo', d_air_quality, ex=6 * 3600)
@catch_log_except()
def dir_est_img_job():
# retrieve DIR-est webcams: Houdemont, Velaine-en-Haye, Saint-Nicolas, Côte de Flavigny
for id_redis, lbl_cam, get_code in [('houdemont', 'Houdemont', '18'), ('velaine', 'Velaine', '53'),
('st-nicolas', 'Saint-Nicolas', '49'), ('flavigny', 'Flavigny', '5')]:
r = requests.get('https://webcam.dir-est.fr/app.php/lastimg/%s' % get_code)
if r.status_code == 200:
# load image to PIL and resize it
img = PIL.Image.open(io.BytesIO(r.content))
img.thumbnail([224, 235])
# add text to image
txt_img = '%s - %s' % (lbl_cam, datetime.now().strftime('%H:%M'))
font = PIL.ImageFont.truetype('/usr/share/fonts/truetype/freefont/FreeMono.ttf', 16)
draw = PIL.ImageDraw.Draw(img)
draw.text((5, 5), txt_img, (0x10, 0x0e, 0x0e), font=font)
# save image as PNG for redis
redis_io = io.BytesIO()
img.save(redis_io, format='PNG')
# update redis
DB.main.set('img:dir-est:%s:png' % id_redis, redis_io.getvalue(), ex=3600)
@catch_log_except()
def gsheet_job():
# https request
response = requests.get(gsheet_url, timeout=5.0)
# process response
d = dict()
for line in response.iter_lines(decode_unicode=True):
tag, value = line.split(',')
d[tag] = value
redis_d = dict(update=datetime.now().isoformat('T'), tags=d)
DB.main.set_as_json('json:gsheet', redis_d, ex=2 * 3600)
@catch_log_except()
def img_gmap_traffic_job():
# http request
r = requests.get(gmap_img_url, stream=True, timeout=5.0)
if r.status_code == 200:
# convert RAW img format (bytes) to Pillow image
pil_img = PIL.Image.open(io.BytesIO(r.raw.read()))
# crop image
pil_img = pil_img.crop((0, 0, 560, 328))
# pil_img.thumbnail([632, 328])
img_io = io.BytesIO()
pil_img.save(img_io, format='PNG')
# store RAW PNG to redis key
DB.main.set('img:traffic-map:png', img_io.getvalue(), ex=2 * 3600)
@catch_log_except()
def local_info_job():
# do request
l_titles = []
for post in feedparser.parse('https://france3-regions.francetvinfo.fr/societe/rss?r=grand-est').entries:
title = post.title
title = title.strip()
title = title.replace('\n', ' ')
l_titles.append(title)
DB.main.set_as_json('json:news', l_titles, ex=2 * 3600)
@catch_log_except()
def owc_updated_job():
# check if the owncloud directories has been updated by users (start sync jobs if need)
global owc_doc_dir_last_sync, owc_car_dir_last_sync
for f in wdv.ls():
item = f['file_path']
item_last_modified = int(f['dt_last_modified'].timestamp())
# document update ?
if item == webdav_reglement_doc_dir:
# update need
if item_last_modified > owc_doc_dir_last_sync:
logging.debug(f'"{webdav_reglement_doc_dir}" seem updated: run "owncloud_sync_doc_job"')
owc_sync_doc_job()
owc_doc_dir_last_sync = item_last_modified
# carousel update ?
elif item == webdav_carousel_img_dir:
# update need
if item_last_modified > owc_car_dir_last_sync:
logging.debug(f'"{webdav_carousel_img_dir}" seem updated: run "owncloud_sync_carousel_job"')
owc_sync_carousel_job()
owc_car_dir_last_sync = item_last_modified
@catch_log_except()
def owc_sync_carousel_job():
# sync owncloud carousel directory with local
# local constants
DIR_CAR_INFOS = 'dir:carousel:infos'
DIR_CAR_RAW = 'dir:carousel:raw:min-png'
# local functions
def update_carousel_raw_data(filename, raw_data):
# build json infos record
md5 = hashlib.md5(raw_data).hexdigest()
js_infos = json.dumps(dict(size=len(raw_data), md5=md5))
# convert raw data to PNG thumbnails
# create default error image
img_to_redis = PIL.Image.new('RGB', (655, 453), (255, 255, 255))
draw = PIL.ImageDraw.Draw(img_to_redis)
draw.text((0, 0), f'loading error (src: "{filename}")', (0, 0, 0))
# replace default image by convert result
try:
# convert png and jpg file
if filename.lower().endswith('.png') or filename.lower().endswith('.jpg'):
# image to PIL
img_to_redis = PIL.Image.open(io.BytesIO(raw_data))
# convert pdf file
elif filename.lower().endswith('.pdf'):
# PDF to PIL: convert first page to PIL image
img_to_redis = pdf2image.convert_from_bytes(raw_data)[0]
except Exception:
pass
# resize and format as raw png
img_to_redis.thumbnail([655, 453])
io_to_redis = io.BytesIO()
img_to_redis.save(io_to_redis, format='PNG')
# redis add (atomic write)
pipe = DB.main.pipeline()
pipe.hset(DIR_CAR_INFOS, filename, js_infos)
pipe.hset(DIR_CAR_RAW, filename, io_to_redis.getvalue())
pipe.execute()
# log sync start
logging.info('start of sync for owncloud carousel')
# list local redis files
local_files_d = {}
for f_name, js_infos in DB.main.hgetall(DIR_CAR_INFOS).items():
try:
filename = f_name.decode()
size = json.loads(js_infos)['size']
local_files_d[filename] = size
except ValueError:
pass
# check "dir:carousel:raw:min-png" consistency
raw_file_l = [f.decode() for f in DB.main.hkeys(DIR_CAR_RAW)]
# remove orphan infos record
for f in list(set(local_files_d) - set(raw_file_l)):
logging.debug(f'remove orphan "{f}" record in hash "{DIR_CAR_INFOS}"')
DB.main.hdel(DIR_CAR_INFOS, f)
del local_files_d[f]
# remove orphan raw-png record
for f in list(set(raw_file_l) - set(local_files_d)):
logging.debug(f'remove orphan "{f}" record in hash "{DIR_CAR_RAW}"')
DB.main.hdel(DIR_CAR_RAW, f)
# list owncloud files (disallow directory)
own_files_d = {}
for f_d in wdv.ls(webdav_carousel_img_dir):
file_path = f_d['file_path']
size = f_d['content_length']
if file_path and not file_path.endswith('/'):
# search site only tags (_@loos_, _@messein_...) in filename
# site id is 16 chars max
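            # e.g. 'consignes_@messein.pdf' -> tag 'messein' (kept here), 'note_@loos.png' -> 'loos' (skipped)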
site_tag_l = re.findall(r'_@([a-zA-Z0-9\-]{1,16})', file_path)
site_tag_l = [s.strip().lower() for s in site_tag_l]
site_tag_ok = 'messein' in site_tag_l or not site_tag_l
# download filter: ignore txt type, heavy file (>10 MB) or name tags mismatch
filter_ok = not file_path.lower().endswith('.txt') \
and (size < 10 * 1024 * 1024) \
and site_tag_ok
# add file to owncloud dict
if filter_ok:
own_files_d[f_d['file_path']] = size
# exist only on local redis
for f in list(set(local_files_d) - set(own_files_d)):
logging.info(f'"{f}" exist only on local -> remove it')
# redis remove (atomic)
pipe = DB.main.pipeline()
pipe.hdel(DIR_CAR_INFOS, f)
pipe.hdel(DIR_CAR_RAW, f)
pipe.execute()
# exist only on remote owncloud
for f in list(set(own_files_d) - set(local_files_d)):
logging.info('"%s" exist only on remote -> download it' % f)
data = wdv.download(os.path.join(webdav_carousel_img_dir, f))
if data:
update_carousel_raw_data(f, data)
# exist at both side (update only if file size change)
for f in list(set(local_files_d).intersection(own_files_d)):
local_size = local_files_d[f]
remote_size = own_files_d[f]
logging.debug(f'check "{f}" remote size [{remote_size}]/local size [{local_size}]')
if local_size != remote_size:
logging.info(f'"{f}" size mismatch -> download it')
data = wdv.download(os.path.join(webdav_carousel_img_dir, f))
if data:
update_carousel_raw_data(f, data)
# log sync end
logging.info('end of sync for owncloud carousel')
@catch_log_except()
def owc_sync_doc_job():
# sync owncloud document directory with local
# local constants
DIR_DOC_INFOS = 'dir:doc:infos'
DIR_DOC_RAW = 'dir:doc:raw'
# local functions
def update_doc_raw_data(filename, raw_data):
# build json infos record
md5 = hashlib.md5(raw_data).hexdigest()
js_infos = json.dumps(dict(size=len(raw_data), md5=md5))
# redis add (atomic write)
pipe = DB.main.pipeline()
pipe.hset(DIR_DOC_INFOS, filename, js_infos)
pipe.hset(DIR_DOC_RAW, filename, raw_data)
pipe.execute()
# log sync start
logging.info('start of sync for owncloud doc')
# list local redis files
local_files_d = {}
for f_name, js_infos in DB.main.hgetall(DIR_DOC_INFOS).items():
try:
filename = f_name.decode()
size = json.loads(js_infos)['size']
local_files_d[filename] = size
except ValueError:
pass
# check "dir:doc:raw:min-png" consistency
raw_file_l = [f.decode() for f in DB.main.hkeys(DIR_DOC_RAW)]
# remove orphan infos record
for f in list(set(local_files_d) - set(raw_file_l)):
logging.debug(f'remove orphan "{f}" record in hash "{DIR_DOC_INFOS}"')
DB.main.hdel(DIR_DOC_INFOS, f)
del local_files_d[f]
# remove orphan raw-png record
for f in list(set(raw_file_l) - set(local_files_d)):
logging.debug(f'remove orphan "{f}" record in hash "{DIR_DOC_RAW}"')
DB.main.hdel(DIR_DOC_RAW, f)
# list owncloud files (disallow directory)
own_files_d = {}
for f_d in wdv.ls(webdav_reglement_doc_dir):
file_path = f_d['file_path']
size = f_d['content_length']
if file_path and not file_path.endswith('/'):
            # download filter: ignore txt files or heavy files (>10 MB)
ok_load = not file_path.lower().endswith('.txt') \
and (size < 10 * 1024 * 1024)
if ok_load:
own_files_d[f_d['file_path']] = size
# exist only on local redis
for f in list(set(local_files_d) - set(own_files_d)):
logging.info(f'"{f}" exist only on local -> remove it')
# redis remove (atomic)
pipe = DB.main.pipeline()
pipe.hdel(DIR_DOC_INFOS, f)
pipe.hdel(DIR_DOC_RAW, f)
pipe.execute()
# exist only on remote owncloud
for f in list(set(own_files_d) - set(local_files_d)):
logging.info(f'"{f}" exist only on remote -> download it')
data = wdv.download(os.path.join(webdav_reglement_doc_dir, f))
if data:
update_doc_raw_data(f, data)
# exist at both side (update only if file size change)
for f in list(set(local_files_d).intersection(own_files_d)):
local_size = local_files_d[f]
remote_size = own_files_d[f]
logging.debug(f'check "{f}" remote size [{remote_size}]/local size [{local_size}]')
if local_size != remote_size:
logging.info(f'"{f}" size mismatch -> download it')
data = wdv.download(os.path.join(webdav_reglement_doc_dir, f))
if data:
update_doc_raw_data(f, data)
# log sync end
logging.info('end of sync for owncloud doc')
@catch_log_except()
def loos_redis_import_job():
share_keys_l = [('to:messein:json:tweets:@grtgaz', 'from:loos:json:tweets:@grtgaz'),
('to:messein:img:grt-twitter-cloud:png', 'from:loos:img:grt-twitter-cloud:png'),
('to:messein:json:flyspray-est', 'from:loos:json:flyspray-est')]
for from_remote_key, to_local_key in share_keys_l:
# copy redis data from loos key to local key
data = DB.loos.get(from_remote_key)
if data:
DB.main.set(to_local_key, data, ex=4 * 3600)
@catch_log_except()
def vigilance_job():
# request XML data from server
r = requests.get('http://vigilance.meteofrance.com/data/NXFR34_LFPW_.xml', timeout=10.0)
# check error
if r.status_code == 200:
# dom parsing (convert UTF-8 r.text to XML char)
dom = minidom.parseString(r.text.encode('ascii', 'xmlcharrefreplace'))
# set dict for dep data
vig_data = {'update': '', 'department': {}}
# map build date
tz = pytz.timezone('Europe/Paris')
map_date = str(dom.getElementsByTagName('entetevigilance')[0].getAttribute('dateinsert'))
map_dt = tz.localize(datetime(int(map_date[0:4]), int(map_date[4:6]),
int(map_date[6:8]), int(map_date[8:10]),
int(map_date[10:12])))
vig_data['update'] = map_dt.isoformat()
        # parse every department
for items in dom.getElementsByTagName('datavigilance'):
# current department
dep_code = str(items.attributes['dep'].value)
# get risk ID if exist
risk_id = []
for risk in items.getElementsByTagName('risque'):
risk_id.append(int(risk.attributes['valeur'].value))
# get flood ID if exist
flood_id = None
for flood in items.getElementsByTagName('crue'):
flood_id = int(flood.attributes['valeur'].value)
# get color ID
color_id = int(items.attributes['couleur'].value)
# build vig_data
vig_data['department'][dep_code] = {'vig_level': color_id,
'flood_level': flood_id,
'risk_id': risk_id}
DB.main.set_as_json('json:vigilance', vig_data, ex=2 * 3600)
@catch_log_except()
def weather_today_job():
# request data from NOAA server (METAR of Nancy-Essey Airport)
r = requests.get('http://tgftp.nws.noaa.gov/data/observations/metar/stations/LFSN.TXT',
timeout=10.0, headers={'User-Agent': USER_AGENT})
# check error
if r.status_code == 200:
# extract METAR message
metar_msg = r.content.decode().split('\n')[1]
# METAR parse
obs = Metar(metar_msg)
# init and populate d_today dict
d_today = {}
# message date and time
if obs.time:
d_today['update_iso'] = obs.time.strftime('%Y-%m-%dT%H:%M:%SZ')
d_today['update_fr'] = dt_utc_to_local(obs.time).strftime('%H:%M %d/%m')
# current temperature
if obs.temp:
d_today['temp'] = round(obs.temp.value('C'))
# current dew point
if obs.dewpt:
d_today['dewpt'] = round(obs.dewpt.value('C'))
# current pressure
if obs.press:
d_today['press'] = round(obs.press.value('hpa'))
# current wind speed
if obs.wind_speed:
d_today['w_speed'] = round(obs.wind_speed.value('KMH'))
# current wind gust
if obs.wind_gust:
d_today['w_gust'] = round(obs.wind_gust.value('KMH'))
# current wind direction
if obs.wind_dir:
# replace 'W'est by 'O'uest
d_today['w_dir'] = obs.wind_dir.compass().replace('W', 'O')
# weather status str
d_today['descr'] = 'n/a'
# store to redis
DB.main.set_as_json('json:weather:today:nancy', d_today, ex=2 * 3600)
# main
if __name__ == '__main__':
# logging setup
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
logging.getLogger('PIL').setLevel(logging.INFO)
logging.info('board-import-app started')
# init webdav client
wdv = WebDAV(webdav_url, username=webdav_user, password=webdav_pass)
# init scheduler
schedule.every(5).minutes.do(owc_updated_job)
schedule.every(1).hours.do(owc_sync_carousel_job)
schedule.every(1).hours.do(owc_sync_doc_job)
schedule.every(2).minutes.do(loos_redis_import_job)
schedule.every(60).minutes.do(air_quality_atmo_ge_job)
schedule.every(5).minutes.do(dir_est_img_job)
schedule.every(5).minutes.do(gsheet_job)
schedule.every(2).minutes.do(img_gmap_traffic_job)
schedule.every(5).minutes.do(local_info_job)
schedule.every(5).minutes.do(vigilance_job)
schedule.every(5).minutes.do(weather_today_job)
# first call
air_quality_atmo_ge_job()
dir_est_img_job()
gsheet_job()
img_gmap_traffic_job()
local_info_job()
loos_redis_import_job()
vigilance_job()
weather_today_job()
owc_updated_job()
# main loop
while True:
schedule.run_pending()
time.sleep(1)
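# ---------------------------------------------------------------------------
# Editorial note (assumption, not part of the original app): board_lib is not
# shown here, but the @catch_log_except() decorator applied to every job above
# presumably behaves like the sketch below - it logs any exception raised by a
# job so that a single failing job cannot stop the scheduler loop.
#
#   import functools
#
#   def catch_log_except(catch=Exception):
#       def decorator(job_func):
#           @functools.wraps(job_func)
#           def wrapper(*args, **kwargs):
#               try:
#                   return job_func(*args, **kwargs)
#               except catch as exc:
#                   logging.error(f'except {type(exc).__name__} in {job_func.__name__}: {exc}')
#           return wrapper
#       return decorator
# ---------------------------------------------------------------------------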
| <filename>docker/messein/board-import-app/app.py
#!/usr/bin/env python3
from configparser import ConfigParser
from datetime import datetime
import urllib.parse
import hashlib
import io
import json
import logging
import os
import re
import time
from xml.dom import minidom
import feedparser
import requests
import schedule
import PIL.Image
import PIL.ImageDraw
import PIL.ImageFont
from metar.Metar import Metar
import pytz
import pdf2image
from board_lib import CustomRedis, catch_log_except, dt_utc_to_local
from webdav import WebDAV
# some const
USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1'
# some var
owc_doc_dir_last_sync = 0
owc_car_dir_last_sync = 0
# read config
cnf = ConfigParser()
cnf.read('/data/conf/board.conf')
# redis
main_redis_user = cnf.get('redis', 'user')
main_redis_pass = cnf.get('redis', 'pass')
# redis-loos for share
loos_redis_user = cnf.get('redis-loos', 'user')
loos_redis_pass = cnf.get('redis-loos', 'pass')
# gmap img traffic
gmap_img_url = cnf.get('gmap_img', 'img_url')
# gsheet
gsheet_url = cnf.get('gsheet', 'url')
# openweathermap
ow_app_id = cnf.get('openweathermap', 'app_id')
# webdav
webdav_url = cnf.get('owncloud_dashboard', 'webdav_url')
webdav_user = cnf.get('owncloud_dashboard', 'webdav_user')
webdav_pass = cnf.get('owncloud_dashboard', 'webdav_pass')
webdav_reglement_doc_dir = cnf.get('owncloud_dashboard', 'webdav_reglement_doc_dir')
webdav_carousel_img_dir = cnf.get('owncloud_dashboard', 'webdav_carousel_img_dir')
# some class
class DB:
# create connector
main = CustomRedis(host='board-redis-srv', username=main_redis_user, password=<PASSWORD>,
socket_timeout=4, socket_keepalive=True)
loos = CustomRedis(host='board-redis-loos-tls-cli', username=loos_redis_user, password=lo<PASSWORD>,
socket_timeout=4, socket_keepalive=True)
# some function
@catch_log_except()
def air_quality_atmo_ge_job():
url = 'https://services3.arcgis.com/' + \
'Is0UwT37raQYl9Jj/arcgis/rest/services/ind_grandest_5j/FeatureServer/0/query' + \
'?where=%s' % urllib.parse.quote('code_zone IN (54395, 57463, 51454, 67482)') + \
'&outFields=date_ech, code_qual, lib_qual, lib_zone, code_zone' + \
'&returnGeometry=false&resultRecordCount=48' + \
'&orderByFields=%s&f=json' % urllib.parse.quote('date_ech ASC')
today_dt_date = datetime.today().date()
# https request
r = requests.get(url, timeout=5.0)
# check error
if r.status_code == 200:
# decode json message
atmo_raw_d = r.json()
# populate zones dict with received values
zones_d = {}
for record in atmo_raw_d['features']:
# load record data
r_code_zone = record['attributes']['code_zone']
r_ts = int(record['attributes']['date_ech'])
r_dt = datetime.utcfromtimestamp(r_ts / 1000)
r_value = record['attributes']['code_qual']
# retain today value
if r_dt.date() == today_dt_date:
zones_d[r_code_zone] = r_value
# skip key publish if zones_d is empty
if not zones_d:
raise ValueError('dataset is empty')
# create and populate result dict
d_air_quality = {'nancy': zones_d.get(54395, 0),
'metz': zones_d.get(57463, 0),
'reims': zones_d.get(51454, 0),
'strasbourg': zones_d.get(67482, 0)}
# update redis
DB.main.set_as_json('json:atmo', d_air_quality, ex=6 * 3600)
@catch_log_except()
def dir_est_img_job():
# retrieve DIR-est webcams: Houdemont, Velaine-en-Haye, Saint-Nicolas, Côte de Flavigny
for id_redis, lbl_cam, get_code in [('houdemont', 'Houdemont', '18'), ('velaine', 'Velaine', '53'),
('st-nicolas', 'Saint-Nicolas', '49'), ('flavigny', 'Flavigny', '5')]:
r = requests.get('https://webcam.dir-est.fr/app.php/lastimg/%s' % get_code)
if r.status_code == 200:
# load image to PIL and resize it
img = PIL.Image.open(io.BytesIO(r.content))
img.thumbnail([224, 235])
# add text to image
txt_img = '%s - %s' % (lbl_cam, datetime.now().strftime('%H:%M'))
font = PIL.ImageFont.truetype('/usr/share/fonts/truetype/freefont/FreeMono.ttf', 16)
draw = PIL.ImageDraw.Draw(img)
draw.text((5, 5), txt_img, (0x10, 0x0e, 0x0e), font=font)
# save image as PNG for redis
redis_io = io.BytesIO()
img.save(redis_io, format='PNG')
# update redis
DB.main.set('img:dir-est:%s:png' % id_redis, redis_io.getvalue(), ex=3600)
@catch_log_except()
def gsheet_job():
# https request
response = requests.get(gsheet_url, timeout=5.0)
# process response
d = dict()
for line in response.iter_lines(decode_unicode=True):
tag, value = line.split(',')
d[tag] = value
redis_d = dict(update=datetime.now().isoformat('T'), tags=d)
DB.main.set_as_json('json:gsheet', redis_d, ex=2 * 3600)
@catch_log_except()
def img_gmap_traffic_job():
# http request
r = requests.get(gmap_img_url, stream=True, timeout=5.0)
if r.status_code == 200:
# convert RAW img format (bytes) to Pillow image
pil_img = PIL.Image.open(io.BytesIO(r.raw.read()))
# crop image
pil_img = pil_img.crop((0, 0, 560, 328))
# pil_img.thumbnail([632, 328])
img_io = io.BytesIO()
pil_img.save(img_io, format='PNG')
# store RAW PNG to redis key
DB.main.set('img:traffic-map:png', img_io.getvalue(), ex=2 * 3600)
@catch_log_except()
def local_info_job():
# do request
l_titles = []
for post in feedparser.parse('https://france3-regions.francetvinfo.fr/societe/rss?r=grand-est').entries:
title = post.title
title = title.strip()
title = title.replace('\n', ' ')
l_titles.append(title)
DB.main.set_as_json('json:news', l_titles, ex=2 * 3600)
@catch_log_except()
def owc_updated_job():
# check if the owncloud directories have been updated by users (start sync jobs if needed)
global owc_doc_dir_last_sync, owc_car_dir_last_sync
for f in wdv.ls():
item = f['file_path']
item_last_modified = int(f['dt_last_modified'].timestamp())
# document update ?
if item == webdav_reglement_doc_dir:
# update need
if item_last_modified > owc_doc_dir_last_sync:
logging.debug(f'"{webdav_reglement_doc_dir}" seem updated: run "owncloud_sync_doc_job"')
owc_sync_doc_job()
owc_doc_dir_last_sync = item_last_modified
# carousel update ?
elif item == webdav_carousel_img_dir:
# update need
if item_last_modified > owc_car_dir_last_sync:
logging.debug(f'"{webdav_carousel_img_dir}" seem updated: run "owncloud_sync_carousel_job"')
owc_sync_carousel_job()
owc_car_dir_last_sync = item_last_modified
@catch_log_except()
def owc_sync_carousel_job():
# sync owncloud carousel directory with local
# local constants
DIR_CAR_INFOS = 'dir:carousel:infos'
DIR_CAR_RAW = 'dir:carousel:raw:min-png'
# local functions
def update_carousel_raw_data(filename, raw_data):
# build json infos record
md5 = hashlib.md5(raw_data).hexdigest()
js_infos = json.dumps(dict(size=len(raw_data), md5=md5))
# convert raw data to PNG thumbnails
# create default error image
img_to_redis = PIL.Image.new('RGB', (655, 453), (255, 255, 255))
draw = PIL.ImageDraw.Draw(img_to_redis)
draw.text((0, 0), f'loading error (src: "{filename}")', (0, 0, 0))
# replace default image by convert result
try:
# convert png and jpg file
if filename.lower().endswith('.png') or filename.lower().endswith('.jpg'):
# image to PIL
img_to_redis = PIL.Image.open(io.BytesIO(raw_data))
# convert pdf file
elif filename.lower().endswith('.pdf'):
# PDF to PIL: convert first page to PIL image
img_to_redis = pdf2image.convert_from_bytes(raw_data)[0]
except Exception:
pass
# resize and format as raw png
img_to_redis.thumbnail([655, 453])
io_to_redis = io.BytesIO()
img_to_redis.save(io_to_redis, format='PNG')
# redis add (atomic write)
pipe = DB.main.pipeline()
pipe.hset(DIR_CAR_INFOS, filename, js_infos)
pipe.hset(DIR_CAR_RAW, filename, io_to_redis.getvalue())
pipe.execute()
# log sync start
logging.info('start of sync for owncloud carousel')
# list local redis files
local_files_d = {}
for f_name, js_infos in DB.main.hgetall(DIR_CAR_INFOS).items():
try:
filename = f_name.decode()
size = json.loads(js_infos)['size']
local_files_d[filename] = size
except ValueError:
pass
# check "dir:carousel:raw:min-png" consistency
raw_file_l = [f.decode() for f in DB.main.hkeys(DIR_CAR_RAW)]
# remove orphan infos record
for f in list(set(local_files_d) - set(raw_file_l)):
logging.debug(f'remove orphan "{f}" record in hash "{DIR_CAR_INFOS}"')
DB.main.hdel(DIR_CAR_INFOS, f)
del local_files_d[f]
# remove orphan raw-png record
for f in list(set(raw_file_l) - set(local_files_d)):
logging.debug(f'remove orphan "{f}" record in hash "{DIR_CAR_RAW}"')
DB.main.hdel(DIR_CAR_RAW, f)
# list owncloud files (disallow directory)
own_files_d = {}
for f_d in wdv.ls(webdav_carousel_img_dir):
file_path = f_d['file_path']
size = f_d['content_length']
if file_path and not file_path.endswith('/'):
# search site only tags (_@loos_, _@messein_...) in filename
# site id is 16 chars max
site_tag_l = re.findall(r'_@([a-zA-Z0-9\-]{1,16})', file_path)
site_tag_l = [s.strip().lower() for s in site_tag_l]
site_tag_ok = 'messein' in site_tag_l or not site_tag_l
# download filter: ignore txt type, heavy file (>10 MB) or name tags mismatch
filter_ok = not file_path.lower().endswith('.txt') \
and (size < 10 * 1024 * 1024) \
and site_tag_ok
# add file to owncloud dict
if filter_ok:
own_files_d[f_d['file_path']] = size
# exist only on local redis
for f in list(set(local_files_d) - set(own_files_d)):
logging.info(f'"{f}" exist only on local -> remove it')
# redis remove (atomic)
pipe = DB.main.pipeline()
pipe.hdel(DIR_CAR_INFOS, f)
pipe.hdel(DIR_CAR_RAW, f)
pipe.execute()
# exist only on remote owncloud
for f in list(set(own_files_d) - set(local_files_d)):
logging.info('"%s" exist only on remote -> download it' % f)
data = wdv.download(os.path.join(webdav_carousel_img_dir, f))
if data:
update_carousel_raw_data(f, data)
# exist on both sides (update only if file size changed)
for f in list(set(local_files_d).intersection(own_files_d)):
local_size = local_files_d[f]
remote_size = own_files_d[f]
logging.debug(f'check "{f}" remote size [{remote_size}]/local size [{local_size}]')
if local_size != remote_size:
logging.info(f'"{f}" size mismatch -> download it')
data = wdv.download(os.path.join(webdav_carousel_img_dir, f))
if data:
update_carousel_raw_data(f, data)
# log sync end
logging.info('end of sync for owncloud carousel')
@catch_log_except()
def owc_sync_doc_job():
# sync owncloud document directory with local
# local constants
DIR_DOC_INFOS = 'dir:doc:infos'
DIR_DOC_RAW = 'dir:doc:raw'
# local functions
def update_doc_raw_data(filename, raw_data):
# build json infos record
md5 = hashlib.md5(raw_data).hexdigest()
js_infos = json.dumps(dict(size=len(raw_data), md5=md5))
# redis add (atomic write)
pipe = DB.main.pipeline()
pipe.hset(DIR_DOC_INFOS, filename, js_infos)
pipe.hset(DIR_DOC_RAW, filename, raw_data)
pipe.execute()
# log sync start
logging.info('start of sync for owncloud doc')
# list local redis files
local_files_d = {}
for f_name, js_infos in DB.main.hgetall(DIR_DOC_INFOS).items():
try:
filename = f_name.decode()
size = json.loads(js_infos)['size']
local_files_d[filename] = size
except ValueError:
pass
# check "dir:doc:raw:min-png" consistency
raw_file_l = [f.decode() for f in DB.main.hkeys(DIR_DOC_RAW)]
# remove orphan infos record
for f in list(set(local_files_d) - set(raw_file_l)):
logging.debug(f'remove orphan "{f}" record in hash "{DIR_DOC_INFOS}"')
DB.main.hdel(DIR_DOC_INFOS, f)
del local_files_d[f]
# remove orphan raw record
for f in list(set(raw_file_l) - set(local_files_d)):
logging.debug(f'remove orphan "{f}" record in hash "{DIR_DOC_RAW}"')
DB.main.hdel(DIR_DOC_RAW, f)
# list owncloud files (disallow directory)
own_files_d = {}
for f_d in wdv.ls(webdav_reglement_doc_dir):
file_path = f_d['file_path']
size = f_d['content_length']
if file_path and not file_path.endswith('/'):
# download filter: ignore txt file or heavy file (>10 MB)
ok_load = not file_path.lower().endswith('.txt') \
and (size < 10 * 1024 * 1024)
if ok_load:
own_files_d[f_d['file_path']] = size
# exist only on local redis
for f in list(set(local_files_d) - set(own_files_d)):
logging.info(f'"{f}" exist only on local -> remove it')
# redis remove (atomic)
pipe = DB.main.pipeline()
pipe.hdel(DIR_DOC_INFOS, f)
pipe.hdel(DIR_DOC_RAW, f)
pipe.execute()
# exist only on remote owncloud
for f in list(set(own_files_d) - set(local_files_d)):
logging.info(f'"{f}" exist only on remote -> download it')
data = wdv.download(os.path.join(webdav_reglement_doc_dir, f))
if data:
update_doc_raw_data(f, data)
# exist on both sides (update only if file size changed)
for f in list(set(local_files_d).intersection(own_files_d)):
local_size = local_files_d[f]
remote_size = own_files_d[f]
logging.debug(f'check "{f}" remote size [{remote_size}]/local size [{local_size}]')
if local_size != remote_size:
logging.info(f'"{f}" size mismatch -> download it')
data = wdv.download(os.path.join(webdav_reglement_doc_dir, f))
if data:
update_doc_raw_data(f, data)
# log sync end
logging.info('end of sync for owncloud doc')
@catch_log_except()
def loos_redis_import_job():
share_keys_l = [('to:messein:json:tweets:@grtgaz', 'from:loos:json:tweets:@grtgaz'),
('to:messein:img:grt-twitter-cloud:png', 'from:loos:img:grt-twitter-cloud:png'),
('to:messein:json:flyspray-est', 'from:loos:json:flyspray-est')]
for from_remote_key, to_local_key in share_keys_l:
# copy redis data from loos key to local key
data = DB.loos.get(from_remote_key)
if data:
DB.main.set(to_local_key, data, ex=4 * 3600)
@catch_log_except()
def vigilance_job():
# request XML data from server
r = requests.get('http://vigilance.meteofrance.com/data/NXFR34_LFPW_.xml', timeout=10.0)
# check error
if r.status_code == 200:
# DOM parsing (encode non-ASCII chars in r.text as XML character references)
dom = minidom.parseString(r.text.encode('ascii', 'xmlcharrefreplace'))
# set dict for dep data
vig_data = {'update': '', 'department': {}}
# map build date
tz = pytz.timezone('Europe/Paris')
map_date = str(dom.getElementsByTagName('entetevigilance')[0].getAttribute('dateinsert'))
map_dt = tz.localize(datetime(int(map_date[0:4]), int(map_date[4:6]),
int(map_date[6:8]), int(map_date[8:10]),
int(map_date[10:12])))
vig_data['update'] = map_dt.isoformat()
# parse every department
for items in dom.getElementsByTagName('datavigilance'):
# current department
dep_code = str(items.attributes['dep'].value)
# get risk ID if exist
risk_id = []
for risk in items.getElementsByTagName('risque'):
risk_id.append(int(risk.attributes['valeur'].value))
# get flood ID if exist
flood_id = None
for flood in items.getElementsByTagName('crue'):
flood_id = int(flood.attributes['valeur'].value)
# get color ID
color_id = int(items.attributes['couleur'].value)
# build vig_data
vig_data['department'][dep_code] = {'vig_level': color_id,
'flood_level': flood_id,
'risk_id': risk_id}
DB.main.set_as_json('json:vigilance', vig_data, ex=2 * 3600)
@catch_log_except()
def weather_today_job():
# request data from NOAA server (METAR of Nancy-Essey Airport)
r = requests.get('http://tgftp.nws.noaa.gov/data/observations/metar/stations/LFSN.TXT',
timeout=10.0, headers={'User-Agent': USER_AGENT})
# check error
if r.status_code == 200:
# extract METAR message
metar_msg = r.content.decode().split('\n')[1]
# METAR parse
obs = Metar(metar_msg)
# init and populate d_today dict
d_today = {}
# message date and time
if obs.time:
d_today['update_iso'] = obs.time.strftime('%Y-%m-%dT%H:%M:%SZ')
d_today['update_fr'] = dt_utc_to_local(obs.time).strftime('%H:%M %d/%m')
# current temperature
if obs.temp:
d_today['temp'] = round(obs.temp.value('C'))
# current dew point
if obs.dewpt:
d_today['dewpt'] = round(obs.dewpt.value('C'))
# current pressure
if obs.press:
d_today['press'] = round(obs.press.value('hpa'))
# current wind speed
if obs.wind_speed:
d_today['w_speed'] = round(obs.wind_speed.value('KMH'))
# current wind gust
if obs.wind_gust:
d_today['w_gust'] = round(obs.wind_gust.value('KMH'))
# current wind direction
if obs.wind_dir:
# replace 'W'est by 'O'uest
d_today['w_dir'] = obs.wind_dir.compass().replace('W', 'O')
# weather status str
d_today['descr'] = 'n/a'
# store to redis
DB.main.set_as_json('json:weather:today:nancy', d_today, ex=2 * 3600)
# main
if __name__ == '__main__':
# logging setup
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
logging.getLogger('PIL').setLevel(logging.INFO)
logging.info('board-import-app started')
# init webdav client
wdv = WebDAV(webdav_url, username=webdav_user, password=webdav_pass)
# init scheduler
schedule.every(5).minutes.do(owc_updated_job)
schedule.every(1).hours.do(owc_sync_carousel_job)
schedule.every(1).hours.do(owc_sync_doc_job)
schedule.every(2).minutes.do(loos_redis_import_job)
schedule.every(60).minutes.do(air_quality_atmo_ge_job)
schedule.every(5).minutes.do(dir_est_img_job)
schedule.every(5).minutes.do(gsheet_job)
schedule.every(2).minutes.do(img_gmap_traffic_job)
schedule.every(5).minutes.do(local_info_job)
schedule.every(5).minutes.do(vigilance_job)
schedule.every(5).minutes.do(weather_today_job)
# first call
air_quality_atmo_ge_job()
dir_est_img_job()
gsheet_job()
img_gmap_traffic_job()
local_info_job()
loos_redis_import_job()
vigilance_job()
weather_today_job()
owc_updated_job()
# main loop
while True:
schedule.run_pending()
time.sleep(1)
| en | 0.542574 | #!/usr/bin/env python3 # some const # some var # read config # redis # redis-loos for share # gmap img traffic # gsheet # openweathermap # webdav # some class # create connector # some function # https request # check error # decode json message # populate zones dict with receive values # load record data # retain today value # skip key publish if zones_d is empty # create and populate result dict # update redis # retrieve DIR-est webcams: Houdemont, Velaine-en-Haye, Saint-Nicolas, Côte de Flavigny # load image to PIL and resize it # add text to image # save image as PNG for redis # update redis # https request # process response # http request # convert RAW img format (bytes) to Pillow image # crop image # pil_img.thumbnail([632, 328]) # store RAW PNG to redis key # do request # check if the owncloud directories has been updated by users (start sync jobs if need) # document update ? # update need # carousel update ? # update need # sync owncloud carousel directory with local # local constants # local functions # build json infos record # convert raw data to PNG thumbnails # create default error image # replace default image by convert result # convert png and jpg file # image to PIL # convert pdf file # PDF to PIL: convert first page to PIL image # resize and format as raw png # redis add (atomic write) # log sync start # list local redis files # check "dir:carousel:raw:min-png" consistency # remove orphan infos record # remove orphan raw-png record # list owncloud files (disallow directory) # search site only tags (_@loos_, _@messein_...) in filename # site id is 16 chars max # download filter: ignore txt type, heavy file (>10 MB) or name tags mismatch # add file to owncloud dict # exist only on local redis # redis remove (atomic) # exist only on remote owncloud # exist at both side (update only if file size change) # log sync end # sync owncloud document directory with local # local constants # local functions # build json infos record # redis add (atomic write) # log sync start # list local redis files # check "dir:doc:raw:min-png" consistency # remove orphan infos record # remove orphan raw-png record # list owncloud files (disallow directory) # download filter: ignore txt file or heavy fie (>10 MB) # exist only on local redis # redis remove (atomic) # exist only on remote owncloud # exist at both side (update only if file size change) # log sync end # copy redis data from loos key to local key # request XML data from server # check error # dom parsing (convert UTF-8 r.text to XML char) # set dict for dep data # map build date # parse every departments # current department # get risk ID if exist # get flood ID if exist # get color ID # build vig_data # request data from NOAA server (METAR of Nancy-Essey Airport) # check error # extract METAR message # METAR parse # init and populate d_today dict # message date and time # current temperature # current dew point # current pressure # current wind speed # current wind gust # current wind direction # replace 'W'est by 'O'uest # weather status str # store to redis # main # logging setup # init webdav client # init scheduler # first call # main loop | 1.72463 | 2 |
fsleyes_widgets/widgetlist.py | pauldmccarthy/fsleyes-widgets | 1 | 7631 | <filename>fsleyes_widgets/widgetlist.py
#!/usr/bin/env python
#
# widgetlist.py - A widget which displays a list of groupable widgets.
#
# Author: <NAME> <<EMAIL>>
#
"""This module provides the :class:`WidgetList` class, which displays a list
of widgets.
"""
import wx
import wx.lib.newevent as wxevent
import wx.lib.scrolledpanel as scrolledpanel
import fsleyes_widgets.togglepanel as togglepanel
class WidgetList(scrolledpanel.ScrolledPanel):
"""A scrollable list of widgets.
The ``WidgetList`` provides a number of features:
- Widgets can be grouped.
- A label can be shown next to each widget.
- Widget groups can be collapsed/expanded.
- Widgets and groups can be dynamically added/removed.
The most important methods are:
.. autosummary::
:nosignatures:
AddWidget
AddGroup
A ``WidgetList`` looks something like this:
.. image:: images/widgetlist.png
:scale: 50%
:align: center
A ``WidgetList`` emits a :data:`WidgetListChangeEvent` whenever its
contents change.
"""
_defaultOddColour = None
"""Background colour for widgets on odd rows.
Initialised in :meth:`__init__`.
"""
_defaultEvenColour = None
"""Background colour for widgets on even rows.
Initialised in :meth:`__init__`.
"""
_defaultGroupColour = None
"""Border and title background colour for widget groups.
Initialised in :meth:`__init__`.
"""
def __init__(self, parent, style=0, minHeight=-1):
"""Create a ``WidgetList``.
:arg parent: The :mod:`wx` parent object.
:arg style: Passed through to ``wx.ScrolledPanel.__init__``
:arg minHeight: Minimum height of each row
"""
odd = wx.SystemSettings.GetColour(wx.SYS_COLOUR_LISTBOX)
even = odd.ChangeLightness(90)
group = odd
if WidgetList._defaultOddColour is None:
WidgetList._defaultOddColour = odd
if WidgetList._defaultEvenColour is None:
WidgetList._defaultEvenColour = even
if WidgetList._defaultGroupColour is None:
WidgetList._defaultGroupColour = group
self.__minHeight = minHeight
self.__widgSizer = wx.BoxSizer(wx.VERTICAL)
self.__sizer = wx.BoxSizer(wx.VERTICAL)
self.__groupSizer = wx.BoxSizer(wx.VERTICAL)
self.__widgets = {}
self.__groups = {}
self.__oddColour = WidgetList._defaultOddColour
self.__evenColour = WidgetList._defaultEvenColour
self.__groupColour = WidgetList._defaultGroupColour
self.__sizer.Add(self.__widgSizer, flag=wx.EXPAND)
self.__sizer.Add(self.__groupSizer, flag=wx.EXPAND)
self.__oneExpanded = style & WL_ONE_EXPANDED
# The SP.__init__ method seemingly
# induces a call to DoGetBestSize,
# which assumes that all of the
# things above exist. So we call
# init after we've created those
# things.
scrolledpanel.ScrolledPanel.__init__(self, parent)
self.SetSizer(self.__sizer)
self.SetupScrolling()
self.SetAutoLayout(1)
def DoGetBestSize(self):
"""Returns the best size for the widget list, with all group
widgets expanded.
"""
width, height = self.__widgSizer.GetSize().Get()
for name, group in self.__groups.items():
w, h = group.parentPanel.GetBestSize().Get()
w += 20
h += 10
if w > width:
width = w
height += h
return wx.Size(width, height)
def __makeWidgetKey(self, widget):
"""Widgets are stored in a dictionary - this method generates a
string to use as a key, based on the widget ``id``.
"""
return str(id(widget))
def __setLabelWidths(self, widgets):
"""Calculates the maximum width of all widget labels, and sets all
labels to that width.
This ensures that all labels and widgets are horizontally aligned.
"""
if len(widgets) == 0:
return
dc = wx.ClientDC(widgets[0].label)
lblWidths = [dc.GetTextExtent(w.displayName)[0] for w in widgets]
maxWidth = max(lblWidths)
for w in widgets:
w.label.SetMinSize((maxWidth + 10, -1))
w.label.SetMaxSize((maxWidth + 10, -1))
def __setColours(self):
"""Called whenever the widget list needs to be refreshed.
Makes sure that odd/even widgets and their labels have the correct
background colour.
"""
def setWidgetColours(widgDict):
for i, widg in enumerate(widgDict.values()):
if i % 2: colour = self.__oddColour
else: colour = self.__evenColour
widg.SetBackgroundColour(colour)
setWidgetColours(self.__widgets)
for group in self.__groups.values():
setWidgetColours(group.widgets)
group.parentPanel.SetBackgroundColour(self.__groupColour)
group.colPanel .SetBackgroundColour(self.__groupColour)
def __refresh(self, *args, **kwargs):
"""Updates widget colours (see :meth:`__setColours`), and lays out
the widget list.
:arg postEvent: If ``True`` (the default), a
:data:`WidgetListChangeEvent` is posted.
"""
self.__setColours()
self.FitInside()
self.Layout()
if kwargs.get('postEvent', True):
wx.PostEvent(self, WidgetListChangeEvent())
def SetColours(self, odd=None, even=None, group=None):
"""Sets the colours used on this ``WidgetList``.
Each argument is assumed to be a tuple of ``(r, g, b)`` values,
each in the range ``[0 - 255]``.
:arg odd: Background colour for widgets on odd rows.
:arg even: Background colour for widgets on even rows.
:arg group: Border/title colour for widget groups.
"""
if odd is not None: self.__oddColour = odd
if even is not None: self.__evenColour = even
if group is not None: self.__groupColour = group
self.__setColours()
def GetGroups(self):
"""Returns a list containing the name of every group in this
``WidgetList``.
"""
return list(self.__groups.keys())
def HasGroup(self, groupName):
"""Returns ``True`` if this ``WidgetList`` contains a group
with the specified name.
"""
return groupName in self.__groups
def RenameGroup(self, groupName, newDisplayName):
"""Changes the display name of the specified group.
.. note:: This method only changes the *display name* of a group,
not the group identifier name. See the :meth:`AddGroup`
method.
:arg groupName: Name of the group.
:arg newDisplayName: New display name for the group.
"""
group = self.__groups[groupName]
group.displayName = newDisplayName
group.colPanel.SetLabel(newDisplayName)
def AddGroup(self, groupName, displayName=None):
"""Add a new group to this ``WidgetList``.
A :exc:`ValueError` is raised if a group with the specified name
already exists.
:arg groupName: The name of the group - this is used as an
identifier for the group.
:arg displayName: A string to be shown in the title bar for the
group. This can be changed later via the
:meth:`RenameGroup` method.
"""
if displayName is None:
displayName = groupName
if groupName in self.__groups:
raise ValueError('A group with name {} '
'already exists'.format(groupName))
parentPanel = wx.Panel(self, style=wx.SUNKEN_BORDER)
colPanel = togglepanel.TogglePanel(parentPanel, label=displayName)
widgPanel = colPanel.GetPane()
widgSizer = wx.BoxSizer(wx.VERTICAL)
widgPanel.SetSizer(widgSizer)
gapSizer = wx.BoxSizer(wx.VERTICAL)
# A spacer exists at the top,
# and between, every group.
gapSizer.Add((-1, 5))
gapSizer.Add(parentPanel, border=10, flag=(wx.EXPAND |
wx.LEFT |
wx.RIGHT))
self.__groupSizer.Add(gapSizer, flag=wx.EXPAND)
parentSizer = wx.BoxSizer(wx.VERTICAL)
parentSizer.Add(colPanel,
border=5,
flag=wx.EXPAND | wx.BOTTOM,
proportion=0)
parentPanel.SetSizer(parentSizer)
group = _Group(groupName,
displayName,
gapSizer,
parentPanel,
colPanel,
widgPanel,
widgSizer)
self.__groups[groupName] = group
self.__refresh()
# Mouse wheel listener needed
# on all children under linux/GTK
if wx.Platform == '__WXGTK__':
parentPanel.Bind(wx.EVT_MOUSEWHEEL, self.__onMouseWheel)
colPanel .Bind(wx.EVT_MOUSEWHEEL, self.__onMouseWheel)
colPanel.Bind(togglepanel.EVT_TOGGLEPANEL_EVENT, self.__onGroupExpand)
def GetWidgets(self, groupName=None):
"""Returns a list containing all of the widgets that have been added
to this ``WidgetList``.
:arg groupName: If provided, only widgets in the specified group will
be returned. Otherwise, ungrouped widgets are returned.
"""
if groupName is None: widgDict = self.__widgets
else: widgDict = self.__groups[groupName].widgets
widgets = [w.widget for w in widgDict.values()]
return widgets
def AddWidget(self, widget, displayName, tooltip=None, groupName=None):
"""Add an arbitrary widget to the property list.
If the ``groupName`` is not provided, the widget is added to a list
of *top level* widgets, which appear at the top of the list, above
any groups. Otherwise, the widget is added to the collapsible panel
corresponding to the specified group.
A :exc:`ValueError` is raised if the widget is already contained
in the list.
:arg widget: The widget to add to the list.
:arg displayName: The widget label/display name.
:arg tooltip: A tooltip for the widget.
:arg groupName: Name of the group to which the widget should be
added.
.. note:: The provided ``widget`` may also be a :class:`wx.Sizer`
instance, although support for this is basic. Specifically,
only one level of nesting is possible, i.e. the provided
``wx.Sizer`` may not have any other ``wx.Sizer``
instances as its children.
"""
if groupName is None:
widgDict = self.__widgets
parent = self
parentSizer = self.__widgSizer
else:
group = self.__groups[groupName]
widgDict = group.widgets
parent = group.widgPanel
parentSizer = group.sizer
key = self.__makeWidgetKey(widget)
if key in widgDict:
raise ValueError('Widget {} already exists'.format(key))
widgPanel = wx.Panel(parent)
widgSizer = wx.BoxSizer(wx.HORIZONTAL)
widgPanel.SetSizer(widgSizer)
if isinstance(widget, wx.Sizer):
for child in widget.GetChildren():
child.GetWindow().Reparent(widgPanel)
else:
w, h = widget.GetBestSize().Get()
if self.__minHeight > h:
h = self.__minHeight
widget.SetMinSize( (w, h))
widget.Reparent(widgPanel)
label = wx.StaticText(widgPanel,
label=displayName,
style=wx.ALIGN_RIGHT)
widgSizer.Add(label, flag=wx.EXPAND)
widgSizer.Add(widget, flag=wx.EXPAND, proportion=1)
parentSizer.Add(widgPanel,
flag=wx.EXPAND | wx.LEFT | wx.RIGHT,
border=5)
widg = _Widget(displayName,
tooltip,
label,
widget,
widgPanel,
widgSizer)
if tooltip is not None:
widg.SetTooltip(tooltip)
# Under linux/GTK, mouse events are
# captured by child windows, so if
# we want scrolling to work, we need
# to capture scroll events on every
# child. Under OSX/cocoa, this is
# not necessary.
if wx.Platform == '__WXGTK__':
widg.Bind(wx.EVT_MOUSEWHEEL, self.__onMouseWheel)
widgDict[key] = widg
self.__setLabelWidths(list(widgDict.values()))
self.__refresh()
def __onMouseWheel(self, ev):
"""Only called if running on GTK. Scrolls the widget list according
to the mouse wheel rotation.
"""
posx, posy = self.GetViewStart()
rotation = ev.GetWheelRotation()
if rotation > 0: delta = 5
elif rotation < 0: delta = -5
else: return
if ev.GetWheelAxis() == wx.MOUSE_WHEEL_VERTICAL: posy -= delta
else: posx += delta
self.Scroll(posx, posy)
def __onGroupExpand(self, ev):
"""Called when the user expands or collapses a group. Enforces
the :data:`WL_ONE_EXPANDED` style if it is enabled, and refreshes
the panel.
"""
panel = ev.GetEventObject()
if panel.IsExpanded() and self.__oneExpanded:
for group in self.__groups.values():
if group.colPanel is not panel:
group.colPanel.Collapse()
self.__refresh()
def AddSpace(self, groupName=None):
"""Adds some empty vertical space to the widget list.
:arg groupName: Name of the group to which the space should be added.
If not specified, the space is added to the *top level*
widget list - see the :meth:`AddWidget` method.
"""
if groupName is None: parentSizer = self.__widgSizer
else: parentSizer = self.__groups[groupName].sizer
parentSizer.Add((-1, 10))
def RemoveWidget(self, widget, groupName=None):
"""Removes and destroys the specified widget from this ``WidgetList``.
:arg widget: The widget to remove.
:arg groupName: Name of the group in which the widget is contained.
"""
key = self.__makeWidgetKey(widget)
if groupName is None:
parentSizer = self.__widgSizer
widgDict = self.__widgets
else:
group = self.__groups[groupName]
parentSizer = group.sizer
widgDict = group.widgets
widg = widgDict.pop(key)
parentSizer.Detach(widg.panel)
widg.Destroy()
self.__refresh()
def RemoveGroup(self, groupName):
"""Removes the specified group, and destroys all of the widgets
contained within it.
"""
group = self.__groups.pop(groupName)
self.__groupSizer.Detach(group.gapSizer)
group.parentPanel.Destroy()
self.__refresh()
def Clear(self):
"""Removes and destroys all widgets and groups. """
for key in list(self.__widgets.keys()):
widg = self.__widgets.pop(key)
self.__widgSizer.Detach(widg.sizer)
widg.Destroy()
for group in self.GetGroups():
self.RemoveGroup(group)
self.__refresh()
def ClearGroup(self, groupName):
"""Removes and destroys all widgets in the specified group, but
does not remove the group.
"""
group = self.__groups[groupName]
group.sizer.Clear(True)
group.widgets.clear()
self.__refresh()
def GroupSize(self, groupName):
"""Returns the number of widgets that have been added to the
specified group.
"""
return len(self.__groups[groupName].widgets)
def IsExpanded(self, groupName):
"""Returns ``True`` if the panel for the specified group is currently
expanded, ``False`` if it is collapsed
"""
return self.__groups[groupName].colPanel.IsExpanded()
def Expand(self, groupName, expand=True):
"""Expands or collapses the panel for the specified group. """
panel = self.__groups[groupName].colPanel
if expand: panel.Expand()
else: panel.Collapse()
self.__refresh()
class _Widget:
"""The ``_Widget`` class is used internally by the :class:`WidgetList`
to organise references to each widget in the list.
"""
def __init__(self,
displayName,
tooltip,
label,
widget,
panel,
sizer):
self.displayName = displayName
self.tooltip = tooltip
self.label = label
self.widget = widget
self.panel = panel
self.sizer = sizer
def SetBackgroundColour(self, colour):
self.panel.SetBackgroundColour(colour)
self.label.SetBackgroundColour(colour)
def SetTooltip(self, tooltip):
self.label.SetToolTip(wx.ToolTip(tooltip))
if isinstance(self.widget, wx.Sizer):
for child in self.widget.GetChildren():
child.GetWindow().SetToolTip(wx.ToolTip(tooltip))
else:
self.widget.SetToolTip(wx.ToolTip(tooltip))
def Bind(self, evType, callback):
self.panel.Bind(evType, callback)
self.label.Bind(evType, callback)
if isinstance(self.widget, wx.Sizer):
for c in self.widget.GetChildren():
c.GetWindow().Bind(evType, callback)
else:
self.widget.Bind(evType, callback)
def Destroy(self):
self.label.Destroy()
if isinstance(self.widget, wx.Sizer):
self.widget.Clear(True)
else:
self.widget.Destroy()
class _Group:
"""The ``_Group`` class is used internally by :class:`WidgetList`
instances to represent groups of widgets that are in the list.
"""
def __init__(self,
groupName,
displayName,
gapSizer,
parentPanel,
colPanel,
widgPanel,
sizer):
self.groupName = groupName
self.displayName = displayName
self.gapSizer = gapSizer
self.parentPanel = parentPanel
self.colPanel = colPanel
self.widgPanel = widgPanel
self.sizer = sizer
self.widgets = {}
_WidgetListChangeEvent, _EVT_WL_CHANGE_EVENT = wxevent.NewEvent()
WidgetListChangeEvent = _WidgetListChangeEvent
"""Event emitted by a :class:`WidgetList` when its contents change. """
EVT_WL_CHANGE_EVENT = _EVT_WL_CHANGE_EVENT
"""Identifier for the :data:`WidgetListChangeEvent`. """
WL_ONE_EXPANDED = 1
""":class:`WidgetList` style flag. When applied, at most one group will
be expanded at any one time.
"""
| <filename>fsleyes_widgets/widgetlist.py
#!/usr/bin/env python
#
# widgetlist.py - A widget which displays a list of groupable widgets.
#
# Author: <NAME> <<EMAIL>>
#
"""This module provides the :class:`WidgetList` class, which displays a list
of widgets.
"""
import wx
import wx.lib.newevent as wxevent
import wx.lib.scrolledpanel as scrolledpanel
import fsleyes_widgets.togglepanel as togglepanel
class WidgetList(scrolledpanel.ScrolledPanel):
"""A scrollable list of widgets.
The ``WidgetList`` provides a number of features:
- Widgets can be grouped.
- A label can be shown next to each widget.
- Widget groups can be collapsed/expanded.
- Widgets and groups can be dynamically added/removed.
The most important methods are:
.. autosummary::
:nosignatures:
AddWidget
AddGroup
A ``WidgetList`` looks something like this:
.. image:: images/widgetlist.png
:scale: 50%
:align: center
A ``WidgetList`` emits a :data:`WidgetListChangeEvent` whenever its
contents change.
"""
_defaultOddColour = None
"""Background colour for widgets on odd rows.
Initialised in :meth:`__init__`.
"""
_defaultEvenColour = None
"""Background colour for widgets on even rows.
Initialised in :meth:`__init__`.
"""
_defaultGroupColour = None
"""Border and title background colour for widget groups.
Initialised in :meth:`__init__`.
"""
def __init__(self, parent, style=0, minHeight=-1):
"""Create a ``WidgetList``.
:arg parent: The :mod:`wx` parent object.
:arg style: Passed through to ``wx.ScrolledPanel.__init__``
:arg minHeight: Minimum height of each row
"""
odd = wx.SystemSettings.GetColour(wx.SYS_COLOUR_LISTBOX)
even = odd.ChangeLightness(90)
group = odd
if WidgetList._defaultOddColour is None:
WidgetList._defaultOddColour = odd
if WidgetList._defaultEvenColour is None:
WidgetList._defaultEvenColour = even
if WidgetList._defaultGroupColour is None:
WidgetList._defaultGroupColour = group
self.__minHeight = minHeight
self.__widgSizer = wx.BoxSizer(wx.VERTICAL)
self.__sizer = wx.BoxSizer(wx.VERTICAL)
self.__groupSizer = wx.BoxSizer(wx.VERTICAL)
self.__widgets = {}
self.__groups = {}
self.__oddColour = WidgetList._defaultOddColour
self.__evenColour = WidgetList._defaultEvenColour
self.__groupColour = WidgetList._defaultGroupColour
self.__sizer.Add(self.__widgSizer, flag=wx.EXPAND)
self.__sizer.Add(self.__groupSizer, flag=wx.EXPAND)
self.__oneExpanded = style & WL_ONE_EXPANDED
# The SP.__init__ method seemingly
# induces a call to DoGetBestSize,
# which assumes that all of the
# things above exist. So we call
# init after we've created those
# things.
scrolledpanel.ScrolledPanel.__init__(self, parent)
self.SetSizer(self.__sizer)
self.SetupScrolling()
self.SetAutoLayout(1)
def DoGetBestSize(self):
"""Returns the best size for the widget list, with all group
widgets expanded.
"""
width, height = self.__widgSizer.GetSize().Get()
for name, group in self.__groups.items():
w, h = group.parentPanel.GetBestSize().Get()
w += 20
h += 10
if w > width:
width = w
height += h
return wx.Size(width, height)
def __makeWidgetKey(self, widget):
"""Widgets are stored in a dictionary - this method generates a
string to use as a key, based on the widget ``id``.
"""
return str(id(widget))
def __setLabelWidths(self, widgets):
"""Calculates the maximum width of all widget labels, and sets all
labels to that width.
This ensures that all labels and widgets are horizontally aligned.
"""
if len(widgets) == 0:
return
dc = wx.ClientDC(widgets[0].label)
lblWidths = [dc.GetTextExtent(w.displayName)[0] for w in widgets]
maxWidth = max(lblWidths)
for w in widgets:
w.label.SetMinSize((maxWidth + 10, -1))
w.label.SetMaxSize((maxWidth + 10, -1))
def __setColours(self):
"""Called whenever the widget list needs to be refreshed.
Makes sure that odd/even widgets and their labels have the correct
background colour.
"""
def setWidgetColours(widgDict):
for i, widg in enumerate(widgDict.values()):
if i % 2: colour = self.__oddColour
else: colour = self.__evenColour
widg.SetBackgroundColour(colour)
setWidgetColours(self.__widgets)
for group in self.__groups.values():
setWidgetColours(group.widgets)
group.parentPanel.SetBackgroundColour(self.__groupColour)
group.colPanel .SetBackgroundColour(self.__groupColour)
def __refresh(self, *args, **kwargs):
"""Updates widget colours (see :meth:`__setColours`), and lays out
the widget list.
:arg postEvent: If ``True`` (the default), a
:data:`WidgetListChangeEvent` is posted.
"""
self.__setColours()
self.FitInside()
self.Layout()
if kwargs.get('postEvent', True):
wx.PostEvent(self, WidgetListChangeEvent())
def SetColours(self, odd=None, even=None, group=None):
"""Sets the colours used on this ``WidgetList``.
Each argument is assumed to be a tuple of ``(r, g, b)`` values,
each in the range ``[0 - 255]``.
:arg odd: Background colour for widgets on odd rows.
:arg even: Background colour for widgets on even rows.
:arg group: Border/title colour for widget groups.
"""
if odd is not None: self.__oddColour = odd
if even is not None: self.__evenColour = even
if group is not None: self.__groupColour = group
self.__setColours()
def GetGroups(self):
"""Returns a list containing the name of every group in this
``WidgetList``.
"""
return list(self.__groups.keys())
def HasGroup(self, groupName):
"""Returns ``True`` if this ``WidgetList`` contains a group
with the specified name.
"""
return groupName in self.__groups
def RenameGroup(self, groupName, newDisplayName):
"""Changes the display name of the specified group.
.. note:: This method only changes the *display name* of a group,
not the group identifier name. See the :meth:`AddGroup`
method.
:arg groupName: Name of the group.
:arg newDisplayName: New display name for the group.
"""
group = self.__groups[groupName]
group.displayName = newDisplayName
group.colPanel.SetLabel(newDisplayName)
def AddGroup(self, groupName, displayName=None):
"""Add a new group to this ``WidgetList``.
A :exc:`ValueError` is raised if a group with the specified name
already exists.
:arg groupName: The name of the group - this is used as an
identifier for the group.
:arg displayName: A string to be shown in the title bar for the
group. This can be changed later via the
:meth:`RenameGroup` method.
"""
if displayName is None:
displayName = groupName
if groupName in self.__groups:
raise ValueError('A group with name {} '
'already exists'.format(groupName))
parentPanel = wx.Panel(self, style=wx.SUNKEN_BORDER)
colPanel = togglepanel.TogglePanel(parentPanel, label=displayName)
widgPanel = colPanel.GetPane()
widgSizer = wx.BoxSizer(wx.VERTICAL)
widgPanel.SetSizer(widgSizer)
gapSizer = wx.BoxSizer(wx.VERTICAL)
# A spacer exists at the top,
# and between, every group.
gapSizer.Add((-1, 5))
gapSizer.Add(parentPanel, border=10, flag=(wx.EXPAND |
wx.LEFT |
wx.RIGHT))
self.__groupSizer.Add(gapSizer, flag=wx.EXPAND)
parentSizer = wx.BoxSizer(wx.VERTICAL)
parentSizer.Add(colPanel,
border=5,
flag=wx.EXPAND | wx.BOTTOM,
proportion=0)
parentPanel.SetSizer(parentSizer)
group = _Group(groupName,
displayName,
gapSizer,
parentPanel,
colPanel,
widgPanel,
widgSizer)
self.__groups[groupName] = group
self.__refresh()
# Mouse wheel listener needed
# on all children under linux/GTK
if wx.Platform == '__WXGTK__':
parentPanel.Bind(wx.EVT_MOUSEWHEEL, self.__onMouseWheel)
colPanel .Bind(wx.EVT_MOUSEWHEEL, self.__onMouseWheel)
colPanel.Bind(togglepanel.EVT_TOGGLEPANEL_EVENT, self.__onGroupExpand)
def GetWidgets(self, groupName=None):
"""Returns a list containing all of the widgets that have been added
to this ``WidgetList``.
:arg groupName: If provided, only widgets in the specified group will
be returned. Otherwise, ungrouped widgets are returned.
"""
if groupName is None: widgDict = self.__widgets
else: widgDict = self.__groups[groupName].widgets
widgets = [w.widget for w in widgDict.values()]
return widgets
def AddWidget(self, widget, displayName, tooltip=None, groupName=None):
"""Add an arbitrary widget to the property list.
If the ``groupName`` is not provided, the widget is added to a list
of *top level* widgets, which appear at the top of the list, above
any groups. Otherwise, the widget is added to the collapsible panel
corresponding to the specified group.
A :exc:`ValueError` is raised if the widget is already contained
in the list.
:arg widget: The widget to add to the list.
:arg displayName: The widget label/display name.
:arg tooltip: A tooltip for the widget.
:arg groupName: Name of the group to which the widget should be
added.
.. note:: The provided ``widget`` may also be a :class:`wx.Sizer`
instance, although support for this is basic. Specifically,
only one level of nesting is possible, i.e. the provided
``wx.Sizer`` may not have any other ``wx.Sizer``
instances as its children.
"""
if groupName is None:
widgDict = self.__widgets
parent = self
parentSizer = self.__widgSizer
else:
group = self.__groups[groupName]
widgDict = group.widgets
parent = group.widgPanel
parentSizer = group.sizer
key = self.__makeWidgetKey(widget)
if key in widgDict:
raise ValueError('Widget {} already exists'.format(key))
widgPanel = wx.Panel(parent)
widgSizer = wx.BoxSizer(wx.HORIZONTAL)
widgPanel.SetSizer(widgSizer)
if isinstance(widget, wx.Sizer):
for child in widget.GetChildren():
child.GetWindow().Reparent(widgPanel)
else:
w, h = widget.GetBestSize().Get()
if self.__minHeight > h:
h = self.__minHeight
widget.SetMinSize( (w, h))
widget.Reparent(widgPanel)
label = wx.StaticText(widgPanel,
label=displayName,
style=wx.ALIGN_RIGHT)
widgSizer.Add(label, flag=wx.EXPAND)
widgSizer.Add(widget, flag=wx.EXPAND, proportion=1)
parentSizer.Add(widgPanel,
flag=wx.EXPAND | wx.LEFT | wx.RIGHT,
border=5)
widg = _Widget(displayName,
tooltip,
label,
widget,
widgPanel,
widgSizer)
if tooltip is not None:
widg.SetTooltip(tooltip)
# Under linux/GTK, mouse events are
# captured by child windows, so if
# we want scrolling to work, we need
# to capture scroll events on every
# child. Under OSX/cocoa, this is
# not necessary.
if wx.Platform == '__WXGTK__':
widg.Bind(wx.EVT_MOUSEWHEEL, self.__onMouseWheel)
widgDict[key] = widg
self.__setLabelWidths(list(widgDict.values()))
self.__refresh()
def __onMouseWheel(self, ev):
"""Only called if running on GTK. Scrolls the widget list according
to the mouse wheel rotation.
"""
posx, posy = self.GetViewStart()
rotation = ev.GetWheelRotation()
if rotation > 0: delta = 5
elif rotation < 0: delta = -5
else: return
if ev.GetWheelAxis() == wx.MOUSE_WHEEL_VERTICAL: posy -= delta
else: posx += delta
self.Scroll(posx, posy)
def __onGroupExpand(self, ev):
"""Called when the user expands or collapses a group. Enforces
the :data:`WL_ONE_EXPANDED` style if it is enabled, and refreshes
the panel.
"""
panel = ev.GetEventObject()
if panel.IsExpanded() and self.__oneExpanded:
for group in self.__groups.values():
if group.colPanel is not panel:
group.colPanel.Collapse()
self.__refresh()
def AddSpace(self, groupName=None):
"""Adds some empty vertical space to the widget list.
:arg groupName: Name of the group to which the space should be added.
If not specified, the space is added to the *top level*
widget list - see the :meth:`AddWidget` method.
"""
if groupName is None: parentSizer = self.__widgSizer
else: parentSizer = self.__groups[groupName].sizer
parentSizer.Add((-1, 10))
def RemoveWidget(self, widget, groupName=None):
"""Removes and destroys the specified widget from this ``WidgetList``.
:arg widget: The widget to remove.
:arg groupName: Name of the group in which the widget is contained.
"""
key = self.__makeWidgetKey(widget)
if groupName is None:
parentSizer = self.__widgSizer
widgDict = self.__widgets
else:
group = self.__groups[groupName]
parentSizer = group.sizer
widgDict = group.widgets
widg = widgDict.pop(key)
parentSizer.Detach(widg.panel)
widg.Destroy()
self.__refresh()
def RemoveGroup(self, groupName):
"""Removes the specified group, and destroys all of the widgets
contained within it.
"""
group = self.__groups.pop(groupName)
self.__groupSizer.Detach(group.gapSizer)
group.parentPanel.Destroy()
self.__refresh()
def Clear(self):
"""Removes and destroys all widgets and groups. """
for key in list(self.__widgets.keys()):
widg = self.__widgets.pop(key)
self.__widgSizer.Detach(widg.sizer)
widg.Destroy()
for group in self.GetGroups():
self.RemoveGroup(group)
self.__refresh()
def ClearGroup(self, groupName):
"""Removes and destroys all widgets in the specified group, but
does not remove the group.
"""
group = self.__groups[groupName]
group.sizer.Clear(True)
group.widgets.clear()
self.__refresh()
def GroupSize(self, groupName):
"""Returns the number of widgets that have been added to the
specified group.
"""
return len(self.__groups[groupName].widgets)
def IsExpanded(self, groupName):
"""Returns ``True`` if the panel for the specified group is currently
expanded, ``False`` if it is collapsed
"""
return self.__groups[groupName].colPanel.IsExpanded()
def Expand(self, groupName, expand=True):
"""Expands or collapses the panel for the specified group. """
panel = self.__groups[groupName].colPanel
if expand: panel.Expand()
else: panel.Collapse()
self.__refresh()
class _Widget:
"""The ``_Widget`` class is used internally by the :class:`WidgetList`
to organise references to each widget in the list.
"""
def __init__(self,
displayName,
tooltip,
label,
widget,
panel,
sizer):
self.displayName = displayName
self.tooltip = tooltip
self.label = label
self.widget = widget
self.panel = panel
self.sizer = sizer
def SetBackgroundColour(self, colour):
self.panel.SetBackgroundColour(colour)
self.label.SetBackgroundColour(colour)
def SetTooltip(self, tooltip):
self.label.SetToolTip(wx.ToolTip(tooltip))
if isinstance(self.widget, wx.Sizer):
for child in self.widget.GetChildren():
child.GetWindow().SetToolTip(wx.ToolTip(tooltip))
else:
self.widget.SetToolTip(wx.ToolTip(tooltip))
def Bind(self, evType, callback):
self.panel.Bind(evType, callback)
self.label.Bind(evType, callback)
if isinstance(self.widget, wx.Sizer):
for c in self.widget.GetChildren():
c.GetWindow().Bind(evType, callback)
else:
self.widget.Bind(evType, callback)
def Destroy(self):
self.label.Destroy()
if isinstance(self.widget, wx.Sizer):
self.widget.Clear(True)
else:
self.widget.Destroy()
class _Group:
"""The ``_Group`` class is used internally by :class:`WidgetList`
instances to represent groups of widgets that are in the list.
"""
def __init__(self,
groupName,
displayName,
gapSizer,
parentPanel,
colPanel,
widgPanel,
sizer):
self.groupName = groupName
self.displayName = displayName
self.gapSizer = gapSizer
self.parentPanel = parentPanel
self.colPanel = colPanel
self.widgPanel = widgPanel
self.sizer = sizer
self.widgets = {}
_WidgetListChangeEvent, _EVT_WL_CHANGE_EVENT = wxevent.NewEvent()
WidgetListChangeEvent = _WidgetListChangeEvent
"""Event emitted by a :class:`WidgetList` when its contents change. """
EVT_WL_CHANGE_EVENT = _EVT_WL_CHANGE_EVENT
"""Identifier for the :data:`WidgetListChangeEvent`. """
WL_ONE_EXPANDED = 1
""":class:`WidgetList` style flag. When applied, at most one group will
be expanded at any one time.
"""
| en | 0.781098 | #!/usr/bin/env python # # widgetlist.py - A widget which displays a list of groupable widgets. # # Author: <NAME> <<EMAIL>> # This module provides the :class:`WidgetList` class, which displays a list of widgets. A scrollable list of widgets. The ``WidgetList`` provides a number of features: - Widgets can be grouped. - A label can be shown next to each widget. - Widget groups can be collapsed/expanded. - Widgets and groups can be dynamically added/removed. The most important methods are: .. autosummary:: :nosignatures: AddWidget AddGroup A ``WidgetList`` looks something like this: .. image:: images/widgetlist.png :scale: 50% :align: center A ``WidgetList`` emits a :data:`WidgetListChangeEvent` whenever its contents change. Background colour for widgets on odd rows. Iniitalised in :meth:`__init__`. Background colour for widgets on even rows. Iniitalised in :meth:`__init__`. Border and title background colour for widget groups. Iniitalised in :meth:`__init__`. Create a ``WidgetList``. :arg parent: The :mod:`wx` parent object. :arg style: Passed through to ``wx.ScrolledPanel.__init__`` :arg minHeight: Minimum height of each row # The SP.__init__ method seemingly # induces a call to DoGetBestSize, # which assumes that all of the # things above exist. So we call # init after we've created those # things. Returns the best size for the widget list, with all group widgets expanded. Widgets are stored in a dictionary - this method generates a string to use as a key, based on the widget ``id``. Calculates the maximum width of all widget labels, and sets all labels to that width. This ensures that all labels/widgets line are horizontally aligned. Called whenever the widget list needs to be refreshed. Makes sure that odd/even widgets and their labels have the correct background colour. Updates widget colours (see :meth:`__setColours`), and lays out the widget list. :arg postEvent: If ``True`` (the default), a :data:`WidgetListChangeEvent` is posted. Sets the colours used on this ``WidgetList``. Each argument is assumed to be a tuple of ``(r, g, b)`` values, each in the range ``[0 - 255]``. :arg odd: Background colour for widgets on odd rows. :arg even: Background colour for widgets on even rows. :arg group: Border/title colour for widget groups. Returns a list containing the name of every group in this ``WidgetList``. Returns ``True`` if this ``WidgetList`` contains a group with the specified name. Changes the display name of the specified group. .. note:: This method only changes the *display name* of a group, not the group identifier name. See the :meth:`AddGroup` method. :arg groupName: Name of the group. :arg newDisplayName: New display name for the group. Add a new group to this ``WidgetList``. A :exc:`ValueError` is raised if a group with the specified name already exists. :arg groupName: The name of the group - this is used as an identifier for the group. :arg displayName: A string to be shown in the title bar for the group. This can be changed later via the :meth:`RenameGroup` method. # A spacer exists at the top, # and between, every group. # Mouse wheel listener needed # on all children under linux/GTK Returns a list containing all of the widgets that have been added to this ``WidgetList``. :arg groupName: If provided, only widgets in the specified group will be returned. Otherwise, ungrouped widgets are returned. Add an arbitrary widget to the property list. 
If the ``groupName`` is not provided, the widget is added to a list of *top level* widgets, which appear at the top of the list, above any groups. Otherwise, the widget is added to the collapsible panel corresponding to the specified group. A :exc:`ValueError` is raised if the widget is already contained in the list. :arg widget: The widget to add to the list. :arg displayName: The widget label/display name. :arg tooltip: A tooltip for the widget. :arg groupName: Name of the group to which the widget should be added. .. note:: The provided ``widget`` may also be a :class:`wx.Sizer` instances, although support for this is basic. Specifically, only one level of nesting is possible, i.e. the provided ``wx.Sizer`` may not have any other ``wx.Sizer`` instances as its children. # Under linux/GTK, mouse events are # captured by child windows, so if # we want scrolling to work, we need # to capture scroll events on every # child. Under OSX/cocoa, this is # not necessary. Only called if running on GTK. Scrolls the widget list according to the mouse wheel rotation. Called when the user expands or collapses a group. Enforces the :data:`WL_ONE_EXPANDED` style if it is enabled, and refreshes the panel. Adds some empty vertical space to the widget list. :arg groupName: Name of the group tio which the space should be added. If not specified, the space is added to the *top level* widget list - see the :meth:`AddWidget` method. Removes and destroys the specified widget from this ``WidgetList``. :arg widget: The widget to remove. :arg groupName: Name of the group in which the widget is contained. Removes the specified group, and destroys all of the widgets contained within it. Removes and destroys all widgets and groups. Removes and destroys all widgets in the specified group, but does not remove the group. Returns the number of widgets that have been added to the specified group. Returns ``True`` if the panel for the specified group is currently expanded, ``False`` if it is collapsed Expands or collapses the panel for the specified group. The ``_Widget`` class is used internally by the :class:`WidgetList` to organise references to each widget in the list. The ``_Group`` class is used internally by :class:`WidgetList` instances to represent groups of widgets that are in the list. Event emitted by a :class:`WidgetList` when its contents change. Identifier for the :data:`WidgetListChangeEvent`. :class:`WidgetList` style flag. When applied, at most one group will be expanded at any one time. | 3.104602 | 3 |
setup.py | TransactPRO/gw3-python-client | 1 | 7632 | #!/usr/bin/env python
import setuptools
MAINTAINER_NAME = '<NAME>'
MAINTAINER_EMAIL = '<EMAIL>'
URL_GIT = 'https://github.com/TransactPRO/gw3-python-client'
try:
import pypandoc
LONG_DESCRIPTION = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError, OSError, RuntimeError):
LONG_DESCRIPTION = ''
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Software Development :: Libraries :: Python Modules'
]
required = [
'requests',
]
setuptools.setup(
name='transactpro-gw3-client',
version='1.7.6',
description='Transact PRO Gateway3 implementation in Python.',
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
author='<NAME>',
author_email='<EMAIL>',
install_requires=required,
url=URL_GIT,
packages=setuptools.find_packages(),
license='MIT',
classifiers=CLASSIFIERS,
keywords='GW3 gateway3 integration gateway TransactPRO python python3',
python_requires='>=3.6',
)
| #!/usr/bin/env python
import setuptools
MAINTAINER_NAME = '<NAME>'
MAINTAINER_EMAIL = '<EMAIL>'
URL_GIT = 'https://github.com/TransactPRO/gw3-python-client'
try:
import pypandoc
LONG_DESCRIPTION = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError, OSError, RuntimeError):
LONG_DESCRIPTION = ''
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Software Development :: Libraries :: Python Modules'
]
required = [
'requests',
]
setuptools.setup(
name='transactpro-gw3-client',
version='1.7.6',
description='Transact PRO Gateway3 implementation in Python.',
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
author='<NAME>',
author_email='<EMAIL>',
install_requires=required,
url=URL_GIT,
packages=setuptools.find_packages(),
license='MIT',
classifiers=CLASSIFIERS,
keywords='GW3 gateway3 integration gateway TransactPRO python python3',
python_requires='>=3.6',
)
| ru | 0.26433 | #!/usr/bin/env python | 1.323531 | 1 |
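The setup.py above registers the distribution as transactpro-gw3-client (version 1.7.6, a requests dependency, Python >= 3.6). As a small usage sketch, the snippet below reads that metadata back at runtime with the standard-library importlib.metadata; it assumes the package has already been installed under exactly that distribution name and that Python 3.8+ is in use.

from importlib.metadata import metadata, requires, version

dist = "transactpro-gw3-client"          # name declared in the setup() call above
print(version(dist))                     # expected: "1.7.6"
print(metadata(dist)["Summary"])         # "Transact PRO Gateway3 implementation in Python."
print(requires(dist))                    # should include "requests"
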
social_auth_mitxpro/backends_test.py | mitodl/social-auth-mitxpro | 0 | 7633 | <filename>social_auth_mitxpro/backends_test.py
"""Tests for our backend"""
from urllib.parse import urljoin
import pytest
from social_auth_mitxpro.backends import MITxProOAuth2
# pylint: disable=redefined-outer-name
@pytest.fixture
def strategy(mocker):
"""Mock strategy"""
return mocker.Mock()
@pytest.fixture
def backend(strategy):
"""MITxProOAuth2 backend fixture"""
return MITxProOAuth2(strategy)
@pytest.mark.parametrize(
"response, expected",
[
(
{"username": "abc123", "email": "<EMAIL>", "name": "<NAME>"},
{"username": "abc123", "email": "<EMAIL>", "name": "<NAME>"},
),
({"username": "abc123"}, {"username": "abc123", "email": "", "name": ""}),
],
)
def test_get_user_details(backend, response, expected):
"""Test that get_user_details produces expected results"""
assert backend.get_user_details(response) == expected
def test_user_data(backend, strategy, mocked_responses):
"""Tests that the backend makes a correct appropriate request"""
access_token = "user_token"
api_root = "http://xpro.example.com/"
response = {"username": "abc123", "email": "<EMAIL>", "name": "<NAME>"}
mocked_responses.add(
mocked_responses.GET, urljoin(api_root, "/api/users/me"), json=response
)
settings = {"API_ROOT": api_root}
def _setting(name, *, backend, default=None): # pylint: disable=unused-argument
"""Dummy setting func"""
return settings.get(name, default)
strategy.setting.side_effect = _setting
assert backend.user_data(access_token) == response
request, _ = mocked_responses.calls[0]
assert request.headers["Authorization"] == "Bearer user_token"
strategy.setting.assert_any_call("API_ROOT", default=None, backend=backend)
def test_authorization_url(backend, strategy):
"""Test authorization_url()"""
strategy.setting.return_value = "abc"
assert backend.authorization_url() == "abc"
strategy.setting.assert_called_once_with(
"AUTHORIZATION_URL", default=None, backend=backend
)
def test_access_token_url(backend, strategy):
"""Test access_token_url()"""
strategy.setting.return_value = "abc"
assert backend.access_token_url() == "abc"
strategy.setting.assert_called_once_with(
"ACCESS_TOKEN_URL", default=None, backend=backend
)
| <filename>social_auth_mitxpro/backends_test.py
"""Tests for our backend"""
from urllib.parse import urljoin
import pytest
from social_auth_mitxpro.backends import MITxProOAuth2
# pylint: disable=redefined-outer-name
@pytest.fixture
def strategy(mocker):
"""Mock strategy"""
return mocker.Mock()
@pytest.fixture
def backend(strategy):
"""MITxProOAuth2 backend fixture"""
return MITxProOAuth2(strategy)
@pytest.mark.parametrize(
"response, expected",
[
(
{"username": "abc123", "email": "<EMAIL>", "name": "<NAME>"},
{"username": "abc123", "email": "<EMAIL>", "name": "<NAME>"},
),
({"username": "abc123"}, {"username": "abc123", "email": "", "name": ""}),
],
)
def test_get_user_details(backend, response, expected):
"""Test that get_user_details produces expected results"""
assert backend.get_user_details(response) == expected
def test_user_data(backend, strategy, mocked_responses):
"""Tests that the backend makes a correct appropriate request"""
access_token = "user_token"
api_root = "http://xpro.example.com/"
response = {"username": "abc123", "email": "<EMAIL>", "name": "<NAME>"}
mocked_responses.add(
mocked_responses.GET, urljoin(api_root, "/api/users/me"), json=response
)
settings = {"API_ROOT": api_root}
def _setting(name, *, backend, default=None): # pylint: disable=unused-argument
"""Dummy setting func"""
return settings.get(name, default)
strategy.setting.side_effect = _setting
assert backend.user_data(access_token) == response
request, _ = mocked_responses.calls[0]
assert request.headers["Authorization"] == "Bearer user_token"
strategy.setting.assert_any_call("API_ROOT", default=None, backend=backend)
def test_authorization_url(backend, strategy):
"""Test authorization_url()"""
strategy.setting.return_value = "abc"
assert backend.authorization_url() == "abc"
strategy.setting.assert_called_once_with(
"AUTHORIZATION_URL", default=None, backend=backend
)
def test_access_token_url(backend, strategy):
"""Test access_token_url()"""
strategy.setting.return_value = "abc"
assert backend.access_token_url() == "abc"
strategy.setting.assert_called_once_with(
"ACCESS_TOKEN_URL", default=None, backend=backend
)
| en | 0.588228 | Tests for our backend # pylint: disable=redefined-outer-name Mock strategy MITxProOAuth2 backend fixture Test that get_user_details produces expected results Tests that the backend makes a correct appropriate request # pylint: disable=unused-argument Dummy setting func Test authorization_url() Test access_token_url() | 2.413482 | 2 |
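The tests above pin down the backend's observable contract: get_user_details backfills missing email/name with empty strings, user_data issues a GET to <API_ROOT>/api/users/me with a bearer token, and the OAuth URLs come from strategy settings. The sketch below exercises just the first of those outside pytest; it mirrors the fixture by passing a plain Mock as the strategy, and the sample dict is an illustrative value, not data from the record.

from unittest import mock
# Assumes social-core and this package are installed.
from social_auth_mitxpro.backends import MITxProOAuth2

backend = MITxProOAuth2(mock.Mock())     # same construction as the pytest fixture above
details = backend.get_user_details({"username": "abc123"})
print(details)                           # {"username": "abc123", "email": "", "name": ""}
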
Scripts/simulation/careers/detective/detective_crime_scene.py | velocist/TS4CheatsInfo | 0 | 7634 | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\careers\detective\detective_crime_scene.py
# Compiled at: 2015-02-08 03:00:54
# Size of source mod 2**32: 1608 bytes
from careers.career_event_zone_director import CareerEventZoneDirector
import sims4.log
logger = sims4.log.Logger('Crime Scene', default_owner='bhill')
class CrimeSceneZoneDirector(CareerEventZoneDirector):
def __init__(self, *args, **kwargs):
(super().__init__)(*args, **kwargs)
self._should_load_sims = False
def _load_custom_zone_director(self, zone_director_proto, reader):
self._should_load_sims = True
super()._load_custom_zone_director(zone_director_proto, reader)
def _on_maintain_zone_saved_sim(self, sim_info):
if self._should_load_sims:
super()._on_maintain_zone_saved_sim(sim_info)
else:
logger.info('Discarding saved sim: {}', sim_info)
def _process_injected_sim(self, sim_info):
logger.info('Discarding injected sim: {}', sim_info) | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\careers\detective\detective_crime_scene.py
# Compiled at: 2015-02-08 03:00:54
# Size of source mod 2**32: 1608 bytes
from careers.career_event_zone_director import CareerEventZoneDirector
import sims4.log
logger = sims4.log.Logger('Crime Scene', default_owner='bhill')
class CrimeSceneZoneDirector(CareerEventZoneDirector):
def __init__(self, *args, **kwargs):
(super().__init__)(*args, **kwargs)
self._should_load_sims = False
def _load_custom_zone_director(self, zone_director_proto, reader):
self._should_load_sims = True
super()._load_custom_zone_director(zone_director_proto, reader)
def _on_maintain_zone_saved_sim(self, sim_info):
if self._should_load_sims:
super()._on_maintain_zone_saved_sim(sim_info)
else:
logger.info('Discarding saved sim: {}', sim_info)
def _process_injected_sim(self, sim_info):
logger.info('Discarding injected sim: {}', sim_info) | en | 0.517288 | # uncompyle6 version 3.7.4 # Python bytecode 3.7 (3394) # Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)] # Embedded file name: T:\InGame\Gameplay\Scripts\Server\careers\detective\detective_crime_scene.py # Compiled at: 2015-02-08 03:00:54 # Size of source mod 2**32: 1608 bytes | 1.94666 | 2 |
classifier/interpretation_exp.py | methylgrammarlab/proj_scwgbs | 0 | 7635 | <reponame>methylgrammarlab/proj_scwgbs
"""
Code adapted from https://github.com/ohlerlab/DeepRiPe with changes
Extract information and graphs from the Integrated gradients output
"""
import argparse
import os
import sys
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from classifier.plotseqlogo import seqlogo_fig
from commons import files_tools
sns.set()
sns.set_style('whitegrid')
def parse_input():
parser = argparse.ArgumentParser()
parser.add_argument('--interpretation_file', help='path for the input file', required=True)
parser.add_argument('--output_folder', help='Path of the output folder', required=False,
default=os.path.dirname(sys.argv[0]))
args = parser.parse_args()
return args
def plot_one_seq(seq, output, title, yl=None):
fig = seqlogo_fig(seq[:, :], vocab="DNA", yl=yl, figsize=(20, 4), ncol=1, plot_name=title)
fig.savefig(output)
plt.close()
def plot_multi_seq(sequences_dict, number_of_seq, output_folder=None):
"""
Plot the multiple sequences in one figure
:param sequences_dict: A dictionary with pl or cl as key and the integrated values results for each
sequence in this label
:param number_of_seq: number of sequences in one figure
:param output_folder: Output folder
"""
for k in sequences_dict:
ex_seq = sequences_dict[k][:number_of_seq]
fig = seqlogo_fig(np.transpose(ex_seq[:, :, :], axes=(1, 2, 0)), vocab="DNA",
figsize=(8, 4), ncol=1, yl=0.1,
plot_name="seq for top %s of type %s" % (number_of_seq, k))
if output_folder:
fig.savefig(os.path.join(output_folder, "seq_for_top_%s_of_type_%s" % (number_of_seq, k)))
else:
plt.show()
plt.close()
def plot_avg_sequence(sequences_dict, output_folder=None):
"""
    Plot the average sequence across the central 30 positions and across the full sequence
:param sequences_dict: A dictionary with pl or cl as key and the integrated values results for each
sequence in this label
:param output_folder: Output folder
"""
for k in sequences_dict:
ex_seq = sequences_dict[k]
mean_seq = np.transpose(np.mean(ex_seq[:, 60:90, :], axis=0).reshape(1, 30, 4), axes=(1, 2, 0))
name = k
fig = seqlogo_fig(mean_seq, vocab="DNA", figsize=(20, 4), ncol=1,
plot_name="Average attribution score for prediction %s" % name)
ax = fig.axes[0]
ax.set_title("Average sequence for prediction %s" % name, fontsize=16)
if output_folder:
fig.savefig(os.path.join(output_folder, "Avg_seq_for_%s30.png" % k))
else:
plt.show()
plt.close()
for k in sequences_dict:
ex_seq = sequences_dict[k]
mean_seq = np.transpose(np.mean(ex_seq[:, :, :], axis=0).reshape(1, 150, 4), axes=(1, 2, 0))
fig = seqlogo_fig(mean_seq, vocab="DNA", figsize=(20, 4), ncol=1,
plot_name="Avg seq for %s" % k)
if output_folder:
fig.savefig(os.path.join(output_folder, "Avg_seq_for_%s.png" % k))
else:
plt.show()
plt.close()
def plot_avg_sequence_sw(sequences_dict, output_folder=None):
"""
plot the avg sequence using SW, flatten the AT to W and CG to S
:param sequences_dict: A dictionary with pl or cl as key and the integrated values results for each
sequence in this label
:param output_folder: Output folder
"""
for k in sequences_dict:
ex_seq = sequences_dict[k]
mean_seq = np.transpose(np.mean(ex_seq[:, 60:90, :], axis=0).reshape(1, 30, 4), axes=(1, 2, 0))
new_seq = np.zeros_like(mean_seq)
for i in range(mean_seq.shape[0]):
new_seq[i][0] = mean_seq[i][0] + mean_seq[i][3]
new_seq[i][1] = mean_seq[i][1] + mean_seq[i][2]
fig = seqlogo_fig(new_seq, vocab="DNAWS", figsize=(20, 4), ncol=1, plot_name="Avg seq for %s" % k)
if output_folder:
fig.savefig(os.path.join(output_folder, "Avg_seq_for_%s_sw30.png" % k))
else:
plt.show()
for k in sequences_dict:
ex_seq = sequences_dict[k]
mean_seq = np.transpose(np.mean(ex_seq[:, :, :], axis=0).reshape(1, 150, 4), axes=(1, 2, 0))
new_seq = np.zeros_like(mean_seq)
for i in range(mean_seq.shape[0]):
new_seq[i][0] = mean_seq[i][0] + mean_seq[i][3]
new_seq[i][1] = mean_seq[i][1] + mean_seq[i][2]
fig = seqlogo_fig(new_seq, vocab="DNAWS", figsize=(20, 4), ncol=1, plot_name="Avg seq for %s" % k)
if output_folder:
fig.savefig(os.path.join(output_folder, "Avg_seq_for_%s_sw.png" % k))
else:
plt.show()
plt.close()
def plot_avg_sequence_sw_flatten_values(sequences_dict, output_folder=None):
"""
plot the avg sequence using SW, flatten the AT to W and CG to S and combining both options to get one
number per sequence place
:param sequences_dict: A dictionary with pl or cl as key and the integrated values results for each
sequence in this label
:param output_folder: Output folder
"""
for k in sequences_dict:
ex_seq = sequences_dict[k]
mean_seq = np.transpose(np.mean(ex_seq[:, 60:90, :], axis=0).reshape(1, 30, 4), axes=(1, 2, 0))
new_seq = np.zeros_like(mean_seq)
for i in range(mean_seq.shape[0]):
w = mean_seq[i][0] + mean_seq[i][3]
s = mean_seq[i][1] + mean_seq[i][2]
delta = s - w
sw_index = 1 if delta > 0 else 0
new_seq[i][sw_index] = abs(delta)
fig = seqlogo_fig(new_seq, vocab="DNAWS", figsize=(8, 4), ncol=1, plot_name="Avg seq for %s" % k)
if output_folder:
fig.savefig(os.path.join(output_folder, "Avg_seq_for_%s_sw30_flatten.png" % k))
else:
fig.show()
for k in sequences_dict:
ex_seq = sequences_dict[k]
mean_seq = np.transpose(np.mean(ex_seq[:, :, :], axis=0).reshape(1, 150, 4), axes=(1, 2, 0))
new_seq = np.zeros_like(mean_seq)
for i in range(mean_seq.shape[0]):
w = mean_seq[i][0] + mean_seq[i][3]
s = mean_seq[i][1] + mean_seq[i][2]
delta = s - w
sw_index = 1 if delta > 0 else 0
new_seq[i][sw_index] = abs(delta)
fig = seqlogo_fig(new_seq, vocab="DNAWS", figsize=(20, 4), ncol=1, plot_name="Avg seq for %s" % k)
if output_folder:
fig.savefig(os.path.join(output_folder, "Avg_seq_for_%s_sw_flatten.png" % k))
else:
plt.show()
plt.close()
def plot_distance_weight_two_sides(sequences_dict, output_folder=None):
"""
Plot the integrated gradient value of each feature based on distance from center, two ways graph(-74->74)
We wanted to see if there are indexes and some periodicity
:param sequences_dict: A dictionary with pl or cl as key and the integrated values results for each
sequence in this label
:param output_folder: Output folder
"""
for k in sequences_dict:
class_type = k
ex_seq = np.abs(sequences_dict[k])
mean_seq = np.transpose(np.mean(ex_seq[:, :, :], axis=0).reshape(1, 150, 4), axes=(1, 2, 0))
seq_weight = np.sum(mean_seq, axis=1)
middle = int(seq_weight.shape[0] / 2) - 1
seq_weight[middle] = None
seq_weight[middle + 1] = None
        x = np.arange(-74, 1).astype(int)
x = np.append(x, x[::-1] * -1)
x_ticks = [i for i in range(-70, 80, 10)]
plt.xticks(x_ticks)
plt.plot(x, seq_weight, '.-')
plt.legend()
plt.grid(axis="y")
plt.xlabel("Distance from CpG Site", fontsize=12)
plt.ylabel("Attribute score", fontsize=12)
plt.title("Attribute score base on distance from CpG site for %s" % class_type, fontsize=14)
if output_folder:
plt.savefig(
os.path.join(output_folder,
"distance_importance_of_flanking_letters_type_%s_two_way.png" % k))
else:
plt.show()
plt.close()
def plot_distance_weight_one_side(sequences_dict, output_folder=None):
"""
Plot the integrated gradient value of each feature based on distance from center, one way graph (0->74)
We wanted to see if there are indexes and some periodicity
:param sequences_dict: A dictionary with pl or cl as key and the integrated values results for each
sequence in this label
:param output_folder: Output folder
"""
for k in sequences_dict:
class_type = k
ex_seq = np.abs(sequences_dict[k])
mean_seq = np.transpose(np.mean(ex_seq[:, :, :], axis=0).reshape(1, 150, 4), axes=(1, 2, 0))
seq_weight = np.sum(mean_seq, axis=1)
std_seq = np.std(mean_seq, axis=1)
middle = int(seq_weight.shape[0] / 2) - 1
seq_to_values = np.flip(seq_weight[:middle])
seq_from_values = seq_weight[middle + 2:]
seq_to_std = np.flip(std_seq[:middle])
seq_from_std = std_seq[middle + 2:]
x = np.arange(1, seq_from_values.shape[0] + 1)
plt.errorbar(x, seq_to_values, seq_to_std, marker='^', label="to", alpha=0.5)
plt.errorbar(x, seq_from_values, seq_from_std, marker='^', label="from", alpha=0.5)
plt.legend()
x_ticks = [i for i in range(1, 5)] + [i for i in range(5, 75, 5)]
plt.xticks(x_ticks)
plt.xlabel("Distance from CG")
plt.ylabel("Importance shannon values")
plt.title("Importance of flanking letters - %s" % (class_type))
if output_folder:
plt.savefig(os.path.join(output_folder,
"distance_importance_of_flanking_letters_type_%s_one_way.png" % k))
else:
plt.show()
plt.close()
def print_each_seq(sequences_dict, output_folder):
"""
    Plot all the sequences one after the other
:param sequences_dict: A dictionary with pl or cl as key and the integrated values results for each
sequence in this label
:param output_folder: Output folder
"""
cl_list = []
pl_list = []
# Remove duplicates
seq = None
for i in range(sequences_dict["cl"].shape[0]):
new_seq = sequences_dict["cl"][i]
if np.all(new_seq == seq):
continue
else:
cl_list.append(new_seq)
seq = new_seq
seq = None
for i in range(sequences_dict["pl"].shape[0]):
new_seq = sequences_dict["pl"][i]
if np.all(new_seq == seq):
continue
else:
pl_list.append(new_seq)
seq = new_seq
for i in range(1000):
plot_one_seq(seq=cl_list[i], output=os.path.join(output_folder, "cl_seq_%s.png" % i),
title="CL seq num %s" % i, yl=0.1)
for i in range(1000):
plot_one_seq(seq=pl_list[i], output=os.path.join(output_folder, "pl_seq_%s.png" % i),
title="PL seq num %s" % i, yl=0.1)
def main():
args = parse_input()
ex_seq_d = files_tools.load_pickle(args.interpretation_file)
new_d = {"cl": ex_seq_d["cl"], "pl": ex_seq_d["pl"]}
plot_distance_weight_one_side(new_d, args.output_folder)
plot_distance_weight_two_sides(new_d, args.output_folder)
plot_multi_seq(new_d, 1000, args.output_folder)
plot_avg_sequence(new_d, args.output_folder)
plot_avg_sequence_sw(new_d, args.output_folder)
plot_avg_sequence_sw_flatten_values(new_d, args.output_folder)
print_each_seq(new_d, args.output_folder)
if __name__ == '__main__':
main()
| """
Code adapted from https://github.com/ohlerlab/DeepRiPe with changes
Extract information and graphs from the Integrated gradients output
"""
import argparse
import os
import sys
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from classifier.plotseqlogo import seqlogo_fig
from commons import files_tools
sns.set()
sns.set_style('whitegrid')
def parse_input():
parser = argparse.ArgumentParser()
parser.add_argument('--interpretation_file', help='path for the input file', required=True)
parser.add_argument('--output_folder', help='Path of the output folder', required=False,
default=os.path.dirname(sys.argv[0]))
args = parser.parse_args()
return args
def plot_one_seq(seq, output, title, yl=None):
fig = seqlogo_fig(seq[:, :], vocab="DNA", yl=yl, figsize=(20, 4), ncol=1, plot_name=title)
fig.savefig(output)
plt.close()
def plot_multi_seq(sequences_dict, number_of_seq, output_folder=None):
"""
Plot the multiple sequences in one figure
:param sequences_dict: A dictionary with pl or cl as key and the integrated values results for each
sequence in this label
:param number_of_seq: number of sequences in one figure
:param output_folder: Output folder
"""
for k in sequences_dict:
ex_seq = sequences_dict[k][:number_of_seq]
fig = seqlogo_fig(np.transpose(ex_seq[:, :, :], axes=(1, 2, 0)), vocab="DNA",
figsize=(8, 4), ncol=1, yl=0.1,
plot_name="seq for top %s of type %s" % (number_of_seq, k))
if output_folder:
fig.savefig(os.path.join(output_folder, "seq_for_top_%s_of_type_%s" % (number_of_seq, k)))
else:
plt.show()
plt.close()
def plot_avg_sequence(sequences_dict, output_folder=None):
"""
    Plot the average sequence across the central 30 positions and across the full sequence
:param sequences_dict: A dictionary with pl or cl as key and the integrated values results for each
sequence in this label
:param output_folder: Output folder
"""
for k in sequences_dict:
ex_seq = sequences_dict[k]
mean_seq = np.transpose(np.mean(ex_seq[:, 60:90, :], axis=0).reshape(1, 30, 4), axes=(1, 2, 0))
name = k
fig = seqlogo_fig(mean_seq, vocab="DNA", figsize=(20, 4), ncol=1,
plot_name="Average attribution score for prediction %s" % name)
ax = fig.axes[0]
ax.set_title("Average sequence for prediction %s" % name, fontsize=16)
if output_folder:
fig.savefig(os.path.join(output_folder, "Avg_seq_for_%s30.png" % k))
else:
plt.show()
plt.close()
for k in sequences_dict:
ex_seq = sequences_dict[k]
mean_seq = np.transpose(np.mean(ex_seq[:, :, :], axis=0).reshape(1, 150, 4), axes=(1, 2, 0))
fig = seqlogo_fig(mean_seq, vocab="DNA", figsize=(20, 4), ncol=1,
plot_name="Avg seq for %s" % k)
if output_folder:
fig.savefig(os.path.join(output_folder, "Avg_seq_for_%s.png" % k))
else:
plt.show()
plt.close()
def plot_avg_sequence_sw(sequences_dict, output_folder=None):
"""
plot the avg sequence using SW, flatten the AT to W and CG to S
:param sequences_dict: A dictionary with pl or cl as key and the integrated values results for each
sequence in this label
:param output_folder: Output folder
"""
for k in sequences_dict:
ex_seq = sequences_dict[k]
mean_seq = np.transpose(np.mean(ex_seq[:, 60:90, :], axis=0).reshape(1, 30, 4), axes=(1, 2, 0))
new_seq = np.zeros_like(mean_seq)
for i in range(mean_seq.shape[0]):
new_seq[i][0] = mean_seq[i][0] + mean_seq[i][3]
new_seq[i][1] = mean_seq[i][1] + mean_seq[i][2]
fig = seqlogo_fig(new_seq, vocab="DNAWS", figsize=(20, 4), ncol=1, plot_name="Avg seq for %s" % k)
if output_folder:
fig.savefig(os.path.join(output_folder, "Avg_seq_for_%s_sw30.png" % k))
else:
plt.show()
for k in sequences_dict:
ex_seq = sequences_dict[k]
mean_seq = np.transpose(np.mean(ex_seq[:, :, :], axis=0).reshape(1, 150, 4), axes=(1, 2, 0))
new_seq = np.zeros_like(mean_seq)
for i in range(mean_seq.shape[0]):
new_seq[i][0] = mean_seq[i][0] + mean_seq[i][3]
new_seq[i][1] = mean_seq[i][1] + mean_seq[i][2]
fig = seqlogo_fig(new_seq, vocab="DNAWS", figsize=(20, 4), ncol=1, plot_name="Avg seq for %s" % k)
if output_folder:
fig.savefig(os.path.join(output_folder, "Avg_seq_for_%s_sw.png" % k))
else:
plt.show()
plt.close()
def plot_avg_sequence_sw_flatten_values(sequences_dict, output_folder=None):
"""
plot the avg sequence using SW, flatten the AT to W and CG to S and combining both options to get one
number per sequence place
:param sequences_dict: A dictionary with pl or cl as key and the integrated values results for each
sequence in this label
:param output_folder: Output folder
"""
for k in sequences_dict:
ex_seq = sequences_dict[k]
mean_seq = np.transpose(np.mean(ex_seq[:, 60:90, :], axis=0).reshape(1, 30, 4), axes=(1, 2, 0))
new_seq = np.zeros_like(mean_seq)
for i in range(mean_seq.shape[0]):
w = mean_seq[i][0] + mean_seq[i][3]
s = mean_seq[i][1] + mean_seq[i][2]
delta = s - w
sw_index = 1 if delta > 0 else 0
new_seq[i][sw_index] = abs(delta)
fig = seqlogo_fig(new_seq, vocab="DNAWS", figsize=(8, 4), ncol=1, plot_name="Avg seq for %s" % k)
if output_folder:
fig.savefig(os.path.join(output_folder, "Avg_seq_for_%s_sw30_flatten.png" % k))
else:
fig.show()
for k in sequences_dict:
ex_seq = sequences_dict[k]
mean_seq = np.transpose(np.mean(ex_seq[:, :, :], axis=0).reshape(1, 150, 4), axes=(1, 2, 0))
new_seq = np.zeros_like(mean_seq)
for i in range(mean_seq.shape[0]):
w = mean_seq[i][0] + mean_seq[i][3]
s = mean_seq[i][1] + mean_seq[i][2]
delta = s - w
sw_index = 1 if delta > 0 else 0
new_seq[i][sw_index] = abs(delta)
fig = seqlogo_fig(new_seq, vocab="DNAWS", figsize=(20, 4), ncol=1, plot_name="Avg seq for %s" % k)
if output_folder:
fig.savefig(os.path.join(output_folder, "Avg_seq_for_%s_sw_flatten.png" % k))
else:
plt.show()
plt.close()
def plot_distance_weight_two_sides(sequences_dict, output_folder=None):
"""
Plot the integrated gradient value of each feature based on distance from center, two ways graph(-74->74)
We wanted to see if there are indexes and some periodicity
:param sequences_dict: A dictionary with pl or cl as key and the integrated values results for each
sequence in this label
:param output_folder: Output folder
"""
for k in sequences_dict:
class_type = k
ex_seq = np.abs(sequences_dict[k])
mean_seq = np.transpose(np.mean(ex_seq[:, :, :], axis=0).reshape(1, 150, 4), axes=(1, 2, 0))
seq_weight = np.sum(mean_seq, axis=1)
middle = int(seq_weight.shape[0] / 2) - 1
seq_weight[middle] = None
seq_weight[middle + 1] = None
        x = np.arange(-74, 1).astype(int)
x = np.append(x, x[::-1] * -1)
x_ticks = [i for i in range(-70, 80, 10)]
plt.xticks(x_ticks)
plt.plot(x, seq_weight, '.-')
plt.legend()
plt.grid(axis="y")
plt.xlabel("Distance from CpG Site", fontsize=12)
plt.ylabel("Attribute score", fontsize=12)
plt.title("Attribute score base on distance from CpG site for %s" % class_type, fontsize=14)
if output_folder:
plt.savefig(
os.path.join(output_folder,
"distance_importance_of_flanking_letters_type_%s_two_way.png" % k))
else:
plt.show()
plt.close()
def plot_distance_weight_one_side(sequences_dict, output_folder=None):
"""
Plot the integrated gradient value of each feature based on distance from center, one way graph (0->74)
We wanted to see if there are indexes and some periodicity
:param sequences_dict: A dictionary with pl or cl as key and the integrated values results for each
sequence in this label
:param output_folder: Output folder
"""
for k in sequences_dict:
class_type = k
ex_seq = np.abs(sequences_dict[k])
mean_seq = np.transpose(np.mean(ex_seq[:, :, :], axis=0).reshape(1, 150, 4), axes=(1, 2, 0))
seq_weight = np.sum(mean_seq, axis=1)
std_seq = np.std(mean_seq, axis=1)
middle = int(seq_weight.shape[0] / 2) - 1
seq_to_values = np.flip(seq_weight[:middle])
seq_from_values = seq_weight[middle + 2:]
seq_to_std = np.flip(std_seq[:middle])
seq_from_std = std_seq[middle + 2:]
x = np.arange(1, seq_from_values.shape[0] + 1)
plt.errorbar(x, seq_to_values, seq_to_std, marker='^', label="to", alpha=0.5)
plt.errorbar(x, seq_from_values, seq_from_std, marker='^', label="from", alpha=0.5)
plt.legend()
x_ticks = [i for i in range(1, 5)] + [i for i in range(5, 75, 5)]
plt.xticks(x_ticks)
plt.xlabel("Distance from CG")
plt.ylabel("Importance shannon values")
plt.title("Importance of flanking letters - %s" % (class_type))
if output_folder:
plt.savefig(os.path.join(output_folder,
"distance_importance_of_flanking_letters_type_%s_one_way.png" % k))
else:
plt.show()
plt.close()
def print_each_seq(sequences_dict, output_folder):
"""
    Plot all the sequences one after the other
:param sequences_dict: A dictionary with pl or cl as key and the integrated values results for each
sequence in this label
:param output_folder: Output folder
"""
cl_list = []
pl_list = []
# Remove duplicates
seq = None
for i in range(sequences_dict["cl"].shape[0]):
new_seq = sequences_dict["cl"][i]
if np.all(new_seq == seq):
continue
else:
cl_list.append(new_seq)
seq = new_seq
seq = None
for i in range(sequences_dict["pl"].shape[0]):
new_seq = sequences_dict["pl"][i]
if np.all(new_seq == seq):
continue
else:
pl_list.append(new_seq)
seq = new_seq
for i in range(1000):
plot_one_seq(seq=cl_list[i], output=os.path.join(output_folder, "cl_seq_%s.png" % i),
title="CL seq num %s" % i, yl=0.1)
for i in range(1000):
plot_one_seq(seq=pl_list[i], output=os.path.join(output_folder, "pl_seq_%s.png" % i),
title="PL seq num %s" % i, yl=0.1)
def main():
args = parse_input()
ex_seq_d = files_tools.load_pickle(args.interpretation_file)
new_d = {"cl": ex_seq_d["cl"], "pl": ex_seq_d["pl"]}
plot_distance_weight_one_side(new_d, args.output_folder)
plot_distance_weight_two_sides(new_d, args.output_folder)
plot_multi_seq(new_d, 1000, args.output_folder)
plot_avg_sequence(new_d, args.output_folder)
plot_avg_sequence_sw(new_d, args.output_folder)
plot_avg_sequence_sw_flatten_values(new_d, args.output_folder)
print_each_seq(new_d, args.output_folder)
if __name__ == '__main__':
main() | en | 0.862805 | Code adapted from https://github.com/ohlerlab/DeepRiPe with changes Extract information and graphs from the Integrated gradients output Plot the multiple sequences in one figure :param sequences_dict: A dictionary with pl or cl as key and the integrated values results for each sequence in this label :param number_of_seq: number of sequences in one figure :param output_folder: Output folder Plot the average sequence across 30 letters and all the sequence :param sequences_dict: A dictionary with pl or cl as key and the integrated values results for each sequence in this label :param output_folder: Output folder plot the avg sequence using SW, flatten the AT to W and CG to S :param sequences_dict: A dictionary with pl or cl as key and the integrated values results for each sequence in this label :param output_folder: Output folder plot the avg sequence using SW, flatten the AT to W and CG to S and combining both options to get one number per sequence place :param sequences_dict: A dictionary with pl or cl as key and the integrated values results for each sequence in this label :param output_folder: Output folder Plot the integrated gradient value of each feature based on distance from center, two ways graph(-74->74) We wanted to see if there are indexes and some periodicity :param sequences_dict: A dictionary with pl or cl as key and the integrated values results for each sequence in this label :param output_folder: Output folder Plot the integrated gradient value of each feature based on distance from center, one way graph (0->74) We wanted to see if there are indexes and some periodicity :param sequences_dict: A dictionary with pl or cl as key and the integrated values results for each sequence in this label :param output_folder: Output folder Plot all the sequences on after the other :param sequences_dict: A dictionary with pl or cl as key and the integrated values results for each sequence in this label :param output_folder: Output folder # Remove duplicates | 2.687476 | 3 |
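The docstrings above assume attribution tensors of shape (num_sequences, 150, 4) stored under the keys "cl" and "pl". The numpy-only sketch below rebuilds the per-position weight used in plot_distance_weight_two_sides on synthetic data, so it runs without the repository's plotting modules; the random arrays are placeholders rather than real integrated-gradient output.

import numpy as np

# Placeholder attribution tensors: 100 sequences x 150 positions x 4 DNA letters.
sequences_dict = {
    "cl": np.random.normal(0, 0.01, (100, 150, 4)),
    "pl": np.random.normal(0, 0.01, (100, 150, 4)),
}

for label, ex_seq in sequences_dict.items():
    # Same reduction as plot_distance_weight_two_sides: absolute value, mean over
    # sequences, then a sum over the four letter channels at each position.
    mean_seq = np.mean(np.abs(ex_seq), axis=0)         # shape (150, 4)
    seq_weight = mean_seq.sum(axis=1)                  # shape (150,)
    middle = seq_weight.shape[0] // 2 - 1              # position of the masked CpG site
    print(label, seq_weight[middle - 2: middle + 4].round(4))
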
scripts/pythonutils/autorepr.py | shulinye/dotfiles | 2 | 7636 | <reponame>shulinye/dotfiles<filename>scripts/pythonutils/autorepr.py
#!/usr/bin/python3
from collections import OrderedDict
from functools import partial
from ordered_set import OrderedSet
import inspect
import itertools
import types
from .utils import walk_getattr
__all__ = ['autoinit', 'autorepr', 'TotalCompareByKey']
def autoinit(obj=None, *args, params=None, **kwargs):
"""Takes __slots__ and _slots and writes an __init__
Can be used as a class decorator, or by setting
__init__ = autoinit"""
if obj is None: return partial(autoinit, params=params)
if params:
pass
elif hasattr(obj, '__slots__'):
params = OrderedSet(itertools.chain.from_iterable(walk_getattr(obj, '__slots__')))
elif hasattr(obj, '_slots'):
params = OrderedSet(itertools.chain.from_iterable(walk_getattr(obj, '_slots')))
else:
raise RuntimeError("Can't autocreate __init__, please supply '__slots__' or '_slots'")
if inspect.isclass(obj): #I'm being used as a decorator
s = ["def __init__(self,{}):".format(", ".join(i for i in params))]
s.extend("self.{0} = {0}".format(i) for i in params)
scope = {}
exec('\n '.join(s), scope)
setattr(obj, '__init__', scope['__init__'])
return obj
else:
signature = inspect.Signature(inspect.Parameter(i, inspect.Parameter.POSITIONAL_OR_KEYWORD) for i in params)
signature.bind(*args, **kwargs)
for p, val in itertools.chain(zip(params, args), kwargs.items()):
setattr(obj, p, val)
def autorepr(obj=None, *, params=None):
"""Function that automagically gives you a __repr__.
If no params are given, uses __slots__, _slots, and at last resort,
inspects __init__
Can be used as a class decorator or by setting
__repr__ = autorepr"""
if obj is None: return partial(autorepr, params = params)
discard_first = False
if params:
pass
elif hasattr(obj, '__slots__'):
params = OrderedSet(itertools.chain.from_iterable(walk_getattr(obj, '__slots__')))
elif hasattr(obj, '_slots'):
params = OrderedSet(itertools.chain.from_iterable(walk_getattr(obj, '_slots')))
else:
sig = inspect.signature(obj.__init__)
params = sig.parameters
discard_first = True
if inspect.isclass(obj): #I'm being used as a decorator
if discard_first: params = list(params)[1:] #drop the first argument, that's self
s = ["def __repr__(self):\n return '%s(" + ", ".join(["%s=%r"]*(len(params)))]
s.append(")' % (self.__class__.__name__, ")
s.append(', '.join("'{0}', self.{0}".format(i) for i in params) + ')')
scope = {}
exec("".join(s), scope)
setattr(obj, '__repr__', scope['__repr__'])
return obj
else: #Being a normal function here :P
return "%s(%s)" % (obj.__class__.__name__, ", ".join("%s=%r" % (i, getattr(obj,i)) for i in params))
class TotalCompareByKey(object):
"""Writes all comparison methods using one key"""
__slots__ = ['key', 'check_type']
def __init__(self, key, *, check_type=True):
self.key = key
self.check_type = check_type
def __call__(self, cls):
orderings = {'__lt__': '<',
'__le__': '<=',
'__gt__': '>',
'__ge__': '>=',
'__eq__': '==',
'__ne__': '!='}
for dunder, symbol in orderings.items():
if dunder in cls.__dict__: continue
s = ["def {dunder}(self, other):".format(dunder=dunder)]
if self.check_type:
s.append("if not isinstance(other, self.__class__):")
s.append(" return NotImplemented")
s.append("return self.{k} {symbol} other.{k}".format(k=self.key, symbol=symbol))
scope = {}
exec("\n ".join(s), scope)
setattr(cls, dunder, scope[dunder])
return cls
| #!/usr/bin/python3
from collections import OrderedDict
from functools import partial
from ordered_set import OrderedSet
import inspect
import itertools
import types
from .utils import walk_getattr
__all__ = ['autoinit', 'autorepr', 'TotalCompareByKey']
def autoinit(obj=None, *args, params=None, **kwargs):
"""Takes __slots__ and _slots and writes an __init__
Can be used as a class decorator, or by setting
__init__ = autoinit"""
if obj is None: return partial(autoinit, params=params)
if params:
pass
elif hasattr(obj, '__slots__'):
params = OrderedSet(itertools.chain.from_iterable(walk_getattr(obj, '__slots__')))
elif hasattr(obj, '_slots'):
params = OrderedSet(itertools.chain.from_iterable(walk_getattr(obj, '_slots')))
else:
raise RuntimeError("Can't autocreate __init__, please supply '__slots__' or '_slots'")
if inspect.isclass(obj): #I'm being used as a decorator
s = ["def __init__(self,{}):".format(", ".join(i for i in params))]
s.extend("self.{0} = {0}".format(i) for i in params)
scope = {}
exec('\n '.join(s), scope)
setattr(obj, '__init__', scope['__init__'])
return obj
else:
signature = inspect.Signature(inspect.Parameter(i, inspect.Parameter.POSITIONAL_OR_KEYWORD) for i in params)
signature.bind(*args, **kwargs)
for p, val in itertools.chain(zip(params, args), kwargs.items()):
setattr(obj, p, val)
def autorepr(obj=None, *, params=None):
"""Function that automagically gives you a __repr__.
If no params are given, uses __slots__, _slots, and at last resort,
inspects __init__
Can be used as a class decorator or by setting
__repr__ = autorepr"""
if obj is None: return partial(autorepr, params = params)
discard_first = False
if params:
pass
elif hasattr(obj, '__slots__'):
params = OrderedSet(itertools.chain.from_iterable(walk_getattr(obj, '__slots__')))
elif hasattr(obj, '_slots'):
params = OrderedSet(itertools.chain.from_iterable(walk_getattr(obj, '_slots')))
else:
sig = inspect.signature(obj.__init__)
params = sig.parameters
discard_first = True
if inspect.isclass(obj): #I'm being used as a decorator
if discard_first: params = list(params)[1:] #drop the first argument, that's self
s = ["def __repr__(self):\n return '%s(" + ", ".join(["%s=%r"]*(len(params)))]
s.append(")' % (self.__class__.__name__, ")
s.append(', '.join("'{0}', self.{0}".format(i) for i in params) + ')')
scope = {}
exec("".join(s), scope)
setattr(obj, '__repr__', scope['__repr__'])
return obj
else: #Being a normal function here :P
return "%s(%s)" % (obj.__class__.__name__, ", ".join("%s=%r" % (i, getattr(obj,i)) for i in params))
class TotalCompareByKey(object):
"""Writes all comparison methods using one key"""
__slots__ = ['key', 'check_type']
def __init__(self, key, *, check_type=True):
self.key = key
self.check_type = check_type
def __call__(self, cls):
orderings = {'__lt__': '<',
'__le__': '<=',
'__gt__': '>',
'__ge__': '>=',
'__eq__': '==',
'__ne__': '!='}
for dunder, symbol in orderings.items():
if dunder in cls.__dict__: continue
s = ["def {dunder}(self, other):".format(dunder=dunder)]
if self.check_type:
s.append("if not isinstance(other, self.__class__):")
s.append(" return NotImplemented")
s.append("return self.{k} {symbol} other.{k}".format(k=self.key, symbol=symbol))
scope = {}
exec("\n ".join(s), scope)
setattr(cls, dunder, scope[dunder])
return cls | en | 0.814307 | #!/usr/bin/python3 Takes __slots__ and _slots and writes an __init__ Can be used as a class decorator, or by setting __init__ = autoinit #I'm being used as a decorator Function that automagically gives you a __repr__. If no params are given, uses __slots__, _slots, and at last resort, inspects __init__ Can be used as a class decorator or by setting __repr__ = autorepr #I'm being used as a decorator #drop the first argument, that's self #Being a normal function here :P Writes all comparison methods using one key | 2.584168 | 3 |
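The docstrings above say autoinit, autorepr and TotalCompareByKey synthesise __init__, __repr__ and the comparison operators from __slots__ or a chosen key. The usage sketch below is illustrative: the Point class is invented for the example, and the import assumes the scripts/ directory is on sys.path so that pythonutils.autorepr and its utils/ordered-set dependencies resolve.

# Assumes scripts/ is on sys.path; Point is a made-up example class.
from pythonutils.autorepr import autoinit, autorepr, TotalCompareByKey

@TotalCompareByKey('x')      # adds __lt__, __le__, ... comparing self.x
@autorepr                    # adds __repr__ built from __slots__
@autoinit                    # adds __init__(self, x, y) built from __slots__
class Point:
    __slots__ = ['x', 'y']

p, q = Point(1, 2), Point(3, 4)
print(p)                     # Point(x=1, y=2)
print(p < q)                 # True, compared on the 'x' key
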
v1.0.0.test/toontown/estate/DistributedGardenPlotAI.py | TTOFFLINE-LEAK/ttoffline | 4 | 7637 | <filename>v1.0.0.test/toontown/estate/DistributedGardenPlotAI.py<gh_stars>1-10
from direct.directnotify import DirectNotifyGlobal
from toontown.estate import GardenGlobals
from toontown.estate.DistributedLawnDecorAI import DistributedLawnDecorAI
FLOWER_X_OFFSETS = (
None, (0, ), (-1.5, 1.5), (-3.4, 0, 3.5))
class DistributedGardenPlotAI(DistributedLawnDecorAI):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedGardenPlotAI')
def __init__(self, mgr):
DistributedLawnDecorAI.__init__(self, mgr)
self.plotType = 0
self.__plantingAvId = 0
self.treeIndex = 0
self.flowerIndex = 0
def announceGenerate(self):
DistributedLawnDecorAI.announceGenerate(self)
self.plotType = GardenGlobals.whatCanBePlanted(self.ownerIndex, self.plot)
self.__plantingAvId = 0
def setTreeIndex(self, treeIndex):
self.treeIndex = treeIndex
def getTreeIndex(self):
return self.treeIndex
def setFlowerIndex(self, flowerIndex):
self.flowerIndex = flowerIndex
def getFlowerIndex(self):
return self.flowerIndex
def __initialSanityCheck(self, wantedType=None, forceOwner=False):
if self.__plantingAvId:
return
else:
avId = self.air.getAvatarIdFromSender()
av = self.air.doId2do.get(avId)
if not av:
self.air.writeServerEvent('suspicious', avId, 'called DistributedGardenPlotAI method outside shard!')
return
if wantedType is not None and self.plotType != wantedType:
self.air.writeServerEvent('suspicious', avId, 'called incorrect DistributedGardenPlotAI method!', plotType=self.plotType, wantedType=wantedType)
return self.d_interactionDenied()
if avId != self.ownerDoId and not forceOwner:
self.air.writeServerEvent('suspicious', avId, "called someone else's DistributedGardenPlotAI plant method!", ownerDoId=self.ownerDoId)
return self.d_interactionDenied()
return av
def plantFlower(self, species, variety, usingFlowerAll=False):
av = self.__initialSanityCheck(GardenGlobals.FLOWER_TYPE if not usingFlowerAll else None, usingFlowerAll)
if not av:
return
else:
def invalid(problem):
msg = 'tried to plant flower but something went wrong: %s' % problem
self.notify.warning('%d %s' % (av.doId, msg))
self.air.writeServerEvent('suspicious', av.doId, msg)
if not usingFlowerAll:
return self.d_setMovie(GardenGlobals.MOVIE_PLANT_REJECTED)
plantAttributes = GardenGlobals.PlantAttributes.get(species, {})
if plantAttributes.get('plantType') != GardenGlobals.FLOWER_TYPE:
return invalid('invalid species: %d' % species)
if variety >= len(plantAttributes['varieties']):
return invalid('invalid variety: %d' % variety)
if not usingFlowerAll:
cost = len(GardenGlobals.Recipes[plantAttributes['varieties'][variety][0]]['beans'])
av.takeMoney(cost)
self.d_setMovie(GardenGlobals.MOVIE_PLANT)
def handlePlantFlower(task):
flower = self.mgr.plantFlower(self.getFlowerIndex(), species, variety, plot=self, ownerIndex=self.ownerIndex, plotId=self.plot, waterLevel=0, generate=False)
index = (0, 1, 2, 2, 2, 3, 3, 3, 4, 4)[self.getFlowerIndex()]
idx = (0, 0, 0, 1, 2, 0, 1, 2, 0, 1)[self.getFlowerIndex()]
zOffset = 1.5
gardenBox = self.mgr._estateBoxes[index]
xOffset = FLOWER_X_OFFSETS[gardenBox.getTypeIndex()][idx]
flower.setPos(gardenBox, 0, 0, 0)
flower.setZ(gardenBox, zOffset)
flower.setX(gardenBox, xOffset)
flower.setH(gardenBox, 0)
flower.generateWithRequired(self.mgr.estate.zoneId)
if not usingFlowerAll:
flower.d_setMovie(GardenGlobals.MOVIE_FINISHPLANTING, self.__plantingAvId)
flower.d_setMovie(GardenGlobals.MOVIE_CLEAR, self.__plantingAvId)
self.air.writeServerEvent('plant-flower', self.__plantingAvId, species=species, variety=variety, plot=self.plot, name=plantAttributes.get('name', 'unknown flower'))
if task:
return task.done
if usingFlowerAll:
handlePlantFlower(None)
else:
taskMgr.doMethodLater(7, handlePlantFlower, self.uniqueName('handle-plant-flower'))
self.__plantingAvId = av.doId
return 1
def plantGagTree(self, track, index):
av = self.__initialSanityCheck(GardenGlobals.GAG_TREE_TYPE)
if not av:
return
for i in xrange(index):
if not self.mgr.hasTree(track, i):
msg = 'tried to plant tree but an index is missing: %d' % index
self.notify.warning('%d %s' % (av.doId, msg))
self.air.writeServerEvent('suspicious', av.doId, msg)
return self.d_setMovie(GardenGlobals.MOVIE_PLANT_REJECTED)
if self.mgr.hasTree(track, index):
msg = 'tried to plant tree but gag already planted'
self.notify.warning('%d %s' % (av.doId, msg))
self.air.writeServerEvent('suspicious', av.doId, msg)
return self.d_setMovie(GardenGlobals.MOVIE_PLANT_REJECTED)
if av.inventory.useItem(track, index) == -1:
msg = 'tried to plant tree but not carrying selected gag'
self.notify.warning('%d %s' % (av.doId, msg))
self.air.writeServerEvent('suspicious', av.doId, msg)
return self.d_setMovie(GardenGlobals.MOVIE_PLANT_REJECTED)
av.d_setInventory(av.getInventory())
self.d_setMovie(GardenGlobals.MOVIE_PLANT)
def handlePlantTree(task):
if not self.air:
return
tree = self.mgr.plantTree(self.getTreeIndex(), GardenGlobals.getTreeTypeIndex(track, index), plot=self, ownerIndex=self.ownerIndex, plotId=self.plot, pos=(self.getPos(), self.getH()))
tree.d_setMovie(GardenGlobals.MOVIE_FINISHPLANTING, self.__plantingAvId)
tree.d_setMovie(GardenGlobals.MOVIE_CLEAR, self.__plantingAvId)
self.air.writeServerEvent('plant-tree', self.__plantingAvId, track=track, index=index, plot=self.plot)
return task.done
taskMgr.doMethodLater(7, handlePlantTree, self.uniqueName('handle-plant-tree'))
self.__plantingAvId = av.doId
def plantStatuary(self, species):
av = self.__initialSanityCheck(GardenGlobals.STATUARY_TYPE)
if not av:
return
def invalid(problem):
msg = 'tried to plant statuary but something went wrong: %s' % problem
self.notify.warning('%d %s' % (av.doId, msg))
self.air.writeServerEvent('suspicious', av.doId, msg)
return self.d_setMovie(GardenGlobals.MOVIE_PLANT_REJECTED)
plantAttributes = GardenGlobals.PlantAttributes.get(species, {})
if plantAttributes.get('plantType') != GardenGlobals.STATUARY_TYPE:
return invalid('invalid species: %d' % species)
gardenItem = species - 100
if gardenItem == 134:
gardenItem = 135
if not av.removeGardenItem(gardenItem, 1):
return invalid("av doesn't own item: %d" % species)
self.d_setMovie(GardenGlobals.MOVIE_PLANT)
def handlePlaceStatuary(task):
if not self.air:
return
statuary = self.mgr.placeStatuary(self.mgr.S_pack(0, 0, species, 0), plot=self, ownerIndex=self.ownerIndex, plotId=self.plot, pos=(
self.getPos(), self.getH()), generate=False)
statuary.generateWithRequired(self.zoneId)
statuary.d_setMovie(GardenGlobals.MOVIE_FINISHPLANTING, self.__plantingAvId)
statuary.d_setMovie(GardenGlobals.MOVIE_CLEAR, self.__plantingAvId)
self.air.writeServerEvent('plant-statuary', self.__plantingAvId, species=species, plot=self.plot)
return task.done
taskMgr.doMethodLater(7, handlePlaceStatuary, self.uniqueName('handle-place-statuary'))
self.__plantingAvId = av.doId
def plantToonStatuary(self, species, dnaCode):
av = self.__initialSanityCheck(GardenGlobals.STATUARY_TYPE)
if not av:
return
def invalid(problem):
msg = 'tried to plant statuary but something went wrong: %s' % problem
self.notify.warning('%d %s' % (av.doId, msg))
self.air.writeServerEvent('suspicious', av.doId, msg)
return self.d_setMovie(GardenGlobals.MOVIE_PLANT_REJECTED)
plantAttributes = GardenGlobals.PlantAttributes.get(species, {})
if plantAttributes.get('plantType') != GardenGlobals.STATUARY_TYPE:
return invalid('invalid species: %d' % species)
if not av.removeGardenItem(species - 100, 1):
return invalid("av doesn't own item: %d" % species)
self.d_setMovie(GardenGlobals.MOVIE_PLANT)
def handlePlaceStatuary(task):
if not self.air:
return
statuary = self.mgr.placeStatuary(self.mgr.S_pack(dnaCode, 0, species, 0), plot=self, ownerIndex=self.ownerIndex, plotId=self.plot, pos=(
self.getPos(), self.getH()), generate=False)
statuary.generateWithRequired(self.zoneId)
statuary.d_setMovie(GardenGlobals.MOVIE_FINISHPLANTING, self.__plantingAvId)
self.air.writeServerEvent('plant-statuary', self.__plantingAvId, species=species, plot=self.plot)
return task.done
taskMgr.doMethodLater(7, handlePlaceStatuary, self.uniqueName('handle-place-statuary'))
self.__plantingAvId = av.doId
def plantNothing(self, burntBeans):
av = self.__initialSanityCheck()
if av:
av.takeMoney(burntBeans) | <filename>v1.0.0.test/toontown/estate/DistributedGardenPlotAI.py<gh_stars>1-10
from direct.directnotify import DirectNotifyGlobal
from toontown.estate import GardenGlobals
from toontown.estate.DistributedLawnDecorAI import DistributedLawnDecorAI
FLOWER_X_OFFSETS = (
None, (0, ), (-1.5, 1.5), (-3.4, 0, 3.5))
class DistributedGardenPlotAI(DistributedLawnDecorAI):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedGardenPlotAI')
def __init__(self, mgr):
DistributedLawnDecorAI.__init__(self, mgr)
self.plotType = 0
self.__plantingAvId = 0
self.treeIndex = 0
self.flowerIndex = 0
def announceGenerate(self):
DistributedLawnDecorAI.announceGenerate(self)
self.plotType = GardenGlobals.whatCanBePlanted(self.ownerIndex, self.plot)
self.__plantingAvId = 0
def setTreeIndex(self, treeIndex):
self.treeIndex = treeIndex
def getTreeIndex(self):
return self.treeIndex
def setFlowerIndex(self, flowerIndex):
self.flowerIndex = flowerIndex
def getFlowerIndex(self):
return self.flowerIndex
def __initialSanityCheck(self, wantedType=None, forceOwner=False):
if self.__plantingAvId:
return
else:
avId = self.air.getAvatarIdFromSender()
av = self.air.doId2do.get(avId)
if not av:
self.air.writeServerEvent('suspicious', avId, 'called DistributedGardenPlotAI method outside shard!')
return
if wantedType is not None and self.plotType != wantedType:
self.air.writeServerEvent('suspicious', avId, 'called incorrect DistributedGardenPlotAI method!', plotType=self.plotType, wantedType=wantedType)
return self.d_interactionDenied()
if avId != self.ownerDoId and not forceOwner:
self.air.writeServerEvent('suspicious', avId, "called someone else's DistributedGardenPlotAI plant method!", ownerDoId=self.ownerDoId)
return self.d_interactionDenied()
return av
def plantFlower(self, species, variety, usingFlowerAll=False):
av = self.__initialSanityCheck(GardenGlobals.FLOWER_TYPE if not usingFlowerAll else None, usingFlowerAll)
if not av:
return
else:
def invalid(problem):
msg = 'tried to plant flower but something went wrong: %s' % problem
self.notify.warning('%d %s' % (av.doId, msg))
self.air.writeServerEvent('suspicious', av.doId, msg)
if not usingFlowerAll:
return self.d_setMovie(GardenGlobals.MOVIE_PLANT_REJECTED)
plantAttributes = GardenGlobals.PlantAttributes.get(species, {})
if plantAttributes.get('plantType') != GardenGlobals.FLOWER_TYPE:
return invalid('invalid species: %d' % species)
if variety >= len(plantAttributes['varieties']):
return invalid('invalid variety: %d' % variety)
if not usingFlowerAll:
cost = len(GardenGlobals.Recipes[plantAttributes['varieties'][variety][0]]['beans'])
av.takeMoney(cost)
self.d_setMovie(GardenGlobals.MOVIE_PLANT)
def handlePlantFlower(task):
flower = self.mgr.plantFlower(self.getFlowerIndex(), species, variety, plot=self, ownerIndex=self.ownerIndex, plotId=self.plot, waterLevel=0, generate=False)
index = (0, 1, 2, 2, 2, 3, 3, 3, 4, 4)[self.getFlowerIndex()]
idx = (0, 0, 0, 1, 2, 0, 1, 2, 0, 1)[self.getFlowerIndex()]
zOffset = 1.5
gardenBox = self.mgr._estateBoxes[index]
xOffset = FLOWER_X_OFFSETS[gardenBox.getTypeIndex()][idx]
flower.setPos(gardenBox, 0, 0, 0)
flower.setZ(gardenBox, zOffset)
flower.setX(gardenBox, xOffset)
flower.setH(gardenBox, 0)
flower.generateWithRequired(self.mgr.estate.zoneId)
if not usingFlowerAll:
flower.d_setMovie(GardenGlobals.MOVIE_FINISHPLANTING, self.__plantingAvId)
flower.d_setMovie(GardenGlobals.MOVIE_CLEAR, self.__plantingAvId)
self.air.writeServerEvent('plant-flower', self.__plantingAvId, species=species, variety=variety, plot=self.plot, name=plantAttributes.get('name', 'unknown flower'))
if task:
return task.done
if usingFlowerAll:
handlePlantFlower(None)
else:
taskMgr.doMethodLater(7, handlePlantFlower, self.uniqueName('handle-plant-flower'))
self.__plantingAvId = av.doId
return 1
def plantGagTree(self, track, index):
av = self.__initialSanityCheck(GardenGlobals.GAG_TREE_TYPE)
if not av:
return
for i in xrange(index):
if not self.mgr.hasTree(track, i):
msg = 'tried to plant tree but an index is missing: %d' % index
self.notify.warning('%d %s' % (av.doId, msg))
self.air.writeServerEvent('suspicious', av.doId, msg)
return self.d_setMovie(GardenGlobals.MOVIE_PLANT_REJECTED)
if self.mgr.hasTree(track, index):
msg = 'tried to plant tree but gag already planted'
self.notify.warning('%d %s' % (av.doId, msg))
self.air.writeServerEvent('suspicious', av.doId, msg)
return self.d_setMovie(GardenGlobals.MOVIE_PLANT_REJECTED)
if av.inventory.useItem(track, index) == -1:
msg = 'tried to plant tree but not carrying selected gag'
self.notify.warning('%d %s' % (av.doId, msg))
self.air.writeServerEvent('suspicious', av.doId, msg)
return self.d_setMovie(GardenGlobals.MOVIE_PLANT_REJECTED)
av.d_setInventory(av.getInventory())
self.d_setMovie(GardenGlobals.MOVIE_PLANT)
def handlePlantTree(task):
if not self.air:
return
tree = self.mgr.plantTree(self.getTreeIndex(), GardenGlobals.getTreeTypeIndex(track, index), plot=self, ownerIndex=self.ownerIndex, plotId=self.plot, pos=(self.getPos(), self.getH()))
tree.d_setMovie(GardenGlobals.MOVIE_FINISHPLANTING, self.__plantingAvId)
tree.d_setMovie(GardenGlobals.MOVIE_CLEAR, self.__plantingAvId)
self.air.writeServerEvent('plant-tree', self.__plantingAvId, track=track, index=index, plot=self.plot)
return task.done
taskMgr.doMethodLater(7, handlePlantTree, self.uniqueName('handle-plant-tree'))
self.__plantingAvId = av.doId
def plantStatuary(self, species):
av = self.__initialSanityCheck(GardenGlobals.STATUARY_TYPE)
if not av:
return
def invalid(problem):
msg = 'tried to plant statuary but something went wrong: %s' % problem
self.notify.warning('%d %s' % (av.doId, msg))
self.air.writeServerEvent('suspicious', av.doId, msg)
return self.d_setMovie(GardenGlobals.MOVIE_PLANT_REJECTED)
plantAttributes = GardenGlobals.PlantAttributes.get(species, {})
if plantAttributes.get('plantType') != GardenGlobals.STATUARY_TYPE:
return invalid('invalid species: %d' % species)
gardenItem = species - 100
if gardenItem == 134:
gardenItem = 135
if not av.removeGardenItem(gardenItem, 1):
return invalid("av doesn't own item: %d" % species)
self.d_setMovie(GardenGlobals.MOVIE_PLANT)
def handlePlaceStatuary(task):
if not self.air:
return
statuary = self.mgr.placeStatuary(self.mgr.S_pack(0, 0, species, 0), plot=self, ownerIndex=self.ownerIndex, plotId=self.plot, pos=(
self.getPos(), self.getH()), generate=False)
statuary.generateWithRequired(self.zoneId)
statuary.d_setMovie(GardenGlobals.MOVIE_FINISHPLANTING, self.__plantingAvId)
statuary.d_setMovie(GardenGlobals.MOVIE_CLEAR, self.__plantingAvId)
self.air.writeServerEvent('plant-statuary', self.__plantingAvId, species=species, plot=self.plot)
return task.done
taskMgr.doMethodLater(7, handlePlaceStatuary, self.uniqueName('handle-place-statuary'))
self.__plantingAvId = av.doId
def plantToonStatuary(self, species, dnaCode):
av = self.__initialSanityCheck(GardenGlobals.STATUARY_TYPE)
if not av:
return
def invalid(problem):
msg = 'tried to plant statuary but something went wrong: %s' % problem
self.notify.warning('%d %s' % (av.doId, msg))
self.air.writeServerEvent('suspicious', av.doId, msg)
return self.d_setMovie(GardenGlobals.MOVIE_PLANT_REJECTED)
plantAttributes = GardenGlobals.PlantAttributes.get(species, {})
if plantAttributes.get('plantType') != GardenGlobals.STATUARY_TYPE:
return invalid('invalid species: %d' % species)
if not av.removeGardenItem(species - 100, 1):
return invalid("av doesn't own item: %d" % species)
self.d_setMovie(GardenGlobals.MOVIE_PLANT)
def handlePlaceStatuary(task):
if not self.air:
return
statuary = self.mgr.placeStatuary(self.mgr.S_pack(dnaCode, 0, species, 0), plot=self, ownerIndex=self.ownerIndex, plotId=self.plot, pos=(
self.getPos(), self.getH()), generate=False)
statuary.generateWithRequired(self.zoneId)
statuary.d_setMovie(GardenGlobals.MOVIE_FINISHPLANTING, self.__plantingAvId)
self.air.writeServerEvent('plant-statuary', self.__plantingAvId, species=species, plot=self.plot)
return task.done
taskMgr.doMethodLater(7, handlePlaceStatuary, self.uniqueName('handle-place-statuary'))
self.__plantingAvId = av.doId
def plantNothing(self, burntBeans):
av = self.__initialSanityCheck()
if av:
av.takeMoney(burntBeans) | none | 1 | 2.159944 | 2 |
|
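In the plantFlower handler above, each of the ten flower indices is mapped to a garden box and a horizontal offset through two lookup tuples plus FLOWER_X_OFFSETS. The standalone sketch below reproduces only that table lookup so it can be read in isolation; the box type list is hypothetical, and all scene-graph and server objects from the record are omitted.

# Same lookup tables as DistributedGardenPlotAI.plantFlower.
FLOWER_X_OFFSETS = (None, (0,), (-1.5, 1.5), (-3.4, 0, 3.5))
BOX_INDEX = (0, 1, 2, 2, 2, 3, 3, 3, 4, 4)    # garden box used by each flower index
SLOT_INDEX = (0, 0, 0, 1, 2, 0, 1, 2, 0, 1)   # slot within that box

def flower_placement(flower_index, box_type_indices):
    """Return (box index, x offset) for a flower plot."""
    box = BOX_INDEX[flower_index]
    slot = SLOT_INDEX[flower_index]
    return box, FLOWER_X_OFFSETS[box_type_indices[box]][slot]

# Hypothetical estate layout: box types 1, 2 and 3 hold 1, 2 and 3 flowers.
box_types = [1, 2, 3, 3, 2]
for i in range(10):
    print(i, flower_placement(i, box_types))
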
python/handwritten_baseline/pipeline/model/feature_extr/debug.py | UKPLab/cdcr-beyond-corpus-tailored | 10 | 7638 | import pprint
from typing import Optional, List, Tuple, Set, Dict
import numpy as np
from overrides import overrides
from python.handwritten_baseline.pipeline.data.base import Dataset
from python.handwritten_baseline.pipeline.model.feature_extr import DEBUG_EXTR
from python.handwritten_baseline.pipeline.model.feature_extr.base_mixin import FeatureExtractorMixin
class DebugFeatureExtractor(FeatureExtractorMixin):
"""
Returns constant or random feature value for testing purposes.
"""
def __init__(self,
strategy: str,
num_features: int,
use_cache: bool,
features_to_select: Optional[List[str]]):
super(DebugFeatureExtractor, self).__init__(DEBUG_EXTR, use_cache, features_to_select)
self.strategy = strategy
self.num_features = num_features
@overrides
def _transform(self, dataset: Dataset, pairs: List[Tuple[Tuple, Tuple]], unique_mentions: Set[Tuple]):
if self.strategy == "random":
return np.random.normal(0, 1, (len(pairs), self.num_features))
elif self.strategy == "zero":
return np.zeros((len(pairs), self.num_features))
elif self.strategy == "mix":
num_zero_features = self.num_features // 2
print(f"Generating {num_zero_features} zero features and {self.num_features - num_zero_features} random features.")
zero_features = np.zeros((len(pairs), num_zero_features))
random_features = np.random.normal(0, 1, (len(pairs), self.num_features - num_zero_features))
feature_matrix = np.hstack([zero_features, random_features])
np.random.shuffle(np.transpose(feature_matrix))
return feature_matrix
@overrides
def _get_plain_names_of_all_features(self) -> List[str]:
return [str(i) for i in range(self.num_features)]
@classmethod
@overrides
def from_params(cls, config: Dict):
strategy = config.pop("strategy")
num_features = config.pop("num_features")
use_cache = config.pop("use_cache", False)
features_to_select = config.pop("features_to_select", None)
obj = DebugFeatureExtractor(strategy, num_features, use_cache, features_to_select)
if config:
raise ValueError("Leftover configuration: " + pprint.pformat(config))
return obj | import pprint
from typing import Optional, List, Tuple, Set, Dict
import numpy as np
from overrides import overrides
from python.handwritten_baseline.pipeline.data.base import Dataset
from python.handwritten_baseline.pipeline.model.feature_extr import DEBUG_EXTR
from python.handwritten_baseline.pipeline.model.feature_extr.base_mixin import FeatureExtractorMixin
class DebugFeatureExtractor(FeatureExtractorMixin):
"""
Returns constant or random feature value for testing purposes.
"""
def __init__(self,
strategy: str,
num_features: int,
use_cache: bool,
features_to_select: Optional[List[str]]):
super(DebugFeatureExtractor, self).__init__(DEBUG_EXTR, use_cache, features_to_select)
self.strategy = strategy
self.num_features = num_features
@overrides
def _transform(self, dataset: Dataset, pairs: List[Tuple[Tuple, Tuple]], unique_mentions: Set[Tuple]):
if self.strategy == "random":
return np.random.normal(0, 1, (len(pairs), self.num_features))
elif self.strategy == "zero":
return np.zeros((len(pairs), self.num_features))
elif self.strategy == "mix":
num_zero_features = self.num_features // 2
print(f"Generating {num_zero_features} zero features and {self.num_features - num_zero_features} random features.")
zero_features = np.zeros((len(pairs), num_zero_features))
random_features = np.random.normal(0, 1, (len(pairs), self.num_features - num_zero_features))
feature_matrix = np.hstack([zero_features, random_features])
np.random.shuffle(np.transpose(feature_matrix))
return feature_matrix
@overrides
def _get_plain_names_of_all_features(self) -> List[str]:
return [str(i) for i in range(self.num_features)]
@classmethod
@overrides
def from_params(cls, config: Dict):
strategy = config.pop("strategy")
num_features = config.pop("num_features")
use_cache = config.pop("use_cache", False)
features_to_select = config.pop("features_to_select", None)
obj = DebugFeatureExtractor(strategy, num_features, use_cache, features_to_select)
if config:
raise ValueError("Leftover configuration: " + pprint.pformat(config))
return obj | en | 0.612206 | Returns constant or random feature value for testing purposes. | 2.331641 | 2 |
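A brief usage sketch of the extractor above, assuming the repository's python package is importable; from_params consumes (pops) its config dict, and _transform only inspects the pairs argument, so the dataset and mention set can be left empty for a smoke test.

from python.handwritten_baseline.pipeline.model.feature_extr.debug import DebugFeatureExtractor

extractor = DebugFeatureExtractor.from_params({"strategy": "mix", "num_features": 4})

# Two dummy mention pairs; only len(pairs) matters to this debug extractor.
pairs = [(("doc1", 0), ("doc2", 3)), (("doc1", 1), ("doc3", 7))]
features = extractor._transform(dataset=None, pairs=pairs, unique_mentions=set())
print(features.shape)  # (2, 4): half zero columns, half Gaussian noise, columns shuffled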
kunquat/tracker/errorbase.py | cyberixae/kunquat | 0 | 7639 | # -*- coding: utf-8 -*-
#
# Author: <NAME>, Finland 2014
#
# This file is part of Kunquat.
#
# CC0 1.0 Universal, http://creativecommons.org/publicdomain/zero/1.0/
#
# To the extent possible under law, Kunquat Affirmers have waived all
# copyright and related or neighboring rights to Kunquat.
#
from __future__ import print_function
import sys
import traceback
import os
_ERROR_BRIEF = 'Kunquat Tracker encountered an error.'
_SUBMIT_INFO = \
'''Please submit an issue to Kunquat issue tracker at
https://github.com/kunquat/kunquat/issues with the following
information attached.'''
def get_error_details(eclass, einst, trace):
details_list = traceback.format_exception(eclass, einst, trace)
return ''.join(details_list)
def print_error_msg(eclass, einst, trace):
details = get_error_details(eclass, einst, trace)
print('\n{}\n{}\n\n{}'.format(_ERROR_BRIEF, _SUBMIT_INFO, details),
file=sys.stderr)
def log_error(eclass, einst, trace):
pass # TODO: implement once we decide where to write
def setup_basic_error_handler():
sys.excepthook = _basic_handler
def _basic_handler(eclass, einst, trace):
print_error_msg(eclass, einst, trace)
log_error(eclass, einst, trace)
os.abort()
| # -*- coding: utf-8 -*-
#
# Author: <NAME>, Finland 2014
#
# This file is part of Kunquat.
#
# CC0 1.0 Universal, http://creativecommons.org/publicdomain/zero/1.0/
#
# To the extent possible under law, Kunquat Affirmers have waived all
# copyright and related or neighboring rights to Kunquat.
#
from __future__ import print_function
import sys
import traceback
import os
_ERROR_BRIEF = 'Kunquat Tracker encountered an error.'
_SUBMIT_INFO = \
'''Please submit an issue to Kunquat issue tracker at
https://github.com/kunquat/kunquat/issues with the following
information attached.'''
def get_error_details(eclass, einst, trace):
details_list = traceback.format_exception(eclass, einst, trace)
return ''.join(details_list)
def print_error_msg(eclass, einst, trace):
details = get_error_details(eclass, einst, trace)
print('\n{}\n{}\n\n{}'.format(_ERROR_BRIEF, _SUBMIT_INFO, details),
file=sys.stderr)
def log_error(eclass, einst, trace):
pass # TODO: implement once we decide where to write
def setup_basic_error_handler():
sys.excepthook = _basic_handler
def _basic_handler(eclass, einst, trace):
print_error_msg(eclass, einst, trace)
log_error(eclass, einst, trace)
os.abort()
| en | 0.8139 | # -*- coding: utf-8 -*- # # Author: <NAME>, Finland 2014 # # This file is part of Kunquat. # # CC0 1.0 Universal, http://creativecommons.org/publicdomain/zero/1.0/ # # To the extent possible under law, Kunquat Affirmers have waived all # copyright and related or neighboring rights to Kunquat. # Please submit an issue to Kunquat issue tracker at https://github.com/kunquat/kunquat/issues with the following information attached. # TODO: implement once we decide where to write | 1.928511 | 2 |
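A short sketch of installing the handler above in a script (assuming kunquat.tracker.errorbase is importable); note that _basic_handler ends with os.abort(), so the process terminates right after the report is written to stderr.

import sys
from kunquat.tracker.errorbase import setup_basic_error_handler, get_error_details

setup_basic_error_handler()  # replaces sys.excepthook with _basic_handler

# get_error_details can also be used directly for logging without aborting:
try:
    1 / 0
except ZeroDivisionError:
    print(get_error_details(*sys.exc_info()))

raise RuntimeError('simulated tracker failure')  # prints the brief + traceback, then aborts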
venv/lib/python3.5/site-packages/igraph/test/atlas.py | dtklinh/Protein-Rigid-Domains-Estimation | 2 | 7640 | <reponame>dtklinh/Protein-Rigid-Domains-Estimation
import warnings
import unittest
from igraph import *
class TestBase(unittest.TestCase):
def testPageRank(self):
for idx, g in enumerate(self.__class__.graphs):
try:
pr = g.pagerank()
except Exception as ex:
self.assertTrue(False, msg="PageRank calculation threw exception for graph #%d: %s" % (idx, ex))
raise
if g.vcount() == 0:
self.assertEqual([], pr)
continue
self.assertAlmostEqual(1.0, sum(pr), places=5, \
msg="PageRank sum is not 1.0 for graph #%d (%r)" % (idx, pr))
self.assertTrue(min(pr) >= 0, \
msg="Minimum PageRank is less than 0 for graph #%d (%r)" % (idx, pr))
def testEigenvectorCentrality(self):
# Temporarily turn off the warning handler because g.evcent() will print
# a warning for DAGs
warnings.simplefilter("ignore")
try:
for idx, g in enumerate(self.__class__.graphs):
try:
ec, eval = g.evcent(return_eigenvalue=True)
except Exception as ex:
self.assertTrue(False, msg="Eigenvector centrality threw exception for graph #%d: %s" % (idx, ex))
raise
if g.vcount() == 0:
self.assertEqual([], ec)
continue
if not g.is_connected():
# Skip disconnected graphs; this will be fixed in igraph 0.7
continue
n = g.vcount()
if abs(eval) < 1e-4:
self.assertTrue(min(ec) >= -1e-10,
msg="Minimum eigenvector centrality is smaller than 0 for graph #%d" % idx)
self.assertTrue(max(ec) <= 1,
msg="Maximum eigenvector centrality is greater than 1 for graph #%d" % idx)
continue
self.assertAlmostEqual(max(ec), 1, places=7, \
msg="Maximum eigenvector centrality is %r (not 1) for graph #%d (%r)" % \
(max(ec), idx, ec))
self.assertTrue(min(ec) >= 0, \
msg="Minimum eigenvector centrality is less than 0 for graph #%d" % idx)
ec2 = [sum(ec[u.index] for u in v.predecessors()) for v in g.vs]
for i in range(n):
self.assertAlmostEqual(ec[i] * eval, ec2[i], places=7, \
msg="Eigenvector centrality in graph #%d seems to be invalid "\
"for vertex %d" % (idx, i))
finally:
# Reset the warning handler
warnings.resetwarnings()
def testHubScore(self):
for idx, g in enumerate(self.__class__.graphs):
sc = g.hub_score()
if g.vcount() == 0:
self.assertEqual([], sc)
continue
self.assertAlmostEqual(max(sc), 1, places=7, \
msg="Maximum authority score is not 1 for graph #%d" % idx)
self.assertTrue(min(sc) >= 0, \
msg="Minimum hub score is less than 0 for graph #%d" % idx)
def testAuthorityScore(self):
for idx, g in enumerate(self.__class__.graphs):
sc = g.authority_score()
if g.vcount() == 0:
self.assertEqual([], sc)
continue
self.assertAlmostEqual(max(sc), 1, places=7, \
msg="Maximum authority score is not 1 for graph #%d" % idx)
self.assertTrue(min(sc) >= 0, \
msg="Minimum authority score is less than 0 for graph #%d" % idx)
class GraphAtlasTests(TestBase):
graphs = [Graph.Atlas(i) for i in range(1253)]
class IsoclassTests(TestBase):
graphs = [Graph.Isoclass(3, i, directed=True) for i in range(16)] + \
[Graph.Isoclass(4, i, directed=True) for i in range(218)]
def suite():
atlas_suite = unittest.makeSuite(GraphAtlasTests)
isoclass_suite = unittest.makeSuite(IsoclassTests)
return unittest.TestSuite([atlas_suite, isoclass_suite])
def test():
runner = unittest.TextTestRunner()
runner.run(suite())
if __name__ == "__main__":
test()
| import warnings
import unittest
from igraph import *
class TestBase(unittest.TestCase):
def testPageRank(self):
for idx, g in enumerate(self.__class__.graphs):
try:
pr = g.pagerank()
except Exception as ex:
self.assertTrue(False, msg="PageRank calculation threw exception for graph #%d: %s" % (idx, ex))
raise
if g.vcount() == 0:
self.assertEqual([], pr)
continue
self.assertAlmostEqual(1.0, sum(pr), places=5, \
msg="PageRank sum is not 1.0 for graph #%d (%r)" % (idx, pr))
self.assertTrue(min(pr) >= 0, \
msg="Minimum PageRank is less than 0 for graph #%d (%r)" % (idx, pr))
def testEigenvectorCentrality(self):
# Temporarily turn off the warning handler because g.evcent() will print
# a warning for DAGs
warnings.simplefilter("ignore")
try:
for idx, g in enumerate(self.__class__.graphs):
try:
ec, eval = g.evcent(return_eigenvalue=True)
except Exception as ex:
self.assertTrue(False, msg="Eigenvector centrality threw exception for graph #%d: %s" % (idx, ex))
raise
if g.vcount() == 0:
self.assertEqual([], ec)
continue
if not g.is_connected():
# Skip disconnected graphs; this will be fixed in igraph 0.7
continue
n = g.vcount()
if abs(eval) < 1e-4:
self.assertTrue(min(ec) >= -1e-10,
msg="Minimum eigenvector centrality is smaller than 0 for graph #%d" % idx)
self.assertTrue(max(ec) <= 1,
msg="Maximum eigenvector centrality is greater than 1 for graph #%d" % idx)
continue
self.assertAlmostEqual(max(ec), 1, places=7, \
msg="Maximum eigenvector centrality is %r (not 1) for graph #%d (%r)" % \
(max(ec), idx, ec))
self.assertTrue(min(ec) >= 0, \
msg="Minimum eigenvector centrality is less than 0 for graph #%d" % idx)
ec2 = [sum(ec[u.index] for u in v.predecessors()) for v in g.vs]
for i in range(n):
self.assertAlmostEqual(ec[i] * eval, ec2[i], places=7, \
msg="Eigenvector centrality in graph #%d seems to be invalid "\
"for vertex %d" % (idx, i))
finally:
# Reset the warning handler
warnings.resetwarnings()
def testHubScore(self):
for idx, g in enumerate(self.__class__.graphs):
sc = g.hub_score()
if g.vcount() == 0:
self.assertEqual([], sc)
continue
self.assertAlmostEqual(max(sc), 1, places=7, \
msg="Maximum authority score is not 1 for graph #%d" % idx)
self.assertTrue(min(sc) >= 0, \
msg="Minimum hub score is less than 0 for graph #%d" % idx)
def testAuthorityScore(self):
for idx, g in enumerate(self.__class__.graphs):
sc = g.authority_score()
if g.vcount() == 0:
self.assertEqual([], sc)
continue
self.assertAlmostEqual(max(sc), 1, places=7, \
msg="Maximum authority score is not 1 for graph #%d" % idx)
self.assertTrue(min(sc) >= 0, \
msg="Minimum authority score is less than 0 for graph #%d" % idx)
class GraphAtlasTests(TestBase):
graphs = [Graph.Atlas(i) for i in range(1253)]
class IsoclassTests(TestBase):
graphs = [Graph.Isoclass(3, i, directed=True) for i in range(16)] + \
[Graph.Isoclass(4, i, directed=True) for i in range(218)]
def suite():
atlas_suite = unittest.makeSuite(GraphAtlasTests)
isoclass_suite = unittest.makeSuite(IsoclassTests)
return unittest.TestSuite([atlas_suite, isoclass_suite])
def test():
runner = unittest.TextTestRunner()
runner.run(suite())
if __name__ == "__main__":
test() | en | 0.148066 | #%d: %s" % (idx, ex)) #%d (%r)" % (idx, pr)) #%d (%r)" % (idx, pr)) # Temporarily turn off the warning handler because g.evcent() will print # a warning for DAGs #%d: %s" % (idx, ex)) # Skip disconnected graphs; this will be fixed in igraph 0.7 #%d" % idx) #%d" % idx) #%d (%r)" % \ #%d" % idx) #%d seems to be invalid "\ # Reset the warning handler #%d" % idx) #%d" % idx) #%d" % idx) #%d" % idx) | 2.834119 | 3 |
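The suite above reduces to a handful of invariants that are easy to check by hand with a stock python-igraph install; a small sketch follows (atlas graph 100 is an arbitrary non-empty choice).

from igraph import Graph

g = Graph.Atlas(100)                  # one of the ~1253 atlas graphs exercised above
pr = g.pagerank()
assert abs(sum(pr) - 1.0) < 1e-5      # PageRank forms a probability distribution
assert min(pr) >= 0

hub, auth = g.hub_score(), g.authority_score()
assert abs(max(hub) - 1.0) < 1e-6 and abs(max(auth) - 1.0) < 1e-6  # both normalised to 1
print('invariants hold for atlas graph #100')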
pycspr/types/cl.py | momipsl/pycspr | 2 | 7641 | import dataclasses
import enum
class CLType(enum.Enum):
"""Enumeration over set of CL types.
"""
BOOL = 0
I32 = 1
I64 = 2
U8 = 3
U32 = 4
U64 = 5
U128 = 6
U256 = 7
U512 = 8
UNIT = 9
STRING = 10
KEY = 11
UREF = 12
OPTION = 13
LIST = 14
BYTE_ARRAY = 15
RESULT = 16
MAP = 17
TUPLE_1 = 18
TUPLE_2 = 19
TUPLE_3 = 20
ANY = 21
PUBLIC_KEY = 22
# Set of types considered to be simple.
CL_TYPES_SIMPLE = {
CLType.BOOL,
CLType.I32,
CLType.I64,
CLType.KEY,
CLType.PUBLIC_KEY,
CLType.STRING,
CLType.U8,
CLType.U32,
CLType.U64,
CLType.U128,
CLType.U256,
CLType.U512,
CLType.UNIT,
CLType.UREF,
}
@dataclasses.dataclass
class CLTypeInfo():
"""Encapsulates CL type information associated with a value.
"""
# Associated type within CSPR type system.
typeof: CLType
@property
def type_tag(self) -> int:
"""Returns a tag used when encoding/decoding."""
return self.typeof.value
@dataclasses.dataclass
class CLTypeInfoForByteArray(CLTypeInfo):
"""Encapsulates CL type information associated with a byte array value.
"""
# Size of associated byte array value.
size: int
@dataclasses.dataclass
class CLTypeInfoForList(CLTypeInfo):
"""Encapsulates CL type information associated with a list value.
"""
# Inner type within CSPR type system.
inner_type_info: CLTypeInfo
@dataclasses.dataclass
class CLTypeInfoForMap(CLTypeInfo):
"""Encapsulates CL type information associated with a byte array value.
"""
# Type info of map's key.
key_type_info: CLType
# Type info of map's value.
value_type_info: CLTypeInfo
@dataclasses.dataclass
class CLTypeInfoForOption(CLTypeInfo):
"""Encapsulates CL type information associated with an optional value.
"""
# Inner type within CSPR type system.
inner_type_info: CLTypeInfo
@dataclasses.dataclass
class CLTypeInfoForSimple(CLTypeInfo):
"""Encapsulates CL type information associated with a simple value.
"""
pass
@dataclasses.dataclass
class CLTypeInfoForTuple1(CLTypeInfo):
"""Encapsulates CL type information associated with a 1-ary tuple value value.
"""
# Type of first value within 1-ary tuple value.
t0_type_info: CLTypeInfo
@dataclasses.dataclass
class CLTypeInfoForTuple2(CLTypeInfo):
"""Encapsulates CL type information associated with a 2-ary tuple value value.
"""
# Type of first value within 1-ary tuple value.
t0_type_info: CLTypeInfo
# Type of first value within 2-ary tuple value.
t1_type_info: CLTypeInfo
@dataclasses.dataclass
class CLTypeInfoForTuple3(CLTypeInfo):
"""Encapsulates CL type information associated with a 3-ary tuple value value.
"""
# Type of first value within 1-ary tuple value.
t0_type_info: CLTypeInfo
# Type of first value within 2-ary tuple value.
t1_type_info: CLTypeInfo
# Type of first value within 3-ary tuple value.
t2_type_info: CLTypeInfo
@dataclasses.dataclass
class CLValue():
"""A CL value mapped from python type system.
"""
# Byte array representation of underlying data.
bytes: bytes
# Parsed pythonic representation of underlying data (for human convenience only).
parsed: object
# Type information used by a deserializer.
type_info: CLTypeInfo
| import dataclasses
import enum
class CLType(enum.Enum):
"""Enumeration over set of CL types.
"""
BOOL = 0
I32 = 1
I64 = 2
U8 = 3
U32 = 4
U64 = 5
U128 = 6
U256 = 7
U512 = 8
UNIT = 9
STRING = 10
KEY = 11
UREF = 12
OPTION = 13
LIST = 14
BYTE_ARRAY = 15
RESULT = 16
MAP = 17
TUPLE_1 = 18
TUPLE_2 = 19
TUPLE_3 = 20
ANY = 21
PUBLIC_KEY = 22
# Set of types considered to be simple.
CL_TYPES_SIMPLE = {
CLType.BOOL,
CLType.I32,
CLType.I64,
CLType.KEY,
CLType.PUBLIC_KEY,
CLType.STRING,
CLType.U8,
CLType.U32,
CLType.U64,
CLType.U128,
CLType.U256,
CLType.U512,
CLType.UNIT,
CLType.UREF,
}
@dataclasses.dataclass
class CLTypeInfo():
"""Encapsulates CL type information associated with a value.
"""
# Associated type within CSPR type system.
typeof: CLType
@property
def type_tag(self) -> int:
"""Returns a tag used when encoding/decoding."""
return self.typeof.value
@dataclasses.dataclass
class CLTypeInfoForByteArray(CLTypeInfo):
"""Encapsulates CL type information associated with a byte array value.
"""
# Size of associated byte array value.
size: int
@dataclasses.dataclass
class CLTypeInfoForList(CLTypeInfo):
"""Encapsulates CL type information associated with a list value.
"""
# Inner type within CSPR type system.
inner_type_info: CLTypeInfo
@dataclasses.dataclass
class CLTypeInfoForMap(CLTypeInfo):
"""Encapsulates CL type information associated with a byte array value.
"""
# Type info of map's key.
key_type_info: CLType
# Type info of map's value.
value_type_info: CLTypeInfo
@dataclasses.dataclass
class CLTypeInfoForOption(CLTypeInfo):
"""Encapsulates CL type information associated with an optional value.
"""
# Inner type within CSPR type system.
inner_type_info: CLTypeInfo
@dataclasses.dataclass
class CLTypeInfoForSimple(CLTypeInfo):
"""Encapsulates CL type information associated with a simple value.
"""
pass
@dataclasses.dataclass
class CLTypeInfoForTuple1(CLTypeInfo):
"""Encapsulates CL type information associated with a 1-ary tuple value value.
"""
# Type of first value within 1-ary tuple value.
t0_type_info: CLTypeInfo
@dataclasses.dataclass
class CLTypeInfoForTuple2(CLTypeInfo):
"""Encapsulates CL type information associated with a 2-ary tuple value value.
"""
# Type of first value within 1-ary tuple value.
t0_type_info: CLTypeInfo
# Type of first value within 2-ary tuple value.
t1_type_info: CLTypeInfo
@dataclasses.dataclass
class CLTypeInfoForTuple3(CLTypeInfo):
"""Encapsulates CL type information associated with a 3-ary tuple value value.
"""
# Type of first value within 1-ary tuple value.
t0_type_info: CLTypeInfo
# Type of first value within 2-ary tuple value.
t1_type_info: CLTypeInfo
# Type of first value within 3-ary tuple value.
t2_type_info: CLTypeInfo
@dataclasses.dataclass
class CLValue():
"""A CL value mapped from python type system.
"""
# Byte array representation of underlying data.
bytes: bytes
# Parsed pythonic representation of underlying data (for human convenience only).
parsed: object
# Type information used by a deserializer.
type_info: CLTypeInfo
| en | 0.6857 | Enumeration over set of CL types. # Set of types considered to be simple. Encapsulates CL type information associated with a value. # Associated type within CSPR type system. Returns a tag used when encoding/decoding. Encapsulates CL type information associated with a byte array value. # Size of associated byte array value. Encapsulates CL type information associated with a list value. # Inner type within CSPR type system. Encapsulates CL type information associated with a byte array value. # Type info of map's key. # Type info of map's value. Encapsulates CL type information associated with an optional value. # Inner type within CSPR type system. Encapsulates CL type information associated with a simple value. Encapsulates CL type information associated with a 1-ary tuple value value. # Type of first value within 1-ary tuple value. Encapsulates CL type information associated with a 2-ary tuple value value. # Type of first value within 1-ary tuple value. # Type of first value within 2-ary tuple value. Encapsulates CL type information associated with a 3-ary tuple value value. # Type of first value within 1-ary tuple value. # Type of first value within 2-ary tuple value. # Type of first value within 3-ary tuple value. A CL value mapped from python type system. # Byte array representation of underlying data. # Parsed pythonic representation of underlying data (for human convenience only). # Type information used by a deserializer. | 2.787342 | 3 |
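For illustration, a sketch that assembles a CLValue for a U64 amount with the dataclasses above (imports assume the module path pycspr.types.cl); the 8-byte little-endian layout is an assumption about Casper's serialisation, not something this module enforces.

from pycspr.types.cl import CL_TYPES_SIMPLE, CLType, CLTypeInfoForSimple, CLValue

amount = 1_000_000
value = CLValue(
    bytes=amount.to_bytes(8, "little"),          # assumed wire encoding for U64
    parsed=amount,                               # human-readable mirror of the bytes
    type_info=CLTypeInfoForSimple(typeof=CLType.U64),
)

assert value.type_info.type_tag == CLType.U64.value   # 5, used when encoding/decoding
assert value.type_info.typeof in CL_TYPES_SIMPLE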
google/cloud/aiplatform_v1/types/env_var.py | nachocano/python-aiplatform | 0 | 7642 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(package="google.cloud.aiplatform.v1", manifest={"EnvVar",},)
class EnvVar(proto.Message):
r"""Represents an environment variable present in a Container or
Python Module.
Attributes:
name (str):
Required. Name of the environment variable.
Must be a valid C identifier.
value (str):
Required. Variables that reference a $(VAR_NAME) are
expanded using the previous defined environment variables in
the container and any service environment variables. If a
variable cannot be resolved, the reference in the input
string will be unchanged. The $(VAR_NAME) syntax can be
escaped with a double $$, ie: $$(VAR_NAME). Escaped
references will never be expanded, regardless of whether the
variable exists or not.
"""
name = proto.Field(proto.STRING, number=1)
value = proto.Field(proto.STRING, number=2)
__all__ = tuple(sorted(__protobuf__.manifest))
| # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(package="google.cloud.aiplatform.v1", manifest={"EnvVar",},)
class EnvVar(proto.Message):
r"""Represents an environment variable present in a Container or
Python Module.
Attributes:
name (str):
Required. Name of the environment variable.
Must be a valid C identifier.
value (str):
Required. Variables that reference a $(VAR_NAME) are
expanded using the previous defined environment variables in
the container and any service environment variables. If a
variable cannot be resolved, the reference in the input
string will be unchanged. The $(VAR_NAME) syntax can be
escaped with a double $$, ie: $$(VAR_NAME). Escaped
references will never be expanded, regardless of whether the
variable exists or not.
"""
name = proto.Field(proto.STRING, number=1)
value = proto.Field(proto.STRING, number=2)
__all__ = tuple(sorted(__protobuf__.manifest))
| en | 0.731116 | # -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # type: ignore Represents an environment variable present in a Container or Python Module. Attributes: name (str): Required. Name of the environment variable. Must be a valid C identifier. value (str): Required. Variables that reference a $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. | 1.925861 | 2 |
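A small usage sketch of the generated message, assuming the google-cloud-aiplatform package is installed; the variable name and value here are placeholders.

from google.cloud.aiplatform_v1.types import EnvVar

env = EnvVar(name="MODEL_DIR", value="$(AIP_STORAGE_URI)/model")
print(env.name, env.value)

# proto-plus messages round-trip through plain dicts as well:
assert EnvVar.to_dict(env)["name"] == "MODEL_DIR"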
tools/borplay/packlib.py | MrCoolSpan/openbor | 25 | 7643 | <filename>tools/borplay/packlib.py<gh_stars>10-100
# Copyright (c) 2009 <NAME> ("Plombo")
# Class and functions to read .PAK files.
import struct
from cStringIO import StringIO
class PackFileReader(object):
''' Represents a BOR packfile. '''
files = dict() # the index holding the location of each file
packfile = None # the file object
def __init__(self, fp):
'''fp is a file path (string) or file-like object (file, StringIO,
etc.) in binary read mode'''
if isinstance(fp, str):
self.packfile = open(fp, 'rb')
else:
self.packfile = fp
self.read_index()
# reads the packfile's index into self.files
def read_index(self):
f = self.packfile
# read through file
tmp = True # placeholder that doesn't evaluate to false
while tmp: tmp = f.read(8192)
# read index start position and seek there
f.seek(-4, 1)
endpos = f.tell()
f.seek(struct.unpack('<I', f.read(4))[0])
while f.tell() < endpos:
ssize, fpos, fsize = struct.unpack('<III', f.read(12))
name = f.read(ssize-12).strip('\x00').replace('\\', '/').lower()
self.files[name] = fpos, fsize
# reads a file with its full path.
def read_file(self, filename):
'''Returns a file-like object for the file or None if the file isn't
contained in this packfile.
This method takes the full path starting with "data/" as a parameter.'''
key = filename.replace('\\', '/').lower().strip('\x00').strip()
if key not in self.files.keys(): return None
start, size = self.files[key]
self.packfile.seek(start)
f = StringIO()
bytesrem = size
while bytesrem >= 8192:
f.write(self.packfile.read(8192))
bytesrem -= 8192
if bytesrem: f.write(self.packfile.read(bytesrem))
f.seek(0)
return f
def find_file(self, filename):
'''Returns a file-like object for the file or None if the file isn't
contained in this packfile.
This method searches for the file by its filename.'''
filename = filename.lower().strip()
start, size = None, None
for key in self.files.keys():
if key.endswith(filename):
return self.read_file(key)
return None # file not found if it gets to this point
def list_music_files(self):
'''Lists the BOR files in the packfile.'''
borfiles = []
for key in self.files.keys():
if key.endswith('.bor'): borfiles.append(key)
borfiles.sort()
for key in borfiles: print key
def get_file(pak, borfile):
'''Prevents a need to directly use PackFileReader when you only want to get
one file, like in borplay and bor2wav. Returns a file-like object.'''
rdr = PackFileReader(pak)
if ('/' not in borfile) and ('\\' not in borfile): # only the filename is given; search for the file
return rdr.find_file(borfile)
else: # full path given
return rdr.read_file(borfile)
# For testing
if __name__ == '__main__':
rdr = PackFileReader('K:/BOR/OpenBOR/Paks/BOR.PAK')
#keys = rdr.files.keys(); keys.sort()
#print '\n'.join(keys)
#print rdr.read_file('data/chars/yamazaki/yamazaki.txt').read()
#print rdr.find_file('yamazaki.txt').read()
rdr.list_music_files()
| <filename>tools/borplay/packlib.py<gh_stars>10-100
# Copyright (c) 2009 <NAME> ("Plombo")
# Class and functions to read .PAK files.
import struct
from cStringIO import StringIO
class PackFileReader(object):
''' Represents a BOR packfile. '''
files = dict() # the index holding the location of each file
packfile = None # the file object
def __init__(self, fp):
'''fp is a file path (string) or file-like object (file, StringIO,
etc.) in binary read mode'''
if isinstance(fp, str):
self.packfile = open(fp, 'rb')
else:
self.packfile = fp
self.read_index()
# reads the packfile's index into self.files
def read_index(self):
f = self.packfile
# read through file
tmp = True # placeholder that doesn't evaluate to false
while tmp: tmp = f.read(8192)
# read index start position and seek there
f.seek(-4, 1)
endpos = f.tell()
f.seek(struct.unpack('<I', f.read(4))[0])
while f.tell() < endpos:
ssize, fpos, fsize = struct.unpack('<III', f.read(12))
name = f.read(ssize-12).strip('\x00').replace('\\', '/').lower()
self.files[name] = fpos, fsize
# reads a file with its full path.
def read_file(self, filename):
'''Returns a file-like object for the file or None if the file isn't
contained in this packfile.
This method takes the full path starting with "data/" as a parameter.'''
key = filename.replace('\\', '/').lower().strip('\x00').strip()
if key not in self.files.keys(): return None
start, size = self.files[key]
self.packfile.seek(start)
f = StringIO()
bytesrem = size
while bytesrem >= 8192:
f.write(self.packfile.read(8192))
bytesrem -= 8192
if bytesrem: f.write(self.packfile.read(bytesrem))
f.seek(0)
return f
def find_file(self, filename):
'''Returns a file-like object for the file or None if the file isn't
contained in this packfile.
This method searches for the file by its filename.'''
filename = filename.lower().strip()
start, size = None, None
for key in self.files.keys():
if key.endswith(filename):
return self.read_file(key)
return None # file not found if it gets to this point
def list_music_files(self):
'''Lists the BOR files in the packfile.'''
borfiles = []
for key in self.files.keys():
if key.endswith('.bor'): borfiles.append(key)
borfiles.sort()
for key in borfiles: print key
def get_file(pak, borfile):
'''Prevents a need to directly use PackFileReader when you only want to get
one file, like in borplay and bor2wav. Returns a file-like object.'''
rdr = PackFileReader(pak)
if ('/' not in borfile) and ('\\' not in borfile): # only the filename is given; search for the file
return rdr.find_file(borfile)
else: # full path given
return rdr.read_file(borfile)
# For testing
if __name__ == '__main__':
rdr = PackFileReader('K:/BOR/OpenBOR/Paks/BOR.PAK')
#keys = rdr.files.keys(); keys.sort()
#print '\n'.join(keys)
#print rdr.read_file('data/chars/yamazaki/yamazaki.txt').read()
#print rdr.find_file('yamazaki.txt').read()
rdr.list_music_files()
| en | 0.849537 | # Copyright (c) 2009 <NAME> ("Plombo") # Class and functions to read .PAK files. Represents a BOR packfile. # the index holding the location of each file # the file object fp is a file path (string) or file-like object (file, StringIO,
etc.) in binary read mode # reads the packfile's index into self.files # read through file # placeholder that doesn't evaluate to false # read index start postition and seek there # reads a file with its full path. Returns a file-like object for the file or None if the file isn't
contained in this packfile.
This method takes the full path starting with "data/" as a parameter. Returns a file-like object for the file or None if the file isn't
contained in this packfile.
This method searches for the file by its filename. # file not found if it gets to this point Lists the BOR files in the packfile. Prevents a need to directly use PackFileReader when you only want to get
one file, like in borplay and bor2wav. Returns a file-like object. # only the filename is given; search for the file # full path given # For testing #keys = rdr.files.keys(); keys.sort() #print '\n'.join(keys) #print rdr.read_file('data/chars/yamazaki/yamazaki.txt').read() #print rdr.find_file('yamazaki.txt').read() | 2.967646 | 3 |
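A quick sketch of the intended call pattern, in the same Python 2 style as the module and matching its commented-out test block; the pak path and track names are placeholders.

from packlib import PackFileReader, get_file

reader = PackFileReader('BOR.PAK')                  # placeholder path to a packfile
track = reader.read_file('data/music/remix.bor')    # lookup by full path (or None)
same = reader.find_file('remix.bor')                # lookup by bare filename (or None)

# One-shot helper used by borplay/bor2wav:
fileobj = get_file('BOR.PAK', 'remix.bor')
if fileobj is not None:
    data = fileobj.read()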
artascope/src/web/app.py | magus0219/icloud-photo-downloader | 3 | 7644 | <filename>artascope/src/web/app.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Created by magus0219[<EMAIL>] on 2020/3/23
from types import FunctionType
from flask import (
Flask,
redirect,
url_for,
)
import artascope.src.web.lib.filter as module_filter
from artascope.src.web.lib.content_processor import inject_version
def index():
return redirect(url_for("task.get_task_list"))
def create_app():
# create and configure the app
app = Flask(__name__)
app.jinja_env.filters.update(
{
key: val
for key, val in module_filter.__dict__.items()
if isinstance(val, FunctionType)
}
)
from . import user
from . import task
from . import scheduler
# register blueprint
app.register_blueprint(user.bp)
app.register_blueprint(task.bp)
app.register_blueprint(scheduler.bp)
# register index
app.add_url_rule("/", "index", index)
# register context processor
app.context_processor(inject_version)
return app
| <filename>artascope/src/web/app.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Created by magus0219[<EMAIL>] on 2020/3/23
from types import FunctionType
from flask import (
Flask,
redirect,
url_for,
)
import artascope.src.web.lib.filter as module_filter
from artascope.src.web.lib.content_processor import inject_version
def index():
return redirect(url_for("task.get_task_list"))
def create_app():
# create and configure the app
app = Flask(__name__)
app.jinja_env.filters.update(
{
key: val
for key, val in module_filter.__dict__.items()
if isinstance(val, FunctionType)
}
)
from . import user
from . import task
from . import scheduler
# register blueprint
app.register_blueprint(user.bp)
app.register_blueprint(task.bp)
app.register_blueprint(scheduler.bp)
# register index
app.add_url_rule("/", "index", index)
# register context processor
app.context_processor(inject_version)
return app
| en | 0.599839 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- # # Created by magus0219[<EMAIL>] on 2020/3/23 # create and configure the app # register blueprint # register index # register context processor | 1.712298 | 2 |
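A sketch of exercising the factory above with Flask's test client (assuming the artascope package and its blueprints import cleanly); the root URL should answer with a redirect to the task list view.

from artascope.src.web.app import create_app

app = create_app()
with app.test_client() as client:
    resp = client.get('/')
    assert resp.status_code == 302            # redirect(url_for("task.get_task_list"))
    print(resp.headers['Location'])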
tests/common/test_op/scatter_nd.py | KnowingNothing/akg-test | 1 | 7645 | <filename>tests/common/test_op/scatter_nd.py
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""operator dsl function: scatter_nd"""
import akg.tvm
from akg.utils import validation_check as vc_util
def scatter_nd(indices, updates, shape):
"""
Scatters input tensor updates to a new tensor according to indices.
Args:
indices(akg.tvm.Tensor): Tensor of type int32.
updates(akg.tvm.Tensor): Tensor of type float16, float32, int32.
shape(list, tuple): Specifies the shape of output tensor.
Returns:
Scattered tensor with same type as input tensor updates and shape specified by parameter shape.
"""
# check shapes dtype
indices_shape = [x.value for x in indices.shape]
data_shape = [x.value for x in updates.shape]
vc_util.check_shape(indices_shape)
vc_util.check_shape(data_shape)
indices_dtype = indices.dtype
if indices_dtype != "int32":
raise TypeError("indices_dtype only support int32 while dtype is %s" % indices_dtype)
dtype = updates.dtype
support_list = {"float16", "float32", "int32"}
if not (dtype in support_list):
raise TypeError("scatter_nd only support %s while dtype is %s" % (",".join(support_list), dtype))
n = indices.shape[0].value
def pick(i, j, *indexes):
return akg.tvm.expr.Select(j == indices[i][0],
akg.tvm.const(1, updates.dtype),
akg.tvm.const(0, updates.dtype)) * updates[(i,) + indexes]
reducible = akg.tvm.compute([n] + list(shape), lambda *i: pick(i[0], i[1], *i[2:]), name="reduc")
k = akg.tvm.reduce_axis((0, n))
res = akg.tvm.compute(shape, lambda *i: akg.tvm.sum(reducible[(k,) + i], axis=k))
return res
| <filename>tests/common/test_op/scatter_nd.py
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""operator dsl function: scatter_nd"""
import akg.tvm
from akg.utils import validation_check as vc_util
def scatter_nd(indices, updates, shape):
"""
Scatters input tensor updates to a new tensor according to indices.
Args:
indices(akg.tvm.Tensor): Tensor of type int32.
updates(akg.tvm.Tensor): Tensor of type float16, float32, int32.
shape(list, tuple): Specifies the shape of output tensor.
Returns:
Scattered tensor with same type as input tensor updates and shape specified by parameter shape.
"""
# check shapes dtype
indices_shape = [x.value for x in indices.shape]
data_shape = [x.value for x in updates.shape]
vc_util.check_shape(indices_shape)
vc_util.check_shape(data_shape)
indices_dtype = indices.dtype
if indices_dtype != "int32":
raise TypeError("indices_dtype only support int32 while dtype is %s" % indices_dtype)
dtype = updates.dtype
support_list = {"float16", "float32", "int32"}
if not (dtype in support_list):
raise TypeError("scatter_nd only support %s while dtype is %s" % (",".join(support_list), dtype))
n = indices.shape[0].value
def pick(i, j, *indexes):
return akg.tvm.expr.Select(j == indices[i][0],
akg.tvm.const(1, updates.dtype),
akg.tvm.const(0, updates.dtype)) * updates[(i,) + indexes]
reducible = akg.tvm.compute([n] + list(shape), lambda *i: pick(i[0], i[1], *i[2:]), name="reduc")
k = akg.tvm.reduce_axis((0, n))
res = akg.tvm.compute(shape, lambda *i: akg.tvm.sum(reducible[(k,) + i], axis=k))
return res
| en | 0.777584 | # Copyright 2019 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. operator dsl function: scatter_nd Scatters input tensor updates to a new tensor according to indices. Args: indices(akg.tvm.Tensor): Tensor of type int32. updates(akg.tvm.Tensor): Tensor of type float16, float32, int32. shape(list, tuple): Specifies the shape of output tensor. Returns: Scattered tensor with same type as input tensor updates and shape specified by parameter shape. # check shapes dtype | 2.271925 | 2 |
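To make the semantics concrete, here is a NumPy reference restatement of what the compute expression builds; it is not the akg/TVM kernel itself, just plain Python: indices is an (n, 1) column of row numbers, and each updates[i] is summed into that row of an all-zero output, so duplicate indices accumulate exactly like the tvm.sum reduction above.

import numpy as np

def scatter_nd_reference(indices, updates, shape):
    # indices: (n, 1) int32; updates: (n,) + shape[1:]; result has the given shape
    out = np.zeros(shape, dtype=updates.dtype)
    for i in range(indices.shape[0]):
        out[indices[i, 0]] += updates[i]   # repeated rows accumulate
    return out

indices = np.array([[1], [3], [1]], dtype=np.int32)
updates = np.array([[1., 2.], [3., 4.], [5., 6.]], dtype=np.float32)
print(scatter_nd_reference(indices, updates, (4, 2)))
# row 1 -> [6. 8.] (two contributions), row 3 -> [3. 4.], rows 0 and 2 stay zero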
spc/backend_utils.py | adamnew123456/spc | 1 | 7646 | """
Utility functions and classes shared by multiple backends
"""
from collections import namedtuple
import logging
from . import symbols
from . import types
LOGGER = logging.getLogger('spc.backend_utils')
# NameContexts encapsulate both the function stack (which holds values) and
# the symbol table context (which binds them)
NameContext = namedtuple('NameContext', ['symbol_ctx', 'func_stack'])
# While loops are identified by two labels - the start label, for re-running
# the condition, and the end label, for exiting when the condition is false
WhileLabels = namedtuple('WhileLabels', ['cond', 'exit'])
# If conditions are identified by two labels - the else label, for when
# the condition is false (to skip the then block) and the end label, for
# when the condition is true (to skip the else block)
IfLabels = namedtuple('IfLabels', ['else_body', 'end'])
# Switch conditionals are handled sort of like if conditionals:
#
# (switch |
# (case T1 B1) | jump-if-not T1, l1prime; ...; jump l4; l1prime:
# (case T2 B2) | jump-if-not T2, l2prime; ...; jump l4; l2prime:
# (else B3)) | ...
# | l4:
class SwitchLabels:
"""
Switch labels are similar to conditionals:
(switch |
(case T1 B1) | jump-if-not T1, case_lbl_1; ...; jump end; case_lbl_1:
(case T2 B2) | jump-if-not T2, case_lbl_2; ...; jump end; case_lbl_2:
(else B3) | ...; end_lbl:
Since each case is processed in order, only the current case end label and
the end switch label is available at any given time.
"""
def __init__(self, end_label):
self.end_label = end_label
self.case_end_label = None
class CoercionContext:
"""
This is used to wrap up all the information needed to coerce values from
one type to another.
"""
def __init__(self, backend, temp_context, code_templates):
self.backend = backend
self.temp_context = temp_context
self.templates = code_templates
def copy_with_context(self, new_context):
"""
Creates a copy of this object, but within a new temporary context.
"""
return CoercionContext(self.backend, new_context, self.templates)
def coerce(self, input_offset, input_type, output_type):
"""
Coerces a value, located on the stack, from the given input type to the
given output type. Returns the stack offset of the converted
variable and the output type.
Raises a TypeError if this is not possible.
"""
if input_type == output_type:
return input_offset, output_type
elif (input_type, output_type) == (types.Integer, types.Byte):
return self._coerce_int_to_byte(input_offset), output_type
elif (input_type, output_type) == (types.Byte, types.Integer):
return self._coerce_byte_to_int(input_offset), output_type
else:
raise TypeError('Cannot coerce {} -> {}'.format(input_type, output_type))
def _coerce_int_to_byte(self, input_offset):
"""
Coerces an integer to a byte, returning the stack offset of the
resulting byte.
"""
byte_size = self.backend._type_size(types.Byte)
byte_align = self.backend._type_alignment(types.Byte)
dest_offset = self.temp_context.add_temp(byte_size, byte_align)
tmp_reg = self.templates.tmp_regs[0]
self.backend._write_comment('Coercing int@{} to byte@{}',
input_offset, dest_offset)
self.templates.emit_load_stack_word(tmp_reg, input_offset)
self.templates.emit_int_to_byte(tmp_reg)
self.templates.emit_save_stack_byte(tmp_reg, dest_offset)
return dest_offset
def _coerce_byte_to_int(self, input_offset):
"""
Coerces a byte to an integer, returning the stack offset of the
resulting integer.
"""
int_size = self.backend._type_size(types.Integer)
int_align = self.backend._type_alignment(types.Integer)
dest_offset = self.temp_context.add_temp(int_size, int_align)
tmp_reg = self.templates.tmp_regs[0]
self.backend._write_comment('Coercing byte@{} to int@{}',
input_offset, dest_offset)
self.templates.emit_load_stack_byte(tmp_reg, input_offset)
self.templates.emit_byte_to_int(tmp_reg)
self.templates.emit_save_stack_word(tmp_reg, dest_offset)
return dest_offset
class FunctionStack:
"""
Tracks where variables are on the function's stack.
Note that this makes a number of assumptions about how things are stored:
- All arguments are stored on the stack, in reverse order. This goes
against the calling conventions for register rich architectures, like
MIPS, but there are enough corner cases (like copying structs by value)
that ignoring the calling convention is worthwhile for a non-optimizing
compiler like this.
- Locals and temporaries are stored on the stack, in order of creation.
"""
def __init__(self, backend):
self.backend = backend
self.local_offset = self._starting_locals_offset()
self.param_offset = self._starting_param_offset()
self.vars = {}
def _starting_locals_offset(self):
"""
Returns the starting offset of the local variables on the stack.
"""
raise NotImplementedError
def _starting_param_offset(self):
"""
Returns the starting offset of the parameter on the stack.
"""
raise NotImplementedError
def _expand_stack(self, size):
"""
Emits code to expand the stack frame by the given size.
"""
raise NotImplementedError
def _shrink_stack(self, size):
"""
Emits code to reduce the stack frame by the given size.
"""
raise NotImplementedError
def pad_param(self, space):
"""
Adds blank space before the next parameter.
"""
self.param_offset += space
def add_param(self, name, size, alignment):
"""
Adds a new parameter to the stack.
"""
self.param_offset = types.align_address(self.param_offset, alignment)
self.vars[name] = self.param_offset
self.param_offset += size
self.backend._write_comment('Binding param "{}" to offset {}', name, self.vars[name])
def add_local(self, name, size, alignment):
"""
Adds a local variable to the stack.
"""
self.local_offset = (
types.align_address(self.local_offset - size, alignment,
types.Alignment.Down))
self.vars[name] = self.local_offset
self.backend._write_comment('Binding local "{}" to offset {}', name, self.vars[name])
def get_temp_context(self, backend):
"""
Returns a context which can be used for putting temporary values on
the stack. When the context exits, the space used by the temporary
variables is cleaned up.
"""
root = self
class TemporaryContext:
def __init__(self, start_offset):
self.tmp_offset = start_offset
self.total_tmp_size = 0
def __enter__(self):
pass
def __exit__(self, *exc_info):
root._shrink_stack(self.total_tmp_size)
def add_temp(self, size, alignment):
"""
Makes space for a new temporary, returning the $fp offset at
which to write it.
"""
old_tmp_offset = self.tmp_offset
self.tmp_offset = (
types.align_address(self.tmp_offset - size, alignment,
types.Alignment.Down))
size_used = old_tmp_offset - self.tmp_offset
self.total_tmp_size += size_used
root._expand_stack(size_used)
return self.tmp_offset
def get_temp_context(self):
"""
Creates a temporary context, which starts at this temporary context.
"""
return TemporaryContext(self.tmp_offset)
return TemporaryContext(self.local_offset)
def expand_locals(self):
"""
Makes enough space for the local variables on the stack.
"""
self._expand_stack(self.locals_size())
def cleanup_locals(self):
"""
Cleans up the space used by the local variables on the stack.
"""
self._shrink_stack(self.locals_size())
def locals_size(self):
"""
Gets the size used by all the locals.
"""
return abs(self.local_offset) - abs(self._starting_locals_offset())
def __getitem__(self, name):
"""
Gets the offset to the variable on the stack, or a Register (if the
name was bound to one of the first four parameters)
"""
return self.vars[name]
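# Illustration (not part of the original module): FunctionStack leaves the frame
# layout abstract, so a backend is expected to subclass it and fill in the four
# hooks below. This toy subclass invents a layout (params above the frame pointer,
# locals below) and a FakeBackend that only prints comments, purely to show how
# add_param/add_local drive the offsets. It assumes the spc package is importable.
from spc.backend_utils import FunctionStack   # needed only when run outside this module

class FakeBackend:
    def _write_comment(self, fmt, *args, **kwargs):
        print(';', fmt.format(*args, **kwargs))

class ToyFunctionStack(FunctionStack):
    def _starting_locals_offset(self):
        return 0              # locals grow downward from the frame pointer
    def _starting_param_offset(self):
        return 8              # assumed: past a saved $fp/$ra pair
    def _expand_stack(self, size):
        print('; expand frame by', size)
    def _shrink_stack(self, size):
        print('; shrink frame by', size)

stack = ToyFunctionStack(FakeBackend())
stack.add_param('argc', 4, 4)     # bound at offset 8
stack.add_local('total', 4, 4)    # bound at offset -4
print(stack['argc'], stack['total'], stack.locals_size())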
class VerificationContext:
"""
Used to record all values and types defined all at once (i.e. inside the
same declaration block), so that they can be verified all at once.
"Verification" here means that their types are checked to be valid, which
means different things for different types.
"""
def __init__(self):
self.types = []
self.values = []
def add_value(self, name):
"""
Registers a new value to be verified.
"""
self.values.append(name)
def add_type(self, name):
"""
Registers a new type to be defined.
"""
self.types.append(types)
def verify(self, backend):
"""
Verifies all the definitions against the backend.
"""
backend._check_valid_types(backend.ctx_types[name] for name in self.types)
backend._check_valid_types(backend.ctx_values[name] for name in self.values)
class ContextMixin:
"""
Manages the symbol table contexts for this backend (as well as its function stack).
Depends upon the user of this mixin to inherit from BaseBackend in
addition to this one.
"""
def __init__(self):
self.parent_contexts = []
self.current_context = NameContext(symbols.Context(), None)
self.verify_context = VerificationContext()
def _register_file_ns(self, namespace):
"""
Replaces the current context, with one where the symbol context is
expanded to contain the file's namespace.
"""
file_context = self.current_context.symbol_ctx.register(namespace)
self.current_context = self.current_context._replace(symbol_ctx=file_context)
@property
def ctx_namespace(self):
"""
Gets the current namespace
"""
return self.current_context.symbol_ctx.search_path[0]
@property
def ctx_values(self):
"""
Returns the current context's value symbols.
"""
return self.current_context.symbol_ctx.values
@property
def ctx_types(self):
"""
Returns the current context's type symbols.
"""
return self.current_context.symbol_ctx.types
@property
def ctx_stack(self):
"""
Returns the current context's stack information.
"""
return self.current_context.func_stack
def _value_is_defined(self, name):
"""
Returns True if the given variable is defined in the current scope, or
False otherwise.
This is for the static expression processor function, var-def?
"""
return (name in self.ctx_values and
self.ctx_values.is_visible(name))
def _type_is_defined(self, name):
"""
Returns True if the given type is defined in the current scope, or
False otherwise.
This is for the static expression processor function, var-def?
"""
return (name in self.ctx_types and
self.ctx_types.is_visible(name))
def _make_func_stack(self):
raise NotImplementedError
def _push_context(self):
"""
Pushes a new binding context.
"""
old_context = self.current_context
self.parent_contexts.append(old_context)
self.current_context = NameContext(
self.current_context.symbol_ctx.enter(),
self._make_func_stack())
def _pop_context(self):
"""
Loads the previous binding context.
"""
self.current_context = self.parent_contexts.pop()
def _resolve_if_type_name(self, name):
"""
Resolves a type name into a concrete type.
"""
try:
return types.resolve_name(name, self.ctx_types)
except PermissionError as exn:
self.error(self.line, self.col,
'Cannot resolve hidden type "{}"', str(exn))
except RecursionError:
self.error(self.line, self.col,
'Type aliases too deep, when resolving "{}"', name)
except KeyError as exn:
self.error(self.line, self.col,
'Invalid type "{}"', str(exn))
def _verify_types(self):
"""
Verifies all the types across all this current context's symbols.
"""
self.verify_context.verify(self)
self.verify_context = VerificationContext()
class ThirtyTwoMixin:
"""
Defines some information about type sizes and alignment which 32-bit
platforms have in common.
Depends upon the user of this mixin to inherit from ContextMixin.
"""
def _type_alignment(self, type_obj):
"""
Returns alignment of the given type (1 for byte, 4 for word, etc.)
"""
type_obj = self._resolve_if_type_name(type_obj)
if type_obj is types.Integer:
return 4
elif type_obj is types.Byte:
return 1
elif isinstance(type_obj, (types.PointerTo, types.FunctionPointer)):
return 4
elif isinstance(type_obj, types.ArrayOf):
return self._type_alignment(type_obj.type)
elif isinstance(type_obj, types.Struct):
# The alignment only concerns the first element of the struct -
# the struct's internal alignment doesn't come into play
#
# Also, an OrderedDict's fields are not iterable, for whatever reason
struct_types = list(type_obj.fields.values())
return self._type_alignment(struct_types[0])
else:
raise TypeError('Not a compiler type: {}'.format(type_obj))
def _type_size(self, type_obj, depth=0):
"""
Returns the size of a type object in bytes.
"""
MAX_DEPTH = 100
if depth >= MAX_DEPTH:
self.error(self.line, self.col,
"Type nested too deeply - potential self-referential type")
type_obj = self._resolve_if_type_name(type_obj)
if type_obj is types.Integer:
return 4
elif type_obj is types.Byte:
return 1
elif isinstance(type_obj, (types.PointerTo, types.FunctionPointer)):
return 4
elif isinstance(type_obj, types.ArrayOf):
# To avoid wasting space on the last element, this pads all the
# elements but the last
base_size = self._type_size(type_obj.type, depth + 1)
return self._array_offset(type_obj, type_obj.count - 1) + base_size
elif isinstance(type_obj, types.Struct):
last_field = list(type_obj.fields)[-1]
last_field_type = type_obj.fields[last_field]
last_field_offset = self._field_offset(type_obj, last_field)
return last_field_offset + self._type_size(last_field_type, depth + 1)
else:
raise TypeError('Not a compiler type: {}'.format(type_obj))
class comment_after:
"""
Wraps a method - after the method executes, something is written to
the log.
"""
def __init__(self, fmt, *args, **kwargs):
self.fmt = fmt
self.args = args
self.kwargs = kwargs
def __call__(self, func):
def wrapper(parent, *args, **kwargs):
x = func(parent, *args, **kwargs)
parent._write_comment(self.fmt, *self.args, **self.kwargs)
return x
return wrapper
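# Illustration (not part of the original module): comment_after wraps a backend
# method so that, after the method runs, a formatted comment is emitted through
# the backend's _write_comment. FakeEmitter below is a made-up stand-in with a
# print-based _write_comment, just to show the call order.
from spc.backend_utils import comment_after   # needed only when run outside this module

class FakeEmitter:
    def _write_comment(self, fmt, *args, **kwargs):
        print('#', fmt.format(*args, **kwargs))

    @comment_after('finished declaration block')
    def handle_decls(self, names):
        print('declaring:', ', '.join(names))

FakeEmitter().handle_decls(['x', 'y'])
# prints "declaring: x, y" first, then "# finished declaration block"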
| """
Utility functions and classes shared by multiple backends
"""
from collections import namedtuple
import logging
from . import symbols
from . import types
LOGGER = logging.getLogger('spc.backend_utils')
# NameContexts encapsulate both the function stack (which holds values) and
# the symbol table context (which binds them)
NameContext = namedtuple('NameContext', ['symbol_ctx', 'func_stack'])
# While loops are identified by two labels - the start label, for re-running
# the condition, and the end label, for exiting when the condition is false
WhileLabels = namedtuple('WhileLabels', ['cond', 'exit'])
# If conditions are identified by two labels - the else label, for when
# the condition is false (to skip the then block) and the end label, for
# when the condition is true (to skip the else block)
IfLabels = namedtuple('IfLabels', ['else_body', 'end'])
# Switch conditionals are handled sort of like if conditionals:
#
# (switch |
# (case T1 B1) | jump-if-not T1, l1prime; ...; jump l4; l1prime:
# (case T2 B2) | jump-if-not T2, l2prime; ...; jump l4; l2prime:
# (else B3)) | ...
# | l4:
class SwitchLabels:
"""
Switch labels are similar to conditionals:
(switch |
(case T1 B1) | jump-if-not T1, case_lbl_1; ...; jump end; case_lbl_1:
(case T2 B2) | jump-if-not T2, case_lbl_2; ...; jump end; case_lbl_2:
(else B3) | ...; end_lbl:
Since each case is processed in order, only the current case end label and
the end switch label is available at any given time.
"""
def __init__(self, end_label):
self.end_label = end_label
self.case_end_label = None
class CoercionContext:
"""
This is used to wrap up all the information needed to coerce values from
one type to another.
"""
def __init__(self, backend, temp_context, code_templates):
self.backend = backend
self.temp_context = temp_context
self.templates = code_templates
def copy_with_context(self, new_context):
"""
Creates a copy of this object, but within a new temporary context.
"""
return CoercionContext(self.backend, new_context, self.templates)
def coerce(self, input_offset, input_type, output_type):
"""
Coerces a value, located on the stack, from the given input type to the
given output type. Returns the stack offset of the converted
variable and the output type.
Raises a TypeError if this is not possible.
"""
if input_type == output_type:
return input_offset, output_type
elif (input_type, output_type) == (types.Integer, types.Byte):
return self._coerce_int_to_byte(input_offset), output_type
elif (input_type, output_type) == (types.Byte, types.Integer):
return self._coerce_byte_to_int(input_offset), output_type
else:
raise TypeError('Cannot coerce {} -> {}'.format(input_type, output_type))
def _coerce_int_to_byte(self, input_offset):
"""
Coerces an integer to a byte, returning the stack offset of the
resulting byte.
"""
byte_size = self.backend._type_size(types.Byte)
byte_align = self.backend._type_alignment(types.Byte)
dest_offset = self.temp_context.add_temp(byte_size, byte_align)
tmp_reg = self.templates.tmp_regs[0]
self.backend._write_comment('Coercing int@{} to byte@{}',
input_offset, dest_offset)
self.templates.emit_load_stack_word(tmp_reg, input_offset)
self.templates.emit_int_to_byte(tmp_reg)
self.templates.emit_save_stack_byte(tmp_reg, dest_offset)
return dest_offset
def _coerce_byte_to_int(self, input_offset):
"""
Coerces a byte to an integer, returning the stack offset of the
resulting integer.
"""
int_size = self.backend._type_size(types.Integer)
int_align = self.backend._type_alignment(types.Integer)
dest_offset = self.temp_context.add_temp(int_size, int_align)
tmp_reg = self.templates.tmp_regs[0]
self.backend._write_comment('Coercing byte@{} to int@{}',
input_offset, dest_offset)
self.templates.emit_load_stack_byte(tmp_reg, input_offset)
self.templates.emit_byte_to_int(tmp_reg)
self.templates.emit_save_stack_word(tmp_reg, dest_offset)
return dest_offset
class FunctionStack:
"""
Tracks where variables are on the function's stack.
Note that this makes a number of assumptions about how things are stored:
- All arguments are stored on the stack, in reverse order. This goes
against the calling conventions for register rich architectures, like
MIPS, but there are enough corner cases (like copying structs by value)
that ignoring the calling convention is worthwhile for a non-optimizing
compiler like this.
- Locals and temporaries are stored on the stack, in order of creation.
"""
def __init__(self, backend):
self.backend = backend
self.local_offset = self._starting_locals_offset()
self.param_offset = self._starting_param_offset()
self.vars = {}
def _starting_locals_offset(self):
"""
Returns the starting offset of the local variables on the stack.
"""
raise NotImplementedError
def _starting_param_offset(self):
"""
Returns the starting offset of the parameter on the stack.
"""
raise NotImplementedError
def _expand_stack(self, size):
"""
Emits code to expand the stack frame by the given size.
"""
raise NotImplementedError
def _shrink_stack(self, size):
"""
Emits code to reduce the stack frame by the given size.
"""
raise NotImplementedError
def pad_param(self, space):
"""
Adds blank space before the next parameter.
"""
self.param_offset += space
def add_param(self, name, size, alignment):
"""
Adds a new parameter to the stack.
"""
self.param_offset = types.align_address(self.param_offset, alignment)
self.vars[name] = self.param_offset
self.param_offset += size
self.backend._write_comment('Binding param "{}" to offset {}', name, self.vars[name])
def add_local(self, name, size, alignment):
"""
Adds a local variable to the stack.
"""
self.local_offset = (
types.align_address(self.local_offset - size, alignment,
types.Alignment.Down))
self.vars[name] = self.local_offset
self.backend._write_comment('Binding local "{}" to offset {}', name, self.vars[name])
def get_temp_context(self, backend):
"""
Returns a context which can be used for putting temporary values on
the stack. When the context exits, the space used by the temporary
variables is cleaned up.
"""
root = self
class TemporaryContext:
def __init__(self, start_offset):
self.tmp_offset = start_offset
self.total_tmp_size = 0
def __enter__(self):
pass
def __exit__(self, *exc_info):
root._shrink_stack(self.total_tmp_size)
def add_temp(self, size, alignment):
"""
Makes space for a new temporary, returning the $fp offset at
which to write it.
"""
old_tmp_offset = self.tmp_offset
self.tmp_offset = (
types.align_address(self.tmp_offset - size, alignment,
types.Alignment.Down))
size_used = old_tmp_offset - self.tmp_offset
self.total_tmp_size += size_used
root._expand_stack(size_used)
return self.tmp_offset
def get_temp_context(self):
"""
Creates a temporary context, which starts at this temporary context.
"""
return TemporaryContext(self.tmp_offset)
return TemporaryContext(self.local_offset)
def expand_locals(self):
"""
Makes enough space for the local variables on the stack.
"""
self._expand_stack(self.locals_size())
def cleanup_locals(self):
"""
Cleans up the space used by the local variables on the stack.
"""
self._shrink_stack(self.locals_size())
def locals_size(self):
"""
Gets the size used by all the locals.
"""
return abs(self.local_offset) - abs(self._starting_locals_offset())
def __getitem__(self, name):
"""
Gets the offset to the variable on the stack, or a Register (if the
name was bound to one of the first four parameters)
"""
return self.vars[name]
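# A minimal sketch of a concrete FunctionStack for a 32-bit, frame-pointer based
# target. The offsets and the _write_instr emitter used below are assumptions for
# illustration only, not taken from this module:
#
#     class FramePointerStack(FunctionStack):
#         def _starting_locals_offset(self):
#             return 0               # locals grow downward from the frame pointer
#         def _starting_param_offset(self):
#             return 8               # params sit above the saved $fp/$ra pair
#         def _expand_stack(self, size):
#             if size > 0:
#                 self.backend._write_instr('addi $sp, $sp, -{}', size)
#         def _shrink_stack(self, size):
#             if size > 0:
#                 self.backend._write_instr('addi $sp, $sp, {}', size)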
class VerificationContext:
"""
Used to record all values and types defined all at once (i.e. inside the
same declaration block), so that they can be verified all at once.
"Verification" here means that their types are checked to be valid, which
means different things for different types.
"""
def __init__(self):
self.types = []
self.values = []
def add_value(self, name):
"""
Registers a new value to be verified.
"""
self.values.append(name)
def add_type(self, name):
"""
Registers a new type to be defined.
"""
        self.types.append(name)
def verify(self, backend):
"""
Verifies all the definitions against the backend.
"""
backend._check_valid_types(backend.ctx_types[name] for name in self.types)
backend._check_valid_types(backend.ctx_values[name] for name in self.values)
class ContextMixin:
"""
    Manages the symbol table contexts for this backend (as well as its function stack).
Depends upon the user of this mixin to inherit from BaseBackend in
addition to this one.
"""
def __init__(self):
self.parent_contexts = []
self.current_context = NameContext(symbols.Context(), None)
self.verify_context = VerificationContext()
def _register_file_ns(self, namespace):
"""
Replaces the current context, with one where the symbol context is
expanded to contain the file's namespace.
"""
file_context = self.current_context.symbol_ctx.register(namespace)
self.current_context = self.current_context._replace(symbol_ctx=file_context)
@property
def ctx_namespace(self):
"""
Gets the current namespace
"""
return self.current_context.symbol_ctx.search_path[0]
@property
def ctx_values(self):
"""
Returns the current context's value symbols.
"""
return self.current_context.symbol_ctx.values
@property
def ctx_types(self):
"""
Returns the current context's type symbols.
"""
return self.current_context.symbol_ctx.types
@property
def ctx_stack(self):
"""
Returns the current context's stack information.
"""
return self.current_context.func_stack
def _value_is_defined(self, name):
"""
Returns True if the given variable is defined in the current scope, or
False otherwise.
This is for the static expression processor function, var-def?
"""
return (name in self.ctx_values and
self.ctx_values.is_visible(name))
def _type_is_defined(self, name):
"""
Returns True if the given type is defined in the current scope, or
False otherwise.
This is for the static expression processor function, var-def?
"""
return (name in self.ctx_types and
self.ctx_types.is_visible(name))
def _make_func_stack(self):
raise NotImplementedError
def _push_context(self):
"""
Pushes a new binding context.
"""
old_context = self.current_context
self.parent_contexts.append(old_context)
self.current_context = NameContext(
self.current_context.symbol_ctx.enter(),
self._make_func_stack())
def _pop_context(self):
"""
Loads the previous binding context.
"""
self.current_context = self.parent_contexts.pop()
def _resolve_if_type_name(self, name):
"""
Resolves a type name into a concrete type.
"""
try:
return types.resolve_name(name, self.ctx_types)
except PermissionError as exn:
self.error(self.line, self.col,
'Cannot resolve hidden type "{}"', str(exn))
except RecursionError:
self.error(self.line, self.col,
'Type aliases too deep, when resolving "{}"', name)
except KeyError as exn:
self.error(self.line, self.col,
'Invalid type "{}"', str(exn))
def _verify_types(self):
"""
Verifies all the types across all this current context's symbols.
"""
self.verify_context.verify(self)
self.verify_context = VerificationContext()
class ThirtyTwoMixin:
"""
Defines some information about type sizes and alignment which 32-bit
platforms have in common.
Depends upon the user of this mixin to inherit from ContextMixin.
"""
def _type_alignment(self, type_obj):
"""
Returns alignment of the given type (1 for byte, 4 for word, etc.)
"""
type_obj = self._resolve_if_type_name(type_obj)
if type_obj is types.Integer:
return 4
elif type_obj is types.Byte:
return 1
elif isinstance(type_obj, (types.PointerTo, types.FunctionPointer)):
return 4
elif isinstance(type_obj, types.ArrayOf):
return self._type_alignment(type_obj.type)
elif isinstance(type_obj, types.Struct):
# The alignment only concerns the first element of the struct -
# the struct's internal alignment doesn't come into play
#
            # Also, an OrderedDict's fields are not iterable, for whatever reason
struct_types = list(type_obj.fields.values())
return self._type_alignment(struct_types[0])
else:
raise TypeError('Not a compiler type: {}'.format(type_obj))
def _type_size(self, type_obj, depth=0):
"""
Returns the size of a type object in bytes.
"""
MAX_DEPTH = 100
if depth >= MAX_DEPTH:
self.error(self.line, self.col,
"Type nested too deeply - potential self-referential type")
type_obj = self._resolve_if_type_name(type_obj)
if type_obj is types.Integer:
return 4
elif type_obj is types.Byte:
return 1
elif isinstance(type_obj, (types.PointerTo, types.FunctionPointer)):
return 4
elif isinstance(type_obj, types.ArrayOf):
# To avoid wasting space on the last element, this pads all the
# elements but the last
base_size = self._type_size(type_obj.type, depth + 1)
return self._array_offset(type_obj, type_obj.count - 1) + base_size
elif isinstance(type_obj, types.Struct):
last_field = list(type_obj.fields)[-1]
last_field_type = type_obj.fields[last_field]
last_field_offset = self._field_offset(type_obj, last_field)
return last_field_offset + self._type_size(last_field_type, depth + 1)
else:
raise TypeError('Not a compiler type: {}'.format(type_obj))
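# Worked example (assuming _field_offset, which is not shown here, pads each field
# to its own alignment): a struct with fields (tag: Byte, value: Integer) places
# 'value' at offset 4, so its size is 4 + _type_size(Integer) = 8 bytes, while its
# alignment is that of its first field 'tag', i.e. 1.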
class comment_after:
"""
Wraps a method - after the method executes, something is written to
the log.
"""
def __init__(self, fmt, *args, **kwargs):
self.fmt = fmt
self.args = args
self.kwargs = kwargs
def __call__(self, func):
def wrapper(parent, *args, **kwargs):
x = func(parent, *args, **kwargs)
parent._write_comment(self.fmt, *self.args, **self.kwargs)
return x
return wrapper
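# Hypothetical usage (not part of this module): any method on a class exposing
# _write_comment(fmt, *args) can be wrapped so a comment is logged after it runs:
#
#     class SomeBackend(BaseBackend):
#         @comment_after('Finished emitting function prologue')
#         def _emit_prologue(self):
#             ...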
| en | 0.839775 | Utility functions and classes shared by multiple backends # NameContexts encapsulate both the function stack (which holds values) and # the symbol table context (which binds them) # While loops are identified by two labels - the start label, for re-running # the condition, and the end label, for exiting when the condition is false # If conditions are identified by two labels - the else label, for when # the condition is false (to skip the then block) and the end label, for # when the condition is true (to skip the else block) # Switch conditionals are handled sort of like if conditionals: # # (switch | # (case T1 B1) | jump-if-not T1, l1prime; ...; jump l4; l1prime: # (case T2 B2) | jump-if-not T2, l2prime; ...; jump l4; l2prime: # (else B3)) | ... # | l4: Switch labels are similar to conditionals: (switch | (case T1 B1) | jump-if-not T1, case_lbl_1; ...; jump end; case_lbl_1: (case T2 B2) | jump-if-not T2, case_lbl_2; ...; jump end; case_lbl_2: (else B3) | ...; end_lbl: Since each case is processed in order, only the current case end label and the end switch label is available at any given time. This is used to wrap up all the information needed to coerce values from one type to another. Creates a copy of this object, but within a new temporary context. Coerces a value, located on the stack, from the given input type to the given output type. Returns the stack offset of the converted variable and the output type. Raises a TypeError if this is not possible. Coerces an integer to a byte, returning the stack offset of the resulting byte. Coerces a byte to an integer, returning the stack offset of the resulting integer. Tracks where variables are on the function's stack. Note that this makes a number of assumptions about how things are stored: - All arguments are stored on the stack, in reverse order. This goes against the calling conventions for register rich architectures, like MIPS, but there are enough corner cases (like copying structs by value) that ignoring the calling convention is worthwhile for a non-optimizing compiler like this. - Locals and temporaries are stored on the stack, in order of creation. Returns the starting offset of the local variables on the stack. Returns the starting offset of the parameter on the stack. Emits code to expand the stack frame by the given size. Emits code to reduce the stack frame by the given size. Adds blank space before the next parameter. Adds a new parameter to the stack. Adds a local variable to the stack. Returns a context which can be used for putting temporary values on the stack. When the context exits, the space used by the temporary variables is cleaned up. Makes space for a new temporary, returning the $fp offset at which to write it. Creates a temporary context, which starts at this temporary context. Makes enough space for the local variables on the stack. Cleans up the space used by the local variables on the stack. Gets the size used by all the locals. Gets the offset to the variable on the stack, or a Register (if the name was bound to one of the first four parameters) Used to record all values and types defined all at once (i.e. inside the same declaration block), so that they can be verified all at once. "Verification" here means that their types are checked to be valid, which means different things for different types. Registers a new value to be verified. Registers a new type to be defined. Verifies all the definitions against the backend. 
Manages the symbol table contexts for this backend (as well as its function stack Depends upon the user of this mixin to inherit from BaseBackend in addition to this one. Replaces the current context, with one where the symbol context is expanded to contain the file's namespace. Gets the current namespace Returns the current context's value symbols. Returns the current context's type symbols. Returns the current context's stack information. Returns True if the given variable is defined in the current scope, or False otherwise. This is for the static expression processor function, var-def? Returns True if the given type is defined in the current scope, or False otherwise. This is for the static expression processor function, var-def? Pushes a new binding context. Loads the previous binding context. Resolves a type name into a concrete type. Verifies all the types across all this current context's symbols. Defines some information about type sizes and alignment which 32-bit platforms have in common. Depends upon the user of this mixin to inherit from ContextMixin. Returns alignment of the given type (1 for byte, 4 for word, etc.) # The alignment only concerns the first element of the struct - # the struct's internal alignment doesn't come into play # # Also, an OrderdDict's fields are not iterable, for whatever reason Returns the size of a type object in bytes. # To avoid wasting space on the last element, this pads all the # elements but the last Wraps a method - after the method executes, something is written to the log. | 2.563542 | 3 |
heareval/__init__.py | neuralaudio/hear-eval-kit | 24 | 7647 | __version__ = "2021.0.6"
| __version__ = "2021.0.6"
| none | 1 | 1.050445 | 1 |
|
recommender_engine/similarity_measure/__init__.py | tranlyvu/recommender | 8 | 7648 | """
recommender_engine
-----
recommender_engine is a recommendation application using either item-based or user-based approaches
:copyright: (c) 2016 - 2019 by <NAME>. All Rights Reserved.
:license: Apache License 2.0
"""
from .cosine import cosine
from .euclidean_distance import euclidean_distance
from .pearson_correlation import pearson_correlation
name="similarity_measure"
__all__ = ["cosine", "euclidean_distance", "pearson_correlation"]
__author__ = "<NAME> (<EMAIL>)"
__copyright__ = "Copyright (c) 2016 - 2019 <NAME>. All Rights Reserved."
__license__ = "Apache License 2.0"
__credits__ = ["<NAME>"]
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Beta"
| """
recommender_engine
-----
recommender_engine is a recommendation application using either item-based or user-based approaches
:copyright: (c) 2016 - 2019 by <NAME>. All Rights Reserved.
:license: Apache License 2.0
"""
from .cosine import cosine
from .euclidean_distance import euclidean_distance
from .pearson_correlation import pearson_correlation
name="similarity_measure"
__all__ = ["cosine", "euclidean_distance", "pearson_correlation"]
__author__ = "<NAME> (<EMAIL>)"
__copyright__ = "Copyright (c) 2016 - 2019 <NAME>. All Rights Reserved."
__license__ = "Apache License 2.0"
__credits__ = ["<NAME>"]
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Beta"
| en | 0.804016 | recommender_engine ----- recommender_engine is a recommendation application using either item-based or user-based approaches :copyright: (c) 2016 - 2019 by <NAME>. All Rights Reserved. :license: Apache License 2.0 | 1.584651 | 2 |
code/abc057_a_02.py | KoyanagiHitoshi/AtCoder | 3 | 7649 | a,b=map(int,input().split())
print((a+b)%24) | a,b=map(int,input().split())
print((a+b)%24) | none | 1 | 2.3394 | 2 |
|
8/8_9.py | kopsh/python_cookbook | 0 | 7650 | <filename>8/8_9.py
class CheckType:
r"""
    8.9 Creating new class or instance attributes
    Uses descriptors to implement parameter type checking
>>> @ParamAssert(a=int, b=list)
... class A:
... def __init__(self, a, b):
... self.a = a
... self.b = b
>>> a = A(1, [])
"""
def __init__(self, name, expected_type):
self.name = name
self.expected_type = expected_type
def __get__(self, instance, owner):
if instance is None:
return self
else:
return instance.__dict__[self.name]
def __set__(self, instance, value):
if not isinstance(value, self.expected_type):
raise TypeError("{} cannot be assigned by {!r}, it`s type is {!r}".format(self.name, value,
self.expected_type))
instance.__dict__[self.name] = value
class ParamAssert:
def __init__(self, **kwargs):
self.kwargs = kwargs
def __call__(self, cls):
for name, expected_type in self.kwargs.items():
setattr(cls, name, CheckType(name, expected_type))
return cls
class Integer:
def __init__(self, name):
self.name = name
def __get__(self, instance, cls):
if instance is None:
return self
else:
return instance.__dict__.get(self.name, None)
def __set__(self, instance, value):
if not isinstance(value, int):
raise TypeError("{} cannot be assigned by {!r}".format(self.name, value))
instance.__dict__[self.name] = value
class Point:
"""
>>> p = Point(0, 0)
>>> print(p.x)
0
>>> p.y = "1"
Traceback (most recent call last):
...
TypeError: y cannot be assigned by '1'
"""
x = Integer('x')
y = Integer('y')
def __init__(self, x, y):
self.x = x
self.y = y
if __name__ == '__main__':
import doctest
doctest.testmod() | <filename>8/8_9.py
class CheckType:
r"""
    8.9 Creating new class or instance attributes
    Uses descriptors to implement parameter type checking
>>> @ParamAssert(a=int, b=list)
... class A:
... def __init__(self, a, b):
... self.a = a
... self.b = b
>>> a = A(1, [])
"""
def __init__(self, name, expected_type):
self.name = name
self.expected_type = expected_type
def __get__(self, instance, owner):
if instance is None:
return self
else:
return instance.__dict__[self.name]
def __set__(self, instance, value):
if not isinstance(value, self.expected_type):
raise TypeError("{} cannot be assigned by {!r}, it`s type is {!r}".format(self.name, value,
self.expected_type))
instance.__dict__[self.name] = value
class ParamAssert:
def __init__(self, **kwargs):
self.kwargs = kwargs
def __call__(self, cls):
for name, expected_type in self.kwargs.items():
setattr(cls, name, CheckType(name, expected_type))
return cls
class Integer:
def __init__(self, name):
self.name = name
def __get__(self, instance, cls):
if instance is None:
return self
else:
return instance.__dict__.get(self.name, None)
def __set__(self, instance, value):
if not isinstance(value, int):
raise TypeError("{} cannot be assigned by {!r}".format(self.name, value))
instance.__dict__[self.name] = value
class Point:
"""
>>> p = Point(0, 0)
>>> print(p.x)
0
>>> p.y = "1"
Traceback (most recent call last):
...
TypeError: y cannot be assigned by '1'
"""
x = Integer('x')
y = Integer('y')
def __init__(self, x, y):
self.x = x
self.y = y
if __name__ == '__main__':
import doctest
doctest.testmod() | en | 0.358631 | 8.9 创建新的类或实例属性 使用描述器,实现参数类型检查 >>> @ParamAssert(a=int, b=list) ... class A: ... def __init__(self, a, b): ... self.a = a ... self.b = b >>> a = A(1, []) >>> p = Point(0, 0) >>> print(p.x) 0 >>> p.y = "1" Traceback (most recent call last): ... TypeError: y cannot be assigned by '1' | 3.358447 | 3 |
test/examples/test_simple_gp_regression.py | ediphy-dwild/gpytorch | 0 | 7651 | import math
import torch
import unittest
import gpytorch
from torch import optim
from torch.autograd import Variable
from gpytorch.kernels import RBFKernel
from gpytorch.means import ConstantMean
from gpytorch.likelihoods import GaussianLikelihood
from gpytorch.random_variables import GaussianRandomVariable
# Simple training data: let's try to learn a sine function
train_x = Variable(torch.linspace(0, 1, 11))
train_y = Variable(torch.sin(train_x.data * (2 * math.pi)))
test_x = Variable(torch.linspace(0, 1, 51))
test_y = Variable(torch.sin(test_x.data * (2 * math.pi)))
class ExactGPModel(gpytorch.models.ExactGP):
def __init__(self, train_inputs, train_targets, likelihood):
super(ExactGPModel, self).__init__(train_inputs, train_targets, likelihood)
self.mean_module = ConstantMean(constant_bounds=(-1, 1))
self.covar_module = RBFKernel(log_lengthscale_bounds=(-3, 3))
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return GaussianRandomVariable(mean_x, covar_x)
class TestSimpleGPRegression(unittest.TestCase):
def test_posterior_latent_gp_and_likelihood_without_optimization(self):
# We're manually going to set the hyperparameters to be ridiculous
likelihood = GaussianLikelihood(log_noise_bounds=(-3, 3))
gp_model = ExactGPModel(train_x.data, train_y.data, likelihood)
# Update bounds to accommodate extreme parameters
gp_model.covar_module.set_bounds(log_lengthscale=(-10, 10))
likelihood.set_bounds(log_noise=(-10, 10))
# Update parameters
gp_model.covar_module.initialize(log_lengthscale=-10)
gp_model.mean_module.initialize(constant=0)
likelihood.initialize(log_noise=-10)
# Compute posterior distribution
gp_model.eval()
likelihood.eval()
# Let's see how our model does, conditioned with weird hyperparams
# The posterior should fit all the data
function_predictions = likelihood(gp_model(train_x))
self.assertLess(
torch.norm(function_predictions.mean().data - train_y.data),
1e-3,
)
self.assertLess(torch.norm(function_predictions.var().data), 1e-3)
# It shouldn't fit much else though
test_function_predictions = gp_model(Variable(torch.Tensor([1.1])))
self.assertLess(
torch.norm(test_function_predictions.mean().data - 0),
1e-4,
)
self.assertLess(torch.norm(test_function_predictions.var().data - 1), 1e-4)
def test_posterior_latent_gp_and_likelihood_with_optimization(self):
# We're manually going to set the hyperparameters to something they shouldn't be
likelihood = GaussianLikelihood(log_noise_bounds=(-3, 3))
gp_model = ExactGPModel(train_x.data, train_y.data, likelihood)
mll = gpytorch.ExactMarginalLogLikelihood(likelihood, gp_model)
gp_model.covar_module.initialize(log_lengthscale=1)
gp_model.mean_module.initialize(constant=0)
likelihood.initialize(log_noise=1)
# Find optimal model hyperparameters
gp_model.train()
likelihood.train()
optimizer = optim.Adam(
list(gp_model.parameters()) + list(likelihood.parameters()),
lr=0.1,
)
optimizer.n_iter = 0
for _ in range(50):
optimizer.zero_grad()
output = gp_model(train_x)
loss = -mll(output, train_y)
loss.backward()
optimizer.n_iter += 1
optimizer.step()
# Test the model
gp_model.eval()
likelihood.eval()
test_function_predictions = likelihood(gp_model(test_x))
mean_abs_error = torch.mean(
torch.abs(test_y - test_function_predictions.mean())
)
self.assertLess(mean_abs_error.data.squeeze()[0], 0.05)
def test_posterior_latent_gp_and_likelihood_fast_pred_var(self):
with gpytorch.fast_pred_var():
# We're manually going to set the hyperparameters to
# something they shouldn't be
likelihood = GaussianLikelihood(log_noise_bounds=(-3, 3))
gp_model = ExactGPModel(train_x.data, train_y.data, likelihood)
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)
gp_model.covar_module.initialize(log_lengthscale=1)
gp_model.mean_module.initialize(constant=0)
likelihood.initialize(log_noise=1)
# Find optimal model hyperparameters
gp_model.train()
likelihood.train()
optimizer = optim.Adam(
list(gp_model.parameters()) + list(likelihood.parameters()),
lr=0.1,
)
optimizer.n_iter = 0
for _ in range(50):
optimizer.zero_grad()
output = gp_model(train_x)
loss = -mll(output, train_y)
loss.backward()
optimizer.n_iter += 1
optimizer.step()
# Test the model
gp_model.eval()
likelihood.eval()
# Set the cache
test_function_predictions = likelihood(gp_model(train_x))
# Now bump up the likelihood to something huge
# This will make it easy to calculate the variance
likelihood.log_noise.data.fill_(3)
test_function_predictions = likelihood(gp_model(train_x))
noise = likelihood.log_noise.exp()
var_diff = (test_function_predictions.var() - noise).abs()
self.assertLess(torch.max(var_diff.data / noise.data), 0.05)
def test_posterior_latent_gp_and_likelihood_with_optimization_cuda(self):
if torch.cuda.is_available():
# We're manually going to set the hyperparameters to
# something they shouldn't be
likelihood = GaussianLikelihood(log_noise_bounds=(-3, 3)).cuda()
gp_model = ExactGPModel(
train_x.data.cuda(),
train_y.data.cuda(),
likelihood
).cuda()
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)
gp_model.covar_module.initialize(log_lengthscale=1)
gp_model.mean_module.initialize(constant=0)
likelihood.initialize(log_noise=1)
# Find optimal model hyperparameters
gp_model.train()
likelihood.train()
optimizer = optim.Adam(gp_model.parameters(), lr=0.1)
optimizer.n_iter = 0
for _ in range(50):
optimizer.zero_grad()
output = gp_model(train_x.cuda())
loss = -mll(output, train_y.cuda())
loss.backward()
optimizer.n_iter += 1
optimizer.step()
# Test the model
gp_model.eval()
likelihood.eval()
test_function_predictions = likelihood(gp_model(test_x.cuda()))
mean_abs_error = torch.mean(
torch.abs(test_y.cuda() - test_function_predictions.mean())
)
self.assertLess(mean_abs_error.data.squeeze()[0], 0.05)
if __name__ == '__main__':
unittest.main()
| import math
import torch
import unittest
import gpytorch
from torch import optim
from torch.autograd import Variable
from gpytorch.kernels import RBFKernel
from gpytorch.means import ConstantMean
from gpytorch.likelihoods import GaussianLikelihood
from gpytorch.random_variables import GaussianRandomVariable
# Simple training data: let's try to learn a sine function
train_x = Variable(torch.linspace(0, 1, 11))
train_y = Variable(torch.sin(train_x.data * (2 * math.pi)))
test_x = Variable(torch.linspace(0, 1, 51))
test_y = Variable(torch.sin(test_x.data * (2 * math.pi)))
class ExactGPModel(gpytorch.models.ExactGP):
def __init__(self, train_inputs, train_targets, likelihood):
super(ExactGPModel, self).__init__(train_inputs, train_targets, likelihood)
self.mean_module = ConstantMean(constant_bounds=(-1, 1))
self.covar_module = RBFKernel(log_lengthscale_bounds=(-3, 3))
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return GaussianRandomVariable(mean_x, covar_x)
class TestSimpleGPRegression(unittest.TestCase):
def test_posterior_latent_gp_and_likelihood_without_optimization(self):
# We're manually going to set the hyperparameters to be ridiculous
likelihood = GaussianLikelihood(log_noise_bounds=(-3, 3))
gp_model = ExactGPModel(train_x.data, train_y.data, likelihood)
# Update bounds to accommodate extreme parameters
gp_model.covar_module.set_bounds(log_lengthscale=(-10, 10))
likelihood.set_bounds(log_noise=(-10, 10))
# Update parameters
gp_model.covar_module.initialize(log_lengthscale=-10)
gp_model.mean_module.initialize(constant=0)
likelihood.initialize(log_noise=-10)
# Compute posterior distribution
gp_model.eval()
likelihood.eval()
# Let's see how our model does, conditioned with weird hyperparams
# The posterior should fit all the data
function_predictions = likelihood(gp_model(train_x))
self.assertLess(
torch.norm(function_predictions.mean().data - train_y.data),
1e-3,
)
self.assertLess(torch.norm(function_predictions.var().data), 1e-3)
# It shouldn't fit much else though
test_function_predictions = gp_model(Variable(torch.Tensor([1.1])))
self.assertLess(
torch.norm(test_function_predictions.mean().data - 0),
1e-4,
)
self.assertLess(torch.norm(test_function_predictions.var().data - 1), 1e-4)
def test_posterior_latent_gp_and_likelihood_with_optimization(self):
# We're manually going to set the hyperparameters to something they shouldn't be
likelihood = GaussianLikelihood(log_noise_bounds=(-3, 3))
gp_model = ExactGPModel(train_x.data, train_y.data, likelihood)
mll = gpytorch.ExactMarginalLogLikelihood(likelihood, gp_model)
gp_model.covar_module.initialize(log_lengthscale=1)
gp_model.mean_module.initialize(constant=0)
likelihood.initialize(log_noise=1)
# Find optimal model hyperparameters
gp_model.train()
likelihood.train()
optimizer = optim.Adam(
list(gp_model.parameters()) + list(likelihood.parameters()),
lr=0.1,
)
optimizer.n_iter = 0
for _ in range(50):
optimizer.zero_grad()
output = gp_model(train_x)
loss = -mll(output, train_y)
loss.backward()
optimizer.n_iter += 1
optimizer.step()
# Test the model
gp_model.eval()
likelihood.eval()
test_function_predictions = likelihood(gp_model(test_x))
mean_abs_error = torch.mean(
torch.abs(test_y - test_function_predictions.mean())
)
self.assertLess(mean_abs_error.data.squeeze()[0], 0.05)
def test_posterior_latent_gp_and_likelihood_fast_pred_var(self):
with gpytorch.fast_pred_var():
# We're manually going to set the hyperparameters to
# something they shouldn't be
likelihood = GaussianLikelihood(log_noise_bounds=(-3, 3))
gp_model = ExactGPModel(train_x.data, train_y.data, likelihood)
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)
gp_model.covar_module.initialize(log_lengthscale=1)
gp_model.mean_module.initialize(constant=0)
likelihood.initialize(log_noise=1)
# Find optimal model hyperparameters
gp_model.train()
likelihood.train()
optimizer = optim.Adam(
list(gp_model.parameters()) + list(likelihood.parameters()),
lr=0.1,
)
optimizer.n_iter = 0
for _ in range(50):
optimizer.zero_grad()
output = gp_model(train_x)
loss = -mll(output, train_y)
loss.backward()
optimizer.n_iter += 1
optimizer.step()
# Test the model
gp_model.eval()
likelihood.eval()
# Set the cache
test_function_predictions = likelihood(gp_model(train_x))
# Now bump up the likelihood to something huge
# This will make it easy to calculate the variance
likelihood.log_noise.data.fill_(3)
test_function_predictions = likelihood(gp_model(train_x))
noise = likelihood.log_noise.exp()
var_diff = (test_function_predictions.var() - noise).abs()
self.assertLess(torch.max(var_diff.data / noise.data), 0.05)
def test_posterior_latent_gp_and_likelihood_with_optimization_cuda(self):
if torch.cuda.is_available():
# We're manually going to set the hyperparameters to
# something they shouldn't be
likelihood = GaussianLikelihood(log_noise_bounds=(-3, 3)).cuda()
gp_model = ExactGPModel(
train_x.data.cuda(),
train_y.data.cuda(),
likelihood
).cuda()
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)
gp_model.covar_module.initialize(log_lengthscale=1)
gp_model.mean_module.initialize(constant=0)
likelihood.initialize(log_noise=1)
# Find optimal model hyperparameters
gp_model.train()
likelihood.train()
optimizer = optim.Adam(gp_model.parameters(), lr=0.1)
optimizer.n_iter = 0
for _ in range(50):
optimizer.zero_grad()
output = gp_model(train_x.cuda())
loss = -mll(output, train_y.cuda())
loss.backward()
optimizer.n_iter += 1
optimizer.step()
# Test the model
gp_model.eval()
likelihood.eval()
test_function_predictions = likelihood(gp_model(test_x.cuda()))
mean_abs_error = torch.mean(
torch.abs(test_y.cuda() - test_function_predictions.mean())
)
self.assertLess(mean_abs_error.data.squeeze()[0], 0.05)
if __name__ == '__main__':
unittest.main()
| en | 0.856111 | # Simple training data: let's try to learn a sine function # We're manually going to set the hyperparameters to be ridiculous # Update bounds to accommodate extreme parameters # Update parameters # Compute posterior distribution # Let's see how our model does, conditioned with weird hyperparams # The posterior should fit all the data # It shouldn't fit much else though # We're manually going to set the hyperparameters to something they shouldn't be # Find optimal model hyperparameters # Test the model # We're manually going to set the hyperparameters to # something they shouldn't be # Find optimal model hyperparameters # Test the model # Set the cache # Now bump up the likelihood to something huge # This will make it easy to calculate the variance # We're manually going to set the hyperparameters to # something they shouldn't be # Find optimal model hyperparameters # Test the model | 2.572127 | 3 |
samples/RiskManagement/Verification/customer-match-denied-parties-list.py | snavinch/cybersource-rest-samples-python | 21 | 7652 | from CyberSource import *
import os
import json
from importlib.machinery import SourceFileLoader
config_file = os.path.join(os.getcwd(), "data", "Configuration.py")
configuration = SourceFileLoader("module.name", config_file).load_module()
# To delete None values in Input Request Json body
def del_none(d):
for key, value in list(d.items()):
if value is None:
del d[key]
elif isinstance(value, dict):
del_none(value)
return d
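# Illustrative example (values invented here): del_none({'a': 1, 'b': None, 'c': {'d': None}})
# returns {'a': 1, 'c': {}} - None-valued keys are removed in place, recursing into nested dicts.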
def customer_match_denied_parties_list():
clientReferenceInformationCode = "verification example"
clientReferenceInformationComments = "Export-basic"
clientReferenceInformationPartnerDeveloperId = "7891234"
clientReferenceInformationPartnerSolutionId = "89012345"
clientReferenceInformationPartner = Riskv1decisionsClientReferenceInformationPartner(
developer_id = clientReferenceInformationPartnerDeveloperId,
solution_id = clientReferenceInformationPartnerSolutionId
)
clientReferenceInformation = Riskv1decisionsClientReferenceInformation(
code = clientReferenceInformationCode,
comments = clientReferenceInformationComments,
partner = clientReferenceInformationPartner.__dict__
)
orderInformationBillToAddress1 = "901 Metro Centre Blvd"
orderInformationBillToAdministrativeArea = "CA"
orderInformationBillToCountry = "US"
orderInformationBillToLocality = "Foster City"
orderInformationBillToPostalCode = "94404"
orderInformationBillToCompanyName = "A & C International Trade, Inc"
orderInformationBillToCompany = Riskv1exportcomplianceinquiriesOrderInformationBillToCompany(
name = orderInformationBillToCompanyName
)
orderInformationBillToFirstName = "ANDREE"
orderInformationBillToLastName = "AGNESE"
orderInformationBillToEmail = "<EMAIL>"
orderInformationBillTo = Riskv1exportcomplianceinquiriesOrderInformationBillTo(
address1 = orderInformationBillToAddress1,
administrative_area = orderInformationBillToAdministrativeArea,
country = orderInformationBillToCountry,
locality = orderInformationBillToLocality,
postal_code = orderInformationBillToPostalCode,
company = orderInformationBillToCompany.__dict__,
first_name = orderInformationBillToFirstName,
last_name = orderInformationBillToLastName,
email = orderInformationBillToEmail
)
orderInformationShipToCountry = "IN"
orderInformationShipToFirstName = "DumbelDore"
orderInformationShipToLastName = "Albus"
orderInformationShipTo = Riskv1exportcomplianceinquiriesOrderInformationShipTo(
country = orderInformationShipToCountry,
first_name = orderInformationShipToFirstName,
last_name = orderInformationShipToLastName
)
orderInformationLineItems = []
orderInformationLineItems1 = Riskv1exportcomplianceinquiriesOrderInformationLineItems(
unit_price = "120.50",
quantity = 3,
product_sku = "123456",
product_name = "Qwe",
product_code = "physical_software"
)
orderInformationLineItems.append(orderInformationLineItems1.__dict__)
orderInformation = Riskv1exportcomplianceinquiriesOrderInformation(
bill_to = orderInformationBillTo.__dict__,
ship_to = orderInformationShipTo.__dict__,
line_items = orderInformationLineItems
)
requestObj = ValidateExportComplianceRequest(
client_reference_information = clientReferenceInformation.__dict__,
order_information = orderInformation.__dict__
)
requestObj = del_none(requestObj.__dict__)
requestObj = json.dumps(requestObj)
try:
config_obj = configuration.Configuration()
client_config = config_obj.get_configuration()
api_instance = VerificationApi(client_config)
return_data, status, body = api_instance.validate_export_compliance(requestObj)
print("\nAPI RESPONSE CODE : ", status)
print("\nAPI RESPONSE BODY : ", body)
return return_data
except Exception as e:
print("\nException when calling VerificationApi->validate_export_compliance: %s\n" % e)
if __name__ == "__main__":
customer_match_denied_parties_list()
| from CyberSource import *
import os
import json
from importlib.machinery import SourceFileLoader
config_file = os.path.join(os.getcwd(), "data", "Configuration.py")
configuration = SourceFileLoader("module.name", config_file).load_module()
# To delete None values in Input Request Json body
def del_none(d):
for key, value in list(d.items()):
if value is None:
del d[key]
elif isinstance(value, dict):
del_none(value)
return d
def customer_match_denied_parties_list():
clientReferenceInformationCode = "verification example"
clientReferenceInformationComments = "Export-basic"
clientReferenceInformationPartnerDeveloperId = "7891234"
clientReferenceInformationPartnerSolutionId = "89012345"
clientReferenceInformationPartner = Riskv1decisionsClientReferenceInformationPartner(
developer_id = clientReferenceInformationPartnerDeveloperId,
solution_id = clientReferenceInformationPartnerSolutionId
)
clientReferenceInformation = Riskv1decisionsClientReferenceInformation(
code = clientReferenceInformationCode,
comments = clientReferenceInformationComments,
partner = clientReferenceInformationPartner.__dict__
)
orderInformationBillToAddress1 = "901 Metro Centre Blvd"
orderInformationBillToAdministrativeArea = "CA"
orderInformationBillToCountry = "US"
orderInformationBillToLocality = "Foster City"
orderInformationBillToPostalCode = "94404"
orderInformationBillToCompanyName = "A & C International Trade, Inc"
orderInformationBillToCompany = Riskv1exportcomplianceinquiriesOrderInformationBillToCompany(
name = orderInformationBillToCompanyName
)
orderInformationBillToFirstName = "ANDREE"
orderInformationBillToLastName = "AGNESE"
orderInformationBillToEmail = "<EMAIL>"
orderInformationBillTo = Riskv1exportcomplianceinquiriesOrderInformationBillTo(
address1 = orderInformationBillToAddress1,
administrative_area = orderInformationBillToAdministrativeArea,
country = orderInformationBillToCountry,
locality = orderInformationBillToLocality,
postal_code = orderInformationBillToPostalCode,
company = orderInformationBillToCompany.__dict__,
first_name = orderInformationBillToFirstName,
last_name = orderInformationBillToLastName,
email = orderInformationBillToEmail
)
orderInformationShipToCountry = "IN"
orderInformationShipToFirstName = "DumbelDore"
orderInformationShipToLastName = "Albus"
orderInformationShipTo = Riskv1exportcomplianceinquiriesOrderInformationShipTo(
country = orderInformationShipToCountry,
first_name = orderInformationShipToFirstName,
last_name = orderInformationShipToLastName
)
orderInformationLineItems = []
orderInformationLineItems1 = Riskv1exportcomplianceinquiriesOrderInformationLineItems(
unit_price = "120.50",
quantity = 3,
product_sku = "123456",
product_name = "Qwe",
product_code = "physical_software"
)
orderInformationLineItems.append(orderInformationLineItems1.__dict__)
orderInformation = Riskv1exportcomplianceinquiriesOrderInformation(
bill_to = orderInformationBillTo.__dict__,
ship_to = orderInformationShipTo.__dict__,
line_items = orderInformationLineItems
)
requestObj = ValidateExportComplianceRequest(
client_reference_information = clientReferenceInformation.__dict__,
order_information = orderInformation.__dict__
)
requestObj = del_none(requestObj.__dict__)
requestObj = json.dumps(requestObj)
try:
config_obj = configuration.Configuration()
client_config = config_obj.get_configuration()
api_instance = VerificationApi(client_config)
return_data, status, body = api_instance.validate_export_compliance(requestObj)
print("\nAPI RESPONSE CODE : ", status)
print("\nAPI RESPONSE BODY : ", body)
return return_data
except Exception as e:
print("\nException when calling VerificationApi->validate_export_compliance: %s\n" % e)
if __name__ == "__main__":
customer_match_denied_parties_list()
| en | 0.751425 | # To delete None values in Input Request Json body | 2.423543 | 2 |
SimulatePi.py | Lucchese-Anthony/MonteCarloSimulation | 0 | 7653 | <filename>SimulatePi.py
import numpy as np
import random
import math
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import style
angle = np.linspace( 0 , 2 * np.pi , 150)
radius = 1
x = radius * np.cos(angle)
y = radius * np.sin(angle)
#prints the circle
style.use('fivethirtyeight')
fig = plt.figure()
axes = fig.add_subplot(1,1,1)
axes.plot( x, y, color="red")
inside = []
outside = []
def inCircle(x, y):
	return math.sqrt( (x**2) + (y**2) ) <= 1
def animate(i):
x = random.uniform(1,-1)
y = random.uniform(1,-1)
if (inCircle(x, y)):
point = axes.scatter(x, y, color="blue")
inside.append(point)
else:
point = axes.scatter(x, y, color="red")
outside.append(point)
try:
ratio = len(inside) / len(outside)
print(ratio)
except ZeroDivisionError:
print(0)
ani = animation.FuncAnimation(fig, animate, interval=5)
plt.show()
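# Note on the printed ratio: it is len(inside) / len(outside). The usual Monte Carlo
# estimate of pi is 4 * len(inside) / (len(inside) + len(outside)), since the unit
# circle covers pi/4 of the enclosing 2x2 square.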
| <filename>SimulatePi.py
import numpy as np
import random
import math
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import style
angle = np.linspace( 0 , 2 * np.pi , 150)
radius = 1
x = radius * np.cos(angle)
y = radius * np.sin(angle)
#prints the circle
style.use('fivethirtyeight')
fig = plt.figure()
axes = fig.add_subplot(1,1,1)
axes.plot( x, y, color="red")
inside = []
outside = []
def inCircle(x, y):
	return math.sqrt( (x**2) + (y**2) ) <= 1
def animate(i):
x = random.uniform(1,-1)
y = random.uniform(1,-1)
if (inCircle(x, y)):
point = axes.scatter(x, y, color="blue")
inside.append(point)
else:
point = axes.scatter(x, y, color="red")
outside.append(point)
try:
ratio = len(inside) / len(outside)
print(ratio)
except ZeroDivisionError:
print(0)
ani = animation.FuncAnimation(fig, animate, interval=5)
plt.show()
| en | 0.210097 | #prints the circle | 3.729666 | 4 |
run.py | aarvanitii/adminWebsite | 0 | 7654 | """
This is where the web application starts running
"""
from app.index import create_app
app = create_app()
if __name__ == "__main__":
app.secret_key = 'mysecret'
app.run(port=8080, host="0.0.0.0", debug=True) | """
This is where the web application starts running
"""
from app.index import create_app
app = create_app()
if __name__ == "__main__":
app.secret_key = 'mysecret'
app.run(port=8080, host="0.0.0.0", debug=True) | en | 0.879538 | This is where the web application starts running | 1.94348 | 2 |
tareas/3/GarciaFigueroaAlberto-GarciaEdgar/Proceso.py | jorgelmp/sistop-2022-1 | 6 | 7655 | class Proceso:
def __init__(self,tiempo_de_llegada,t,id):
self.t=t
self.tiempo_de_llegada=tiempo_de_llegada
self.id=id
self.inicio=0
self.fin=0
self.T=0
self.E=0
self.P=0
self.tRestantes = t
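# Illustrative usage (an assumption, not part of the original file):
#     p = Proceso(tiempo_de_llegada=0, t=5, id='A')
# A scheduler would then fill in p.inicio, p.fin, p.T, p.E and p.P (presumably
# turnaround, wait and penalty metrics) and decrement p.tRestantes while running.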
| class Proceso:
def __init__(self,tiempo_de_llegada,t,id):
self.t=t
self.tiempo_de_llegada=tiempo_de_llegada
self.id=id
self.inicio=0
self.fin=0
self.T=0
self.E=0
self.P=0
self.tRestantes = t
| none | 1 | 2.755216 | 3 |
|
Algo and DSA/LeetCode-Solutions-master/Python/smallest-greater-multiple-made-of-two-digits.py | Sourav692/FAANG-Interview-Preparation | 3,269 | 7656 | # Time: sum(O(l * 2^l) for l in range(1, 11)) = O(20 * 2^10) = O(1)
# Space: O(1)
class Solution(object):
def findInteger(self, k, digit1, digit2):
"""
:type k: int
:type digit1: int
:type digit2: int
:rtype: int
"""
MAX_NUM_OF_DIGITS = 10
INT_MAX = 2**31-1
if digit1 < digit2:
digit1, digit2 = digit2, digit1
total = 2
for l in xrange(1, MAX_NUM_OF_DIGITS+1):
for mask in xrange(total):
curr, bit = 0, total>>1
while bit:
curr = curr*10 + (digit1 if mask&bit else digit2)
bit >>= 1
if k < curr <= INT_MAX and curr%k == 0:
return curr
total <<= 1
return -1
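# Example (Python 2, matching the xrange usage above):
#     Solution().findInteger(2, 0, 2)  # -> 20, the smallest multiple of 2 greater
#                                      #    than 2 written only with digits 0 and 2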
| # Time: sum(O(l * 2^l) for l in range(1, 11)) = O(20 * 2^10) = O(1)
# Space: O(1)
class Solution(object):
def findInteger(self, k, digit1, digit2):
"""
:type k: int
:type digit1: int
:type digit2: int
:rtype: int
"""
MAX_NUM_OF_DIGITS = 10
INT_MAX = 2**31-1
if digit1 < digit2:
digit1, digit2 = digit2, digit1
total = 2
for l in xrange(1, MAX_NUM_OF_DIGITS+1):
for mask in xrange(total):
curr, bit = 0, total>>1
while bit:
curr = curr*10 + (digit1 if mask&bit else digit2)
bit >>= 1
if k < curr <= INT_MAX and curr%k == 0:
return curr
total <<= 1
return -1
| en | 0.426329 | # Time: sum(O(l * 2^l) for l in range(1, 11)) = O(20 * 2^10) = O(1) # Space: O(1) :type k: int :type digit1: int :type digit2: int :rtype: int | 3.223471 | 3 |
aldryn_people/tests/test_plugins.py | compoundpartners/js-people | 0 | 7657 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
try:
from django.core.urlresolvers import reverse
except ImportError:
# Django 2.0
from django.urls import reverse
from django.utils.translation import force_text
from cms import api
from cms.utils.i18n import force_language
from aldryn_people import DEFAULT_APP_NAMESPACE
from ..models import Person, Group
from ..cms_plugins import PeoplePlugin
from . import DefaultApphookMixin, BasePeopleTest
class TestPersonPlugins(DefaultApphookMixin, BasePeopleTest):
def test_add_people_list_plugin_api(self):
"""
We add a person to the People Plugin and look her up
"""
name = 'Donald'
Person.objects.create(name=name)
plugin = api.add_plugin(self.placeholder, PeoplePlugin, self.language)
plugin.people = Person.objects.all()
self.assertEqual(force_text(plugin), force_text(plugin.pk))
self.page.publish(self.language)
url = self.page.get_absolute_url()
response = self.client.get(url, follow=True)
self.assertContains(response, name)
# This fails because of Sane Add Plugin (I suspect). This will be refactored
# and re-enabled in a future commit.
# def test_add_people_list_plugin_client(self):
# """
# We log into the PeoplePlugin
# """
# self.client.login(
# username=self.su_username, password=self.su_password)
#
# plugin_data = {
# 'plugin_type': 'PeoplePlugin',
# 'plugin_language': self.language,
# 'placeholder_id': self.placeholder.pk,
# }
# response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
# self.assertEqual(response.status_code, 200)
# self.assertTrue(CMSPlugin.objects.exists())
def test_hide_ungrouped(self):
"""
"""
the_bradys = Group.objects.create(name="The Bradys")
alice = Person.objects.create(name="Alice")
bobby = Person.objects.create(name="Bobby")
cindy = Person.objects.create(name="Cindy")
# Alice is the housekeeper, not a real Brady.
bobby.groups.add(the_bradys)
cindy.groups.add(the_bradys)
# Add a plugin where ungrouped people are not shown
plugin = api.add_plugin(self.placeholder, PeoplePlugin, self.language)
plugin.people = Person.objects.all()
plugin.group_by_group = True
plugin.show_ungrouped = False
plugin.save()
self.page.publish(self.language)
url = self.page.get_absolute_url()
response = self.client.get(url, follow=True)
self.assertContains(response, bobby.name)
self.assertContains(response, cindy.name)
self.assertNotContains(response, alice.name)
def test_show_ungrouped(self):
"""
"""
the_bradys = Group.objects.create(name="The Bradys")
alice = Person.objects.create(name="Alice")
bobby = Person.objects.create(name="Bobby")
cindy = Person.objects.create(name="Cindy")
# Alice is the housekeeper, not a real Brady.
bobby.groups.add(the_bradys)
cindy.groups.add(the_bradys)
# Now, add a new plugin where ungrouped people are shown
plugin = api.add_plugin(self.placeholder, PeoplePlugin, self.language)
plugin.people = Person.objects.all()
plugin.group_by_group = True
plugin.show_ungrouped = True
plugin.save()
self.page.publish(self.language)
url = self.page.get_absolute_url()
response = self.client.get(url, follow=True)
self.assertContains(response, bobby.name)
self.assertContains(response, cindy.name)
self.assertContains(response, alice.name)
class TestPeopleListPluginNoApphook(BasePeopleTest):
def setUp(self):
super(TestPeopleListPluginNoApphook, self).setUp()
# we are testing only en
self.person1.set_current_language('en')
self.namespace = DEFAULT_APP_NAMESPACE
def create_plugin(self, plugin_params=None):
if plugin_params is None:
plugin_params = {}
with force_language('en'):
plugin = api.add_plugin(
self.placeholder, PeoplePlugin, 'en', **plugin_params)
self.page.publish('en')
return plugin
def test_plugin_with_no_apphook_doesnot_breaks_page(self):
self.create_plugin()
url = self.page.get_absolute_url()
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, self.person1.name)
from ..cms_plugins import NAMESPACE_ERROR
self.assertNotContains(response, NAMESPACE_ERROR[:20])
def test_plugin_with_no_apphook_shows_error_message(self):
self.create_plugin()
url = self.page.get_absolute_url()
self.client.login(username=self.su_username,
password=<PASSWORD>)
response = self.client.get(url, user=self.superuser)
self.assertEqual(response.status_code, 200)
self.assertContains(response, self.person1.name)
from ..cms_plugins import NAMESPACE_ERROR
self.assertContains(response, NAMESPACE_ERROR[:20])
def test_plugin_with_vcard_enabled_no_apphook(self):
self.create_plugin(plugin_params={'show_vcard': True})
url = self.page.get_absolute_url()
response = self.client.get(url)
self.assertContains(response, self.person1.name)
def test_plugin_with_vcard_disabled_no_apphook(self):
self.create_plugin(plugin_params={'show_vcard': False})
url = self.page.get_absolute_url()
response = self.client.get(url)
self.assertContains(response, self.person1.name)
def test_plugin_show_links_are_shown_if_enabled_and_apphook_page(self):
with force_language('en'):
app_page = self.create_apphook_page()
list_plugin = api.add_plugin(
placeholder=self.placeholder,
plugin_type=PeoplePlugin,
language='en',
)
list_plugin.show_links = True
list_plugin.save()
self.page.publish('en')
url = self.page.get_absolute_url()
person_url = self.person1.get_absolute_url()
# ensure that url is not the link to the home page and not app page
app_page_len = len(app_page.get_absolute_url())
self.assertGreater(len(person_url), app_page_len)
response = self.client.get(url, follow=True)
self.assertContains(response, person_url)
# ensure that url is not shown if not enabled for plugin.
list_plugin.show_links = False
list_plugin.save()
self.page.publish('en')
response = self.client.get(url, follow=True)
self.assertNotContains(response, person_url)
def test_plugin_with_vcard_enabled_with_apphook(self):
vcard_kwargs = {
'slug': self.person1.slug
}
with force_language('en'):
self.create_apphook_page()
person_vcard_url = reverse(
'{0}:download_vcard'.format(self.namespace),
kwargs=vcard_kwargs)
plugin = self.create_plugin(plugin_params={'show_vcard': True})
url = self.page.get_absolute_url()
response = self.client.get(url, follow=True)
self.assertContains(response, self.person1.name)
self.assertContains(response, person_vcard_url)
# test that vcard download link is not shown if disabled
plugin.show_vcard = False
plugin.save()
self.page.publish('en')
response = self.client.get(url, follow=True)
self.assertContains(response, self.person1.name)
self.assertNotContains(response, person_vcard_url)
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
try:
from django.core.urlresolvers import reverse
except ImportError:
# Django 2.0
from django.urls import reverse
from django.utils.translation import force_text
from cms import api
from cms.utils.i18n import force_language
from aldryn_people import DEFAULT_APP_NAMESPACE
from ..models import Person, Group
from ..cms_plugins import PeoplePlugin
from . import DefaultApphookMixin, BasePeopleTest
class TestPersonPlugins(DefaultApphookMixin, BasePeopleTest):
def test_add_people_list_plugin_api(self):
"""
We add a person to the People Plugin and look her up
"""
name = 'Donald'
Person.objects.create(name=name)
plugin = api.add_plugin(self.placeholder, PeoplePlugin, self.language)
plugin.people = Person.objects.all()
self.assertEqual(force_text(plugin), force_text(plugin.pk))
self.page.publish(self.language)
url = self.page.get_absolute_url()
response = self.client.get(url, follow=True)
self.assertContains(response, name)
# This fails because of Sane Add Plugin (I suspect). This will be refactored
# and re-enabled in a future commit.
# def test_add_people_list_plugin_client(self):
# """
# We log into the PeoplePlugin
# """
# self.client.login(
# username=self.su_username, password=self.su_password)
#
# plugin_data = {
# 'plugin_type': 'PeoplePlugin',
# 'plugin_language': self.language,
# 'placeholder_id': self.placeholder.pk,
# }
# response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
# self.assertEqual(response.status_code, 200)
# self.assertTrue(CMSPlugin.objects.exists())
def test_hide_ungrouped(self):
"""
"""
the_bradys = Group.objects.create(name="The Bradys")
alice = Person.objects.create(name="Alice")
bobby = Person.objects.create(name="Bobby")
cindy = Person.objects.create(name="Cindy")
# Alice is the housekeeper, not a real Brady.
bobby.groups.add(the_bradys)
cindy.groups.add(the_bradys)
# Add a plugin where ungrouped people are not shown
plugin = api.add_plugin(self.placeholder, PeoplePlugin, self.language)
plugin.people = Person.objects.all()
plugin.group_by_group = True
plugin.show_ungrouped = False
plugin.save()
self.page.publish(self.language)
url = self.page.get_absolute_url()
response = self.client.get(url, follow=True)
self.assertContains(response, bobby.name)
self.assertContains(response, cindy.name)
self.assertNotContains(response, alice.name)
def test_show_ungrouped(self):
"""
"""
the_bradys = Group.objects.create(name="The Bradys")
alice = Person.objects.create(name="Alice")
bobby = Person.objects.create(name="Bobby")
cindy = Person.objects.create(name="Cindy")
# Alice is the housekeeper, not a real Brady.
bobby.groups.add(the_bradys)
cindy.groups.add(the_bradys)
# Now, add a new plugin where ungrouped people are shown
plugin = api.add_plugin(self.placeholder, PeoplePlugin, self.language)
plugin.people = Person.objects.all()
plugin.group_by_group = True
plugin.show_ungrouped = True
plugin.save()
self.page.publish(self.language)
url = self.page.get_absolute_url()
response = self.client.get(url, follow=True)
self.assertContains(response, bobby.name)
self.assertContains(response, cindy.name)
self.assertContains(response, alice.name)
class TestPeopleListPluginNoApphook(BasePeopleTest):
def setUp(self):
super(TestPeopleListPluginNoApphook, self).setUp()
# we are testing only en
self.person1.set_current_language('en')
self.namespace = DEFAULT_APP_NAMESPACE
def create_plugin(self, plugin_params=None):
if plugin_params is None:
plugin_params = {}
with force_language('en'):
plugin = api.add_plugin(
self.placeholder, PeoplePlugin, 'en', **plugin_params)
self.page.publish('en')
return plugin
def test_plugin_with_no_apphook_doesnot_breaks_page(self):
self.create_plugin()
url = self.page.get_absolute_url()
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, self.person1.name)
from ..cms_plugins import NAMESPACE_ERROR
self.assertNotContains(response, NAMESPACE_ERROR[:20])
def test_plugin_with_no_apphook_shows_error_message(self):
self.create_plugin()
url = self.page.get_absolute_url()
self.client.login(username=self.su_username,
password=<PASSWORD>)
response = self.client.get(url, user=self.superuser)
self.assertEqual(response.status_code, 200)
self.assertContains(response, self.person1.name)
from ..cms_plugins import NAMESPACE_ERROR
self.assertContains(response, NAMESPACE_ERROR[:20])
def test_plugin_with_vcard_enabled_no_apphook(self):
self.create_plugin(plugin_params={'show_vcard': True})
url = self.page.get_absolute_url()
response = self.client.get(url)
self.assertContains(response, self.person1.name)
def test_plugin_with_vcard_disabled_no_apphook(self):
self.create_plugin(plugin_params={'show_vcard': False})
url = self.page.get_absolute_url()
response = self.client.get(url)
self.assertContains(response, self.person1.name)
def test_plugin_show_links_are_shown_if_enabled_and_apphook_page(self):
with force_language('en'):
app_page = self.create_apphook_page()
list_plugin = api.add_plugin(
placeholder=self.placeholder,
plugin_type=PeoplePlugin,
language='en',
)
list_plugin.show_links = True
list_plugin.save()
self.page.publish('en')
url = self.page.get_absolute_url()
person_url = self.person1.get_absolute_url()
# ensure that url is not the link to the home page and not app page
app_page_len = len(app_page.get_absolute_url())
self.assertGreater(len(person_url), app_page_len)
response = self.client.get(url, follow=True)
self.assertContains(response, person_url)
# ensure that url is not shown if not enabled for plugin.
list_plugin.show_links = False
list_plugin.save()
self.page.publish('en')
response = self.client.get(url, follow=True)
self.assertNotContains(response, person_url)
def test_plugin_with_vcard_enabled_with_apphook(self):
vcard_kwargs = {
'slug': self.person1.slug
}
with force_language('en'):
self.create_apphook_page()
person_vcard_url = reverse(
'{0}:download_vcard'.format(self.namespace),
kwargs=vcard_kwargs)
plugin = self.create_plugin(plugin_params={'show_vcard': True})
url = self.page.get_absolute_url()
response = self.client.get(url, follow=True)
self.assertContains(response, self.person1.name)
self.assertContains(response, person_vcard_url)
# test that vcard download link is not shown if disabled
plugin.show_vcard = False
plugin.save()
self.page.publish('en')
response = self.client.get(url, follow=True)
self.assertContains(response, self.person1.name)
self.assertNotContains(response, person_vcard_url)
| en | 0.654985 | # -*- coding: utf-8 -*- # Django 2.0 We add a person to the People Plugin and look her up # This fails because of Sane Add Plugin (I suspect). This will be refactored # and re-enabled in a future commit. # def test_add_people_list_plugin_client(self): # """ # We log into the PeoplePlugin # """ # self.client.login( # username=self.su_username, password=self.su_password) # # plugin_data = { # 'plugin_type': 'PeoplePlugin', # 'plugin_language': self.language, # 'placeholder_id': self.placeholder.pk, # } # response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data) # self.assertEqual(response.status_code, 200) # self.assertTrue(CMSPlugin.objects.exists()) # Alice is the housekeeper, not a real Brady. # Add a plugin where ungrouped people are not shown # Alice is the housekeeper, not a real Brady. # Now, add a new plugin where ungrouped people are shown # we are testing only en # ensure that url is not the link to the home page and not app page # ensure that url is not shown if not enabled for plugin. # test that vcard download link is not shown if disabled | 2.320902 | 2 |
turbo_transformers/python/tests/__init__.py | xcnick/TurboTransformers | 1,147 | 7658 | <gh_stars>1000+
# Copyright (C) 2020 THL A29 Limited, a Tencent company.
# All rights reserved.
# Licensed under the BSD 3-Clause License (the "License"); you may
# not use this file except in compliance with the License. You may
# obtain a copy of the License at
# https://opensource.org/licenses/BSD-3-Clause
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
# See the AUTHORS file for names of contributors.
| # Copyright (C) 2020 THL A29 Limited, a Tencent company.
# All rights reserved.
# Licensed under the BSD 3-Clause License (the "License"); you may
# not use this file except in compliance with the License. You may
# obtain a copy of the License at
# https://opensource.org/licenses/BSD-3-Clause
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
# See the AUTHORS file for names of contributors. | en | 0.874476 | # Copyright (C) 2020 THL A29 Limited, a Tencent company. # All rights reserved. # Licensed under the BSD 3-Clause License (the "License"); you may # not use this file except in compliance with the License. You may # obtain a copy of the License at # https://opensource.org/licenses/BSD-3-Clause # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" basis, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. See the License for the specific language governing # permissions and limitations under the License. # See the AUTHORS file for names of contributors. | 0.795338 | 1 |
generate_joke.py | audreymychan/djsmile | 5 | 7659 | # This script contains the get_joke() function to generate a new dad joke
import requests
def get_joke():
"""Return new joke string from icanhazdadjoke.com."""
url = "https://icanhazdadjoke.com/"
response = requests.get(url, headers={'Accept': 'application/json'})
raw_joke = response.json()
joke = raw_joke['joke']
return joke
| # This script contains the get_joke() function to generate a new dad joke
import requests
def get_joke():
"""Return new joke string from icanhazdadjoke.com."""
url = "https://icanhazdadjoke.com/"
response = requests.get(url, headers={'Accept': 'application/json'})
raw_joke = response.json()
joke = raw_joke['joke']
return joke
| en | 0.698863 | # This script contains the get_joke() function to generate a new dad joke Return new joke string from icanhazdadjoke.com. | 3.075229 | 3 |
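A minimal usage sketch for the helper above; the module name and the error handling are assumptions rather than part of the original script.
# Illustrative only: assumes the file above is importable as generate_joke.
from generate_joke import get_joke
if __name__ == "__main__":
    try:
        print(get_joke())
    except Exception as exc:  # network or JSON decoding problems
        print("Could not fetch a joke: {}".format(exc))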
bot/tests/test_triggers/__init__.py | elihschiff/Rubber-Duck-Python | 7 | 7660 | from .test_commands import all_commands
all_triggers = all_commands
from .test_quack import TestQuack
all_triggers.append(TestQuack)
| from .test_commands import all_commands
all_triggers = all_commands
from .test_quack import TestQuack
all_triggers.append(TestQuack)
| none | 1 | 1.19165 | 1 |
|
src/main/scripts/crassus_deployer_lambda.py | Scout24/crassus | 0 | 7661 | <reponame>Scout24/crassus
from __future__ import print_function
from crassus import Crassus
from crassus.output_converter import OutputConverter
def handler(event, context):
crassus = Crassus(event, context)
crassus.deploy()
def cfn_output_converter(event, context):
"""
Convert an AWS CloudFormation output message to our defined
ResultMessage format.
"""
output_converter = OutputConverter(event, context)
output_converter.convert()
| from __future__ import print_function
from crassus import Crassus
from crassus.output_converter import OutputConverter
def handler(event, context):
crassus = Crassus(event, context)
crassus.deploy()
def cfn_output_converter(event, context):
"""
Convert an AWS CloudFormation output message to our defined
ResultMessage format.
"""
output_converter = OutputConverter(event, context)
output_converter.convert() | en | 0.298993 | Convert an AWS CloudFormation output message to our defined ResultMessage format. | 2.440094 | 2 |
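For local experimentation, a handler like the one above can be called directly with stub arguments. The event payload and context object below are placeholders; the real payload format is defined by Crassus itself.
# Hypothetical local invocation of the Lambda handler (event and context shapes are assumed).
from crassus_deployer_lambda import handler
class FakeContext(object):
    function_name = "crassus-deployer"
    aws_request_id = "local-test"
if __name__ == "__main__":
    handler({"Records": []}, FakeContext())  # placeholder event, not the real Crassus message format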
Exareme-Docker/src/exareme/exareme-tools/madis/src/lib/pyreadline/clipboard/__init__.py | tchamabe1979/exareme | 0 | 7662 | import sys
success = False
in_ironpython = "IronPython" in sys.version
if in_ironpython:
try:
from ironpython_clipboard import GetClipboardText, SetClipboardText
success = True
except ImportError:
pass
else:
try:
from win32_clipboard import GetClipboardText, SetClipboardText
success = True
except ImportError:
raise
def send_data(lists):
SetClipboardText(make_tab(lists))
def set_clipboard_text(toclipboard):
SetClipboardText(str(toclipboard))
def make_tab(lists):
if hasattr(lists, "tolist"):
lists = lists.tolist()
ut = []
for rad in lists:
if type(rad) in [list, tuple]:
ut.append("\t".join(["%s" % x for x in rad]))
else:
ut.append("%s" % rad)
return "\n".join(ut)
def make_list_of_list(txt):
def make_num(x):
try:
return int(x)
except ValueError:
try:
return float(x)
except ValueError:
try:
return complex(x)
except ValueError:
return x
return x
ut = []
flag = False
for rad in [x for x in txt.split("\r\n") if x != ""]:
raden = [make_num(x) for x in rad.split("\t")]
if str in map(type, raden):
flag = True
ut.append(raden)
return ut, flag
def get_clipboard_text_and_convert(paste_list=False):
"""Get txt from clipboard. if paste_list==True the convert tab separated
data to list of lists. Enclose list of list in array() if all elements are
numeric"""
txt = GetClipboardText()
if txt:
if paste_list and "\t" in txt:
array, flag = make_list_of_list(txt)
if flag:
txt = repr(array)
else:
txt = "array(%s)" % repr(array)
txt = "".join([c for c in txt if c not in " \t\r\n"])
return txt
| import sys
success = False
in_ironpython = "IronPython" in sys.version
if in_ironpython:
try:
from ironpython_clipboard import GetClipboardText, SetClipboardText
success = True
except ImportError:
pass
else:
try:
from win32_clipboard import GetClipboardText, SetClipboardText
success = True
except ImportError:
raise
def send_data(lists):
SetClipboardText(make_tab(lists))
def set_clipboard_text(toclipboard):
SetClipboardText(str(toclipboard))
def make_tab(lists):
if hasattr(lists, "tolist"):
lists = lists.tolist()
ut = []
for rad in lists:
if type(rad) in [list, tuple]:
ut.append("\t".join(["%s" % x for x in rad]))
else:
ut.append("%s" % rad)
return "\n".join(ut)
def make_list_of_list(txt):
def make_num(x):
try:
return int(x)
except ValueError:
try:
return float(x)
except ValueError:
try:
return complex(x)
except ValueError:
return x
return x
ut = []
flag = False
for rad in [x for x in txt.split("\r\n") if x != ""]:
raden = [make_num(x) for x in rad.split("\t")]
if str in map(type, raden):
flag = True
ut.append(raden)
return ut, flag
def get_clipboard_text_and_convert(paste_list=False):
"""Get txt from clipboard. if paste_list==True the convert tab separated
data to list of lists. Enclose list of list in array() if all elements are
numeric"""
txt = GetClipboardText()
if txt:
if paste_list and "\t" in txt:
array, flag = make_list_of_list(txt)
if flag:
txt = repr(array)
else:
txt = "array(%s)" % repr(array)
txt = "".join([c for c in txt if c not in " \t\r\n"])
return txt
| en | 0.447312 | Get txt from clipboard. if paste_list==True the convert tab separated data to list of lists. Enclose list of list in array() if all elements are numeric | 2.461649 | 2 |
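Because make_tab and make_list_of_list are pure string transforms, the tab/CRLF round-trip above can be exercised without touching the real clipboard. This sketch assumes the module above is importable as pyreadline.clipboard, which requires a working clipboard backend at import time.
# Round-trip sketch for make_tab / make_list_of_list; no clipboard access is involved.
from pyreadline.clipboard import make_tab, make_list_of_list
rows = [[1, 2.5, "abc"], [3, 4, 5]]
text = make_tab(rows)                      # "1\t2.5\tabc\n3\t4\t5"
parsed, has_str = make_list_of_list(text.replace("\n", "\r\n"))
# parsed == [[1, 2.5, 'abc'], [3, 4, 5]]; has_str is True because 'abc' stays a string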
mjrl/utils/train_agent.py | YujieLu10/tslam | 0 | 7663 | import logging
logging.disable(logging.CRITICAL)
import math
from tabulate import tabulate
from mjrl.utils.make_train_plots import make_train_plots
from mjrl.utils.gym_env import GymEnv
from mjrl.samplers.core import sample_paths
import numpy as np
import torch
import pickle
import imageio
import time as timer
import os
import copy
import matplotlib.pyplot as plt
try:
import exptools
from colorsys import hsv_to_rgb
import pyvista as pv
except ImportError:
exptools = None
def _load_latest_policy_and_logs(agent, *, policy_dir, logs_dir):
"""Loads the latest policy.
Returns the next step number to begin with.
"""
assert os.path.isdir(policy_dir), str(policy_dir)
assert os.path.isdir(logs_dir), str(logs_dir)
log_csv_path = os.path.join(logs_dir, 'log.csv')
if not os.path.exists(log_csv_path):
return 0 # fresh start
print("Reading: {}".format(log_csv_path))
agent.logger.read_log(log_csv_path)
last_step = agent.logger.max_len - 1
if last_step <= 0:
return 0 # fresh start
# find latest policy/baseline
i = last_step
while i >= 0:
policy_path = os.path.join(policy_dir, 'policy_{}.pickle'.format(i))
baseline_path = os.path.join(policy_dir, 'baseline_{}.pickle'.format(i))
if not os.path.isfile(policy_path):
            i = i - 1
continue
else:
print("Loaded last saved iteration: {}".format(i))
with open(policy_path, 'rb') as fp:
agent.policy = pickle.load(fp)
with open(baseline_path, 'rb') as fp:
agent.baseline = pickle.load(fp)
# additional
# global_status_path = os.path.join(policy_dir, 'global_status.pickle')
# with open(global_status_path, 'rb') as fp:
# agent.load_global_status( pickle.load(fp) )
agent.logger.shrink_to(i + 1)
assert agent.logger.max_len == i + 1
return agent.logger.max_len
# cannot find any saved policy
raise RuntimeError("Log file exists, but cannot find any saved policy.")
def save_voxel_visualization(obj_name, reset_mode_conf, reward_conf, obj_orientation, obj_relative_position, obj_scale, pc_frame, iternum, is_best_policy):
uniform_gt_data = np.load("/home/jianrenw/prox/tslam/assets/uniform_gt/uniform_{}_o3d.npz".format(obj_name))['pcd']
data_scale = uniform_gt_data * obj_scale
data_rotate = data_scale.copy()
x = data_rotate[:, 0].copy()
y = data_rotate[:, 1].copy()
z = data_rotate[:, 2].copy()
x_theta = obj_orientation[0]
data_rotate[:, 0] = x
data_rotate[:, 1] = y*math.cos(x_theta) - z*math.sin(x_theta)
data_rotate[:, 2] = y*math.sin(x_theta) + z*math.cos(x_theta)
x = data_rotate[:, 0].copy()
y = data_rotate[:, 1].copy()
z = data_rotate[:, 2].copy()
y_theta = obj_orientation[1]
data_rotate[:, 0] = x * math.cos(y_theta) + z * math.sin(y_theta)
data_rotate[:, 1] = y
data_rotate[:, 2] = z * math.cos(y_theta) - x * math.sin(y_theta)
x = data_rotate[:, 0].copy()
y = data_rotate[:, 1].copy()
z = data_rotate[:, 2].copy()
z_theta = obj_orientation[2]
data_rotate[:, 0] = x * math.cos(z_theta) - y * math.sin(z_theta)
data_rotate[:, 1] = x * math.sin(z_theta) + y * math.cos(z_theta)
data_rotate[:, 2] = z
data_trans = data_rotate.copy()
data_trans[:, 0] += obj_relative_position[0]
data_trans[:, 1] += obj_relative_position[1]
data_trans[:, 2] += obj_relative_position[2]
uniform_gt_data = data_trans.copy()
data = pc_frame
resolution = 0.01
sep_x = math.ceil(0.3 / resolution)
sep_y = math.ceil(0.3 / resolution)
sep_z = math.ceil(0.3 / resolution)
x, y, z = np.indices((sep_x, sep_y, sep_z))
cube1 = (x<0) & (y <1) & (z<1)
gtcube = (x<0) & (y <1) & (z<1)
voxels = cube1
gt_voxels = gtcube
# draw gt
gt_map_list = []
for idx,val in enumerate(uniform_gt_data):
idx_x = math.floor((val[0] + 0.15) / resolution)
idx_y = math.floor((val[1] + 0.15) / resolution)
idx_z = math.floor((val[2]) / resolution)
# if idx_z > 6:
# continue
name = str(idx_x) + '_' + str(idx_y) + '_' + str(idx_z)
if name not in gt_map_list:
gt_map_list.append(name)
cube = (x < idx_x + 1) & (y < idx_y + 1) & (z < idx_z + 1) & (x >= idx_x) & (y >= idx_y) & (z >= idx_z)
# combine the objects into a single boolean array
gt_voxels += cube
# draw cuboids in the top left and bottom right corners, and a link between them
map_list = []
for idx,val in enumerate(data):
idx_x = math.floor((val[0] + 0.15) / resolution)
idx_y = math.floor((val[1] + 0.15) / resolution)
idx_z = math.floor((val[2]) / resolution)
# if idx_z > 6:
# continue
name = str(idx_x) + '_' + str(idx_y) + '_' + str(idx_z)
if name not in map_list and name in gt_map_list:
map_list.append(name)
cube = (x < idx_x + 1) & (y < idx_y + 1) & (z < idx_z + 1) & (x >= idx_x) & (y >= idx_y) & (z >= idx_z)
# combine the objects into a single boolean array
voxels += cube
# gt_obj4:668
occupancy = len(map_list) / len(gt_map_list)
# print(len(map_list) / sep_x / sep_y / sep_z )
is_best_reconstruct = True
files = os.listdir('/home/jianrenw/prox/tslam/data/result/best_eval/{}/{}/{}/'.format(obj_name, reset_mode_conf, reward_conf))
for file in files:
if "overlap" in file and "png" in file:
file_str = str(file)
previous_occup = file_str[(file_str.index("-")+1):file_str.index(".png")]
if occupancy < float(previous_occup):
is_best_reconstruct = False
# obj_name = "obj{}".format(obj_name)
# set the colors of each object
vis_voxel = gt_voxels | voxels
colors = np.empty(vis_voxel.shape, dtype=object)
colors[gt_voxels] = 'white'
colors[voxels] = 'cyan'
# and plot everything
ax = plt.figure().add_subplot(projection='3d')
ax.set_zlim(1,30)
ax.voxels(vis_voxel, facecolors=colors, edgecolor='g', alpha=.4, linewidth=.05)
# plt.savefig('uniform_gtbox_{}.png'.format(step))
if is_best_policy or is_best_reconstruct:
plt.savefig('/home/jianrenw/prox/tslam/data/result/best_eval/{}/{}/{}/bp{}_br{}_overlap-{}.png'.format(obj_name, reset_mode_conf, reward_conf, is_best_policy, is_best_reconstruct, occupancy))
plt.savefig('voxel/iter-{}-{}-overlap-{}.png'.format(iternum, obj_name, occupancy))
plt.close()
ax = plt.figure().add_subplot(projection='3d')
ax.set_zlim(1,30)
ax.voxels(gt_voxels, facecolors=colors, edgecolor='g', alpha=.4, linewidth=.05)
if is_best_policy or is_best_reconstruct:
plt.savefig('/home/jianrenw/prox/tslam/data/result/best_eval/{}/{}/{}/gt.png'.format(obj_name, reset_mode_conf, reward_conf))
plt.savefig('voxel/iter-{}-{}-gt.png'.format(iternum, obj_name))
plt.close()
ax = plt.figure().add_subplot(projection='3d')
ax.set_zlim(1,30)
ax.voxels(voxels, facecolors=colors, edgecolor='g', alpha=.4, linewidth=.05)
if is_best_policy or is_best_reconstruct:
plt.savefig('/home/jianrenw/prox/tslam/data/result/best_eval/{}/{}/{}/bp{}_br{}_exp.png'.format(obj_name, reset_mode_conf, reward_conf, is_best_policy, is_best_reconstruct))
plt.savefig('voxel/iter-{}-{}-exp.png'.format(iternum, obj_name))
plt.close()
return is_best_reconstruct, occupancy
def train_agent(job_name, agent,
seed = 0,
niter = 101,
gamma = 0.995,
gae_lambda = None,
num_cpu = 16,
sample_mode = 'trajectories',
horizon= int(150),
num_traj = 50,
num_samples = 50000, # has precedence, used with sample_mode = 'samples'
save_freq = 10,
evaluation_rollouts = None,
plot_keys = ['stoc_pol_mean'],
env_kwargs= dict(),
visualize_kwargs= dict(),
sample_paths_kwargs= dict(),
):
print("num_cpu{}".format(num_cpu))
np.random.seed(seed)
if os.path.isdir(job_name) == False:
os.mkdir(job_name)
previous_dir = os.getcwd()
obj_name = env_kwargs["obj_name"]
reset_mode_conf = env_kwargs["reset_mode"]
reward_conf = "cf{}knn{}voxel{}".format(env_kwargs["chamfer_r_factor"], env_kwargs["knn_r_factor"], env_kwargs["new_voxel_r_factor"])
os.chdir(job_name) # important! we are now in the directory to save data
if os.path.isdir('iterations') == False: os.mkdir('iterations')
if os.path.isdir('2dpointcloud') == False: os.mkdir('2dpointcloud')
if os.path.isdir('pointcloudnpz') == False: os.mkdir('pointcloudnpz')
if os.path.isdir('/home/jianrenw/prox/tslam/data/result/best_policy/{}/{}/{}'.format(obj_name, reset_mode_conf, reward_conf)) == False: os.makedirs('/home/jianrenw/prox/tslam/data/result/best_policy/{}/{}/{}'.format(obj_name, reset_mode_conf, reward_conf))
if os.path.isdir('/home/jianrenw/prox/tslam/data/result/best_eval/{}/{}/{}'.format(obj_name, reset_mode_conf, reward_conf)) == False: os.makedirs('/home/jianrenw/prox/tslam/data/result/best_eval/{}/{}/{}'.format(obj_name, reset_mode_conf, reward_conf))
if os.path.isdir('voxel') == False: os.mkdir('voxel')
if os.path.isdir('logs') == False and agent.save_logs == True: os.mkdir('logs')
best_policy = copy.deepcopy(agent.policy)
best_perf = -1e8
train_curve = best_perf*np.ones(niter)
mean_pol_perf = 0.0
e = GymEnv(agent.env.env_id, env_kwargs)
# Load from any existing checkpoint, policy, statistics, etc.
# Why no checkpointing.. :(
i_start = _load_latest_policy_and_logs(agent,
policy_dir='iterations',
logs_dir='logs')
if i_start:
print("Resuming from an existing job folder ...")
for i in range(i_start, niter):
print("......................................................................................")
print("ITERATION : %i " % i)
is_best_policy = False
if train_curve[i-1] > best_perf:
if exptools: exptools.logging.logger.log_text("update best_policy")
best_policy = copy.deepcopy(agent.policy)
best_perf = train_curve[i-1]
is_best_policy = True
N = num_traj if sample_mode == 'trajectories' else num_samples
stats = agent.train_step(
N=N,
sample_mode=sample_mode,
horizon= horizon,
gamma=gamma,
gae_lambda=gae_lambda,
num_cpu=num_cpu,
env_kwargs= env_kwargs,
sample_paths_kwargs= sample_paths_kwargs,
)
train_curve[i] = stats[0]
if evaluation_rollouts is not None and evaluation_rollouts > 0:
print("Performing evaluation rollouts ........")
eval_paths = sample_paths(
num_traj=evaluation_rollouts,
env=e.env_id,
policy=agent.policy,
eval_mode=True,
base_seed=seed,
num_cpu=num_cpu,
env_kwargs= env_kwargs,
**sample_paths_kwargs)
mean_pol_perf = np.mean([np.sum(path['rewards']) for path in eval_paths])
if agent.save_logs:
agent.logger.log_kv('eval_score', mean_pol_perf)
if exptools: exptools.logging.logger.log_scalar('eval_score', mean_pol_perf, i)
if exptools:
env_infos = [path["env_infos"] for path in eval_paths] # a list of dict
rewards = dict()
total_points = list()
if env_infos:
# get decomposed reward statistics
keys = [k for k in env_infos[0].keys() if "_p" in k[-2:] or "_r" in k[-2:] or "occupancy" in k]
for k in keys:
rewards[k] = list()
for env_info in env_infos:
rewards[k].append(env_info[k])
for env_info in env_infos:
total_points.append(len(env_info["pointcloud"]))
for k, v in rewards.items():
exptools.logging.logger.log_scalar_batch(k, v, i)
exptools.logging.logger.log_scalar_batch("total_num_points", total_points, i)
print(">>> finish evaluation rollouts")
if (i % save_freq == 0 and i > 0):
if agent.save_logs:
agent.logger.save_log('logs/')
make_train_plots(log=agent.logger.log, keys=plot_keys, save_loc='logs/')
obj_orientation = env_kwargs["obj_orientation"]
obj_relative_position = env_kwargs["obj_relative_position"]
obj_scale = env_kwargs["obj_scale"]
policy_file = 'policy_%i.pickle' % i
baseline_file = 'baseline_%i.pickle' % i
pickle.dump(agent.policy, open('iterations/' + policy_file, 'wb'))
pickle.dump(agent.baseline, open('iterations/' + baseline_file, 'wb'))
pickle.dump(best_policy, open('iterations/best_policy.pickle', 'wb'))
pickle.dump(agent.global_status, open('iterations/global_status.pickle', 'wb'))
            # save videos, point cloud, and reconstructed mesh
if exptools:
video, env_infos = e.visualize_policy_offscreen(
policy= agent.policy,
**visualize_kwargs,
) # (T, C, H, W)
video_explore, env_infos_explore = e.visualize_policy_explore(
policy= agent.policy,
**visualize_kwargs,
) # (T, C, H, W)
pc_frame = np.array(env_infos[-1]["pointcloud"] if len(env_infos[-1]["pointcloud"]) > 0 else np.empty((0, 3)))
# 3d voxel visualization
is_best_reconstruct, occupancy = save_voxel_visualization(obj_name, reset_mode_conf, reward_conf, obj_orientation, obj_relative_position, obj_scale, pc_frame, i, is_best_policy)
if is_best_policy or is_best_reconstruct:
pickle.dump(best_policy, open('/home/jianrenw/prox/tslam/data/result/best_policy/{}/{}/{}/bp{}_br{}_best_policy.pickle'.format(obj_name, reset_mode_conf, reward_conf, is_best_policy, is_best_reconstruct), 'wb'))
if is_best_policy or is_best_reconstruct:
np.savez_compressed("pointcloudnpz/alpha_pointcloud_"+str(i)+".npz",pcd=pc_frame)
np.savez_compressed("/home/jianrenw/prox/tslam/data/result/best_eval/{}/{}/{}/bp{}_br{}_alpha_pointcloud_overlap-{}.npz".format(obj_name, reset_mode_conf, reward_conf, is_best_policy, is_best_reconstruct, occupancy), pcd=pc_frame)
# else:
# np.savez_compressed("pointcloudnpz/pointcloud_"+str(i)+".npz",pcd=pc_frame)
# pc_frames.append(pc_frame)
ax = plt.axes()
ax.scatter(pc_frame[:, 0], pc_frame[:, 1], cmap='viridis', linewidth=0.5)
if is_best_policy or is_best_reconstruct:
plt.savefig("2dpointcloud/alpha_{}.png".format('2dpointcloud' + str(i)))
plt.savefig("/home/jianrenw/prox/tslam/data/result/best_eval/{}/{}/{}/bp{}_br{}_alpha_2dpointcloud_overlap-{}.png".format(obj_name, reset_mode_conf, reward_conf, is_best_policy, is_best_reconstruct, occupancy))
# else:
# plt.savefig("2dpointcloud/{}.png".format('2dpointcloud' + str(i)))
plt.close()
# =======================================================
# if obj_name in ["airplane", "apple", "glass", "cup"]:
exptools.logging.logger.record_image("rendered", video[-1], i)
exptools.logging.logger.record_gif("rendered", video, i)
# exptools.logging.logger.record_image("rendered_explore", video_explore[-1], i)
# exptools.logging.logger.record_gif("rendered_explore", video_explore, i)
# print results to console
if i == 0:
result_file = open('results.txt', 'w')
print("Iter | Stoc Pol | Mean Pol | Best (Stoc) \n")
result_file.write("Iter | Sampling Pol | Evaluation Pol | Best (Sampled) \n")
result_file.close()
result_file = open('results.txt', 'a')
result_file.write("%4i %5.2f %5.2f %5.2f \n" % (i, train_curve[i], mean_pol_perf, best_perf))
result_file.close()
if agent.save_logs:
print_data = sorted(filter(lambda v: np.asarray(v[1]).size == 1,
agent.logger.get_current_log().items()))
print(tabulate(print_data))
if exptools:
exptools.logging.logger.log_scalar("Iter", i, i)
exptools.logging.logger.log_scalar("SamplingPol", train_curve[i], i)
exptools.logging.logger.log_scalar("EvaluationPol", mean_pol_perf, i)
exptools.logging.logger.log_scalar("BestSampled", best_perf, i)
exptools.logging.logger.dump_data()
# final save
pickle.dump(best_policy, open('iterations/best_policy.pickle', 'wb'))
if agent.save_logs:
agent.logger.save_log('logs/')
make_train_plots(log=agent.logger.log, keys=plot_keys, save_loc='logs/')
os.chdir(previous_dir)
| import logging
logging.disable(logging.CRITICAL)
import math
from tabulate import tabulate
from mjrl.utils.make_train_plots import make_train_plots
from mjrl.utils.gym_env import GymEnv
from mjrl.samplers.core import sample_paths
import numpy as np
import torch
import pickle
import imageio
import time as timer
import os
import copy
import matplotlib.pyplot as plt
try:
import exptools
from colorsys import hsv_to_rgb
import pyvista as pv
except ImportError:
exptools = None
def _load_latest_policy_and_logs(agent, *, policy_dir, logs_dir):
"""Loads the latest policy.
Returns the next step number to begin with.
"""
assert os.path.isdir(policy_dir), str(policy_dir)
assert os.path.isdir(logs_dir), str(logs_dir)
log_csv_path = os.path.join(logs_dir, 'log.csv')
if not os.path.exists(log_csv_path):
return 0 # fresh start
print("Reading: {}".format(log_csv_path))
agent.logger.read_log(log_csv_path)
last_step = agent.logger.max_len - 1
if last_step <= 0:
return 0 # fresh start
# find latest policy/baseline
i = last_step
while i >= 0:
policy_path = os.path.join(policy_dir, 'policy_{}.pickle'.format(i))
baseline_path = os.path.join(policy_dir, 'baseline_{}.pickle'.format(i))
if not os.path.isfile(policy_path):
            i = i - 1
continue
else:
print("Loaded last saved iteration: {}".format(i))
with open(policy_path, 'rb') as fp:
agent.policy = pickle.load(fp)
with open(baseline_path, 'rb') as fp:
agent.baseline = pickle.load(fp)
# additional
# global_status_path = os.path.join(policy_dir, 'global_status.pickle')
# with open(global_status_path, 'rb') as fp:
# agent.load_global_status( pickle.load(fp) )
agent.logger.shrink_to(i + 1)
assert agent.logger.max_len == i + 1
return agent.logger.max_len
# cannot find any saved policy
raise RuntimeError("Log file exists, but cannot find any saved policy.")
def save_voxel_visualization(obj_name, reset_mode_conf, reward_conf, obj_orientation, obj_relative_position, obj_scale, pc_frame, iternum, is_best_policy):
uniform_gt_data = np.load("/home/jianrenw/prox/tslam/assets/uniform_gt/uniform_{}_o3d.npz".format(obj_name))['pcd']
data_scale = uniform_gt_data * obj_scale
data_rotate = data_scale.copy()
x = data_rotate[:, 0].copy()
y = data_rotate[:, 1].copy()
z = data_rotate[:, 2].copy()
x_theta = obj_orientation[0]
data_rotate[:, 0] = x
data_rotate[:, 1] = y*math.cos(x_theta) - z*math.sin(x_theta)
data_rotate[:, 2] = y*math.sin(x_theta) + z*math.cos(x_theta)
x = data_rotate[:, 0].copy()
y = data_rotate[:, 1].copy()
z = data_rotate[:, 2].copy()
y_theta = obj_orientation[1]
data_rotate[:, 0] = x * math.cos(y_theta) + z * math.sin(y_theta)
data_rotate[:, 1] = y
data_rotate[:, 2] = z * math.cos(y_theta) - x * math.sin(y_theta)
x = data_rotate[:, 0].copy()
y = data_rotate[:, 1].copy()
z = data_rotate[:, 2].copy()
z_theta = obj_orientation[2]
data_rotate[:, 0] = x * math.cos(z_theta) - y * math.sin(z_theta)
data_rotate[:, 1] = x * math.sin(z_theta) + y * math.cos(z_theta)
data_rotate[:, 2] = z
data_trans = data_rotate.copy()
data_trans[:, 0] += obj_relative_position[0]
data_trans[:, 1] += obj_relative_position[1]
data_trans[:, 2] += obj_relative_position[2]
uniform_gt_data = data_trans.copy()
data = pc_frame
resolution = 0.01
sep_x = math.ceil(0.3 / resolution)
sep_y = math.ceil(0.3 / resolution)
sep_z = math.ceil(0.3 / resolution)
x, y, z = np.indices((sep_x, sep_y, sep_z))
cube1 = (x<0) & (y <1) & (z<1)
gtcube = (x<0) & (y <1) & (z<1)
voxels = cube1
gt_voxels = gtcube
# draw gt
gt_map_list = []
for idx,val in enumerate(uniform_gt_data):
idx_x = math.floor((val[0] + 0.15) / resolution)
idx_y = math.floor((val[1] + 0.15) / resolution)
idx_z = math.floor((val[2]) / resolution)
# if idx_z > 6:
# continue
name = str(idx_x) + '_' + str(idx_y) + '_' + str(idx_z)
if name not in gt_map_list:
gt_map_list.append(name)
cube = (x < idx_x + 1) & (y < idx_y + 1) & (z < idx_z + 1) & (x >= idx_x) & (y >= idx_y) & (z >= idx_z)
# combine the objects into a single boolean array
gt_voxels += cube
# draw cuboids in the top left and bottom right corners, and a link between them
map_list = []
for idx,val in enumerate(data):
idx_x = math.floor((val[0] + 0.15) / resolution)
idx_y = math.floor((val[1] + 0.15) / resolution)
idx_z = math.floor((val[2]) / resolution)
# if idx_z > 6:
# continue
name = str(idx_x) + '_' + str(idx_y) + '_' + str(idx_z)
if name not in map_list and name in gt_map_list:
map_list.append(name)
cube = (x < idx_x + 1) & (y < idx_y + 1) & (z < idx_z + 1) & (x >= idx_x) & (y >= idx_y) & (z >= idx_z)
# combine the objects into a single boolean array
voxels += cube
# gt_obj4:668
occupancy = len(map_list) / len(gt_map_list)
# print(len(map_list) / sep_x / sep_y / sep_z )
is_best_reconstruct = True
files = os.listdir('/home/jianrenw/prox/tslam/data/result/best_eval/{}/{}/{}/'.format(obj_name, reset_mode_conf, reward_conf))
for file in files:
if "overlap" in file and "png" in file:
file_str = str(file)
previous_occup = file_str[(file_str.index("-")+1):file_str.index(".png")]
if occupancy < float(previous_occup):
is_best_reconstruct = False
# obj_name = "obj{}".format(obj_name)
# set the colors of each object
vis_voxel = gt_voxels | voxels
colors = np.empty(vis_voxel.shape, dtype=object)
colors[gt_voxels] = 'white'
colors[voxels] = 'cyan'
# and plot everything
ax = plt.figure().add_subplot(projection='3d')
ax.set_zlim(1,30)
ax.voxels(vis_voxel, facecolors=colors, edgecolor='g', alpha=.4, linewidth=.05)
# plt.savefig('uniform_gtbox_{}.png'.format(step))
if is_best_policy or is_best_reconstruct:
plt.savefig('/home/jianrenw/prox/tslam/data/result/best_eval/{}/{}/{}/bp{}_br{}_overlap-{}.png'.format(obj_name, reset_mode_conf, reward_conf, is_best_policy, is_best_reconstruct, occupancy))
plt.savefig('voxel/iter-{}-{}-overlap-{}.png'.format(iternum, obj_name, occupancy))
plt.close()
ax = plt.figure().add_subplot(projection='3d')
ax.set_zlim(1,30)
ax.voxels(gt_voxels, facecolors=colors, edgecolor='g', alpha=.4, linewidth=.05)
if is_best_policy or is_best_reconstruct:
plt.savefig('/home/jianrenw/prox/tslam/data/result/best_eval/{}/{}/{}/gt.png'.format(obj_name, reset_mode_conf, reward_conf))
plt.savefig('voxel/iter-{}-{}-gt.png'.format(iternum, obj_name))
plt.close()
ax = plt.figure().add_subplot(projection='3d')
ax.set_zlim(1,30)
ax.voxels(voxels, facecolors=colors, edgecolor='g', alpha=.4, linewidth=.05)
if is_best_policy or is_best_reconstruct:
plt.savefig('/home/jianrenw/prox/tslam/data/result/best_eval/{}/{}/{}/bp{}_br{}_exp.png'.format(obj_name, reset_mode_conf, reward_conf, is_best_policy, is_best_reconstruct))
plt.savefig('voxel/iter-{}-{}-exp.png'.format(iternum, obj_name))
plt.close()
return is_best_reconstruct, occupancy
def train_agent(job_name, agent,
seed = 0,
niter = 101,
gamma = 0.995,
gae_lambda = None,
num_cpu = 16,
sample_mode = 'trajectories',
horizon= int(150),
num_traj = 50,
num_samples = 50000, # has precedence, used with sample_mode = 'samples'
save_freq = 10,
evaluation_rollouts = None,
plot_keys = ['stoc_pol_mean'],
env_kwargs= dict(),
visualize_kwargs= dict(),
sample_paths_kwargs= dict(),
):
print("num_cpu{}".format(num_cpu))
np.random.seed(seed)
if os.path.isdir(job_name) == False:
os.mkdir(job_name)
previous_dir = os.getcwd()
obj_name = env_kwargs["obj_name"]
reset_mode_conf = env_kwargs["reset_mode"]
reward_conf = "cf{}knn{}voxel{}".format(env_kwargs["chamfer_r_factor"], env_kwargs["knn_r_factor"], env_kwargs["new_voxel_r_factor"])
os.chdir(job_name) # important! we are now in the directory to save data
if os.path.isdir('iterations') == False: os.mkdir('iterations')
if os.path.isdir('2dpointcloud') == False: os.mkdir('2dpointcloud')
if os.path.isdir('pointcloudnpz') == False: os.mkdir('pointcloudnpz')
if os.path.isdir('/home/jianrenw/prox/tslam/data/result/best_policy/{}/{}/{}'.format(obj_name, reset_mode_conf, reward_conf)) == False: os.makedirs('/home/jianrenw/prox/tslam/data/result/best_policy/{}/{}/{}'.format(obj_name, reset_mode_conf, reward_conf))
if os.path.isdir('/home/jianrenw/prox/tslam/data/result/best_eval/{}/{}/{}'.format(obj_name, reset_mode_conf, reward_conf)) == False: os.makedirs('/home/jianrenw/prox/tslam/data/result/best_eval/{}/{}/{}'.format(obj_name, reset_mode_conf, reward_conf))
if os.path.isdir('voxel') == False: os.mkdir('voxel')
if os.path.isdir('logs') == False and agent.save_logs == True: os.mkdir('logs')
best_policy = copy.deepcopy(agent.policy)
best_perf = -1e8
train_curve = best_perf*np.ones(niter)
mean_pol_perf = 0.0
e = GymEnv(agent.env.env_id, env_kwargs)
# Load from any existing checkpoint, policy, statistics, etc.
# Why no checkpointing.. :(
i_start = _load_latest_policy_and_logs(agent,
policy_dir='iterations',
logs_dir='logs')
if i_start:
print("Resuming from an existing job folder ...")
for i in range(i_start, niter):
print("......................................................................................")
print("ITERATION : %i " % i)
is_best_policy = False
if train_curve[i-1] > best_perf:
if exptools: exptools.logging.logger.log_text("update best_policy")
best_policy = copy.deepcopy(agent.policy)
best_perf = train_curve[i-1]
is_best_policy = True
N = num_traj if sample_mode == 'trajectories' else num_samples
stats = agent.train_step(
N=N,
sample_mode=sample_mode,
horizon= horizon,
gamma=gamma,
gae_lambda=gae_lambda,
num_cpu=num_cpu,
env_kwargs= env_kwargs,
sample_paths_kwargs= sample_paths_kwargs,
)
train_curve[i] = stats[0]
if evaluation_rollouts is not None and evaluation_rollouts > 0:
print("Performing evaluation rollouts ........")
eval_paths = sample_paths(
num_traj=evaluation_rollouts,
env=e.env_id,
policy=agent.policy,
eval_mode=True,
base_seed=seed,
num_cpu=num_cpu,
env_kwargs= env_kwargs,
**sample_paths_kwargs)
mean_pol_perf = np.mean([np.sum(path['rewards']) for path in eval_paths])
if agent.save_logs:
agent.logger.log_kv('eval_score', mean_pol_perf)
if exptools: exptools.logging.logger.log_scalar('eval_score', mean_pol_perf, i)
if exptools:
env_infos = [path["env_infos"] for path in eval_paths] # a list of dict
rewards = dict()
total_points = list()
if env_infos:
# get decomposed reward statistics
keys = [k for k in env_infos[0].keys() if "_p" in k[-2:] or "_r" in k[-2:] or "occupancy" in k]
for k in keys:
rewards[k] = list()
for env_info in env_infos:
rewards[k].append(env_info[k])
for env_info in env_infos:
total_points.append(len(env_info["pointcloud"]))
for k, v in rewards.items():
exptools.logging.logger.log_scalar_batch(k, v, i)
exptools.logging.logger.log_scalar_batch("total_num_points", total_points, i)
print(">>> finish evaluation rollouts")
if (i % save_freq == 0 and i > 0):
if agent.save_logs:
agent.logger.save_log('logs/')
make_train_plots(log=agent.logger.log, keys=plot_keys, save_loc='logs/')
obj_orientation = env_kwargs["obj_orientation"]
obj_relative_position = env_kwargs["obj_relative_position"]
obj_scale = env_kwargs["obj_scale"]
policy_file = 'policy_%i.pickle' % i
baseline_file = 'baseline_%i.pickle' % i
pickle.dump(agent.policy, open('iterations/' + policy_file, 'wb'))
pickle.dump(agent.baseline, open('iterations/' + baseline_file, 'wb'))
pickle.dump(best_policy, open('iterations/best_policy.pickle', 'wb'))
pickle.dump(agent.global_status, open('iterations/global_status.pickle', 'wb'))
            # save videos, point cloud, and reconstructed mesh
if exptools:
video, env_infos = e.visualize_policy_offscreen(
policy= agent.policy,
**visualize_kwargs,
) # (T, C, H, W)
video_explore, env_infos_explore = e.visualize_policy_explore(
policy= agent.policy,
**visualize_kwargs,
) # (T, C, H, W)
pc_frame = np.array(env_infos[-1]["pointcloud"] if len(env_infos[-1]["pointcloud"]) > 0 else np.empty((0, 3)))
# 3d voxel visualization
is_best_reconstruct, occupancy = save_voxel_visualization(obj_name, reset_mode_conf, reward_conf, obj_orientation, obj_relative_position, obj_scale, pc_frame, i, is_best_policy)
if is_best_policy or is_best_reconstruct:
pickle.dump(best_policy, open('/home/jianrenw/prox/tslam/data/result/best_policy/{}/{}/{}/bp{}_br{}_best_policy.pickle'.format(obj_name, reset_mode_conf, reward_conf, is_best_policy, is_best_reconstruct), 'wb'))
if is_best_policy or is_best_reconstruct:
np.savez_compressed("pointcloudnpz/alpha_pointcloud_"+str(i)+".npz",pcd=pc_frame)
np.savez_compressed("/home/jianrenw/prox/tslam/data/result/best_eval/{}/{}/{}/bp{}_br{}_alpha_pointcloud_overlap-{}.npz".format(obj_name, reset_mode_conf, reward_conf, is_best_policy, is_best_reconstruct, occupancy), pcd=pc_frame)
# else:
# np.savez_compressed("pointcloudnpz/pointcloud_"+str(i)+".npz",pcd=pc_frame)
# pc_frames.append(pc_frame)
ax = plt.axes()
ax.scatter(pc_frame[:, 0], pc_frame[:, 1], cmap='viridis', linewidth=0.5)
if is_best_policy or is_best_reconstruct:
plt.savefig("2dpointcloud/alpha_{}.png".format('2dpointcloud' + str(i)))
plt.savefig("/home/jianrenw/prox/tslam/data/result/best_eval/{}/{}/{}/bp{}_br{}_alpha_2dpointcloud_overlap-{}.png".format(obj_name, reset_mode_conf, reward_conf, is_best_policy, is_best_reconstruct, occupancy))
# else:
# plt.savefig("2dpointcloud/{}.png".format('2dpointcloud' + str(i)))
plt.close()
# =======================================================
# if obj_name in ["airplane", "apple", "glass", "cup"]:
exptools.logging.logger.record_image("rendered", video[-1], i)
exptools.logging.logger.record_gif("rendered", video, i)
# exptools.logging.logger.record_image("rendered_explore", video_explore[-1], i)
# exptools.logging.logger.record_gif("rendered_explore", video_explore, i)
# print results to console
if i == 0:
result_file = open('results.txt', 'w')
print("Iter | Stoc Pol | Mean Pol | Best (Stoc) \n")
result_file.write("Iter | Sampling Pol | Evaluation Pol | Best (Sampled) \n")
result_file.close()
result_file = open('results.txt', 'a')
result_file.write("%4i %5.2f %5.2f %5.2f \n" % (i, train_curve[i], mean_pol_perf, best_perf))
result_file.close()
if agent.save_logs:
print_data = sorted(filter(lambda v: np.asarray(v[1]).size == 1,
agent.logger.get_current_log().items()))
print(tabulate(print_data))
if exptools:
exptools.logging.logger.log_scalar("Iter", i, i)
exptools.logging.logger.log_scalar("SamplingPol", train_curve[i], i)
exptools.logging.logger.log_scalar("EvaluationPol", mean_pol_perf, i)
exptools.logging.logger.log_scalar("BestSampled", best_perf, i)
exptools.logging.logger.dump_data()
# final save
pickle.dump(best_policy, open('iterations/best_policy.pickle', 'wb'))
if agent.save_logs:
agent.logger.save_log('logs/')
make_train_plots(log=agent.logger.log, keys=plot_keys, save_loc='logs/')
os.chdir(previous_dir)
| en | 0.604145 | Loads the latest policy. Returns the next step number to begin with. # fresh start # fresh start # find latest policy/baseline # additional # global_status_path = os.path.join(policy_dir, 'global_status.pickle') # with open(global_status_path, 'rb') as fp: # agent.load_global_status( pickle.load(fp) ) # cannot find any saved policy # draw gt # if idx_z > 6: # continue # combine the objects into a single boolean array # draw cuboids in the top left and bottom right corners, and a link between them # if idx_z > 6: # continue # combine the objects into a single boolean array # gt_obj4:668 # print(len(map_list) / sep_x / sep_y / sep_z ) # obj_name = "obj{}".format(obj_name) # set the colors of each object # and plot everything # plt.savefig('uniform_gtbox_{}.png'.format(step)) # has precedence, used with sample_mode = 'samples' # important! we are now in the directory to save data # Load from any existing checkpoint, policy, statistics, etc. # Why no checkpointing.. :( # a list of dict # get decomposed reward statistics # save videos and pointcloud and reconstruted mesh # (T, C, H, W) # (T, C, H, W) # 3d voxel visualization # else: # np.savez_compressed("pointcloudnpz/pointcloud_"+str(i)+".npz",pcd=pc_frame) # pc_frames.append(pc_frame) # else: # plt.savefig("2dpointcloud/{}.png".format('2dpointcloud' + str(i))) # ======================================================= # if obj_name in ["airplane", "apple", "glass", "cup"]: # exptools.logging.logger.record_image("rendered_explore", video_explore[-1], i) # exptools.logging.logger.record_gif("rendered_explore", video_explore, i) # print results to console # final save | 1.841365 | 2 |
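The occupancy score computed in save_voxel_visualization reduces to voxelising both point clouds on a shared grid and counting how many ground-truth voxels the explored cloud touches. The sketch below is self-contained; the offset and resolution mirror the constants above, and the random clouds are stand-ins for real data.
import numpy as np
def voxel_keys(points, resolution=0.01, offset=0.15):
    # map each 3D point to the integer index of the voxel containing it (x/y shifted by offset, z not)
    idx = np.floor((points + np.array([offset, offset, 0.0])) / resolution).astype(int)
    return {tuple(row) for row in idx}
gt_points = np.random.uniform(-0.1, 0.1, size=(500, 3))              # stand-in for the uniform GT cloud
explored = gt_points[:200] + np.random.normal(0.0, 0.002, (200, 3))  # stand-in for the touched points
gt_voxels = voxel_keys(gt_points)
hit_voxels = voxel_keys(explored) & gt_voxels
occupancy = len(hit_voxels) / len(gt_voxels)   # same ratio as len(map_list) / len(gt_map_list) above
print("occupancy ~ {:.2f}".format(occupancy))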
src/tests/plugins/banktransfer/test_refund_export.py | NicsTr/pretix | 0 | 7664 | <gh_stars>0
import json
from datetime import timedelta
from decimal import Decimal
import pytest
from django.utils.timezone import now
from pretix.base.models import Event, Order, OrderRefund, Organizer, Team, User
from pretix.plugins.banktransfer.models import RefundExport
from pretix.plugins.banktransfer.views import (
_row_key_func, _unite_transaction_rows,
)
@pytest.fixture
def env():
o = Organizer.objects.create(name='Dummy', slug='dummy')
event = Event.objects.create(
organizer=o, name='Dummy', slug='dummy',
date_from=now(), plugins='pretix.plugins.banktransfer,pretix.plugins.paypal'
)
user = User.objects.create_user('<EMAIL>', 'dummy')
t = Team.objects.create(organizer=event.organizer, can_view_orders=True, can_change_orders=True)
t.members.add(user)
t.limit_events.add(event)
order = Order.objects.create(
code='1Z3AS', event=event, email='<EMAIL>',
status=Order.STATUS_PAID,
datetime=now(), expires=now() + timedelta(days=10),
total=23
)
refund = OrderRefund.objects.create(
order=order,
amount=Decimal("23"),
provider='banktransfer',
state=OrderRefund.REFUND_STATE_CREATED,
info=json.dumps({
'payer': "Abc Def",
'iban': "DE27520521540534534466",
'bic': "HELADEF1MEG",
})
)
return event, user, refund
url_prefixes = [
"/control/event/dummy/dummy/",
"/control/organizer/dummy/"
]
@pytest.mark.django_db
@pytest.mark.parametrize("url_prefix", url_prefixes)
def test_export_refunds_as_sepa_xml(client, env, url_prefix):
client.login(email='<EMAIL>', password='<PASSWORD>')
r = client.post(f'{url_prefix}banktransfer/refunds/', {"unite_transactions": True}, follow=True)
assert b"SEPA" in r.content
r = client.get(f'{url_prefix}banktransfer/sepa-export/{RefundExport.objects.last().id}/')
assert r.status_code == 200
r = client.post(f'{url_prefix}banktransfer/sepa-export/{RefundExport.objects.last().id}/', {
"account_holder": "<NAME>",
"iban": "DE71720690050653667120",
"bic": "GENODEF1AIL",
})
assert "DE27520521540534534466" in "".join(str(part) for part in r.streaming_content)
@pytest.mark.django_db
@pytest.mark.parametrize("url_prefix", url_prefixes)
def test_export_refunds(client, env, url_prefix):
client.login(email='<EMAIL>', password='<PASSWORD>')
r = client.get(f'{url_prefix}banktransfer/refunds/')
assert r.status_code == 200
r = client.post(f'{url_prefix}banktransfer/refunds/', {"unite_transactions": True}, follow=True)
assert r.status_code == 200
refund = RefundExport.objects.last()
assert refund is not None
assert b"Download CSV" in r.content
r = client.get(f'{url_prefix}banktransfer/export/{refund.id}/')
assert r.status_code == 200
assert "DE27520521540534534466" in "".join(str(part) for part in r.streaming_content)
def test_unite_transaction_rows():
rows = sorted([
{
'payer': "Abc Def",
'iban': 'DE12345678901234567890',
'bic': 'HARKE9000',
'id': "ROLLA-R-1",
'amount': Decimal("42.23"),
},
{
'payer': "First Last",
'iban': 'DE111111111111111111111',
'bic': 'ikswez2020',
'id': "PARTY-R-1",
'amount': Decimal("6.50"),
}
], key=_row_key_func)
assert _unite_transaction_rows(rows) == rows
rows = sorted(rows + [
{
'payer': "Abc Def",
'iban': 'DE12345678901234567890',
'bic': 'HARKE9000',
'id': "ROLLA-R-1",
'amount': Decimal("7.77"),
},
{
'payer': "Another Last",
'iban': 'DE111111111111111111111',
'bic': 'ikswez2020',
'id': "PARTY-R-2",
'amount': Decimal("13.50"),
}
], key=_row_key_func)
assert _unite_transaction_rows(rows) == sorted([
{
'payer': "Abc Def",
'iban': 'DE12345678901234567890',
'bic': 'HARKE9000',
'id': "ROLLA-R-1",
'amount': Decimal("50.00"),
},
{
'payer': 'Another Last, First Last',
'iban': 'DE111111111111111111111',
'bic': 'ikswez2020',
'id': 'PARTY-R-1, PARTY-R-2',
'amount': Decimal('20.00'),
}], key=_row_key_func)
| import json
from datetime import timedelta
from decimal import Decimal
import pytest
from django.utils.timezone import now
from pretix.base.models import Event, Order, OrderRefund, Organizer, Team, User
from pretix.plugins.banktransfer.models import RefundExport
from pretix.plugins.banktransfer.views import (
_row_key_func, _unite_transaction_rows,
)
@pytest.fixture
def env():
o = Organizer.objects.create(name='Dummy', slug='dummy')
event = Event.objects.create(
organizer=o, name='Dummy', slug='dummy',
date_from=now(), plugins='pretix.plugins.banktransfer,pretix.plugins.paypal'
)
user = User.objects.create_user('<EMAIL>', 'dummy')
t = Team.objects.create(organizer=event.organizer, can_view_orders=True, can_change_orders=True)
t.members.add(user)
t.limit_events.add(event)
order = Order.objects.create(
code='1Z3AS', event=event, email='<EMAIL>',
status=Order.STATUS_PAID,
datetime=now(), expires=now() + timedelta(days=10),
total=23
)
refund = OrderRefund.objects.create(
order=order,
amount=Decimal("23"),
provider='banktransfer',
state=OrderRefund.REFUND_STATE_CREATED,
info=json.dumps({
'payer': "Abc Def",
'iban': "DE27520521540534534466",
'bic': "HELADEF1MEG",
})
)
return event, user, refund
url_prefixes = [
"/control/event/dummy/dummy/",
"/control/organizer/dummy/"
]
@pytest.mark.django_db
@pytest.mark.parametrize("url_prefix", url_prefixes)
def test_export_refunds_as_sepa_xml(client, env, url_prefix):
client.login(email='<EMAIL>', password='<PASSWORD>')
r = client.post(f'{url_prefix}banktransfer/refunds/', {"unite_transactions": True}, follow=True)
assert b"SEPA" in r.content
r = client.get(f'{url_prefix}banktransfer/sepa-export/{RefundExport.objects.last().id}/')
assert r.status_code == 200
r = client.post(f'{url_prefix}banktransfer/sepa-export/{RefundExport.objects.last().id}/', {
"account_holder": "<NAME>",
"iban": "DE71720690050653667120",
"bic": "GENODEF1AIL",
})
assert "DE27520521540534534466" in "".join(str(part) for part in r.streaming_content)
@pytest.mark.django_db
@pytest.mark.parametrize("url_prefix", url_prefixes)
def test_export_refunds(client, env, url_prefix):
client.login(email='<EMAIL>', password='<PASSWORD>')
r = client.get(f'{url_prefix}banktransfer/refunds/')
assert r.status_code == 200
r = client.post(f'{url_prefix}banktransfer/refunds/', {"unite_transactions": True}, follow=True)
assert r.status_code == 200
refund = RefundExport.objects.last()
assert refund is not None
assert b"Download CSV" in r.content
r = client.get(f'{url_prefix}banktransfer/export/{refund.id}/')
assert r.status_code == 200
assert "DE27520521540534534466" in "".join(str(part) for part in r.streaming_content)
def test_unite_transaction_rows():
rows = sorted([
{
'payer': "Abc Def",
'iban': 'DE12345678901234567890',
'bic': 'HARKE9000',
'id': "ROLLA-R-1",
'amount': Decimal("42.23"),
},
{
'payer': "First Last",
'iban': 'DE111111111111111111111',
'bic': 'ikswez2020',
'id': "PARTY-R-1",
'amount': Decimal("6.50"),
}
], key=_row_key_func)
assert _unite_transaction_rows(rows) == rows
rows = sorted(rows + [
{
'payer': "Abc Def",
'iban': 'DE12345678901234567890',
'bic': 'HARKE9000',
'id': "ROLLA-R-1",
'amount': Decimal("7.77"),
},
{
'payer': "Another Last",
'iban': 'DE111111111111111111111',
'bic': 'ikswez2020',
'id': "PARTY-R-2",
'amount': Decimal("13.50"),
}
], key=_row_key_func)
assert _unite_transaction_rows(rows) == sorted([
{
'payer': "Abc Def",
'iban': 'DE12345678901234567890',
'bic': 'HARKE9000',
'id': "ROLLA-R-1",
'amount': Decimal("50.00"),
},
{
'payer': 'Another Last, First Last',
'iban': 'DE111111111111111111111',
'bic': 'ikswez2020',
'id': 'PARTY-R-1, PARTY-R-2',
'amount': Decimal('20.00'),
}], key=_row_key_func) | none | 1 | 1.826474 | 2 |
|
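test_unite_transaction_rows above expects rows that share the same bank details to be merged: amounts are summed while payer and ID fields are joined. The following is a rough, self-contained re-implementation of that merging idea; the real _row_key_func and _unite_transaction_rows in pretix may differ in key choice and ordering.
# Hedged sketch of the merging behaviour asserted above, not the pretix implementation.
from collections import OrderedDict
from decimal import Decimal
def unite_rows_sketch(rows):
    merged = OrderedDict()
    for row in rows:
        key = (row['iban'], row['bic'])            # assumed grouping key
        if key not in merged:
            merged[key] = dict(row)
        else:
            tgt = merged[key]
            tgt['amount'] += row['amount']
            tgt['id'] = ', '.join(sorted({tgt['id'], row['id']}))
            tgt['payer'] = ', '.join(sorted({tgt['payer'], row['payer']}))
    return list(merged.values())
rows = [
    {'payer': 'First Last', 'iban': 'DE11', 'bic': 'X', 'id': 'PARTY-R-1', 'amount': Decimal('6.50')},
    {'payer': 'Another Last', 'iban': 'DE11', 'bic': 'X', 'id': 'PARTY-R-2', 'amount': Decimal('13.50')},
]
# -> one merged row: amount 20.00, id 'PARTY-R-1, PARTY-R-2', payer 'Another Last, First Last'
print(unite_rows_sketch(rows))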
datawinners/alldata/urls.py | ICT4H/dcs-web | 1 | 7665 | <gh_stars>1-10
# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
from django.conf.urls.defaults import patterns, url
from datawinners.alldata.views import get_entity_list_by_type
from datawinners.alldata.views import smart_phone_instruction
from datawinners.alldata.views import index, reports
from datawinners.alldata.views import failed_submissions
urlpatterns = patterns('',
url(r'^alldata/$', index, name="alldata_index"),
url(r'^project/$', index),
(r'^questionnaire/entities/(?P<entity_type>.+?)/$', get_entity_list_by_type),
(r'^questionnaire/reports/$', reports),
(r'^alldata/reports/$', reports),
(r'^allfailedsubmissions/$', failed_submissions),
url(r'^smartphoneinstruction$', smart_phone_instruction, name="smart_phone_instruction"),
url(r'^smartphoneinstruction/(?P<project_id>.+?)/$', smart_phone_instruction, name="smart_phone_instruction"),
)
| # vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
from django.conf.urls.defaults import patterns, url
from datawinners.alldata.views import get_entity_list_by_type
from datawinners.alldata.views import smart_phone_instruction
from datawinners.alldata.views import index, reports
from datawinners.alldata.views import failed_submissions
urlpatterns = patterns('',
url(r'^alldata/$', index, name="alldata_index"),
url(r'^project/$', index),
(r'^questionnaire/entities/(?P<entity_type>.+?)/$', get_entity_list_by_type),
(r'^questionnaire/reports/$', reports),
(r'^alldata/reports/$', reports),
(r'^allfailedsubmissions/$', failed_submissions),
url(r'^smartphoneinstruction$', smart_phone_instruction, name="smart_phone_instruction"),
url(r'^smartphoneinstruction/(?P<project_id>.+?)/$', smart_phone_instruction, name="smart_phone_instruction"),
) | fr | 0.370952 | # vim: ai ts=4 sts=4 et sw=4 encoding=utf-8 | 1.812685 | 2 |
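For reference, the named routes above would typically be reversed elsewhere in the codebase roughly as below; this is a sketch, and the import path reflects the old Django release this urlconf targets.
# Illustrative reverse() calls for the URL names defined above.
from django.core.urlresolvers import reverse
reverse('alldata_index')                                            # -> '/alldata/'
reverse('smart_phone_instruction', kwargs={'project_id': '123'})    # -> '/smartphoneinstruction/123/'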
NewLifeUtils/LoggerModule.py | NewLife1324/NewLifeUtils-Dev | 2 | 7666 | <gh_stars>1-10
from NewLifeUtils.ColorModule import ACC, MCC
from NewLifeUtils.UtilsModule import hex_to_rgb
from NewLifeUtils.FileModule import DataStorage, LogFile
from NewLifeUtils.StringUtilModule import remove_csi
from datetime import datetime
import sys
class Formatter(dict):
def __init__(self, *args, date_format="%d-%m-%Y", time_format="%H:%M:%S", **kwargs):
self.date_format = "%d-%m-%Y"
self.time_format = "%H:%M:%S"
dict.__init__(self, *args, **kwargs)
def __missing__(self, key):
if key == "time":
return datetime.now().strftime(self.time_format)
elif key == "date":
return datetime.now().strftime(self.date_format)
elif key.startswith("#"):
if key == "#reset":
return ACC.RESET
elif key == "#under":
return ACC.UNDERLINE
elif key == "#nounder":
return ACC.NO_UNDERLINE
elif key == "#reverse":
return ACC.REVERSE
elif key == "#noreverse":
return ACC.NO_REVERSE
else:
return ACC.customrgb(*hex_to_rgb(key))
else:
return "{" + key + "}"
def create_logger(
pattern="[{time}] {tag}: {message}",
tag_length=7,
default_tag="Log",
reader=False,
reader_bg="#24416b",
reader_fg="#a0dbf2",
file_log=False,
logfile=None,
time_format = "%d-%m-%Y",
data_format = "%H:%M:%S",
):
def log(message, tag=""):
if reader:
if not any([message.endswith(i) for i in tuple(":> ")]):
title = message + ": "
else:
title = message
message = message.rstrip(" ")
message = message.rstrip(":")
message = message.rstrip(">")
sys.stdout.write(
f"{ACC.bcustomrgb(*hex_to_rgb(reader_bg))}{ACC.customrgb(*hex_to_rgb(reader_fg))}{title}{MCC.ERASE_NXT_LINE}"
)
readed = input()
sys.stdout.write(ACC.RESET + MCC.up() + MCC.ERASE_ALL_LINE)
else:
readed = None
tag = ("{:<" + str(tag_length) + "}").format(tag if tag else default_tag)
log_record = pattern.format_map(
Formatter(tag=tag, message=message, input=readed,
time_format = time_format,
                      date_format = date_format)
)
sys.stdout.write(ACC.RESET + log_record + ACC.RESET + "\n")
if file_log:
logfile.write(remove_csi(log_record) + "\n")
return readed
return log
def cstm(pattern, **kwargs):
sys.stdout.write(
ACC.RESET + pattern.format_map(Formatter(**kwargs)) + ACC.RESET + "\n"
)
def smart_format(pattern, **kwargs):
return pattern.format_map(Formatter(**kwargs))
def init_from_cfg():
default_config = {
"log_pattern": "{#81f059}[{time}] {#6bd130}{tag}{#fff}: {#1ed476}{message}",
"wrn_pattern": "{#cfa529}[{time}] {#d7e356}{tag}{#fff}: {#b9c726}{message}",
"err_pattern": "{#cf4729}[{time}] {#d93b18}{tag}{#fff}: {#cf2727}{message}",
"tip_pattern": "{#9c1fd1}[{time}] {#471dc4}{tag}{#fff}: {#219ddb}{message}",
"rea_pattern": "{#2141a3}[{time}] {#5a51db}{tag}{#fff}: {#2459d6}{message} {#fff}: {#24d0d6}{input}",
"log_tag": "Log",
"wrn_tag": "Warn",
"err_tag": "Error",
"tip_tag": "Tip",
"rea_tag": "Reader",
"date_format": "%d-%m-%Y",
"time_format": "%H:%M:%S",
"tag_length": 7,
"file_log": True,
"logtime": "%d-%m-%Y-%H",
"logname": "log-{time}",
}
config = DataStorage("config.yml", "logger", default_config)
if config["file_log"]:
now = datetime.now()
logname = config["logname"]
logtime = config["logtime"]
logfile = LogFile(f"{logname.format(time=now.strftime(logtime))}.log", "logs")
else:
logfile = None
log = create_logger(pattern=config["log_pattern"], default_tag=config["log_tag"], file_log=config['file_log'], logfile=logfile, tag_length=config['tag_length'])
wrn = create_logger(pattern=config["wrn_pattern"], default_tag=config["wrn_tag"], file_log=config['file_log'], logfile=logfile, tag_length=config['tag_length'])
err = create_logger(pattern=config["err_pattern"], default_tag=config["err_tag"], file_log=config['file_log'], logfile=logfile, tag_length=config['tag_length'])
tip = create_logger(pattern=config["tip_pattern"], default_tag=config["tip_tag"], file_log=config['file_log'], logfile=logfile, tag_length=config['tag_length'])
rea = create_logger(
pattern=config["rea_pattern"], default_tag=config["rea_tag"], reader=True
)
return log, wrn, err, tip, rea
log, wrn, err, tip, rea = init_from_cfg() | from NewLifeUtils.ColorModule import ACC, MCC
from NewLifeUtils.UtilsModule import hex_to_rgb
from NewLifeUtils.FileModule import DataStorage, LogFile
from NewLifeUtils.StringUtilModule import remove_csi
from datetime import datetime
import sys
class Formatter(dict):
def __init__(self, *args, date_format="%d-%m-%Y", time_format="%H:%M:%S", **kwargs):
self.date_format = "%d-%m-%Y"
self.time_format = "%H:%M:%S"
dict.__init__(self, *args, **kwargs)
def __missing__(self, key):
if key == "time":
return datetime.now().strftime(self.time_format)
elif key == "date":
return datetime.now().strftime(self.date_format)
elif key.startswith("#"):
if key == "#reset":
return ACC.RESET
elif key == "#under":
return ACC.UNDERLINE
elif key == "#nounder":
return ACC.NO_UNDERLINE
elif key == "#reverse":
return ACC.REVERSE
elif key == "#noreverse":
return ACC.NO_REVERSE
else:
return ACC.customrgb(*hex_to_rgb(key))
else:
return "{" + key + "}"
def create_logger(
pattern="[{time}] {tag}: {message}",
tag_length=7,
default_tag="Log",
reader=False,
reader_bg="#24416b",
reader_fg="#a0dbf2",
file_log=False,
logfile=None,
time_format = "%d-%m-%Y",
data_format = "%H:%M:%S",
):
def log(message, tag=""):
if reader:
if not any([message.endswith(i) for i in tuple(":> ")]):
title = message + ": "
else:
title = message
message = message.rstrip(" ")
message = message.rstrip(":")
message = message.rstrip(">")
sys.stdout.write(
f"{ACC.bcustomrgb(*hex_to_rgb(reader_bg))}{ACC.customrgb(*hex_to_rgb(reader_fg))}{title}{MCC.ERASE_NXT_LINE}"
)
readed = input()
sys.stdout.write(ACC.RESET + MCC.up() + MCC.ERASE_ALL_LINE)
else:
readed = None
tag = ("{:<" + str(tag_length) + "}").format(tag if tag else default_tag)
log_record = pattern.format_map(
Formatter(tag=tag, message=message, input=readed,
time_format = time_format,
                      date_format = date_format)
)
sys.stdout.write(ACC.RESET + log_record + ACC.RESET + "\n")
if file_log:
logfile.write(remove_csi(log_record) + "\n")
return readed
return log
def cstm(pattern, **kwargs):
sys.stdout.write(
ACC.RESET + pattern.format_map(Formatter(**kwargs)) + ACC.RESET + "\n"
)
def smart_format(pattern, **kwargs):
return pattern.format_map(Formatter(**kwargs))
def init_from_cfg():
default_config = {
"log_pattern": "{#81f059}[{time}] {#6bd130}{tag}{#fff}: {#1ed476}{message}",
"wrn_pattern": "{#cfa529}[{time}] {#d7e356}{tag}{#fff}: {#b9c726}{message}",
"err_pattern": "{#cf4729}[{time}] {#d93b18}{tag}{#fff}: {#cf2727}{message}",
"tip_pattern": "{#9c1fd1}[{time}] {#471dc4}{tag}{#fff}: {#219ddb}{message}",
"rea_pattern": "{#2141a3}[{time}] {#5a51db}{tag}{#fff}: {#2459d6}{message} {#fff}: {#24d0d6}{input}",
"log_tag": "Log",
"wrn_tag": "Warn",
"err_tag": "Error",
"tip_tag": "Tip",
"rea_tag": "Reader",
"date_format": "%d-%m-%Y",
"time_format": "%H:%M:%S",
"tag_length": 7,
"file_log": True,
"logtime": "%d-%m-%Y-%H",
"logname": "log-{time}",
}
config = DataStorage("config.yml", "logger", default_config)
if config["file_log"]:
now = datetime.now()
logname = config["logname"]
logtime = config["logtime"]
logfile = LogFile(f"{logname.format(time=now.strftime(logtime))}.log", "logs")
else:
logfile = None
log = create_logger(pattern=config["log_pattern"], default_tag=config["log_tag"], file_log=config['file_log'], logfile=logfile, tag_length=config['tag_length'])
wrn = create_logger(pattern=config["wrn_pattern"], default_tag=config["wrn_tag"], file_log=config['file_log'], logfile=logfile, tag_length=config['tag_length'])
err = create_logger(pattern=config["err_pattern"], default_tag=config["err_tag"], file_log=config['file_log'], logfile=logfile, tag_length=config['tag_length'])
tip = create_logger(pattern=config["tip_pattern"], default_tag=config["tip_tag"], file_log=config['file_log'], logfile=logfile, tag_length=config['tag_length'])
rea = create_logger(
pattern=config["rea_pattern"], default_tag=config["rea_tag"], reader=True
)
return log, wrn, err, tip, rea
log, wrn, err, tip, rea = init_from_cfg() | en | 0.357801 | #81f059}[{time}] {#6bd130}{tag}{#fff}: {#1ed476}{message}", #cfa529}[{time}] {#d7e356}{tag}{#fff}: {#b9c726}{message}", #cf4729}[{time}] {#d93b18}{tag}{#fff}: {#cf2727}{message}", #9c1fd1}[{time}] {#471dc4}{tag}{#fff}: {#219ddb}{message}", #2141a3}[{time}] {#5a51db}{tag}{#fff}: {#2459d6}{message} {#fff}: {#24d0d6}{input}", | 2.290941 | 2 |
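A small usage sketch for create_logger as defined above. The pattern, tag and colours are arbitrary, and file logging is left off so no LogFile is needed.
from NewLifeUtils.LoggerModule import create_logger  # assumes the package above is installed
debug = create_logger(pattern="{#00ff00}[{time}] {tag}{#reset}: {message}", default_tag="Debug")
debug("logger module loaded")                      # prints a timestamped, colourised line
ask = create_logger(pattern="[{time}] {tag}: {message} -> {input}", default_tag="Ask", reader=True)
name = ask("Your name")                            # reader=True prompts on stdout and returns what was typed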
config/api_urls.py | elcolie/battleship | 0 | 7667 | <reponame>elcolie/battleship<filename>config/api_urls.py
from rest_framework import routers
from boards.api.viewsets import BoardViewSet
from fleets.api.viewsets import FleetViewSet
from missiles.api.viewsets import MissileViewSet
app_name = 'api'
router = routers.DefaultRouter()
router.register(r'boards', BoardViewSet, base_name='board')
router.register(r'fleets', FleetViewSet, base_name='fleet')
router.register(r'missiles', MissileViewSet, base_name='missile')
urlpatterns = router.urls
| from rest_framework import routers
from boards.api.viewsets import BoardViewSet
from fleets.api.viewsets import FleetViewSet
from missiles.api.viewsets import MissileViewSet
app_name = 'api'
router = routers.DefaultRouter()
router.register(r'boards', BoardViewSet, base_name='board')
router.register(r'fleets', FleetViewSet, base_name='fleet')
router.register(r'missiles', MissileViewSet, base_name='missile')
urlpatterns = router.urls | none | 1 | 1.819831 | 2 |
|
mealpy/evolutionary_based/MA.py | Alhassan20/mealpy | 1 | 7668 | <filename>mealpy/evolutionary_based/MA.py<gh_stars>1-10
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "<NAME>" at 14:22, 11/04/2020 %
# %
# Email: <EMAIL> %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieu1995 %
# ------------------------------------------------------------------------------------------------------%
import time
import numpy as np
from mealpy.optimizer import Optimizer
class BaseMA(Optimizer):
"""
The original version of: Memetic Algorithm (MA)
(On evolution, search, optimization, genetic algorithms and martial arts: Towards memetic algorithms)
Link:
Clever Algorithms: Nature-Inspired Programming Recipes - Memetic Algorithm (MA)
http://www.cleveralgorithms.com/nature-inspired/physical/memetic_algorithm.html
"""
ID_POS = 0
ID_FIT = 1
ID_BIT = 2
def __init__(self, problem: dict, epoch=1000, pop_size=100, pc=0.98, pm=0.025, p_local=0.5, max_local_gens=10, bits_per_param=16):
"""
Args:
problem (dict): a dictionary of your problem
epoch (int): maximum number of iterations, default = 1000
pop_size (int): number of population size, default = 100
pc (float): cross-over probability, default = 0.95
pm (float): mutation probability, default = 0.025
p_local ():
max_local_gens ():
bits_per_param ():
"""
super().__init__(problem)
self.epoch = epoch
self.pop_size = pop_size
self.pc = pc
self.pm = pm
self.p_local = p_local
self.max_local_gens = max_local_gens
self.bits_per_param = bits_per_param
self.bits_total = self.problem_size * self.bits_per_param
def create_solution(self):
position = np.random.uniform(self.lb, self.ub)
fitness = self.get_fitness_position(position=position)
bitstring = ''.join(["1" if np.random.uniform() < 0.5 else "0" for _ in range(0, self.bits_total)])
return [position, fitness, bitstring]
def _decode__(self, bitstring=None):
"""
Decode the random bitstring into real number
Args:
bitstring (str): "11000000100101000101010" - bits_per_param = 16, 32 bit for 2 variable. eg. x1 and x2
Returns:
list of real number (vector)
"""
vector = np.ones(self.problem_size)
for idx in range(0, self.problem_size):
param = bitstring[idx * self.bits_per_param: (idx + 1) * self.bits_per_param] # Select 16 bit every time
vector[idx] = self.lb[idx] + ((self.ub[idx] - self.lb[idx]) / ((2.0 ** self.bits_per_param) - 1)) * int(param, 2)
return vector
def _crossover__(self, dad=None, mom=None):
if np.random.uniform() >= self.pc:
temp = [dad].copy()
return temp[0]
else:
child = ""
for idx in range(0, self.bits_total):
if np.random.uniform() < 0.5:
child += dad[idx]
else:
child += mom[idx]
return child
def _point_mutation__(self, bitstring=None):
child = ""
for bit in bitstring:
if np.random.uniform() < self.pc:
child += "0" if bit == "1" else "1"
else:
child += bit
return child
def create_next_generation(self, pop: list):
## Binary tournament
children = [self.get_solution_kway_tournament_selection(pop, k_way=2, output=1)[0] for _ in range(self.pop_size)]
## Reproduction
for idx in range(0, self.pop_size):
ancient = pop[idx + 1] if idx % 2 == 0 else pop[idx - 1]
if idx == self.pop_size - 1:
ancient = pop[0]
bitstring_new = self._crossover__(pop[idx][self.ID_BIT], ancient[self.ID_BIT])
bitstring_new = self._point_mutation__(bitstring_new)
pos_new = self._decode__(bitstring_new)
fit_new = self.get_fitness_position(pos_new)
children[idx] = [pos_new, fit_new, bitstring_new]
return children
def _bits_climber__(self, child=None):
current = child.copy()
for idx in range(0, self.max_local_gens):
child = current.copy()
bitstring_new = self._point_mutation__(child[self.ID_BIT])
pos_new = self._decode__(bitstring_new)
fit_new = self.get_fitness_position(pos_new)
current = self.get_better_solution(child, [pos_new, fit_new, bitstring_new])
return current
def train(self):
pop = [self.create_solution() for _ in range(self.pop_size)]
pop, g_best = self.get_global_best_solution(pop)
self.history_list_g_best = [g_best]
self.history_list_c_best = self.history_list_g_best.copy()
for epoch in range(0, self.epoch):
time_start = time.time()
# Create next generations
pop = self.create_next_generation(pop)
# Searching in local
for i in range(0, self.pop_size):
if np.random.uniform() < self.p_local:
pop[i] = self._bits_climber__(pop[i])
# Sort the population and update the global best solution
pop = self.update_global_best_solution(pop)
## Additional information for the framework
time_start = time.time() - time_start
self.history_list_epoch_time.append(time_start)
self.print_epoch(epoch + 1, time_start)
self.history_list_pop.append(pop.copy())
## Additional information for the framework
self.solution = self.history_list_g_best[-1]
self.save_data()
return self.solution[self.ID_POS], self.solution[self.ID_FIT][self.ID_TAR]
| <filename>mealpy/evolutionary_based/MA.py<gh_stars>1-10
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "<NAME>" at 14:22, 11/04/2020 %
# %
# Email: <EMAIL> %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieu1995 %
# ------------------------------------------------------------------------------------------------------%
import time
import numpy as np
from mealpy.optimizer import Optimizer
class BaseMA(Optimizer):
"""
The original version of: Memetic Algorithm (MA)
(On evolution, search, optimization, genetic algorithms and martial arts: Towards memetic algorithms)
Link:
Clever Algorithms: Nature-Inspired Programming Recipes - Memetic Algorithm (MA)
http://www.cleveralgorithms.com/nature-inspired/physical/memetic_algorithm.html
"""
ID_POS = 0
ID_FIT = 1
ID_BIT = 2
def __init__(self, problem: dict, epoch=1000, pop_size=100, pc=0.98, pm=0.025, p_local=0.5, max_local_gens=10, bits_per_param=16):
"""
Args:
problem (dict): a dictionary of your problem
epoch (int): maximum number of iterations, default = 1000
pop_size (int): number of population size, default = 100
pc (float): cross-over probability, default = 0.95
pm (float): mutation probability, default = 0.025
p_local ():
max_local_gens ():
bits_per_param ():
"""
super().__init__(problem)
self.epoch = epoch
self.pop_size = pop_size
self.pc = pc
self.pm = pm
self.p_local = p_local
self.max_local_gens = max_local_gens
self.bits_per_param = bits_per_param
self.bits_total = self.problem_size * self.bits_per_param
def create_solution(self):
position = np.random.uniform(self.lb, self.ub)
fitness = self.get_fitness_position(position=position)
bitstring = ''.join(["1" if np.random.uniform() < 0.5 else "0" for _ in range(0, self.bits_total)])
return [position, fitness, bitstring]
def _decode__(self, bitstring=None):
"""
Decode the random bitstring into real number
Args:
bitstring (str): "11000000100101000101010" - bits_per_param = 16, 32 bit for 2 variable. eg. x1 and x2
Returns:
list of real number (vector)
"""
vector = np.ones(self.problem_size)
for idx in range(0, self.problem_size):
param = bitstring[idx * self.bits_per_param: (idx + 1) * self.bits_per_param] # Select 16 bit every time
vector[idx] = self.lb[idx] + ((self.ub[idx] - self.lb[idx]) / ((2.0 ** self.bits_per_param) - 1)) * int(param, 2)
return vector
def _crossover__(self, dad=None, mom=None):
if np.random.uniform() >= self.pc:
temp = [dad].copy()
return temp[0]
else:
child = ""
for idx in range(0, self.bits_total):
if np.random.uniform() < 0.5:
child += dad[idx]
else:
child += mom[idx]
return child
def _point_mutation__(self, bitstring=None):
child = ""
for bit in bitstring:
if np.random.uniform() < self.pc:
child += "0" if bit == "1" else "1"
else:
child += bit
return child
def create_next_generation(self, pop: list):
## Binary tournament
children = [self.get_solution_kway_tournament_selection(pop, k_way=2, output=1)[0] for _ in range(self.pop_size)]
## Reproduction
for idx in range(0, self.pop_size):
ancient = pop[idx + 1] if idx % 2 == 0 else pop[idx - 1]
if idx == self.pop_size - 1:
ancient = pop[0]
bitstring_new = self._crossover__(pop[idx][self.ID_BIT], ancient[self.ID_BIT])
bitstring_new = self._point_mutation__(bitstring_new)
pos_new = self._decode__(bitstring_new)
fit_new = self.get_fitness_position(pos_new)
children[idx] = [pos_new, fit_new, bitstring_new]
return children
def _bits_climber__(self, child=None):
current = child.copy()
for idx in range(0, self.max_local_gens):
child = current.copy()
bitstring_new = self._point_mutation__(child[self.ID_BIT])
pos_new = self._decode__(bitstring_new)
fit_new = self.get_fitness_position(pos_new)
current = self.get_better_solution(child, [pos_new, fit_new, bitstring_new])
return current
def train(self):
pop = [self.create_solution() for _ in range(self.pop_size)]
pop, g_best = self.get_global_best_solution(pop)
self.history_list_g_best = [g_best]
self.history_list_c_best = self.history_list_g_best.copy()
for epoch in range(0, self.epoch):
time_start = time.time()
# Create next generations
pop = self.create_next_generation(pop)
# Searching in local
for i in range(0, self.pop_size):
if np.random.uniform() < self.p_local:
pop[i] = self._bits_climber__(pop[i])
# Sort the population and update the global best solution
pop = self.update_global_best_solution(pop)
## Additional information for the framework
time_start = time.time() - time_start
self.history_list_epoch_time.append(time_start)
self.print_epoch(epoch + 1, time_start)
self.history_list_pop.append(pop.copy())
## Additional information for the framework
self.solution = self.history_list_g_best[-1]
self.save_data()
return self.solution[self.ID_POS], self.solution[self.ID_FIT][self.ID_TAR]
| en | 0.6147 | #!/usr/bin/env python # ------------------------------------------------------------------------------------------------------% # Created by "<NAME>" at 14:22, 11/04/2020 % # % # Email: <EMAIL> % # Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 % # Github: https://github.com/thieu1995 % # ------------------------------------------------------------------------------------------------------% The original version of: Memetic Algorithm (MA) (On evolution, search, optimization, genetic algorithms and martial arts: Towards memetic algorithms) Link: Clever Algorithms: Nature-Inspired Programming Recipes - Memetic Algorithm (MA) http://www.cleveralgorithms.com/nature-inspired/physical/memetic_algorithm.html Args: problem (dict): a dictionary of your problem epoch (int): maximum number of iterations, default = 1000 pop_size (int): number of population size, default = 100 pc (float): cross-over probability, default = 0.95 pm (float): mutation probability, default = 0.025 p_local (): max_local_gens (): bits_per_param (): Decode the random bitstring into real number Args: bitstring (str): "11000000100101000101010" - bits_per_param = 16, 32 bit for 2 variable. eg. x1 and x2 Returns: list of real number (vector) # Select 16 bit every time ## Binary tournament ## Reproduction # Create next generations # Searching in local # Sort the population and update the global best solution ## Additional information for the framework ## Additional information for the framework | 2.296615 | 2 |
src/estimagic/estimation/estimate_ml.py | OpenSourceEconomics/estimagic | 83 | 7669 | from estimagic.inference.ml_covs import cov_cluster_robust
from estimagic.inference.ml_covs import cov_hessian
from estimagic.inference.ml_covs import cov_jacobian
from estimagic.inference.ml_covs import cov_robust
from estimagic.inference.ml_covs import cov_strata_robust
from estimagic.inference.shared import calculate_inference_quantities
from estimagic.inference.shared import check_is_optimized_and_derivative_case
from estimagic.inference.shared import get_derivative_case
from estimagic.inference.shared import get_internal_first_derivative
from estimagic.inference.shared import transform_covariance
from estimagic.optimization.optimize import maximize
from estimagic.parameters.parameter_conversion import get_derivative_conversion_function
from estimagic.parameters.process_constraints import process_constraints
from estimagic.shared.check_option_dicts import check_numdiff_options
from estimagic.shared.check_option_dicts import check_optimization_options
def estimate_ml(
loglike,
params,
optimize_options,
*,
constraints=None,
logging=False,
log_options=None,
loglike_kwargs=None,
derivative=None,
derivative_kwargs=None,
loglike_and_derivative=None,
loglike_and_derivative_kwargs=None,
numdiff_options=None,
jacobian=None,
jacobian_kwargs=None,
hessian=False,
hessian_kwargs=None,
ci_level=0.95,
n_samples=10_000,
bounds_handling="raise",
design_info=None,
):
"""Do a maximum likelihood (ml) estimation.
This is a high level interface of our lower level functions for maximization,
numerical differentiation and inference. It does the full workflow for maximum
likelihood estimation with just one function call.
While we have good defaults, you can still configure each aspect of each step
via the optional arguments of this function. If you find it easier to do the
"difficult" steps (mainly maximization and calculating numerical derivatives
of a potentially noisy function) separately, you can do so and just provide those
results as ``params``, ``jacobian`` and ``hessian``.
The docstring is aspirational and not all options are supported yet.
Args:
loglike (callable): Likelihood function that takes a params DataFrame (and
potentially other keyword arguments) and returns a dictionary that has at
least the entries "value" (a scalar float) and "contributions" (a 1d numpy
array or pandas Series) with the log likelihood contribution per individual.
params (pd.DataFrame): DataFrame where the "value" column contains the
estimated or start parameters of a likelihood model. See :ref:`params` for
details. If the supplied parameters are estimated parameters, set
optimize_options to False.
optimize_options (dict or False): Keyword arguments that govern the numerical
optimization. Valid entries are all arguments of
:func:`~estimagic.optimization.optimize.minimize` except for criterion,
derivative, criterion_and_derivative and params. If you pass False as
optimize_options you signal that ``params`` are already the optimal
parameters and no numerical optimization is needed.
constraints (list): List with constraint dictionaries.
See .. _link: ../../docs/source/how_to_guides/how_to_use_constraints.ipynb
logging (pathlib.Path, str or False): Path to sqlite3 file (which typically has
the file extension ``.db``. If the file does not exist, it will be created.
The dashboard can only be used when logging is used.
log_options (dict): Additional keyword arguments to configure the logging.
- "fast_logging": A boolean that determines if "unsafe" settings are used
to speed up write processes to the database. This should only be used for
very short running criterion functions where the main purpose of the log
is a real-time dashboard and it would not be catastrophic to get a
corrupted database in case of a sudden system shutdown. If one evaluation
of the criterion function (and gradient if applicable) takes more than
100 ms, the logging overhead is negligible.
- "if_table_exists": (str) One of "extend", "replace", "raise". What to
do if the tables we want to write to already exist. Default "extend".
- "if_database_exists": (str): One of "extend", "replace", "raise". What to
do if the database we want to write to already exists. Default "extend".
loglike_kwargs (dict): Additional keyword arguments for loglike.
derivative (callable): Function takes params and potentially other keyword
arguments and calculates the first derivative of loglike. It can either
return a numpy array or pandas Series/DataFrame with the derivative or
a dictionary with derivatives of each output of loglike. If loglike
returns a dict but derivative does not, it is your responsibility to
make sure that the correct derivative for the numerical optimizers you are
using is returned.
derivative_kwargs (dict): Additional keyword arguments for loglike.
loglike_and_derivative (callable): Return a tuple consisting of the result
of loglike and the result of derivative. Only use this if you can exploit
synergies in the calculation of loglike and derivative.
loglike_and_derivative_kwargs (dict): Additional keyword arguments for
loglike_and_derivative.
numdiff_options (dict): Keyword arguments for the calculation of numerical
derivatives for the calculation of standard errors. See
:ref:`first_derivative` for details.
jacobian (callable or pandas.DataFrame or False): A function that takes
``params`` and potentially other keyword arguments and returns the jacobian
of loglike["contributions"] with respect to the params. Alternatively, you
can pass a pandas.DataFrame with the Jacobian at the optimal parameters.
This is only possible if you pass ``optimize_options=False``. Note that you
only need to pass a Jacobian function if you have a closed form Jacobian but
decided not to return it as part of ``derivative`` (e.g. because you use
a scalar optimizer and can calculate a gradient in a way that is faster
than calculating and summing the Jacobian). If you pass None, a numerical
Jacobian will be calculated. If you pass ``False``, you signal that no
Jacobian should be calculated. Thus, no result that requires the Jacobian
will be calculated.
jacobian_kwargs (dict): Additional keyword arguments for the Jacobian function.
hessian (callable or pd.DataFrame): A function that takes
``params`` and potentially other keyword arguments and returns the Hessian
of loglike["value"] with respect to the params. Alternatively, you
can pass a pandas.DataFrame with the Hessian at the optimal parameters.
This is only possible if you pass ``optimize_options=False``. If you pass
None, a numerical Hessian will be calculated. If you pass ``False``, you
signal that no Hessian should be calculated. Thus, no result that requires
the Hessian will be calculated.
hessian_kwargs (dict): Additional keyword arguments for the Hessian function.
ci_level (float): Confidence level for the calculation of confidence intervals.
The default is 0.95.
n_samples (int): Number of samples used to transform the covariance matrix of
the internal parameter vector into the covariance matrix of the external
parameters. For background information about internal and external params
see :ref:`implementation_of_constraints`. This is only used if you have
specified constraints.
bounds_handling (str): One of "clip", "raise", "ignore". Determines how bounds
are handled. If "clip", confidence intervals are clipped at the bounds.
Standard errors are only adjusted if a sampling step is necessary due to
additional constraints. If "raise" and any lower or upper bound is binding,
we raise an Error. If "ignore", boundary problems are simply ignored.
design_info (pandas.DataFrame): DataFrame with one row per observation that
contains some or all of the variables "psu" (primary sampling unit),
"stratum" and "fpc" (finite population corrector). See
:ref:`robust_likelihood_inference` for details.
Returns:
dict: The estimated parameters, standard errors and covariance matrix of the
parameters.
"""
# ==================================================================================
# Check and process inputs
# ==================================================================================
is_optimized = optimize_options is False
check_optimization_options(
optimize_options,
usage="estimate_ml",
algorithm_mandatory=True,
)
jac_case = get_derivative_case(jacobian)
hess_case = get_derivative_case(hessian)
check_is_optimized_and_derivative_case(is_optimized, jac_case)
check_is_optimized_and_derivative_case(is_optimized, hess_case)
cov_cases = _get_cov_cases(jac_case, hess_case, design_info)
check_numdiff_options(numdiff_options, "estimate_ml")
numdiff_options = {} if numdiff_options in (None, False) else numdiff_options
constraints = [] if constraints is None else constraints
processed_constraints, _ = process_constraints(constraints, params)
# ==================================================================================
# Calculate estimates via maximization (if necessary)
# ==================================================================================
if is_optimized:
estimates = params
else:
opt_res = maximize(
criterion=loglike,
criterion_kwargs=loglike_kwargs,
params=params,
constraints=constraints,
derivative=derivative,
derivative_kwargs=derivative_kwargs,
criterion_and_derivative=loglike_and_derivative,
criterion_and_derivative_kwargs=loglike_and_derivative_kwargs,
logging=logging,
log_options=log_options,
**optimize_options,
)
estimates = opt_res["solution_params"]
# ==================================================================================
# Calculate internal jacobian
# ==================================================================================
deriv_to_internal = get_derivative_conversion_function(
params=params, constraints=constraints
)
if jac_case == "pre-calculated":
int_jac = deriv_to_internal(jacobian)
elif jac_case == "closed-form":
jacobian_kwargs = {} if jacobian_kwargs is None else jacobian_kwargs
_jac = jacobian(estimates, **jacobian_kwargs)
int_jac = deriv_to_internal(_jac)
# switch to "numerical" even if jac_case == "skip" because jac is required for ml.
elif jac_case == "numerical":
options = numdiff_options.copy()
options["key"] = "contributions"
deriv_res = get_internal_first_derivative(
func=loglike,
params=estimates,
constraints=constraints,
func_kwargs=loglike_kwargs,
numdiff_options=options,
)
int_jac = deriv_res["derivative"]
jac_numdiff_info = {k: v for k, v in deriv_res.items() if k != "derivative"}
else:
int_jac = None
# ==================================================================================
# Calculate internal Hessian (most of this is not yet implemented)
# ==================================================================================
if hess_case == "skip":
int_hess = None
elif hess_case == "numerical":
raise NotImplementedError("Numerical Hessian calculation is not yet supported.")
hess_numdiff_info = {}
elif hess_case in ("closed-form", "pre-calculated") and constraints:
raise NotImplementedError(
"Closed-form or pre-calculated Hessians are not yet compatible with "
"constraints."
)
else:
int_hess = hessian(estimates, **hessian_kwargs)
# ==================================================================================
# Calculate all available internal cov types
# ==================================================================================
int_covs = {}
if "jacobian" in cov_cases:
int_covs["cov_jacobian"] = cov_jacobian(int_jac)
if "hessian" in cov_cases:
int_covs["cov_hessian"] = cov_hessian(int_hess)
if "robust" in cov_cases:
int_covs["cov_robust"] = cov_robust(jac=int_jac, hess=int_hess)
if "cluster_robust" in cov_cases:
int_covs["cov_cluster_robust"] = cov_cluster_robust(
jac=int_jac, hess=int_hess, design_info=design_info
)
if "strata_robust" in cov_cases:
int_covs["cov_strata_robust"] = cov_strata_robust(
jac=int_jac, hess=int_hess, design_info=design_info
)
# ==================================================================================
# Calculate all available external covs and summaries
# ==================================================================================
covs = {}
summaries = {}
for case in cov_cases:
cov = transform_covariance(
params=estimates,
internal_cov=int_covs[f"cov_{case}"],
constraints=constraints,
n_samples=n_samples,
bounds_handling=bounds_handling,
)
summary = calculate_inference_quantities(
params=estimates,
free_cov=cov,
ci_level=ci_level,
)
covs[f"cov_{case}"] = cov
summaries[f"summary_{case}"] = summary
# ==================================================================================
# Calculate external jac and hess (if no transforming constraints)
# ==================================================================================
if not processed_constraints:
ext_jac = int_jac
ext_hess = int_hess
else:
ext_jac = "No external Jacobian defined due to constraints."
ext_hess = "No external Hessian defined due to constraints."
# ==================================================================================
# Construct output
# ==================================================================================
out = {
**summaries,
**covs,
"jacobian": ext_jac,
"hessian": ext_hess,
}
if not is_optimized:
out["optimize_res"] = opt_res
if jac_case == "numerical":
out["jacobian_numdiff_info"] = jac_numdiff_info
if hess_case == "numerical":
out["hessian_numdiff_info"] = hess_numdiff_info
return out
def _get_cov_cases(jac_case, hess_case, design_info):
if jac_case == "skip" and hess_case == "skip":
raise ValueError("Jacobian and Hessian cannot both be False.")
elif jac_case == "skip" and hess_case != "skip":
cases = ["hessian"]
elif hess_case == "skip" and jac_case != "skip":
cases = ["jacobian"]
else:
cases = ["jacobian", "hessian", "robust"]
if design_info is not None:
if "psu" in design_info:
cases.append("cluster_robust")
if {"strata", "psu", "fpc"}.issubset(design_info):
cases.append("strata_robust")
return cases
| from estimagic.inference.ml_covs import cov_cluster_robust
from estimagic.inference.ml_covs import cov_hessian
from estimagic.inference.ml_covs import cov_jacobian
from estimagic.inference.ml_covs import cov_robust
from estimagic.inference.ml_covs import cov_strata_robust
from estimagic.inference.shared import calculate_inference_quantities
from estimagic.inference.shared import check_is_optimized_and_derivative_case
from estimagic.inference.shared import get_derivative_case
from estimagic.inference.shared import get_internal_first_derivative
from estimagic.inference.shared import transform_covariance
from estimagic.optimization.optimize import maximize
from estimagic.parameters.parameter_conversion import get_derivative_conversion_function
from estimagic.parameters.process_constraints import process_constraints
from estimagic.shared.check_option_dicts import check_numdiff_options
from estimagic.shared.check_option_dicts import check_optimization_options
def estimate_ml(
loglike,
params,
optimize_options,
*,
constraints=None,
logging=False,
log_options=None,
loglike_kwargs=None,
derivative=None,
derivative_kwargs=None,
loglike_and_derivative=None,
loglike_and_derivative_kwargs=None,
numdiff_options=None,
jacobian=None,
jacobian_kwargs=None,
hessian=False,
hessian_kwargs=None,
ci_level=0.95,
n_samples=10_000,
bounds_handling="raise",
design_info=None,
):
"""Do a maximum likelihood (ml) estimation.
This is a high level interface of our lower level functions for maximization,
numerical differentiation and inference. It does the full workflow for maximum
likelihood estimation with just one function call.
While we have good defaults, you can still configure each aspect of each step
via the optional arguments of this function. If you find it easier to do the
"difficult" steps (mainly maximization and calculating numerical derivatives
of a potentially noisy function) separately, you can do so and just provide those
results as ``params``, ``jacobian`` and ``hessian``.
The docstring is aspirational and not all options are supported yet.
Args:
loglike (callable): Likelihood function that takes a params DataFrame (and
potentially other keyword arguments) and returns a dictionary that has at
least the entries "value" (a scalar float) and "contributions" (a 1d numpy
array or pandas Series) with the log likelihood contribution per individual.
params (pd.DataFrame): DataFrame where the "value" column contains the
estimated or start parameters of a likelihood model. See :ref:`params` for
details. If the supplied parameters are estimated parameters, set
optimize_options to False.
optimize_options (dict or False): Keyword arguments that govern the numerical
optimization. Valid entries are all arguments of
:func:`~estimagic.optimization.optimize.minimize` except for criterion,
derivative, criterion_and_derivative and params. If you pass False as
optimize_options you signal that ``params`` are already the optimal
parameters and no numerical optimization is needed.
constraints (list): List with constraint dictionaries.
See .. _link: ../../docs/source/how_to_guides/how_to_use_constraints.ipynb
logging (pathlib.Path, str or False): Path to sqlite3 file (which typically has
the file extension ``.db``. If the file does not exist, it will be created.
The dashboard can only be used when logging is used.
log_options (dict): Additional keyword arguments to configure the logging.
- "fast_logging": A boolean that determines if "unsafe" settings are used
to speed up write processes to the database. This should only be used for
very short running criterion functions where the main purpose of the log
is a real-time dashboard and it would not be catastrophic to get a
corrupted database in case of a sudden system shutdown. If one evaluation
of the criterion function (and gradient if applicable) takes more than
100 ms, the logging overhead is negligible.
- "if_table_exists": (str) One of "extend", "replace", "raise". What to
do if the tables we want to write to already exist. Default "extend".
- "if_database_exists": (str): One of "extend", "replace", "raise". What to
do if the database we want to write to already exists. Default "extend".
loglike_kwargs (dict): Additional keyword arguments for loglike.
derivative (callable): Function takes params and potentially other keyword
arguments and calculates the first derivative of loglike. It can either
return a numpy array or pandas Series/DataFrame with the derivative or
a dictionary with derivatives of each output of loglike. If loglike
returns a dict but derivative does not, it is your responsibility to
make sure that the correct derivative for the numerical optimizers you are
using is returned.
derivative_kwargs (dict): Additional keyword arguments for loglike.
loglike_and_derivative (callable): Return a tuple consisting of the result
of loglike and the result of derivative. Only use this if you can exploit
synergies in the calculation of loglike and derivative.
loglike_and_derivative_kwargs (dict): Additional keyword arguments for
loglike_and_derivative.
numdiff_options (dict): Keyword arguments for the calculation of numerical
derivatives for the calculation of standard errors. See
:ref:`first_derivative` for details.
jacobian (callable or pandas.DataFrame or False): A function that takes
``params`` and potentially other keyword arguments and returns the jacobian
of loglike["contributions"] with respect to the params. Alternatively, you
can pass a pandas.DataFrame with the Jacobian at the optimal parameters.
This is only possible if you pass ``optimize_options=False``. Note that you
only need to pass a Jacobian function if you have a closed form Jacobian but
decided not to return it as part of ``derivative`` (e.g. because you use
a scalar optimizer and can calculate a gradient in a way that is faster
than calculating and summing the Jacobian). If you pass None, a numerical
Jacobian will be calculated. If you pass ``False``, you signal that no
Jacobian should be calculated. Thus, no result that requires the Jacobian
will be calculated.
jacobian_kwargs (dict): Additional keyword arguments for the Jacobian function.
hessian (callable or pd.DataFrame): A function that takes
``params`` and potentially other keyword arguments and returns the Hessian
of loglike["value"] with respect to the params. Alternatively, you
can pass a pandas.DataFrame with the Hessian at the optimal parameters.
This is only possible if you pass ``optimize_options=False``. If you pass
None, a numerical Hessian will be calculated. If you pass ``False``, you
signal that no Hessian should be calculated. Thus, no result that requires
the Hessian will be calculated.
hessian_kwargs (dict): Additional keyword arguments for the Hessian function.
ci_level (float): Confidence level for the calculation of confidence intervals.
The default is 0.95.
n_samples (int): Number of samples used to transform the covariance matrix of
the internal parameter vector into the covariance matrix of the external
parameters. For background information about internal and external params
see :ref:`implementation_of_constraints`. This is only used if you have
specified constraints.
bounds_handling (str): One of "clip", "raise", "ignore". Determines how bounds
are handled. If "clip", confidence intervals are clipped at the bounds.
Standard errors are only adjusted if a sampling step is necessary due to
additional constraints. If "raise" and any lower or upper bound is binding,
we raise an Error. If "ignore", boundary problems are simply ignored.
design_info (pandas.DataFrame): DataFrame with one row per observation that
contains some or all of the variables "psu" (primary sampling unit),
"stratum" and "fpc" (finite population corrector). See
:ref:`robust_likelihood_inference` for details.
Returns:
dict: The estimated parameters, standard errors and covariance matrix of the
parameters.
"""
# ==================================================================================
# Check and process inputs
# ==================================================================================
is_optimized = optimize_options is False
check_optimization_options(
optimize_options,
usage="estimate_ml",
algorithm_mandatory=True,
)
jac_case = get_derivative_case(jacobian)
hess_case = get_derivative_case(hessian)
check_is_optimized_and_derivative_case(is_optimized, jac_case)
check_is_optimized_and_derivative_case(is_optimized, hess_case)
cov_cases = _get_cov_cases(jac_case, hess_case, design_info)
check_numdiff_options(numdiff_options, "estimate_ml")
numdiff_options = {} if numdiff_options in (None, False) else numdiff_options
constraints = [] if constraints is None else constraints
processed_constraints, _ = process_constraints(constraints, params)
# ==================================================================================
# Calculate estimates via maximization (if necessary)
# ==================================================================================
if is_optimized:
estimates = params
else:
opt_res = maximize(
criterion=loglike,
criterion_kwargs=loglike_kwargs,
params=params,
constraints=constraints,
derivative=derivative,
derivative_kwargs=derivative_kwargs,
criterion_and_derivative=loglike_and_derivative,
criterion_and_derivative_kwargs=loglike_and_derivative_kwargs,
logging=logging,
log_options=log_options,
**optimize_options,
)
estimates = opt_res["solution_params"]
# ==================================================================================
# Calculate internal jacobian
# ==================================================================================
deriv_to_internal = get_derivative_conversion_function(
params=params, constraints=constraints
)
if jac_case == "pre-calculated":
int_jac = deriv_to_internal(jacobian)
elif jac_case == "closed-form":
jacobian_kwargs = {} if jacobian_kwargs is None else jacobian_kwargs
_jac = jacobian(estimates, **jacobian_kwargs)
int_jac = deriv_to_internal(_jac)
# switch to "numerical" even if jac_case == "skip" because jac is required for ml.
elif jac_case == "numerical":
options = numdiff_options.copy()
options["key"] = "contributions"
deriv_res = get_internal_first_derivative(
func=loglike,
params=estimates,
constraints=constraints,
func_kwargs=loglike_kwargs,
numdiff_options=options,
)
int_jac = deriv_res["derivative"]
jac_numdiff_info = {k: v for k, v in deriv_res.items() if k != "derivative"}
else:
int_jac = None
# ==================================================================================
# Calculate internal Hessian (most of this is not yet implemented)
# ==================================================================================
if hess_case == "skip":
int_hess = None
elif hess_case == "numerical":
raise NotImplementedError("Numerical Hessian calculation is not yet supported.")
hess_numdiff_info = {}
elif hess_case in ("closed-form", "pre-calculated") and constraints:
raise NotImplementedError(
"Closed-form or pre-calculated Hessians are not yet compatible with "
"constraints."
)
else:
int_hess = hessian(estimates, **hessian_kwargs)
# ==================================================================================
# Calculate all available internal cov types
# ==================================================================================
int_covs = {}
if "jacobian" in cov_cases:
int_covs["cov_jacobian"] = cov_jacobian(int_jac)
if "hessian" in cov_cases:
int_covs["cov_hessian"] = cov_hessian(int_hess)
if "robust" in cov_cases:
int_covs["cov_robust"] = cov_robust(jac=int_jac, hess=int_hess)
if "cluster_robust" in cov_cases:
int_covs["cov_cluster_robust"] = cov_cluster_robust(
jac=int_jac, hess=int_hess, design_info=design_info
)
if "strata_robust" in cov_cases:
int_covs["cov_strata_robust"] = cov_strata_robust(
jac=int_jac, hess=int_hess, design_info=design_info
)
# ==================================================================================
# Calculate all available external covs and summaries
# ==================================================================================
covs = {}
summaries = {}
for case in cov_cases:
cov = transform_covariance(
params=estimates,
internal_cov=int_covs[f"cov_{case}"],
constraints=constraints,
n_samples=n_samples,
bounds_handling=bounds_handling,
)
summary = calculate_inference_quantities(
params=estimates,
free_cov=cov,
ci_level=ci_level,
)
covs[f"cov_{case}"] = cov
summaries[f"summary_{case}"] = summary
# ==================================================================================
# Calculate external jac and hess (if no transforming constraints)
# ==================================================================================
if not processed_constraints:
ext_jac = int_jac
ext_hess = int_hess
else:
ext_jac = "No external Jacobian defined due to constraints."
ext_hess = "No external Hessian defined due to constraints."
# ==================================================================================
# Construct output
# ==================================================================================
out = {
**summaries,
**covs,
"jacobian": ext_jac,
"hessian": ext_hess,
}
if not is_optimized:
out["optimize_res"] = opt_res
if jac_case == "numerical":
out["jacobian_numdiff_info"] = jac_numdiff_info
if hess_case == "numerical":
out["hessian_numdiff_info"] = hess_numdiff_info
return out
def _get_cov_cases(jac_case, hess_case, design_info):
if jac_case == "skip" and hess_case == "skip":
raise ValueError("Jacobian and Hessian cannot both be False.")
elif jac_case == "skip" and hess_case != "skip":
cases = ["hessian"]
elif hess_case == "skip" and jac_case != "skip":
cases = ["jacobian"]
else:
cases = ["jacobian", "hessian", "robust"]
if design_info is not None:
if "psu" in design_info:
cases.append("cluster_robust")
if {"strata", "psu", "fpc"}.issubset(design_info):
cases.append("strata_robust")
return cases
| en | 0.72766 | Do a maximum likelihood (ml) estimation. This is a high level interface of our lower level functions for maximization, numerical differentiation and inference. It does the full workflow for maximum likelihood estimation with just one function call. While we have good defaults, you can still configure each aspect of each step via the optional arguments of this function. If you find it easier to do the "difficult" steps (mainly maximization and calculating numerical derivatives of a potentially noisy function) separately, you can do so and just provide those results as ``params``, ``jacobian`` and ``hessian``. The docstring is aspirational and not all options are supported yet. Args: loglike (callable): Likelihood function that takes a params DataFrame (and potentially other keyword arguments) and returns a dictionary that has at least the entries "value" (a scalar float) and "contributions" (a 1d numpy array or pandas Series) with the log likelihood contribution per individual. params (pd.DataFrame): DataFrame where the "value" column contains the estimated or start parameters of a likelihood model. See :ref:`params` for details. If the supplied parameters are estimated parameters, set optimize_options to False. optimize_options (dict or False): Keyword arguments that govern the numerical optimization. Valid entries are all arguments of :func:`~estimagic.optimization.optimize.minimize` except for criterion, derivative, criterion_and_derivative and params. If you pass False as optimize_options you signal that ``params`` are already the optimal parameters and no numerical optimization is needed. constraints (list): List with constraint dictionaries. See .. _link: ../../docs/source/how_to_guides/how_to_use_constraints.ipynb logging (pathlib.Path, str or False): Path to sqlite3 file (which typically has the file extension ``.db``. If the file does not exist, it will be created. The dashboard can only be used when logging is used. log_options (dict): Additional keyword arguments to configure the logging. - "fast_logging": A boolean that determines if "unsafe" settings are used to speed up write processes to the database. This should only be used for very short running criterion functions where the main purpose of the log is a real-time dashboard and it would not be catastrophic to get a corrupted database in case of a sudden system shutdown. If one evaluation of the criterion function (and gradient if applicable) takes more than 100 ms, the logging overhead is negligible. - "if_table_exists": (str) One of "extend", "replace", "raise". What to do if the tables we want to write to already exist. Default "extend". - "if_database_exists": (str): One of "extend", "replace", "raise". What to do if the database we want to write to already exists. Default "extend". loglike_kwargs (dict): Additional keyword arguments for loglike. derivative (callable): Function takes params and potentially other keyword arguments and calculates the first derivative of loglike. It can either return a numpy array or pandas Series/DataFrame with the derivative or a dictionary with derivatives of each output of loglike. If loglike returns a dict but derivative does not, it is your responsibility to make sure that the correct derivative for the numerical optimizers you are using is returned. derivative_kwargs (dict): Additional keyword arguments for loglike. loglike_and_derivative (callable): Return a tuple consisting of the result of loglike and the result of derivative. 
Only use this if you can exploit synergies in the calculation of loglike and derivative. loglike_and_derivative_kwargs (dict): Additional keyword arguments for loglike_and_derivative. numdiff_options (dict): Keyword arguments for the calculation of numerical derivatives for the calculation of standard errors. See :ref:`first_derivative` for details. jacobian (callable or pandas.DataFrame or False): A function that takes ``params`` and potentially other keyword arguments and returns the jacobian of loglike["contributions"] with respect to the params. Alternatively, you can pass a pandas.DataFrame with the Jacobian at the optimal parameters. This is only possible if you pass ``optimize_options=False``. Note that you only need to pass a Jacobian function if you have a closed form Jacobian but decided not to return it as part of ``derivative`` (e.g. because you use a scalar optimizer and can calculate a gradient in a way that is faster than calculating and summing the Jacobian). If you pass None, a numerical Jacobian will be calculated. If you pass ``False``, you signal that no Jacobian should be calculated. Thus, no result that requires the Jacobian will be calculated. jacobian_kwargs (dict): Additional keyword arguments for the Jacobian function. hessian (callable or pd.DataFrame): A function that takes ``params`` and potentially other keyword arguments and returns the Hessian of loglike["value"] with respect to the params. Alternatively, you can pass a pandas.DataFrame with the Hessian at the optimal parameters. This is only possible if you pass ``optimize_options=False``. If you pass None, a numerical Hessian will be calculated. If you pass ``False``, you signal that no Hessian should be calculated. Thus, no result that requires the Hessian will be calculated. hessian_kwargs (dict): Additional keyword arguments for the Hessian function. ci_level (float): Confidence level for the calculation of confidence intervals. The default is 0.95. n_samples (int): Number of samples used to transform the covariance matrix of the internal parameter vector into the covariance matrix of the external parameters. For background information about internal and external params see :ref:`implementation_of_constraints`. This is only used if you have specified constraints. bounds_handling (str): One of "clip", "raise", "ignore". Determines how bounds are handled. If "clip", confidence intervals are clipped at the bounds. Standard errors are only adjusted if a sampling step is necessary due to additional constraints. If "raise" and any lower or upper bound is binding, we raise an Error. If "ignore", boundary problems are simply ignored. design_info (pandas.DataFrame): DataFrame with one row per observation that contains some or all of the variables "psu" (primary sampling unit), "stratum" and "fpc" (finite population corrector). See :ref:`robust_likelihood_inference` for details. Returns: dict: The estimated parameters, standard errors and covariance matrix of the parameters. 
# ================================================================================== # Check and process inputs # ================================================================================== # ================================================================================== # Calculate estimates via maximization (if necessary) # ================================================================================== # ================================================================================== # Calculate internal jacobian # ================================================================================== # switch to "numerical" even if jac_case == "skip" because jac is required for ml. # ================================================================================== # Calculate internal Hessian (most of this is not yet implemented) # ================================================================================== # ================================================================================== # Calculate all available internal cov types # ================================================================================== # ================================================================================== # Calculate all available external covs and summaries # ================================================================================== # ================================================================================== # Calculate external jac and hess (if no transforming constraints) # ================================================================================== # ================================================================================== # Construct output # ================================================================================== | 2.084175 | 2 |
neural_architecture_search_appendix_a.py | NunoEdgarGFlowHub/neural_architecture_search_with_reinforcement_learning_appendix_a | 68 | 7670 | import six
import chainer
import numpy as np
import chainer.links as L
import chainer.functions as F
import nutszebra_chainer
import functools
from collections import defaultdict
class Conv(nutszebra_chainer.Model):
def __init__(self, in_channel, out_channel, filter_size=(3, 3), stride=(1, 1), pad=(1, 1)):
super(Conv, self).__init__(
conv=L.Convolution2D(in_channel, out_channel, filter_size, stride, pad),
)
def weight_initialization(self):
self.conv.W.data = self.weight_relu_initialization(self.conv)
self.conv.b.data = self.bias_initialization(self.conv, constant=0)
def __call__(self, x, train=False):
return self.conv(x)
def count_parameters(self):
return functools.reduce(lambda a, b: a * b, self.conv.W.data.shape)
class Conv_ReLU_BN(nutszebra_chainer.Model):
def __init__(self, in_channel, out_channel, filter_size=(3, 3), stride=(1, 1), pad=(1, 1)):
super(Conv_ReLU_BN, self).__init__(
conv=L.Convolution2D(in_channel, out_channel, filter_size, stride, pad),
bn=L.BatchNormalization(out_channel),
)
def weight_initialization(self):
self.conv.W.data = self.weight_relu_initialization(self.conv)
self.conv.b.data = self.bias_initialization(self.conv, constant=0)
def __call__(self, x, train=False):
return self.bn(F.relu(self.conv(x)), test=not train)
def count_parameters(self):
return functools.reduce(lambda a, b: a * b, self.conv.W.data.shape)
class AppendixA(nutszebra_chainer.Model):
def __init__(self, category_num):
super(AppendixA, self).__init__()
out_channels = [36, 48, 36, 36, 48, 48, 48, 36, 36, 36, 36, 48, 48, 48, 48]
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14
skip_connections = [[0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]
filters = [(3, 3), (3, 3), (3, 3), (5, 5), (3, 7), (7, 7), (7, 7), (7, 3), (7, 1), (7, 7), (5, 7), (7, 7), (7, 5), (7, 5), (7, 5)]
modules = []
in_channel = 3
for i in six.moves.range(len(out_channels)):
modules += [('conv{}'.format(i), Conv_ReLU_BN(in_channel, out_channels[i], filters[i], 1, 0))]
in_channel = int(np.sum([out_channels[ii] for ii, s in enumerate(skip_connections) if s[i] == 1])) + out_channels[i]
modules += [('linear', Conv(out_channels[-1], category_num, 1, 1, 0))]
# register layers
[self.add_link(*link) for link in modules]
self.modules = modules
self.category_num = category_num
self.out_channels = out_channels
self.skip_connections = skip_connections
self.filters = filters
self.name = 'appndix_a_{}'.format(category_num)
def weight_initialization(self):
[link.weight_initialization() for _, link in self.modules]
def count_parameters(self):
return int(np.sum([link.count_parameters() for _, link in self.modules]))
@staticmethod
def _zero_pads(x, pad, axis):
if type(x.data) is not np.ndarray:
pad.to_gpu()
return F.concat((x, pad), axis=axis)
@staticmethod
def zero_pads(x, sizes):
batch, channel, height, width = x.data.shape
diff_height = sizes[2] - height
diff_width = sizes[3] - width
# pad along with height
if diff_height >= 1:
pad = chainer.Variable(np.zeros((batch, channel, diff_height, width), dtype=x.dtype), volatile=x.volatile)
x = AppendixA._zero_pads(x, pad, axis=2)
_, _, height, _ = x.data.shape
# pad along with width
if diff_width >= 1:
pad = chainer.Variable(np.zeros((batch, channel, height, diff_width), dtype=x.dtype), volatile=x.volatile)
x = AppendixA._zero_pads(x, pad, axis=3)
return x
def _max(a, b):
return (max(a[0], b[0]), max(a[1], b[1]), max(a[2], b[2]), max(a[3], b[3]))
@staticmethod
def concatenate(X):
sizes = (0, 0, 0, 0)
for x in X:
sizes = AppendixA._max(sizes, x.data.shape)
X = [AppendixA.zero_pads(x, sizes) for x in X]
return F.concat(X, axis=1)
def __call__(self, x, train=False):
x = [x]
outputs = []
for i in six.moves.range(len(self.out_channels)):
x = self['conv{}'.format(i)](self.concatenate(x), train=train)
outputs.append(x)
x = [outputs[ii] for ii, s in enumerate(self.skip_connections) if s[i] == 1] + [outputs[i]]
x = outputs[-1]
batch, channels, height, width = x.data.shape
x = F.reshape(F.average_pooling_2d(x, (height, width)), (batch, channels, 1, 1))
return F.reshape(self.linear(x, train), (batch, self.category_num))
def calc_loss(self, y, t):
loss = F.softmax_cross_entropy(y, t)
return loss
def accuracy(self, y, t, xp=np):
y.to_cpu()
t.to_cpu()
indices = np.where((t.data == np.argmax(y.data, axis=1)) == True)[0]
accuracy = defaultdict(int)
for i in indices:
accuracy[t.data[i]] += 1
indices = np.where((t.data == np.argmax(y.data, axis=1)) == False)[0]
false_accuracy = defaultdict(int)
false_y = np.argmax(y.data, axis=1)
for i in indices:
false_accuracy[(t.data[i], false_y[i])] += 1
return accuracy, false_accuracy
| import six
import chainer
import numpy as np
import chainer.links as L
import chainer.functions as F
import nutszebra_chainer
import functools
from collections import defaultdict
class Conv(nutszebra_chainer.Model):
def __init__(self, in_channel, out_channel, filter_size=(3, 3), stride=(1, 1), pad=(1, 1)):
super(Conv, self).__init__(
conv=L.Convolution2D(in_channel, out_channel, filter_size, stride, pad),
)
def weight_initialization(self):
self.conv.W.data = self.weight_relu_initialization(self.conv)
self.conv.b.data = self.bias_initialization(self.conv, constant=0)
def __call__(self, x, train=False):
return self.conv(x)
def count_parameters(self):
return functools.reduce(lambda a, b: a * b, self.conv.W.data.shape)
class Conv_ReLU_BN(nutszebra_chainer.Model):
def __init__(self, in_channel, out_channel, filter_size=(3, 3), stride=(1, 1), pad=(1, 1)):
super(Conv_ReLU_BN, self).__init__(
conv=L.Convolution2D(in_channel, out_channel, filter_size, stride, pad),
bn=L.BatchNormalization(out_channel),
)
def weight_initialization(self):
self.conv.W.data = self.weight_relu_initialization(self.conv)
self.conv.b.data = self.bias_initialization(self.conv, constant=0)
def __call__(self, x, train=False):
return self.bn(F.relu(self.conv(x)), test=not train)
def count_parameters(self):
return functools.reduce(lambda a, b: a * b, self.conv.W.data.shape)
class AppendixA(nutszebra_chainer.Model):
def __init__(self, category_num):
super(AppendixA, self).__init__()
out_channels = [36, 48, 36, 36, 48, 48, 48, 36, 36, 36, 36, 48, 48, 48, 48]
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14
skip_connections = [[0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]
filters = [(3, 3), (3, 3), (3, 3), (5, 5), (3, 7), (7, 7), (7, 7), (7, 3), (7, 1), (7, 7), (5, 7), (7, 7), (7, 5), (7, 5), (7, 5)]
modules = []
in_channel = 3
for i in six.moves.range(len(out_channels)):
modules += [('conv{}'.format(i), Conv_ReLU_BN(in_channel, out_channels[i], filters[i], 1, 0))]
in_channel = int(np.sum([out_channels[ii] for ii, s in enumerate(skip_connections) if s[i] == 1])) + out_channels[i]
modules += [('linear', Conv(out_channels[-1], category_num, 1, 1, 0))]
# register layers
[self.add_link(*link) for link in modules]
self.modules = modules
self.category_num = category_num
self.out_channels = out_channels
self.skip_connections = skip_connections
self.filters = filters
self.name = 'appndix_a_{}'.format(category_num)
def weight_initialization(self):
[link.weight_initialization() for _, link in self.modules]
def count_parameters(self):
return int(np.sum([link.count_parameters() for _, link in self.modules]))
@staticmethod
def _zero_pads(x, pad, axis):
if type(x.data) is not np.ndarray:
pad.to_gpu()
return F.concat((x, pad), axis=axis)
@staticmethod
def zero_pads(x, sizes):
batch, channel, height, width = x.data.shape
diff_height = sizes[2] - height
diff_width = sizes[3] - width
# pad along with height
if diff_height >= 1:
pad = chainer.Variable(np.zeros((batch, channel, diff_height, width), dtype=x.dtype), volatile=x.volatile)
x = AppendixA._zero_pads(x, pad, axis=2)
_, _, height, _ = x.data.shape
# pad along with width
if diff_width >= 1:
pad = chainer.Variable(np.zeros((batch, channel, height, diff_width), dtype=x.dtype), volatile=x.volatile)
x = AppendixA._zero_pads(x, pad, axis=3)
return x
def _max(a, b):
return (max(a[0], b[0]), max(a[1], b[1]), max(a[2], b[2]), max(a[3], b[3]))
@staticmethod
def concatenate(X):
sizes = (0, 0, 0, 0)
for x in X:
sizes = AppendixA._max(sizes, x.data.shape)
X = [AppendixA.zero_pads(x, sizes) for x in X]
return F.concat(X, axis=1)
def __call__(self, x, train=False):
x = [x]
outputs = []
for i in six.moves.range(len(self.out_channels)):
x = self['conv{}'.format(i)](self.concatenate(x), train=train)
outputs.append(x)
x = [outputs[ii] for ii, s in enumerate(self.skip_connections) if s[i] == 1] + [outputs[i]]
x = outputs[-1]
batch, channels, height, width = x.data.shape
x = F.reshape(F.average_pooling_2d(x, (height, width)), (batch, channels, 1, 1))
return F.reshape(self.linear(x, train), (batch, self.category_num))
def calc_loss(self, y, t):
loss = F.softmax_cross_entropy(y, t)
return loss
def accuracy(self, y, t, xp=np):
y.to_cpu()
t.to_cpu()
indices = np.where((t.data == np.argmax(y.data, axis=1)) == True)[0]
accuracy = defaultdict(int)
for i in indices:
accuracy[t.data[i]] += 1
indices = np.where((t.data == np.argmax(y.data, axis=1)) == False)[0]
false_accuracy = defaultdict(int)
false_y = np.argmax(y.data, axis=1)
for i in indices:
false_accuracy[(t.data[i], false_y[i])] += 1
return accuracy, false_accuracy
| en | 0.858144 | # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 # register layers # pad along with height # pad along with width | 2.467678 | 2 |
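The concatenate/zero_pads helpers above merge skip-connection feature maps whose spatial sizes may differ. A NumPy-only sketch of that idea (pad every map to the largest height/width, then stack along the channel axis); the shapes below are invented for illustration and no Chainer API is used:
import numpy as np
def pad_to(x, height, width):
    # x: (batch, channels, h, w); zero-pad on the bottom/right up to (height, width)
    b, c, h, w = x.shape
    out = np.zeros((b, c, height, width), dtype=x.dtype)
    out[:, :, :h, :w] = x
    return out
def concat_feature_maps(maps):
    # pad all maps to the largest spatial size, then concatenate on the channel axis
    max_h = max(m.shape[2] for m in maps)
    max_w = max(m.shape[3] for m in maps)
    return np.concatenate([pad_to(m, max_h, max_w) for m in maps], axis=1)
a = np.ones((1, 36, 30, 30), dtype=np.float32)  # e.g. output of an earlier layer
b = np.ones((1, 48, 26, 26), dtype=np.float32)  # e.g. output of the previous layer
print(concat_feature_maps([a, b]).shape)  # (1, 84, 30, 30)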
test/test_proportions_delta.py | quizlet/abracadabra | 24 | 7671 | <reponame>quizlet/abracadabra
import pytest
from abra import Experiment, HypothesisTest
def test_large_proportions_delta_expermiment(proportions_data_large):
exp = Experiment(proportions_data_large, name='proportions-test')
# run 'A/A' test
test_aa = HypothesisTest(
metric='metric',
control='A', variation='A',
hypothesis='larger',
inference_method='proportions_delta'
)
results_aa = exp.run_test(test_aa)
assert results_aa.test_statistic == 'z'
assert not results_aa.accept_hypothesis
# run A/B test
test_ab = HypothesisTest(
metric='metric',
control='A', variation='B',
hypothesis='larger',
inference_method='proportions_delta'
)
results_ab = exp.run_test(test_ab)
assert results_ab.test_statistic == 'z'
assert results_ab.accept_hypothesis
def test_proportions_delta_ab_unequal(proportions_data_small):
exp = Experiment(proportions_data_small, name='proportions-test')
# run A/B test
test_ab = HypothesisTest(
metric='metric',
control='A', variation='F',
hypothesis='unequal',
inference_method='proportions_delta'
)
results_ab = exp.run_test(test_ab)
assert results_ab.test_statistic == 'z'
assert results_ab.accept_hypothesis
def test_proportions_delta_ab_larger(proportions_data_small):
exp = Experiment(proportions_data_small, name='proportions-test')
# run A/B test
test_ab = HypothesisTest(
metric='metric',
control='A', variation='F',
hypothesis='larger',
inference_method='proportions_delta'
)
results_ab = exp.run_test(test_ab)
assert results_ab.accept_hypothesis
def test_proportions_delta_ab_smaller(proportions_data_small):
exp = Experiment(proportions_data_small, name='proportions-test')
# run A/B test
test_ab = HypothesisTest(
metric='metric',
control='A', variation='F',
hypothesis='smaller',
inference_method='proportions_delta'
)
results_ab = exp.run_test(test_ab)
assert not results_ab.accept_hypothesis
def test_proportions_delta_aa(proportions_data_small):
exp = Experiment(proportions_data_small, name='proportions-test')
# run A/A test
test_aa = HypothesisTest(
metric='metric',
control='A', variation='A',
hypothesis='larger',
inference_method='proportions_delta'
)
results_aa = exp.run_test(test_aa)
assert not results_aa.accept_hypothesis
def test_proportions_delta_experiment_t(proportions_data_small):
"""Small sample sizes defautl to t-tests"""
exp = Experiment(proportions_data_small.sample(29), name='proportions-test')
test_aa = HypothesisTest(
metric='metric',
control='A', variation='A',
hypothesis='unequal',
inference_method='means_delta'
)
results_aa = exp.run_test(test_aa)
assert results_aa.test_statistic == 't' | import pytest
from abra import Experiment, HypothesisTest
def test_large_proportions_delta_expermiment(proportions_data_large):
exp = Experiment(proportions_data_large, name='proportions-test')
# run 'A/A' test
test_aa = HypothesisTest(
metric='metric',
control='A', variation='A',
hypothesis='larger',
inference_method='proportions_delta'
)
results_aa = exp.run_test(test_aa)
assert results_aa.test_statistic == 'z'
assert not results_aa.accept_hypothesis
# run A/B test
test_ab = HypothesisTest(
metric='metric',
control='A', variation='B',
hypothesis='larger',
inference_method='proportions_delta'
)
results_ab = exp.run_test(test_ab)
assert results_ab.test_statistic == 'z'
assert results_ab.accept_hypothesis
def test_proportions_delta_ab_unequal(proportions_data_small):
exp = Experiment(proportions_data_small, name='proportions-test')
# run A/B test
test_ab = HypothesisTest(
metric='metric',
control='A', variation='F',
hypothesis='unequal',
inference_method='proportions_delta'
)
results_ab = exp.run_test(test_ab)
assert results_ab.test_statistic == 'z'
assert results_ab.accept_hypothesis
def test_proportions_delta_ab_larger(proportions_data_small):
exp = Experiment(proportions_data_small, name='proportions-test')
# run A/B test
test_ab = HypothesisTest(
metric='metric',
control='A', variation='F',
hypothesis='larger',
inference_method='proportions_delta'
)
results_ab = exp.run_test(test_ab)
assert results_ab.accept_hypothesis
def test_proportions_delta_ab_smaller(proportions_data_small):
exp = Experiment(proportions_data_small, name='proportions-test')
# run A/B test
test_ab = HypothesisTest(
metric='metric',
control='A', variation='F',
hypothesis='smaller',
inference_method='proportions_delta'
)
results_ab = exp.run_test(test_ab)
assert not results_ab.accept_hypothesis
def test_proportions_delta_aa(proportions_data_small):
exp = Experiment(proportions_data_small, name='proportions-test')
# run A/A test
test_aa = HypothesisTest(
metric='metric',
control='A', variation='A',
hypothesis='larger',
inference_method='proportions_delta'
)
results_aa = exp.run_test(test_aa)
assert not results_aa.accept_hypothesis
def test_proportions_delta_experiment_t(proportions_data_small):
"""Small sample sizes defautl to t-tests"""
exp = Experiment(proportions_data_small.sample(29), name='proportions-test')
test_aa = HypothesisTest(
metric='metric',
control='A', variation='A',
hypothesis='unequal',
inference_method='means_delta'
)
results_aa = exp.run_test(test_aa)
    assert results_aa.test_statistic == 't' | en | 0.41088 | # run 'A/A' test # run A/B test # run A/B test # run A/B test # run A/B test # run A/A test Small sample sizes default to t-tests | 2.370993 | 2
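These tests drive abra's proportions_delta inference. As a rough reference for what a two-proportion z-test computes, here is a hand-rolled version using SciPy; the counts are invented for illustration and the abra API is not used:
import numpy as np
from scipy.stats import norm
def two_proportion_ztest(success_a, n_a, success_b, n_b):
    # pooled-proportion z statistic, one-sided "larger" alternative (p_b > p_a)
    p_a, p_b = success_a / n_a, success_b / n_b
    p_pool = (success_a + success_b) / (n_a + n_b)
    se = np.sqrt(p_pool * (1 - p_pool) * (1 / n_a + 1 / n_b))
    z = (p_b - p_a) / se
    return z, norm.sf(z)
z, p = two_proportion_ztest(120, 1000, 150, 1000)
print(round(z, 3), round(p, 4))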
src/bootils/plugins/core/jsw.py | Build-The-Web/bootils | 3 | 7672 | # -*- coding: utf-8 -*-
# pylint: disable=
""" Tanuki Java Service Wrapper runtime environment.
Debian JSW paths (Wheezy 3.5.3; Jessie 3.5.22)::
/usr/sbin/wrapper – ELF executable
/usr/share/wrapper/daemon.sh
/usr/share/wrapper/make-wrapper-init.sh
/usr/share/wrapper/wrapper.conf
"""
# Copyright © 2015 1&1 Group <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, unicode_literals, print_function
from ..loader import PluginBase
class JavaServiceWrapper(PluginBase):
"""Tanuki Java Service Wrapper runtime environment."""
def control_start(self, *args, **options):
"""Start a Java service."""
print("*** JSW START ***")
return True # TODO: actually implement this
def control_stop(self, *args, **options):
"""Stop a Java service."""
return False # TODO: actually implement this
| # -*- coding: utf-8 -*-
# pylint: disable=
""" Tanuki Java Service Wrapper runtime environment.
Debian JSW paths (Wheezy 3.5.3; Jessie 3.5.22)::
/usr/sbin/wrapper – ELF executable
/usr/share/wrapper/daemon.sh
/usr/share/wrapper/make-wrapper-init.sh
/usr/share/wrapper/wrapper.conf
"""
# Copyright © 2015 1&1 Group <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, unicode_literals, print_function
from ..loader import PluginBase
class JavaServiceWrapper(PluginBase):
"""Tanuki Java Service Wrapper runtime environment."""
def control_start(self, *args, **options):
"""Start a Java service."""
print("*** JSW START ***")
return True # TODO: actually implement this
def control_stop(self, *args, **options):
"""Stop a Java service."""
return False # TODO: actually implement this
| en | 0.748937 | # -*- coding: utf-8 -*- # pylint: disable= Tanuki Java Service Wrapper runtime environment. Debian JSW paths (Wheezy 3.5.3; Jessie 3.5.22):: /usr/sbin/wrapper – ELF executable /usr/share/wrapper/daemon.sh /usr/share/wrapper/make-wrapper-init.sh /usr/share/wrapper/wrapper.conf # Copyright © 2015 1&1 Group <<EMAIL>> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Tanuki Java Service Wrapper runtime environment. Start a Java service. # TODO: actually implement this Stop a Java service. # TODO: actually implement this | 1.804404 | 2 |
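control_start and control_stop above are still TODO stubs. One plausible way to implement them would be to shell out to the Debian daemon script listed in the module docstring; the script arguments and service-name handling below are assumptions, not part of the original plugin:
import subprocess
DAEMON_SCRIPT = "/usr/share/wrapper/daemon.sh"  # path taken from the docstring above
def control_service(action, service_name):
    # action is "start" or "stop"; True means the wrapper script exited cleanly
    result = subprocess.run([DAEMON_SCRIPT, action, service_name],
                            capture_output=True, text=True)
    if result.returncode != 0:
        print(result.stderr)
    return result.returncode == 0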
MachineLearning/hw1/models/LinearRegression.py | ChoKyuWon/SchoolProjects | 0 | 7673 | import numpy as np
class LinearRegression:
def __init__(self, num_features):
self.num_features = num_features
self.W = np.zeros((self.num_features, 1))
def train(self, x, y, epochs, batch_size, lr, optim):
final_loss = None # loss of final epoch
# Training should be done for 'epochs' times with minibatch size of 'batch_size'
# The function 'train' should return the loss of final epoch
# Loss of an epoch is calculated as an average of minibatch losses
# ========================= EDIT HERE ========================
        # xline is matched with the n-th y. f(xline)=yi
final_loss=0
num_data=len(y)
k=0
def dlossF(k, j):
s=0
size = batch_size
for Xi, Yi in zip(x[k:k+batch_size], y[k:k+batch_size]):
fx=np.transpose(Xi).dot(self.W)
s = s + (fx-Yi)*Xi[j]
if (num_data - k) < batch_size:
size = num_data - k
return s/size
for iterative in range(0, epochs):
k = k + batch_size
if k == num_data:
k = batch_size
grad = np.zeros((self.num_features, 1))
for j in range(0, self.num_features):
grad[j] = dlossF(k, j)
self.W = optim.update(self.W, grad, lr)
# ============================================================
return final_loss
def eval(self, x):
pred = None
# Evaluation Function
# Given the input 'x', the function should return prediction for 'x'
# ========================= EDIT HERE ========================
ylist=[]
for xline in x:
y = np.transpose(xline).dot(self.W)
ylist.append(y[0])
pred = np.array(ylist)
# ============================================================
return pred
| import numpy as np
class LinearRegression:
def __init__(self, num_features):
self.num_features = num_features
self.W = np.zeros((self.num_features, 1))
def train(self, x, y, epochs, batch_size, lr, optim):
final_loss = None # loss of final epoch
# Training should be done for 'epochs' times with minibatch size of 'batch_size'
# The function 'train' should return the loss of final epoch
# Loss of an epoch is calculated as an average of minibatch losses
# ========================= EDIT HERE ========================
        # xline is matched with the n-th y. f(xline)=yi
final_loss=0
num_data=len(y)
k=0
def dlossF(k, j):
s=0
size = batch_size
for Xi, Yi in zip(x[k:k+batch_size], y[k:k+batch_size]):
fx=np.transpose(Xi).dot(self.W)
s = s + (fx-Yi)*Xi[j]
if (num_data - k) < batch_size:
size = num_data - k
return s/size
for iterative in range(0, epochs):
k = k + batch_size
if k == num_data:
k = batch_size
grad = np.zeros((self.num_features, 1))
for j in range(0, self.num_features):
grad[j] = dlossF(k, j)
self.W = optim.update(self.W, grad, lr)
# ============================================================
return final_loss
def eval(self, x):
pred = None
# Evaluation Function
# Given the input 'x', the function should return prediction for 'x'
# ========================= EDIT HERE ========================
ylist=[]
for xline in x:
y = np.transpose(xline).dot(self.W)
ylist.append(y[0])
pred = np.array(ylist)
# ============================================================
return pred
| en | 0.653711 | # loss of final epoch # Training should be done for 'epochs' times with minibatch size of 'batch_size' # The function 'train' should return the loss of final epoch # Loss of an epoch is calculated as an average of minibatch losses # ========================= EDIT HERE ======================== # xline is matched with the n-th y. f(xline)=yi # ============================================================ # Evaluation Function # Given the input 'x', the function should return prediction for 'x' # ========================= EDIT HERE ======================== # ============================================================ | 3.032826 | 3 |
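The homework loop above advances k before the first batch is used and never fills in final_loss. A vectorized NumPy sketch of the intended training loop, returning the average minibatch loss of the last epoch; treating 'epochs' as full passes over the data is an assumption about the assignment's intent:
import numpy as np
def train_linear_regression(x, y, epochs, batch_size, lr):
    # x: (n_samples, n_features), y: (n_samples,); plain minibatch gradient descent
    n, d = x.shape
    w = np.zeros((d, 1))
    y = y.reshape(-1, 1)
    final_loss = None
    for _ in range(epochs):
        batch_losses = []
        for start in range(0, n, batch_size):
            xb, yb = x[start:start + batch_size], y[start:start + batch_size]
            err = xb @ w - yb
            batch_losses.append(float(np.mean(err ** 2)))
            grad = xb.T @ err / len(xb)  # gradient of 0.5 * MSE for this minibatch
            w -= lr * grad
        final_loss = float(np.mean(batch_losses))  # average minibatch loss, last epoch
    return w, final_loss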
apps/bc_scraper/actions/schedule.py | aurmeneta/ramos-uc | 7 | 7674 | <reponame>aurmeneta/ramos-uc<filename>apps/bc_scraper/actions/schedule.py
from copy import copy
DEFAULT_SCHEDULE = {}
for day in "lmwjvs":
for mod in "12345678":
DEFAULT_SCHEDULE[day + mod] = "'FREE'"
def process_schedule(text_sc):
"""For a given schedule text in BC format, returns the SQL queries for inserting
    the full schedule and schedule info. Those queries still have to be formatted with the section ID.
"""
### Full Schedule
data = text_sc.split("\nROW: ")[1:]
# data rows -> day-day:module,module <> type <> room <><>
schedule = copy(DEFAULT_SCHEDULE)
for row in data:
row = row.split("<>")[:2]
horario = row[0].split(":")
days = horario[0].split("-")
modules = horario[1].split(",")
for day in days:
for mod in modules:
if len(day) and len(mod):
schedule[day.lower() + mod] = "'" + row[1] + "'"
cols = ",".join(schedule.keys())
values = ",".join(schedule.values())
full_sc_query = (
f"INSERT INTO courses_fullschedule (section_id, {cols}) VALUES (%s, {values});"
)
### Info Schedule
schedule_info = {"total": 0}
for type in ["AYU", "CLAS", "LAB", "PRA", "SUP", "TAL", "TER", "TES"]:
schedule_info[type] = list(schedule.values()).count("'" + type + "'")
schedule_info["total"] += schedule_info[type]
schedule_info[type] = str(schedule_info[type])
schedule_info["total"] = str(schedule_info["total"])
cols = ",".join(schedule_info.keys())
values = ",".join(schedule_info.values())
info_sc_query = (
f"INSERT INTO courses_scheduleinfo (section_id, {cols}) VALUES (%s, {values});"
)
return full_sc_query, info_sc_query
| from copy import copy
DEFAULT_SCHEDULE = {}
for day in "lmwjvs":
for mod in "12345678":
DEFAULT_SCHEDULE[day + mod] = "'FREE'"
def process_schedule(text_sc):
"""For a given schedule text in BC format, returns the SQL queries for inserting
    the full schedule and schedule info. Those queries still have to be formatted with the section ID.
"""
### Full Schedule
data = text_sc.split("\nROW: ")[1:]
# data rows -> day-day:module,module <> type <> room <><>
schedule = copy(DEFAULT_SCHEDULE)
for row in data:
row = row.split("<>")[:2]
horario = row[0].split(":")
days = horario[0].split("-")
modules = horario[1].split(",")
for day in days:
for mod in modules:
if len(day) and len(mod):
schedule[day.lower() + mod] = "'" + row[1] + "'"
cols = ",".join(schedule.keys())
values = ",".join(schedule.values())
full_sc_query = (
f"INSERT INTO courses_fullschedule (section_id, {cols}) VALUES (%s, {values});"
)
### Info Schedule
schedule_info = {"total": 0}
for type in ["AYU", "CLAS", "LAB", "PRA", "SUP", "TAL", "TER", "TES"]:
schedule_info[type] = list(schedule.values()).count("'" + type + "'")
schedule_info["total"] += schedule_info[type]
schedule_info[type] = str(schedule_info[type])
schedule_info["total"] = str(schedule_info["total"])
cols = ",".join(schedule_info.keys())
values = ",".join(schedule_info.values())
info_sc_query = (
f"INSERT INTO courses_scheduleinfo (section_id, {cols}) VALUES (%s, {values});"
)
return full_sc_query, info_sc_query | en | 0.542325 | For a given schedule text in BC format, returns the SQL queries for inserting the full schedule and schedule info. Those queries have to format ID. ### Full Schedule # data rows -> day-day:module,module <> type <> room <><> ### Info Schedule | 2.978312 | 3 |
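A quick usage sketch for process_schedule. The BC-format text below is made up but follows the "ROW: days:modules<>type<>room<><>" layout the parser splits on, and the returned queries still expect the section id as the %s parameter when executed:
sample = (
    "HEADER\n"
    "ROW: L-W:1,2<>CLAS<>BC25<><>\n"
    "ROW: J:3<>AYU<>BC25<><>"
)
full_sc_query, info_sc_query = process_schedule(sample)
print(full_sc_query)   # INSERT INTO courses_fullschedule (section_id, ...) VALUES (%s, ...);
print(info_sc_query)   # INSERT INTO courses_scheduleinfo (section_id, ...) VALUES (%s, ...);
# with a DB-API cursor (hypothetical): cursor.execute(full_sc_query, (section_id,))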
openstack_dashboard/dashboards/admin/volumes/views.py | NunoEdgarGFlowHub/horizon | 1 | 7675 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Admin views for managing volumes and snapshots.
"""
from collections import OrderedDict
from django.conf import settings
from django.urls import reverse
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon.utils import memoized
from openstack_dashboard.api import cinder
from openstack_dashboard.api import keystone
from openstack_dashboard.dashboards.admin.volumes \
import forms as volumes_forms
from openstack_dashboard.dashboards.admin.volumes \
import tables as volumes_tables
from openstack_dashboard.dashboards.admin.volumes \
import tabs as volumes_tabs
from openstack_dashboard.dashboards.project.volumes \
import views as volumes_views
class VolumesView(tables.PagedTableMixin, volumes_views.VolumeTableMixIn,
tables.DataTableView):
table_class = volumes_tables.VolumesTable
page_title = _("Volumes")
FILTERS_MAPPING = {'bootable': {_('yes'): 'true', _('no'): 'false'},
'encrypted': {_('yes'): True, _('no'): False}}
def get_data(self):
default_filters = {'all_tenants': True}
filters = self.get_filters(default_filters.copy())
filter_first = getattr(settings, 'FILTER_DATA_FIRST', {})
volumes = []
self.table.needs_filter_first = False
if filter_first.get('admin.volumes', False) and \
len(filters) == len(default_filters):
self.table.needs_filter_first = True
return volumes
if 'project' in filters:
# Keystone returns a tuple ([],false) where the first element is
# tenant list that's why the 0 is hardcoded below
tenants = keystone.tenant_list(self.request)[0]
tenant_ids = [t.id for t in tenants
if t.name == filters['project']]
if not tenant_ids:
return []
del filters['project']
for id in tenant_ids:
filters['project_id'] = id
volumes += self._get_volumes(search_opts=filters)
else:
volumes = self._get_volumes(search_opts=filters)
attached_instance_ids = self._get_attached_instance_ids(volumes)
instances = self._get_instances(search_opts={'all_tenants': True},
instance_ids=attached_instance_ids)
volume_ids_with_snapshots = self._get_volumes_ids_with_snapshots(
search_opts={'all_tenants': True})
self._set_volume_attributes(
volumes, instances, volume_ids_with_snapshots)
# Gather our tenants to correlate against IDs
try:
tenants, has_more = keystone.tenant_list(self.request)
except Exception:
tenants = []
msg = _('Unable to retrieve volume project information.')
exceptions.handle(self.request, msg)
tenant_dict = OrderedDict([(t.id, t) for t in tenants])
for volume in volumes:
tenant_id = getattr(volume, "os-vol-tenant-attr:tenant_id", None)
tenant = tenant_dict.get(tenant_id, None)
volume.tenant_name = getattr(tenant, "name", None)
return volumes
def get_filters(self, filters):
self.table = self._tables['volumes']
self.handle_server_filter(self.request, table=self.table)
self.update_server_filter_action(self.request, table=self.table)
filters = super(VolumesView, self).get_filters(filters,
self.FILTERS_MAPPING)
return filters
class DetailView(volumes_views.DetailView):
tab_group_class = volumes_tabs.VolumeDetailTabs
def get_context_data(self, **kwargs):
context = super(DetailView, self).get_context_data(**kwargs)
table = volumes_tables.VolumesTable(self.request)
context["actions"] = table.render_row_actions(context["volume"])
return context
def get_search_opts(self, volume):
search_opts = super(DetailView, self).get_search_opts(volume)
search_opts['all_tenants'] = True
return search_opts
def get_redirect_url(self):
return reverse('horizon:admin:volumes:index')
class ManageVolumeView(forms.ModalFormView):
form_class = volumes_forms.ManageVolume
template_name = 'admin/volumes/manage_volume.html'
form_id = "manage_volume_modal"
submit_label = _("Manage")
success_url = reverse_lazy('horizon:admin:volumes:index')
submit_url = reverse_lazy('horizon:admin:volumes:manage')
cancel_url = reverse_lazy("horizon:admin:volumes:index")
page_title = _("Manage Volume")
def get_context_data(self, **kwargs):
context = super(ManageVolumeView, self).get_context_data(**kwargs)
return context
class UnmanageVolumeView(forms.ModalFormView):
form_class = volumes_forms.UnmanageVolume
template_name = 'admin/volumes/unmanage_volume.html'
form_id = "unmanage_volume_modal"
submit_label = _("Unmanage")
success_url = reverse_lazy('horizon:admin:volumes:index')
submit_url = 'horizon:admin:volumes:unmanage'
cancel_url = reverse_lazy("horizon:admin:volumes:index")
page_title = _("Unmanage Volume")
def get_context_data(self, **kwargs):
context = super(UnmanageVolumeView, self).get_context_data(**kwargs)
args = (self.kwargs['volume_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
@memoized.memoized_method
def get_data(self):
try:
volume_id = self.kwargs['volume_id']
volume = cinder.volume_get(self.request, volume_id)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve volume details.'),
redirect=self.success_url)
return volume
def get_initial(self):
volume = self.get_data()
return {'volume_id': self.kwargs["volume_id"],
'name': volume.name,
'host': getattr(volume, "os-vol-host-attr:host")}
class MigrateVolumeView(forms.ModalFormView):
form_class = volumes_forms.MigrateVolume
template_name = 'admin/volumes/migrate_volume.html'
form_id = "migrate_volume_modal"
submit_label = _("Migrate")
success_url = reverse_lazy('horizon:admin:volumes:index')
submit_url = 'horizon:admin:volumes:migrate'
cancel_url = reverse_lazy("horizon:admin:volumes:index")
page_title = _("Migrate Volume")
def get_context_data(self, **kwargs):
context = super(MigrateVolumeView, self).get_context_data(**kwargs)
args = (self.kwargs['volume_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
@memoized.memoized_method
def get_data(self):
try:
volume_id = self.kwargs['volume_id']
volume = cinder.volume_get(self.request, volume_id)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve volume details.'),
redirect=self.success_url)
return volume
@memoized.memoized_method
def get_hosts(self):
try:
return cinder.pool_list(self.request)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve pools information.'),
redirect=self.success_url)
def get_initial(self):
volume = self.get_data()
return {'volume_id': self.kwargs["volume_id"],
'name': volume.name,
'current_host': getattr(volume, "os-vol-host-attr:host"),
'hosts': self.get_hosts()}
class UpdateStatusView(forms.ModalFormView):
form_class = volumes_forms.UpdateStatus
modal_id = "update_volume_status_modal"
template_name = 'admin/volumes/update_status.html'
submit_label = _("Update Status")
submit_url = "horizon:admin:volumes:update_status"
success_url = reverse_lazy('horizon:admin:volumes:index')
page_title = _("Update Volume Status")
def get_context_data(self, **kwargs):
context = super(UpdateStatusView, self).get_context_data(**kwargs)
context["volume_id"] = self.kwargs['volume_id']
args = (self.kwargs['volume_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
@memoized.memoized_method
def get_data(self):
try:
volume_id = self.kwargs['volume_id']
volume = cinder.volume_get(self.request, volume_id)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve volume details.'),
redirect=self.success_url)
return volume
def get_initial(self):
volume = self.get_data()
return {'volume_id': self.kwargs["volume_id"],
'status': volume.status}
| # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Admin views for managing volumes and snapshots.
"""
from collections import OrderedDict
from django.conf import settings
from django.urls import reverse
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon.utils import memoized
from openstack_dashboard.api import cinder
from openstack_dashboard.api import keystone
from openstack_dashboard.dashboards.admin.volumes \
import forms as volumes_forms
from openstack_dashboard.dashboards.admin.volumes \
import tables as volumes_tables
from openstack_dashboard.dashboards.admin.volumes \
import tabs as volumes_tabs
from openstack_dashboard.dashboards.project.volumes \
import views as volumes_views
class VolumesView(tables.PagedTableMixin, volumes_views.VolumeTableMixIn,
tables.DataTableView):
table_class = volumes_tables.VolumesTable
page_title = _("Volumes")
FILTERS_MAPPING = {'bootable': {_('yes'): 'true', _('no'): 'false'},
'encrypted': {_('yes'): True, _('no'): False}}
def get_data(self):
default_filters = {'all_tenants': True}
filters = self.get_filters(default_filters.copy())
filter_first = getattr(settings, 'FILTER_DATA_FIRST', {})
volumes = []
self.table.needs_filter_first = False
if filter_first.get('admin.volumes', False) and \
len(filters) == len(default_filters):
self.table.needs_filter_first = True
return volumes
if 'project' in filters:
# Keystone returns a tuple ([],false) where the first element is
# tenant list that's why the 0 is hardcoded below
tenants = keystone.tenant_list(self.request)[0]
tenant_ids = [t.id for t in tenants
if t.name == filters['project']]
if not tenant_ids:
return []
del filters['project']
for id in tenant_ids:
filters['project_id'] = id
volumes += self._get_volumes(search_opts=filters)
else:
volumes = self._get_volumes(search_opts=filters)
attached_instance_ids = self._get_attached_instance_ids(volumes)
instances = self._get_instances(search_opts={'all_tenants': True},
instance_ids=attached_instance_ids)
volume_ids_with_snapshots = self._get_volumes_ids_with_snapshots(
search_opts={'all_tenants': True})
self._set_volume_attributes(
volumes, instances, volume_ids_with_snapshots)
# Gather our tenants to correlate against IDs
try:
tenants, has_more = keystone.tenant_list(self.request)
except Exception:
tenants = []
msg = _('Unable to retrieve volume project information.')
exceptions.handle(self.request, msg)
tenant_dict = OrderedDict([(t.id, t) for t in tenants])
for volume in volumes:
tenant_id = getattr(volume, "os-vol-tenant-attr:tenant_id", None)
tenant = tenant_dict.get(tenant_id, None)
volume.tenant_name = getattr(tenant, "name", None)
return volumes
def get_filters(self, filters):
self.table = self._tables['volumes']
self.handle_server_filter(self.request, table=self.table)
self.update_server_filter_action(self.request, table=self.table)
filters = super(VolumesView, self).get_filters(filters,
self.FILTERS_MAPPING)
return filters
class DetailView(volumes_views.DetailView):
tab_group_class = volumes_tabs.VolumeDetailTabs
def get_context_data(self, **kwargs):
context = super(DetailView, self).get_context_data(**kwargs)
table = volumes_tables.VolumesTable(self.request)
context["actions"] = table.render_row_actions(context["volume"])
return context
def get_search_opts(self, volume):
search_opts = super(DetailView, self).get_search_opts(volume)
search_opts['all_tenants'] = True
return search_opts
def get_redirect_url(self):
return reverse('horizon:admin:volumes:index')
class ManageVolumeView(forms.ModalFormView):
form_class = volumes_forms.ManageVolume
template_name = 'admin/volumes/manage_volume.html'
form_id = "manage_volume_modal"
submit_label = _("Manage")
success_url = reverse_lazy('horizon:admin:volumes:index')
submit_url = reverse_lazy('horizon:admin:volumes:manage')
cancel_url = reverse_lazy("horizon:admin:volumes:index")
page_title = _("Manage Volume")
def get_context_data(self, **kwargs):
context = super(ManageVolumeView, self).get_context_data(**kwargs)
return context
class UnmanageVolumeView(forms.ModalFormView):
form_class = volumes_forms.UnmanageVolume
template_name = 'admin/volumes/unmanage_volume.html'
form_id = "unmanage_volume_modal"
submit_label = _("Unmanage")
success_url = reverse_lazy('horizon:admin:volumes:index')
submit_url = 'horizon:admin:volumes:unmanage'
cancel_url = reverse_lazy("horizon:admin:volumes:index")
page_title = _("Unmanage Volume")
def get_context_data(self, **kwargs):
context = super(UnmanageVolumeView, self).get_context_data(**kwargs)
args = (self.kwargs['volume_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
@memoized.memoized_method
def get_data(self):
try:
volume_id = self.kwargs['volume_id']
volume = cinder.volume_get(self.request, volume_id)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve volume details.'),
redirect=self.success_url)
return volume
def get_initial(self):
volume = self.get_data()
return {'volume_id': self.kwargs["volume_id"],
'name': volume.name,
'host': getattr(volume, "os-vol-host-attr:host")}
class MigrateVolumeView(forms.ModalFormView):
form_class = volumes_forms.MigrateVolume
template_name = 'admin/volumes/migrate_volume.html'
form_id = "migrate_volume_modal"
submit_label = _("Migrate")
success_url = reverse_lazy('horizon:admin:volumes:index')
submit_url = 'horizon:admin:volumes:migrate'
cancel_url = reverse_lazy("horizon:admin:volumes:index")
page_title = _("Migrate Volume")
def get_context_data(self, **kwargs):
context = super(MigrateVolumeView, self).get_context_data(**kwargs)
args = (self.kwargs['volume_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
@memoized.memoized_method
def get_data(self):
try:
volume_id = self.kwargs['volume_id']
volume = cinder.volume_get(self.request, volume_id)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve volume details.'),
redirect=self.success_url)
return volume
@memoized.memoized_method
def get_hosts(self):
try:
return cinder.pool_list(self.request)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve pools information.'),
redirect=self.success_url)
def get_initial(self):
volume = self.get_data()
return {'volume_id': self.kwargs["volume_id"],
'name': volume.name,
'current_host': getattr(volume, "os-vol-host-attr:host"),
'hosts': self.get_hosts()}
class UpdateStatusView(forms.ModalFormView):
form_class = volumes_forms.UpdateStatus
modal_id = "update_volume_status_modal"
template_name = 'admin/volumes/update_status.html'
submit_label = _("Update Status")
submit_url = "horizon:admin:volumes:update_status"
success_url = reverse_lazy('horizon:admin:volumes:index')
page_title = _("Update Volume Status")
def get_context_data(self, **kwargs):
context = super(UpdateStatusView, self).get_context_data(**kwargs)
context["volume_id"] = self.kwargs['volume_id']
args = (self.kwargs['volume_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
@memoized.memoized_method
def get_data(self):
try:
volume_id = self.kwargs['volume_id']
volume = cinder.volume_get(self.request, volume_id)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve volume details.'),
redirect=self.success_url)
return volume
def get_initial(self):
volume = self.get_data()
return {'volume_id': self.kwargs["volume_id"],
'status': volume.status}
| en | 0.855499 | # Copyright 2012 Nebula, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. Admin views for managing volumes and snapshots. # Keystone returns a tuple ([],false) where the first element is # tenant list that's why the 0 is hardcoded below # Gather our tenants to correlate against IDs | 1.693681 | 2 |
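FILTERS_MAPPING above translates the human-readable filter values typed into the table's search box into the values the Cinder API expects. A stripped-down illustration of that translation step, independent of Horizon and with the mapping hard-coded:
FILTERS_MAPPING = {'bootable': {'yes': 'true', 'no': 'false'},
                   'encrypted': {'yes': True, 'no': False}}
def translate_filters(filters, mapping=FILTERS_MAPPING):
    # replace UI values ("yes"/"no") with API values, leaving other keys untouched
    out = dict(filters)
    for key, value_map in mapping.items():
        if key in out and out[key] in value_map:
            out[key] = value_map[out[key]]
    return out
print(translate_filters({'all_tenants': True, 'bootable': 'yes'}))
# {'all_tenants': True, 'bootable': 'true'}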
tests/__init__.py | flowolf/yessssms | 6 | 7676 | """Tests for YesssSMS."""
| """Tests for YesssSMS."""
| en | 0.746891 | Tests for YesssSMS. | 1.12382 | 1 |
bldr/dep/env.py | bldr-cmd/bldr-cmd | 0 | 7677 | <filename>bldr/dep/env.py<gh_stars>0
# This is used by Environment to populate its env
# Due to circular dependencies it cannot reference other parts of bldr
import toml
def default(dotbldr_path: str) -> dict:
dep = {
'config': toml.load(f"{dotbldr_path}/dependency.toml"),
'lock': toml.load(f"{dotbldr_path}/dependency.lock.toml")
}
return dep
def save_lock(dotbldr_path: str, lock_env: dict):
with open(f"{dotbldr_path}/dependency.lock.toml", 'w') as toml_file:
return toml.dump(lock_env, toml_file)
def save_config(dotbldr_path: str, config_env: dict):
with open(f"{dotbldr_path}/dependency.toml", 'w') as toml_file:
return toml.dump(config_env, toml_file) | <filename>bldr/dep/env.py<gh_stars>0
# This is used by Environment to populate its env
# Due to circular dependencies it cannot reference other parts of bldr
import toml
def default(dotbldr_path: str) -> dict:
dep = {
'config': toml.load(f"{dotbldr_path}/dependency.toml"),
'lock': toml.load(f"{dotbldr_path}/dependency.lock.toml")
}
return dep
def save_lock(dotbldr_path: str, lock_env: dict):
with open(f"{dotbldr_path}/dependency.lock.toml", 'w') as toml_file:
return toml.dump(lock_env, toml_file)
def save_config(dotbldr_path: str, config_env: dict):
with open(f"{dotbldr_path}/dependency.toml", 'w') as toml_file:
return toml.dump(config_env, toml_file) | en | 0.931071 | # This is used by Environment to populate its env # Due to circular dependencies it cannot reference other parts of bldr | 2.201642 | 2 |
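A small usage sketch for the helpers above: load the dependency config and lock, pin one entry, and write the lock back. The ".bldr" directory and the "requests" entry are illustrative assumptions, and both TOML files must already exist:
dotbldr = "./.bldr"  # assumed project-local dotbldr directory
dep = default(dotbldr)
dep['lock'].setdefault('packages', {})['requests'] = {'version': '2.31.0'}  # hypothetical pin
save_lock(dotbldr, dep['lock'])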
rasa-sample/actions.py | ijufumi/demo-python | 0 | 7678 | import re
from typing import Any, Text, Dict, List
from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher
from rasa_sdk.events import SlotSet
import lark_module
class ActionHelloWorld(Action):
state_map = {}
def name(self) -> Text:
return "action_hello_world"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
state = tracker.current_state()
print("current_state: {}".format(state))
sender_id = state["sender_id"]
if sender_id not in self.state_map:
self.state_map[sender_id] = 0
self.state_map[sender_id] += 1
dispatcher.utter_message(
text="Hello World!",
json_message={"data": "hogeohge"},
# template="<div></div>",
buttons=[{"title": "OK", "payload": "99!"}])
print("state: {}".format(self.state_map[sender_id]))
return []
class ActionCustomButton(Action):
def name(self) -> Text:
return "action_custom_button"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
dispatcher.utter_message(
text="Which ?",
buttons=[{"title": "OK", "payload": "1"},
{"title": "NG", "payload": "2"},
{"title": "Unknown", "payload": "9"}])
return []
class ActionJsonMessage(Action):
def name(self) -> Text:
return "action_json_message"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
dispatcher.utter_message(
text="Which ?",
json_message={"data": {
"key1": "value1",
"key2": "value2",
}}
)
return []
class ActionConversation(Action):
def name(self) -> Text:
return "action_conversation"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
state = tracker.current_state()
print("current_state: {}".format(state))
input_text = state['latest_message'].get('text')
latest_bot = None
for event in reversed(state['events']):
if event['event'] == 'bot':
data = event.get('data', {}).get('custom', {}).get('data', [])
latest_bot = data[0] if len(data) > 0 else None
break
print("latest_bot: {}".format(latest_bot))
if not latest_bot:
print("use utter_conversation_1")
dispatcher.utter_message(template="utter_conversation_1", json_message={"data": {"key1": "value1",
"key2": "value2"}})
else:
if latest_bot == 'conversation_1':
print("use utter_conversation_2")
dispatcher.utter_message(template="utter_conversation_2", json_message={"data": ["conversation_2"]})
elif latest_bot == 'conversation_2':
result = re.match("\\d+", input_text)
if result:
print("use utter_conversation_3")
dispatcher.utter_message(template="utter_conversation_3", json_message={"data": ["conversation_3"]})
else:
print("use utter_conversation_2")
dispatcher.utter_message(template="utter_conversation_2", json_message={"data": ["conversation_2"]})
elif latest_bot == 'conversation_3':
result = re.match("\\d+", input_text)
if not result:
print("use utter_conversation_3")
dispatcher.utter_message(template="utter_conversation_3", json_message={"data": ["conversation_3"]})
else:
dispatcher.utter_message(text="Bye", json_message={"data": ["conversation_3"]})
else:
dispatcher.utter_message(text="Bye", json_message={"data": ["conversation_3"]})
return []
class ActionConversation2(Action):
action_state = {}
def name(self) -> Text:
return "action_conversation2"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
state = tracker.current_state()
sender_id = state.get("sender_id")
current_action = self.action_state.get(sender_id)
input_text = state['latest_message'].get('text')
print("state: {}, current_action: {}".format(state, current_action))
if current_action:
result = lark_module.execute(input_text)
if result:
dispatcher.utter_message(text=result, json_message={"data": ["step2"]},
elements=[{"data": ["step2"]}])
else:
dispatcher.utter_message(text="Bye", json_message={"data": ["step3"]})
else:
dispatcher.utter_message(text="Where are you from ?", json_message={"data": ["step3"]})
self.action_state[sender_id] = "get_start"
return []
| import re
from typing import Any, Text, Dict, List
from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher
from rasa_sdk.events import SlotSet
import lark_module
class ActionHelloWorld(Action):
state_map = {}
def name(self) -> Text:
return "action_hello_world"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
state = tracker.current_state()
print("current_state: {}".format(state))
sender_id = state["sender_id"]
if sender_id not in self.state_map:
self.state_map[sender_id] = 0
self.state_map[sender_id] += 1
dispatcher.utter_message(
text="Hello World!",
json_message={"data": "hogeohge"},
# template="<div></div>",
buttons=[{"title": "OK", "payload": "99!"}])
print("state: {}".format(self.state_map[sender_id]))
return []
class ActionCustomButton(Action):
def name(self) -> Text:
return "action_custom_button"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
dispatcher.utter_message(
text="Which ?",
buttons=[{"title": "OK", "payload": "1"},
{"title": "NG", "payload": "2"},
{"title": "Unknown", "payload": "9"}])
return []
class ActionJsonMessage(Action):
def name(self) -> Text:
return "action_json_message"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
dispatcher.utter_message(
text="Which ?",
json_message={"data": {
"key1": "value1",
"key2": "value2",
}}
)
return []
class ActionConversation(Action):
def name(self) -> Text:
return "action_conversation"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
state = tracker.current_state()
print("current_state: {}".format(state))
input_text = state['latest_message'].get('text')
latest_bot = None
for event in reversed(state['events']):
if event['event'] == 'bot':
data = event.get('data', {}).get('custom', {}).get('data', [])
latest_bot = data[0] if len(data) > 0 else None
break
print("latest_bot: {}".format(latest_bot))
if not latest_bot:
print("use utter_conversation_1")
dispatcher.utter_message(template="utter_conversation_1", json_message={"data": {"key1": "value1",
"key2": "value2"}})
else:
if latest_bot == 'conversation_1':
print("use utter_conversation_2")
dispatcher.utter_message(template="utter_conversation_2", json_message={"data": ["conversation_2"]})
elif latest_bot == 'conversation_2':
result = re.match("\\d+", input_text)
if result:
print("use utter_conversation_3")
dispatcher.utter_message(template="utter_conversation_3", json_message={"data": ["conversation_3"]})
else:
print("use utter_conversation_2")
dispatcher.utter_message(template="utter_conversation_2", json_message={"data": ["conversation_2"]})
elif latest_bot == 'conversation_3':
result = re.match("\\d+", input_text)
if not result:
print("use utter_conversation_3")
dispatcher.utter_message(template="utter_conversation_3", json_message={"data": ["conversation_3"]})
else:
dispatcher.utter_message(text="Bye", json_message={"data": ["conversation_3"]})
else:
dispatcher.utter_message(text="Bye", json_message={"data": ["conversation_3"]})
return []
class ActionConversation2(Action):
action_state = {}
def name(self) -> Text:
return "action_conversation2"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
state = tracker.current_state()
sender_id = state.get("sender_id")
current_action = self.action_state.get(sender_id)
input_text = state['latest_message'].get('text')
print("state: {}, current_action: {}".format(state, current_action))
if current_action:
result = lark_module.execute(input_text)
if result:
dispatcher.utter_message(text=result, json_message={"data": ["step2"]},
elements=[{"data": ["step2"]}])
else:
dispatcher.utter_message(text="Bye", json_message={"data": ["step3"]})
else:
dispatcher.utter_message(text="Where are you from ?", json_message={"data": ["step3"]})
self.action_state[sender_id] = "get_start"
return []
| uk | 0.07619 | # template="<div></div>", | 2.252543 | 2 |
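ActionConversation above picks its reply by scanning tracker.current_state()['events'] newest-first for the last bot message's custom data. The same lookup on a hand-built state dict, so the logic can be checked without a running Rasa server (the event payloads are invented):
def latest_bot_marker(state):
    # walk events newest-first and return the first custom-data marker the bot sent
    for event in reversed(state.get('events', [])):
        if event.get('event') == 'bot':
            data = event.get('data', {}).get('custom', {}).get('data', [])
            return data[0] if data else None
    return None
fake_state = {
    'sender_id': 'user-1',
    'events': [
        {'event': 'user', 'text': 'hi'},
        {'event': 'bot', 'data': {'custom': {'data': ['conversation_1']}}},
        {'event': 'user', 'text': '42'},
    ],
}
print(latest_bot_marker(fake_state))  # conversation_1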
parsers/politico.py | plympton/newsdiffs | 0 | 7679 | <reponame>plympton/newsdiffs
from baseparser import BaseParser, grab_url, logger
# Different versions of BeautifulSoup have different properties.
# Some work with one site, some with another.
# This is BeautifulSoup 3.2.
from BeautifulSoup import BeautifulSoup
# This is BeautifulSoup 4
import bs4
class PoliticoParser(BaseParser):
domains = ['www.politico.com']
feeder_pat = '^http://www.politico.com/(news/stories|story)/'
feeder_pages = ['http://www.politico.com/']
feeder_bs = bs4.BeautifulSoup
def _parse(self, html):
soup = bs4.BeautifulSoup(html)
print_link = soup.findAll('a', text='Print')[0].get('href')
html2 = grab_url(print_link)
logger.debug('got html 2')
# Now we have to switch back to bs3. Hilarious.
# and the labeled encoding is wrong, so force utf-8.
soup = BeautifulSoup(html2, convertEntities=BeautifulSoup.HTML_ENTITIES,
fromEncoding='utf-8')
self.meta = soup.findAll('meta')
p_tags = soup.findAll('p')[1:]
real_p_tags = [p for p in p_tags if
not p.findAll(attrs={'class':"twitter-follow-button"})]
self.title = soup.find('strong').getText()
entity = soup.find('span', attrs={'class':'author'})
children = list(entity.childGenerator())
try:
self.byline = 'By ' + children[1].getText()
except IndexError:
self.byline = ''
self.date = children[-1].strip()
self.body = '\n'+'\n\n'.join([p.getText() for p in real_p_tags])
| from baseparser import BaseParser, grab_url, logger
# Different versions of BeautifulSoup have different properties.
# Some work with one site, some with another.
# This is BeautifulSoup 3.2.
from BeautifulSoup import BeautifulSoup
# This is BeautifulSoup 4
import bs4
class PoliticoParser(BaseParser):
domains = ['www.politico.com']
feeder_pat = '^http://www.politico.com/(news/stories|story)/'
feeder_pages = ['http://www.politico.com/']
feeder_bs = bs4.BeautifulSoup
def _parse(self, html):
soup = bs4.BeautifulSoup(html)
print_link = soup.findAll('a', text='Print')[0].get('href')
html2 = grab_url(print_link)
logger.debug('got html 2')
# Now we have to switch back to bs3. Hilarious.
# and the labeled encoding is wrong, so force utf-8.
soup = BeautifulSoup(html2, convertEntities=BeautifulSoup.HTML_ENTITIES,
fromEncoding='utf-8')
self.meta = soup.findAll('meta')
p_tags = soup.findAll('p')[1:]
real_p_tags = [p for p in p_tags if
not p.findAll(attrs={'class':"twitter-follow-button"})]
self.title = soup.find('strong').getText()
entity = soup.find('span', attrs={'class':'author'})
children = list(entity.childGenerator())
try:
self.byline = 'By ' + children[1].getText()
except IndexError:
self.byline = ''
self.date = children[-1].strip()
self.body = '\n'+'\n\n'.join([p.getText() for p in real_p_tags]) | en | 0.940811 | # Different versions of BeautifulSoup have different properties. # Some work with one site, some with another. # This is BeautifulSoup 3.2. # This is BeautifulSoup 4 # Now we have to switch back to bs3. Hilarious. # and the labeled encoding is wrong, so force utf-8. | 2.74895 | 3 |
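The parser above mixes BeautifulSoup 3 and 4 because of quirks in the print view. A bs4-only sketch of the same field extraction on a tiny made-up HTML snippet; the real Politico markup differs, so the selectors are illustrative:
import bs4
html = (
    "<html><body>"
    "<strong>Sample headline</strong>"
    "<span class='author'><a>By</a> <a>Jane Doe</a> 01/01/13</span>"
    "<p>First paragraph.</p><p>Second paragraph.</p>"
    "</body></html>"
)
soup = bs4.BeautifulSoup(html, "html.parser")
title = soup.find("strong").get_text()
byline = "By " + soup.find("span", attrs={"class": "author"}).find_all("a")[-1].get_text()
body = "\n\n".join(p.get_text() for p in soup.find_all("p"))
print(title)
print(byline)
print(body)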
integration-tests/bats/server_multiclient_test.py | fairhopeweb/dolt | 2 | 7680 | <filename>integration-tests/bats/server_multiclient_test.py
import os
import sys
from queue import Queue
from threading import Thread
from helper.pytest import DoltConnection
# Utility functions
def print_err(e):
print(e, file=sys.stderr)
def query(dc, query_str):
return dc.query(query_str, False)
def query_with_expected_error(dc, non_error_msg , query_str):
try:
dc.query(query_str, False)
raise Exception(non_error_msg)
except:
pass
def row(pk, c1, c2):
return {"pk":str(pk),"c1":str(c1),"c2":str(c2)}
UPDATE_BRANCH_FAIL_MSG = "Failed to update branch"
def commit_and_update_branch(dc, commit_message, expected_hashes, branch_name):
expected_hash = "("
for i, eh in enumerate(expected_hashes):
if i != 0:
expected_hash += " or "
expected_hash += "hash = %s" % eh
expected_hash += ")"
query_str = 'UPDATE dolt_branches SET hash = Commit("-m", "%s") WHERE name = "%s" AND %s' % (commit_message, branch_name, expected_hash)
_, row_count = query(dc, query_str)
if row_count != 1:
raise Exception(UPDATE_BRANCH_FAIL_MSG)
query(dc, 'SET @@repo1_head=HASHOF("%s");' % branch_name)
def query_and_test_results(dc, query_str, expected):
results, _ = query(dc, query_str)
if results != expected:
raise Exception("Unexpected results for query:\n\t%s\nExpected:\n\t%s\nActual:\n\t%s" % (query_str, str(), str(results)))
def resolve_theirs(dc):
query_str = "REPLACE INTO test (pk, c1, c2) SELECT their_pk, their_c1, their_c2 FROM dolt_conflicts_test WHERE their_pk IS NOT NULL;"
query(dc, query_str)
query_str = """DELETE FROM test WHERE pk in (
SELECT base_pk FROM dolt_conflicts_test WHERE their_pk IS NULL
);"""
query(dc, query_str)
query(dc, "DELETE FROM dolt_conflicts_test")
def create_branch(dc, branch_name):
query_str = 'INSERT INTO dolt_branches (name, hash) VALUES ("%s", @@repo1_head);' % branch_name
_, row_count = query(dc, query_str)
if row_count != 1:
raise Exception("Failed to create branch")
# work functions
def connect(dc):
dc.connect()
def create_tables(dc):
query(dc, 'SET @@repo1_head=HASHOF("master");')
query(dc, """
CREATE TABLE test (
pk INT NOT NULL,
c1 INT,
c2 INT,
PRIMARY KEY(pk));""")
commit_and_update_branch(dc, "Created tables", ["@@repo1_head"], "master")
query_and_test_results(dc, "SHOW TABLES;", [{"Table": "test"}])
def duplicate_table_create(dc):
query(dc, 'SET @@repo1_head=HASHOF("master");')
query_with_expected_error(dc, "Should have failed creating duplicate table", """
CREATE TABLE test (
pk INT NOT NULL,
c1 INT,
c2 INT,
PRIMARY KEY(pk));""")
def seed_master(dc):
query(dc, 'SET @@repo1_head=HASHOF("master");')
_, row_count = query(dc, 'INSERT INTO test VALUES (0,0,0),(1,1,1),(2,2,2)')
if row_count != 3:
raise Exception("Failed to update rows")
commit_and_update_branch(dc, "Seeded initial data", ["@@repo1_head"], "master")
expected = [row(0,0,0), row(1,1,1), row(2,2,2)]
query_and_test_results(dc, "SELECT pk, c1, c2 FROM test ORDER BY pk", expected)
def modify_pk0_on_master_and_commit(dc):
query(dc, 'SET @@repo1_head=HASHOF("master");')
query(dc, "UPDATE test SET c1=1 WHERE pk=0;")
commit_and_update_branch(dc, "set c1 to 1", ["@@repo1_head"], "master")
def modify_pk0_on_master_no_commit(dc):
query(dc, 'SET @@repo1_head=HASHOF("master");')
query(dc, "UPDATE test SET c1=2 WHERE pk=0")
def fail_to_commit(dc):
try:
commit_and_update_branch(dc, "Created tables", ["@@repo1_head"], "master")
raise Exception("Failed to fail commit")
except Exception as e:
if str(e) != UPDATE_BRANCH_FAIL_MSG:
raise e
def commit_to_feature(dc):
create_branch(dc, "feature")
commit_and_update_branch(dc, "set c1 to 2", ["@@repo1_head"], "feature")
def merge_resolve_commit(dc):
query(dc, 'SET @@repo1_head=Merge("master");')
query_and_test_results(dc, "SELECT * from dolt_conflicts;", [{"table": "test", "num_conflicts": "1"}])
resolve_theirs(dc)
expected = [row(0,1,0), row(1,1,1), row(2,2,2)]
query_and_test_results(dc, "SELECT pk, c1, c2 FROM test ORDER BY pk", expected)
commit_and_update_branch(dc, "resolved conflicts", ['HASHOF("HEAD^1")', 'HASHOF("HEAD^2")'], "master")
# test script
MAX_SIMULTANEOUS_CONNECTIONS = 2
PORT_STR = sys.argv[1]
CONNECTIONS = [None]*MAX_SIMULTANEOUS_CONNECTIONS
for i in range(MAX_SIMULTANEOUS_CONNECTIONS):
CONNECTIONS[i] = DoltConnection(port=int(PORT_STR), database="repo1", user='dolt', auto_commit=False)
WORK_QUEUE = Queue()
# work item run by workers
class WorkItem(object):
def __init__(self, dc, *work_funcs):
self.dc = dc
self.work_funcs = work_funcs
self.exception = None
# worker thread function
def worker():
while True:
try:
item = WORK_QUEUE.get()
for work_func in item.work_funcs:
work_func(item.dc)
WORK_QUEUE.task_done()
except Exception as e:
            item.exception = e
WORK_QUEUE.task_done()
# start the worker threads
for i in range(MAX_SIMULTANEOUS_CONNECTIONS):
t = Thread(target=worker)
t.daemon = True
t.start()
# This defines the actual test script. Each stage in the script has a list of work items. Each work item
# in a stage should have a different connection associated with it. Each connections work is done in parallel
# each of the work functions for a connection is executed in order.
work_item_stages = [
[WorkItem(CONNECTIONS[0], connect, create_tables)],
[WorkItem(CONNECTIONS[0], seed_master), WorkItem(CONNECTIONS[1], connect, duplicate_table_create)],
[WorkItem(CONNECTIONS[0], modify_pk0_on_master_and_commit), WorkItem(CONNECTIONS[1], modify_pk0_on_master_no_commit)],
[WorkItem(CONNECTIONS[1], fail_to_commit, commit_to_feature, merge_resolve_commit)]
]
# Loop through the work item stages executing each stage by sending the work items for the stage to the worker threads
# and then waiting for all of them to finish before moving on to the next one. Checks for an error after every stage.
for stage, work_items in enumerate(work_item_stages):
print("Running stage %d / %d" % (stage,len(work_item_stages)))
for work_item in work_items:
WORK_QUEUE.put(work_item)
WORK_QUEUE.join()
for work_item in work_items:
if work_item.exception is not None:
print_err(work_item.exception)
sys.exit(1)
| <filename>integration-tests/bats/server_multiclient_test.py
import os
import sys
from queue import Queue
from threading import Thread
from helper.pytest import DoltConnection
# Utility functions
def print_err(e):
print(e, file=sys.stderr)
def query(dc, query_str):
return dc.query(query_str, False)
def query_with_expected_error(dc, non_error_msg , query_str):
try:
dc.query(query_str, False)
raise Exception(non_error_msg)
except:
pass
def row(pk, c1, c2):
return {"pk":str(pk),"c1":str(c1),"c2":str(c2)}
UPDATE_BRANCH_FAIL_MSG = "Failed to update branch"
def commit_and_update_branch(dc, commit_message, expected_hashes, branch_name):
expected_hash = "("
for i, eh in enumerate(expected_hashes):
if i != 0:
expected_hash += " or "
expected_hash += "hash = %s" % eh
expected_hash += ")"
query_str = 'UPDATE dolt_branches SET hash = Commit("-m", "%s") WHERE name = "%s" AND %s' % (commit_message, branch_name, expected_hash)
_, row_count = query(dc, query_str)
if row_count != 1:
raise Exception(UPDATE_BRANCH_FAIL_MSG)
query(dc, 'SET @@repo1_head=HASHOF("%s");' % branch_name)
def query_and_test_results(dc, query_str, expected):
results, _ = query(dc, query_str)
if results != expected:
raise Exception("Unexpected results for query:\n\t%s\nExpected:\n\t%s\nActual:\n\t%s" % (query_str, str(), str(results)))
def resolve_theirs(dc):
query_str = "REPLACE INTO test (pk, c1, c2) SELECT their_pk, their_c1, their_c2 FROM dolt_conflicts_test WHERE their_pk IS NOT NULL;"
query(dc, query_str)
query_str = """DELETE FROM test WHERE pk in (
SELECT base_pk FROM dolt_conflicts_test WHERE their_pk IS NULL
);"""
query(dc, query_str)
query(dc, "DELETE FROM dolt_conflicts_test")
def create_branch(dc, branch_name):
query_str = 'INSERT INTO dolt_branches (name, hash) VALUES ("%s", @@repo1_head);' % branch_name
_, row_count = query(dc, query_str)
if row_count != 1:
raise Exception("Failed to create branch")
# work functions
def connect(dc):
dc.connect()
def create_tables(dc):
query(dc, 'SET @@repo1_head=HASHOF("master");')
query(dc, """
CREATE TABLE test (
pk INT NOT NULL,
c1 INT,
c2 INT,
PRIMARY KEY(pk));""")
commit_and_update_branch(dc, "Created tables", ["@@repo1_head"], "master")
query_and_test_results(dc, "SHOW TABLES;", [{"Table": "test"}])
def duplicate_table_create(dc):
query(dc, 'SET @@repo1_head=HASHOF("master");')
query_with_expected_error(dc, "Should have failed creating duplicate table", """
CREATE TABLE test (
pk INT NOT NULL,
c1 INT,
c2 INT,
PRIMARY KEY(pk));""")
def seed_master(dc):
query(dc, 'SET @@repo1_head=HASHOF("master");')
_, row_count = query(dc, 'INSERT INTO test VALUES (0,0,0),(1,1,1),(2,2,2)')
if row_count != 3:
raise Exception("Failed to update rows")
commit_and_update_branch(dc, "Seeded initial data", ["@@repo1_head"], "master")
expected = [row(0,0,0), row(1,1,1), row(2,2,2)]
query_and_test_results(dc, "SELECT pk, c1, c2 FROM test ORDER BY pk", expected)
def modify_pk0_on_master_and_commit(dc):
query(dc, 'SET @@repo1_head=HASHOF("master");')
query(dc, "UPDATE test SET c1=1 WHERE pk=0;")
commit_and_update_branch(dc, "set c1 to 1", ["@@repo1_head"], "master")
def modify_pk0_on_master_no_commit(dc):
query(dc, 'SET @@repo1_head=HASHOF("master");')
query(dc, "UPDATE test SET c1=2 WHERE pk=0")
def fail_to_commit(dc):
try:
commit_and_update_branch(dc, "Created tables", ["@@repo1_head"], "master")
raise Exception("Failed to fail commit")
except Exception as e:
if str(e) != UPDATE_BRANCH_FAIL_MSG:
raise e
def commit_to_feature(dc):
create_branch(dc, "feature")
commit_and_update_branch(dc, "set c1 to 2", ["@@repo1_head"], "feature")
def merge_resolve_commit(dc):
query(dc, 'SET @@repo1_head=Merge("master");')
query_and_test_results(dc, "SELECT * from dolt_conflicts;", [{"table": "test", "num_conflicts": "1"}])
resolve_theirs(dc)
expected = [row(0,1,0), row(1,1,1), row(2,2,2)]
query_and_test_results(dc, "SELECT pk, c1, c2 FROM test ORDER BY pk", expected)
commit_and_update_branch(dc, "resolved conflicts", ['HASHOF("HEAD^1")', 'HASHOF("HEAD^2")'], "master")
# test script
MAX_SIMULTANEOUS_CONNECTIONS = 2
PORT_STR = sys.argv[1]
CONNECTIONS = [None]*MAX_SIMULTANEOUS_CONNECTIONS
for i in range(MAX_SIMULTANEOUS_CONNECTIONS):
CONNECTIONS[i] = DoltConnection(port=int(PORT_STR), database="repo1", user='dolt', auto_commit=False)
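# Two independent client sessions against the same repo1 database; each WorkItem below is
# pinned to one of these connections.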
WORK_QUEUE = Queue()
# work item run by workers
class WorkItem(object):
def __init__(self, dc, *work_funcs):
self.dc = dc
self.work_funcs = work_funcs
self.exception = None
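        # Set by the worker thread if any work_func raises, so the main loop can report
        # the failure after the stage finishes.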
# worker thread function
def worker():
while True:
try:
item = WORK_QUEUE.get()
for work_func in item.work_funcs:
work_func(item.dc)
WORK_QUEUE.task_done()
except Exception as e:
            item.exception = e
WORK_QUEUE.task_done()
# start the worker threads
for i in range(MAX_SIMULTANEOUS_CONNECTIONS):
t = Thread(target=worker)
t.daemon = True
t.start()
# This defines the actual test script. Each stage in the script has a list of work items. Each work item
# in a stage should have a different connection associated with it. Each connections work is done in parallel
# each of the work functions for a connection is executed in order.
work_item_stages = [
[WorkItem(CONNECTIONS[0], connect, create_tables)],
[WorkItem(CONNECTIONS[0], seed_master), WorkItem(CONNECTIONS[1], connect, duplicate_table_create)],
[WorkItem(CONNECTIONS[0], modify_pk0_on_master_and_commit), WorkItem(CONNECTIONS[1], modify_pk0_on_master_no_commit)],
[WorkItem(CONNECTIONS[1], fail_to_commit, commit_to_feature, merge_resolve_commit)]
]
# Loop through the work item stages executing each stage by sending the work items for the stage to the worker threads
# and then waiting for all of them to finish before moving on to the next one. Checks for an error after every stage.
for stage, work_items in enumerate(work_item_stages):
print("Running stage %d / %d" % (stage,len(work_item_stages)))
for work_item in work_items:
WORK_QUEUE.put(work_item)
WORK_QUEUE.join()
for work_item in work_items:
if work_item.exception is not None:
print_err(work_item.exception)
sys.exit(1)
| en | 0.891769 | # Utility functions DELETE FROM test WHERE pk in ( SELECT base_pk FROM dolt_conflicts_test WHERE their_pk IS NULL ); # work functions CREATE TABLE test ( pk INT NOT NULL, c1 INT, c2 INT, PRIMARY KEY(pk)); CREATE TABLE test ( pk INT NOT NULL, c1 INT, c2 INT, PRIMARY KEY(pk)); # test script # work item run by workers # worker thread function # start the worker threads # This defines the actual test script. Each stage in the script has a list of work items. Each work item # in a stage should have a different connection associated with it. Each connections work is done in parallel # each of the work functions for a connection is executed in order. # Loop through the work item stages executing each stage by sending the work items for the stage to the worker threads # and then waiting for all of them to finish before moving on to the next one. Checks for an error after every stage. | 1.971394 | 2 |
messenger/client/messenger.py | marik348/python-messenger | 2 | 7681 | from requests import get, post, exceptions
from datetime import datetime
from PyQt5 import QtWidgets, QtCore
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtGui import QFont
from qtwidgets import PasswordEdit
from client_commands import (help_client, online, status, myself, reg, role, ban, unban)
from client_content import (get_warning_messages, get_client_commands, get_message_box_text, get_message_style)
from click_label import clickable
from client_ui import Ui_Messenger
from preferences import Preferences
from style_sheet import load_stylesheet
class Messenger(QtWidgets.QMainWindow, Ui_Messenger):
"""
The messenger object acts as the main object and is managed by client.
Shows UI and is responsible for UX.
    UI is separated into 3 main parts with the following indexes: 0 - Login form, 1 - Registration form, 2 - Chat.
Every 5 seconds requests server status.
Every second shows new messages, if user logged in.
Under main label "Python Messenger" there is server status, which displays whether server is working,
if yes, you can hover on it to see full server status.
In case of disconnection from server it'll show server-off message and navigate to login form.
It's possible to change server IP address in preferences menu.
:param translate: properly shows all content
:param password_line1: input line with icons to show/hide password entries on login form
:param password_line2: input line with icons to show/hide password entries on registration form
:param username: user nickname string
:param password: <PASSWORD>
:param last_message_time: last time of getting messages, defaults to 0
:param max_text_len: maximum text message length to send in chat, defaults to 250
:param server_IP: server IPv4 string
:param message_style: style for messages defined in :func:`get_message_style`
:param warning_messages: dict of warning messages defined in :func:`get_warning_messages`
:param message_box_text: dict of content for message box defined in :func:`get_message_box_text`
:param client_commands: list of dicts with client-side commands defined in :func:`get_client_commands`
:param run_client_command: dict, where key is the name of client command and value is the function of this command
:param server_commands: list of dicts with server-side commands defined in :func:`get_server_commands`
:param run_server_command: dict, where key is the name of server command and value is the function of this command
:param timer_get_messages: timer, which every second runs :func:`get_messages`
:param timer_get_status: timer, which every 5 seconds runs :func:`get_status`
"""
def __init__(self, parent=None):
"""Initialize messenger object."""
super().__init__(parent)
self.setupUi(self)
self.translate = QtCore.QCoreApplication.translate
self.password_line1 = PasswordEdit(True, self.login_page)
self.password_line2 = PasswordEdit(True, self.registration_page)
self.modify_password_lines()
# Connect buttons to the methods.
self.send_button.pressed.connect(self.send)
self.sign_up_button.pressed.connect(self.sign_up_user)
self.login_button.pressed.connect(self.login_user)
# Connect actions to the methods.
self.action_shortcuts.triggered.connect(self.show_shortcuts_box)
self.action_commands.triggered.connect(self.show_commands_box)
self.action_about.triggered.connect(self.show_about_box)
self.action_contacts.triggered.connect(self.show_contacts_box)
self.action_preferences.triggered.connect(self.open_preferences_window)
self.action_logout.triggered.connect(self.logout)
self.action_close.triggered.connect(self.close)
# Filter shortcuts and text overflow.
self.plain_text_edit.installEventFilter(self)
self.username = None
        self.password = None
self.last_message_time = 0
self.max_text_len = 250
self.server_IP = '0.0.0.0:9000'
# Load client content.
self.message_style = get_message_style()
self.warning_messages = get_warning_messages()
self.message_box_text = get_message_box_text()
# Load commands.
self.client_commands = get_client_commands()
self.run_client_command = {'close': self.close,
'logout': self.logout,
'reload': self.reload}
self.server_commands = []
self.run_server_command = {}
self.timer_get_messages = QtCore.QTimer()
self.timer_get_messages.timeout.connect(self.get_messages)
self.timer_get_messages.start(1000)
self.timer_get_status = QtCore.QTimer()
self.timer_get_status.timeout.connect(self.get_status)
self.timer_get_status.start(5000)
clickable(self.go_to_sign_up).connect(self.go_to_registration_form)
clickable(self.go_to_login).connect(self.go_to_login_form)
self.get_status()
def eventFilter(self, obj, event):
"""
Filters Enter key press and message text length.
If Enter key pressed, sends user's message.
If length of message is above maximum, doesn't allow writing.
"""
if event.type() == QtCore.QEvent.KeyPress and obj is self.plain_text_edit:
text = self.plain_text_edit.toPlainText()
if event.key() == QtCore.Qt.Key_Return and self.plain_text_edit.hasFocus():
self.send()
return True
elif len(text) > self.max_text_len:
text = text[:self.max_text_len]
self.plain_text_edit.setPlainText(text)
cursor = self.plain_text_edit.textCursor()
cursor.setPosition(self.max_text_len)
self.plain_text_edit.setTextCursor(cursor)
return True
return super().eventFilter(obj, event)
def closeEvent(self, event):
"""
        Shows a question message box asking whether to close the messenger.
        If the user confirms, marks the user as logged out and closes the messenger.
        Otherwise, ignores the close event.
:param event: event to close the messenger
"""
reply = QMessageBox.question(self, 'Quit', self.message_box_text["close"],
QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)
# User closes the messenger and is logged in.
if reply == QMessageBox.Yes and self.stacked_widget.currentIndex() == 2:
try:
post(
f'http://{self.server_IP}/logout',
json={"username": self.username}, verify=False
)
except exceptions.RequestException as e:
raise SystemExit
event.accept()
# User closes the messenger and is logged out.
elif reply == QMessageBox.Yes:
event.accept()
else:
event.ignore()
def logout(self):
"""
        Shows a question message box asking whether to log out of the account.
        If the user confirms, marks the user as logged out and navigates to the login form.
        Otherwise, ignores the logout request.
"""
reply = QMessageBox.question(self, 'Logout', self.message_box_text["logout"],
QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)
if reply == QMessageBox.Yes:
try:
post(
f'http://{self.server_IP}/logout',
json={"username": self.username}, verify=False
)
except exceptions.RequestException as e:
self.show_server_off_box()
self.clear_user_data()
return
self.go_to_login_form()
self.clear_user_data()
self.action_logout.setEnabled(False)
self.action_commands.setEnabled(False)
self.action_preferences.setEnabled(True)
else:
return
def modify_password_lines(self):
"""Modifies and appears password lines."""
geometry = QtCore.QRect(60, 200, 291, 41)
font = QFont()
font.setPointSize(14)
self.password_line1.setGeometry(geometry)
self.password_line1.setFont(font)
self.password_line1.setEchoMode(QtWidgets.QLineEdit.Password)
self.password_line1.setObjectName("password_line1")
self.password_line1.setPlaceholderText(self.translate("Messenger", "Password"))
self.password_line2.setGeometry(geometry)
self.password_line2.setFont(font)
self.password_line2.setEchoMode(QtWidgets.QLineEdit.Password)
self.password_line2.setObjectName("password_line2")
self.password_line2.setPlaceholderText(self.translate("Messenger", "Enter Your Password"))
def open_preferences_window(self):
"""Opens settings window."""
settings = Preferences(self)
if settings.exec():
self.server_IP = settings.server_IP.text()
def clear_user_data(self):
"""Clears user data after logout."""
self.username = None
self.plain_text_edit.clear()
self.text_browser.clear()
self.last_message_time = 0
def reload(self):
"""Reloads all messages and deletes commands output."""
self.text_browser.clear()
self.last_message_time = 0
def go_to_registration_form(self):
"""Navigates to registration menu."""
self.stacked_widget.setCurrentIndex(1)
def go_to_login_form(self):
"""Navigates to login menu."""
self.stacked_widget.setCurrentIndex(0)
def go_to_chat(self):
"""Navigates to chat."""
self.get_server_commands()
self.stacked_widget.setCurrentIndex(2)
self.action_logout.setEnabled(True)
self.action_commands.setEnabled(True)
self.action_preferences.setEnabled(False)
self.plain_text_edit.setFocus()
self.clear_credentials()
def clear_credentials(self):
"""Clears login and password lines after log in or sign up."""
self.password_line1.clear()
self.login_line1.clear()
self.password_line2.clear()
self.login_line2.clear()
self.password = None
def show_about_box(self):
"""Shows message box with content about messenger."""
QMessageBox.information(self, 'About', self.message_box_text["about"])
def show_contacts_box(self):
"""Shows message box with contacts information."""
QMessageBox.information(self, 'Contacts', self.message_box_text["contacts"])
def show_server_off_box(self):
"""Shows message box about server off information."""
QMessageBox.critical(self, 'Opsss...', self.message_box_text["server_is_off"])
self.go_to_login_form()
def show_shortcuts_box(self):
"""Shows message box with shortcuts."""
QMessageBox.information(self, 'Shortcuts', self.message_box_text["shortcuts"])
def show_commands_box(self):
"""Shows message box with available commands."""
output = help_client(self.client_commands, self.server_commands, [])
output = output.replace('=', '')
QMessageBox.information(self, 'Commands', output)
def sign_up_user(self):
"""
Registers user.
Verifies correctness of login and password input.
Sends request to sign up user.
"""
# Clear registration form.
self.login_error2.setText(self.translate("Messenger", self.warning_messages['empty_str']))
self.password_error2.setText(self.translate("Messenger", self.warning_messages['empty_str']))
self.login_line2.setStyleSheet("border: 1px solid #B8B5B2")
self.password_line2.setStyleSheet("border: <PASSWORD>")
self.username = self.login_line2.text()
self.password = self.password_line2.text()
# Check that form isn't empty.
if not self.username:
if not self.password:
self.login_error2.setText(self.translate("Messenger", self.warning_messages['login_required']))
self.password_error2.setText(self.translate("Messenger", self.warning_messages['password_required']))
self.login_line2.setStyleSheet("border: 1px solid red")
self.password_line2.setStyleSheet("border: 1px solid red")
return
else:
self.login_error2.setText(self.translate("Messenger", self.warning_messages['login_required']))
self.login_line2.setStyleSheet("border: 1px solid red")
return
else:
if not self.password:
self.password_error2.setText(self.translate("Messenger", self.warning_messages['password_required']))
self.password_line2.setStyleSheet("border: 1px solid red")
return
if not self.username.isalnum():
self.login_error2.setText(self.translate("Messenger", self.warning_messages['not_alphanumeric']))
self.login_error2.adjustSize()
self.login_line2.setStyleSheet("border: 1px solid red")
return
try:
response = post(
f'http://{self.server_IP}/sign_up',
auth=(self.username, self.password),
verify=False
)
except exceptions.RequestException as e:
self.show_server_off_box()
self.clear_credentials()
return
# Process bad request.
if response.json()['login_out_of_range']:
self.login_error2.setText(self.translate("Messenger", self.warning_messages['login_out_of_range']))
self.login_error2.adjustSize()
self.login_line2.setStyleSheet("border: 1px solid red")
return
elif response.json()['password_out_of_range']:
self.password_error2.setText(self.translate("Messenger", self.warning_messages['password_out_of_range']))
self.password_error2.adjustSize()
self.password_line2.setStyleSheet("border: 1px solid red")
return
elif not response.json()['ok']:
self.login_error2.setText(self.translate("Messenger", self.warning_messages['registered']))
self.login_error2.adjustSize()
self.login_line2.setStyleSheet("border: 1px solid red")
return
self.go_to_chat()
def login_user(self):
"""
Allows user to log in.
Verifies correctness of login and password input.
Sends request to authenticate user.
"""
# Clear login form.
self.login_error1.setText(self.translate("Messenger", self.warning_messages['empty_str']))
self.password_error1.setText(self.translate("Messenger", self.warning_messages['empty_str']))
self.login_line1.setStyleSheet("border: 1px solid #B8B5B2")
self.password_line1.setStyleSheet("border: 1px solid #B8B5B2")
self.username = self.login_line1.text()
self.password = self.password_line1.text()
# Check that form isn't empty.
if not self.username:
if not self.password:
self.login_error1.setText(self.translate("Messenger", self.warning_messages['login_required']))
self.password_error1.setText(self.translate("Messenger", self.warning_messages['password_required']))
self.login_line1.setStyleSheet("border: 1px solid red")
self.password_line1.setStyleSheet("border: 1px solid red")
return
else:
self.login_error1.setText(self.translate("Messenger", self.warning_messages['login_required']))
self.login_line1.setStyleSheet("border: 1px solid red")
return
else:
if not self.password:
self.password_error1.setText(self.translate("Messenger", self.warning_messages['password_required']))
self.password_line1.setStyleSheet("border: 1px solid red")
return
try:
response = post(
f'http://{self.server_IP}/auth',
auth=(self.username, self.password),
verify=False
)
except exceptions.RequestException as e:
self.show_server_off_box()
self.clear_credentials()
return
# Process bad request.
if not response.json()['exist']:
self.login_error1.setText(self.translate("Messenger", self.warning_messages['invalid_login']))
self.login_line1.setStyleSheet("border: 1px solid red")
return
if not response.json()['match']:
self.password_error1.setText(self.translate("Messenger", self.warning_messages['invalid_password']))
self.password_line1.setStyleSheet("border: 1px solid red")
return
if response.json()['banned']:
self.login_error1.setText(self.translate("Messenger", self.warning_messages['banned']))
self.login_line1.setStyleSheet("border: 1px solid red")
return
self.go_to_chat()
def get_server_commands(self):
"""Sends request to get available server-side commands for user."""
try:
response = post(
f'http://{self.server_IP}/command',
json={"username": self.username, "command": 'help'}, verify=False
)
except exceptions.RequestException as e:
self.clear_user_data()
self.show_server_off_box()
return
if not response.json()['ok']:
self.show_text(response.json()['output'] + "<br>")
self.plain_text_edit.clear()
return
self.server_commands = response.json()['output']
# Connect command name with function.
for cmd in self.server_commands:
if cmd['name'] != 'help': self.run_server_command[f"{cmd['name']}"] = globals()[cmd['name']]
def send(self):
"""Separates and directs messages & commands to relevant function."""
self.plain_text_edit.setFocus()
text = self.plain_text_edit.toPlainText()
text = text.strip()
# Validate text don't execute HTML.
text = text.replace('</', '')
text = text.replace('<', '')
text = text.replace('>', '')
if len(text) > self.max_text_len:
text = text[:self.max_text_len]
if not text:
return
elif text.startswith('/'):
self.send_command(text[1:])
else:
self.send_message(text)
def send_message(self, text):
"""
Stores message on the server.
:param text: text of message
"""
try:
post(
f'http://{self.server_IP}/send_message',
json={"username": self.username, "text": text},
verify=False
)
except exceptions.RequestException as e:
self.clear_user_data()
self.show_server_off_box()
return
self.plain_text_edit.clear()
self.plain_text_edit.repaint()
def send_command(self, cmd_string):
"""
Executes command.
If it's client-side command, executes directly from client.
If it's server-side command, sends command to execute
on the server and processes the output.
:param cmd_string: command with parameters to execute
"""
command = cmd_string.split()[0]
args = cmd_string.split()[1:] if len(cmd_string) > 1 else None
# Run client-side command.
if command in [cmd['name'] for cmd in self.client_commands]:
self.run_client_command.get(command)()
self.plain_text_edit.clear()
return
# Invalid command name.
elif command not in [cmd['name'] for cmd in self.server_commands]:
self.show_text(f"<b>Error:</b> Command '/{command}' not found.<br>"
f"Try '/help' to list all available commands :)<br>")
self.plain_text_edit.clear()
return
# Process 'help' command.
elif command == 'help':
output = help_client(self.client_commands, self.server_commands, args)
self.show_text(output)
self.plain_text_edit.clear()
return
try:
response = post(
f'http://{self.server_IP}/command',
json={"username": self.username, "command": cmd_string}, verify=False
)
except exceptions.RequestException as e:
self.clear_user_data()
self.show_server_off_box()
return
if not response.json()['ok']:
self.show_text("<b>Error:</b> " + response.json()['output'] + "<br>")
self.plain_text_edit.clear()
return
# Assign command function & run it with output from server.
run_command = self.run_server_command.get(command)
output = run_command(response.json()['output'], args)
self.show_text(output)
self.plain_text_edit.clear()
self.plain_text_edit.repaint()
def get_messages(self):
"""Sends request to get new messages and appears them in style."""
if not self.stacked_widget.currentIndex() == 2:
return
try:
response = get(
f'http://{self.server_IP}/get_messages',
params={'after': self.last_message_time},
verify=False
)
data = response.json()
except exceptions.RequestException as e:
self.clear_user_data()
self.show_server_off_box()
return
# Generate message.
for message in data['messages']:
# float -> datetime.
beauty_time = datetime.fromtimestamp(message['time'])
beauty_time = beauty_time.strftime('%d/%m %H:%M:%S')
# User will see his messages from the right side.
if message['username'] == self.username:
self.show_text(self.message_style['begin'] + beauty_time + ' ' + message['username']
+ self.message_style['middle'] + message['text'] + self.message_style['end'])
self.last_message_time = message['time']
else:
self.show_text(message['username'] + ' ' + beauty_time)
self.show_text(message['text'] + "<br>")
self.last_message_time = message['time']
def get_status(self):
"""Sends request to get server status."""
try:
response = get(
f'http://{self.server_IP}/status',
verify=False
)
status = response.json()
# Server is off.
except exceptions.RequestException as e:
self.server_status.setText(self.translate("Messenger", '<p style="font-size:12px">'
'<img src="images/server-is-off.png"> Offline</p>'))
tool_tip = f"Can't connect to the server<br>" \
f"Maybe server isn't run or you've entered an invalid IP address in Preferences"
self.server_status.setToolTip(tool_tip)
return
# Server is on.
self.server_status.setText(self.translate("Messenger", '<p style="font-size:12px">'
'<img src="images/server-is-on.png"> Online</p>'))
tool_tip = f"Server is working<br>" \
f"Users online: {status['users_online']}<br>" \
f"Date and time: {status['time']}<br>" \
f"Registered users: {status['users_count']}<br>" \
f"Written messages: {status['messages_count']}"
self.server_status.setToolTip(tool_tip)
def show_text(self, text):
"""Shows given text in messenger chat."""
self.text_browser.append(text)
self.text_browser.repaint()
app = QtWidgets.QApplication([])
window = Messenger()
app.setStyleSheet(load_stylesheet())
window.show()
app.exec_()
| from requests import get, post, exceptions
from datetime import datetime
from PyQt5 import QtWidgets, QtCore
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtGui import QFont
from qtwidgets import PasswordEdit
from client_commands import (help_client, online, status, myself, reg, role, ban, unban)
from client_content import (get_warning_messages, get_client_commands, get_message_box_text, get_message_style)
from click_label import clickable
from client_ui import Ui_Messenger
from preferences import Preferences
from style_sheet import load_stylesheet
class Messenger(QtWidgets.QMainWindow, Ui_Messenger):
"""
The messenger object acts as the main object and is managed by client.
Shows UI and is responsible for UX.
    UI is separated into 3 main parts with the following indexes: 0 - Login form, 1 - Registration form, 2 - Chat.
Every 5 seconds requests server status.
Every second shows new messages, if user logged in.
Under main label "Python Messenger" there is server status, which displays whether server is working,
if yes, you can hover on it to see full server status.
In case of disconnection from server it'll show server-off message and navigate to login form.
It's possible to change server IP address in preferences menu.
:param translate: properly shows all content
:param password_line1: input line with icons to show/hide password entries on login form
:param password_line2: input line with icons to show/hide password entries on registration form
:param username: user nickname string
:param password: <PASSWORD>
:param last_message_time: last time of getting messages, defaults to 0
:param max_text_len: maximum text message length to send in chat, defaults to 250
:param server_IP: server IPv4 string
:param message_style: style for messages defined in :func:`get_message_style`
:param warning_messages: dict of warning messages defined in :func:`get_warning_messages`
:param message_box_text: dict of content for message box defined in :func:`get_message_box_text`
:param client_commands: list of dicts with client-side commands defined in :func:`get_client_commands`
:param run_client_command: dict, where key is the name of client command and value is the function of this command
:param server_commands: list of dicts with server-side commands defined in :func:`get_server_commands`
:param run_server_command: dict, where key is the name of server command and value is the function of this command
:param timer_get_messages: timer, which every second runs :func:`get_messages`
:param timer_get_status: timer, which every 5 seconds runs :func:`get_status`
"""
def __init__(self, parent=None):
"""Initialize messenger object."""
super().__init__(parent)
self.setupUi(self)
self.translate = QtCore.QCoreApplication.translate
self.password_line1 = PasswordEdit(True, self.login_page)
self.password_line2 = PasswordEdit(True, self.registration_page)
self.modify_password_lines()
# Connect buttons to the methods.
self.send_button.pressed.connect(self.send)
self.sign_up_button.pressed.connect(self.sign_up_user)
self.login_button.pressed.connect(self.login_user)
# Connect actions to the methods.
self.action_shortcuts.triggered.connect(self.show_shortcuts_box)
self.action_commands.triggered.connect(self.show_commands_box)
self.action_about.triggered.connect(self.show_about_box)
self.action_contacts.triggered.connect(self.show_contacts_box)
self.action_preferences.triggered.connect(self.open_preferences_window)
self.action_logout.triggered.connect(self.logout)
self.action_close.triggered.connect(self.close)
# Filter shortcuts and text overflow.
self.plain_text_edit.installEventFilter(self)
self.username = None
        self.password = None
self.last_message_time = 0
self.max_text_len = 250
self.server_IP = '0.0.0.0:9000'
# Load client content.
self.message_style = get_message_style()
self.warning_messages = get_warning_messages()
self.message_box_text = get_message_box_text()
# Load commands.
self.client_commands = get_client_commands()
self.run_client_command = {'close': self.close,
'logout': self.logout,
'reload': self.reload}
self.server_commands = []
self.run_server_command = {}
self.timer_get_messages = QtCore.QTimer()
self.timer_get_messages.timeout.connect(self.get_messages)
self.timer_get_messages.start(1000)
self.timer_get_status = QtCore.QTimer()
self.timer_get_status.timeout.connect(self.get_status)
self.timer_get_status.start(5000)
clickable(self.go_to_sign_up).connect(self.go_to_registration_form)
clickable(self.go_to_login).connect(self.go_to_login_form)
self.get_status()
def eventFilter(self, obj, event):
"""
Filters Enter key press and message text length.
If Enter key pressed, sends user's message.
If length of message is above maximum, doesn't allow writing.
"""
if event.type() == QtCore.QEvent.KeyPress and obj is self.plain_text_edit:
text = self.plain_text_edit.toPlainText()
if event.key() == QtCore.Qt.Key_Return and self.plain_text_edit.hasFocus():
self.send()
return True
elif len(text) > self.max_text_len:
text = text[:self.max_text_len]
self.plain_text_edit.setPlainText(text)
cursor = self.plain_text_edit.textCursor()
cursor.setPosition(self.max_text_len)
self.plain_text_edit.setTextCursor(cursor)
return True
return super().eventFilter(obj, event)
def closeEvent(self, event):
"""
        Shows a question message box asking whether to close the messenger.
        If the user confirms, marks the user as logged out and closes the messenger.
        Otherwise, ignores the close event.
:param event: event to close the messenger
"""
reply = QMessageBox.question(self, 'Quit', self.message_box_text["close"],
QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)
# User closes the messenger and is logged in.
if reply == QMessageBox.Yes and self.stacked_widget.currentIndex() == 2:
try:
post(
f'http://{self.server_IP}/logout',
json={"username": self.username}, verify=False
)
except exceptions.RequestException as e:
raise SystemExit
event.accept()
# User closes the messenger and is logged out.
elif reply == QMessageBox.Yes:
event.accept()
else:
event.ignore()
def logout(self):
"""
        Shows a question message box asking whether to log out of the account.
        If the user confirms, marks the user as logged out and navigates to the login form.
        Otherwise, ignores the logout request.
"""
reply = QMessageBox.question(self, 'Logout', self.message_box_text["logout"],
QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)
if reply == QMessageBox.Yes:
try:
post(
f'http://{self.server_IP}/logout',
json={"username": self.username}, verify=False
)
except exceptions.RequestException as e:
self.show_server_off_box()
self.clear_user_data()
return
self.go_to_login_form()
self.clear_user_data()
self.action_logout.setEnabled(False)
self.action_commands.setEnabled(False)
self.action_preferences.setEnabled(True)
else:
return
def modify_password_lines(self):
"""Modifies and appears password lines."""
geometry = QtCore.QRect(60, 200, 291, 41)
font = QFont()
font.setPointSize(14)
self.password_line1.setGeometry(geometry)
self.password_line1.setFont(font)
self.password_line1.setEchoMode(QtWidgets.QLineEdit.Password)
self.password_line1.setObjectName("password_line1")
self.password_line1.setPlaceholderText(self.translate("Messenger", "Password"))
self.password_line2.setGeometry(geometry)
self.password_line2.setFont(font)
self.password_line2.setEchoMode(QtWidgets.QLineEdit.Password)
self.password_line2.setObjectName("password_line2")
self.password_line2.setPlaceholderText(self.translate("Messenger", "Enter Your Password"))
def open_preferences_window(self):
"""Opens settings window."""
settings = Preferences(self)
if settings.exec():
self.server_IP = settings.server_IP.text()
def clear_user_data(self):
"""Clears user data after logout."""
self.username = None
self.plain_text_edit.clear()
self.text_browser.clear()
self.last_message_time = 0
def reload(self):
"""Reloads all messages and deletes commands output."""
self.text_browser.clear()
self.last_message_time = 0
def go_to_registration_form(self):
"""Navigates to registration menu."""
self.stacked_widget.setCurrentIndex(1)
def go_to_login_form(self):
"""Navigates to login menu."""
self.stacked_widget.setCurrentIndex(0)
def go_to_chat(self):
"""Navigates to chat."""
self.get_server_commands()
self.stacked_widget.setCurrentIndex(2)
self.action_logout.setEnabled(True)
self.action_commands.setEnabled(True)
self.action_preferences.setEnabled(False)
self.plain_text_edit.setFocus()
self.clear_credentials()
def clear_credentials(self):
"""Clears login and password lines after log in or sign up."""
self.password_line1.clear()
self.login_line1.clear()
self.password_line2.clear()
self.login_line2.clear()
self.password = None
def show_about_box(self):
"""Shows message box with content about messenger."""
QMessageBox.information(self, 'About', self.message_box_text["about"])
def show_contacts_box(self):
"""Shows message box with contacts information."""
QMessageBox.information(self, 'Contacts', self.message_box_text["contacts"])
def show_server_off_box(self):
"""Shows message box about server off information."""
QMessageBox.critical(self, 'Opsss...', self.message_box_text["server_is_off"])
self.go_to_login_form()
def show_shortcuts_box(self):
"""Shows message box with shortcuts."""
QMessageBox.information(self, 'Shortcuts', self.message_box_text["shortcuts"])
def show_commands_box(self):
"""Shows message box with available commands."""
output = help_client(self.client_commands, self.server_commands, [])
output = output.replace('=', '')
QMessageBox.information(self, 'Commands', output)
def sign_up_user(self):
"""
Registers user.
Verifies correctness of login and password input.
Sends request to sign up user.
"""
# Clear registration form.
self.login_error2.setText(self.translate("Messenger", self.warning_messages['empty_str']))
self.password_error2.setText(self.translate("Messenger", self.warning_messages['empty_str']))
self.login_line2.setStyleSheet("border: 1px solid #B8B5B2")
self.password_line2.setStyleSheet("border: <PASSWORD>")
self.username = self.login_line2.text()
self.password = self.password_line2.text()
# Check that form isn't empty.
if not self.username:
if not self.password:
self.login_error2.setText(self.translate("Messenger", self.warning_messages['login_required']))
self.password_error2.setText(self.translate("Messenger", self.warning_messages['password_required']))
self.login_line2.setStyleSheet("border: 1px solid red")
self.password_line2.setStyleSheet("border: 1px solid red")
return
else:
self.login_error2.setText(self.translate("Messenger", self.warning_messages['login_required']))
self.login_line2.setStyleSheet("border: 1px solid red")
return
else:
if not self.password:
self.password_error2.setText(self.translate("Messenger", self.warning_messages['password_required']))
self.password_line2.setStyleSheet("border: 1px solid red")
return
if not self.username.isalnum():
self.login_error2.setText(self.translate("Messenger", self.warning_messages['not_alphanumeric']))
self.login_error2.adjustSize()
self.login_line2.setStyleSheet("border: 1px solid red")
return
try:
response = post(
f'http://{self.server_IP}/sign_up',
auth=(self.username, self.password),
verify=False
)
except exceptions.RequestException as e:
self.show_server_off_box()
self.clear_credentials()
return
# Process bad request.
if response.json()['login_out_of_range']:
self.login_error2.setText(self.translate("Messenger", self.warning_messages['login_out_of_range']))
self.login_error2.adjustSize()
self.login_line2.setStyleSheet("border: 1px solid red")
return
elif response.json()['password_out_of_range']:
self.password_error2.setText(self.translate("Messenger", self.warning_messages['password_out_of_range']))
self.password_error2.adjustSize()
self.password_line2.setStyleSheet("border: 1px solid red")
return
elif not response.json()['ok']:
self.login_error2.setText(self.translate("Messenger", self.warning_messages['registered']))
self.login_error2.adjustSize()
self.login_line2.setStyleSheet("border: 1px solid red")
return
self.go_to_chat()
def login_user(self):
"""
Allows user to log in.
Verifies correctness of login and password input.
Sends request to authenticate user.
"""
# Clear login form.
self.login_error1.setText(self.translate("Messenger", self.warning_messages['empty_str']))
self.password_error1.setText(self.translate("Messenger", self.warning_messages['empty_str']))
self.login_line1.setStyleSheet("border: 1px solid #B8B5B2")
self.password_line1.setStyleSheet("border: 1px solid #B8B5B2")
self.username = self.login_line1.text()
self.password = self.password_line1.text()
# Check that form isn't empty.
if not self.username:
if not self.password:
self.login_error1.setText(self.translate("Messenger", self.warning_messages['login_required']))
self.password_error1.setText(self.translate("Messenger", self.warning_messages['password_required']))
self.login_line1.setStyleSheet("border: 1px solid red")
self.password_line1.setStyleSheet("border: 1px solid red")
return
else:
self.login_error1.setText(self.translate("Messenger", self.warning_messages['login_required']))
self.login_line1.setStyleSheet("border: 1px solid red")
return
else:
if not self.password:
self.password_error1.setText(self.translate("Messenger", self.warning_messages['password_required']))
self.password_line1.setStyleSheet("border: 1px solid red")
return
try:
response = post(
f'http://{self.server_IP}/auth',
auth=(self.username, self.password),
verify=False
)
except exceptions.RequestException as e:
self.show_server_off_box()
self.clear_credentials()
return
# Process bad request.
if not response.json()['exist']:
self.login_error1.setText(self.translate("Messenger", self.warning_messages['invalid_login']))
self.login_line1.setStyleSheet("border: 1px solid red")
return
if not response.json()['match']:
self.password_error1.setText(self.translate("Messenger", self.warning_messages['invalid_password']))
self.password_line1.setStyleSheet("border: 1px solid red")
return
if response.json()['banned']:
self.login_error1.setText(self.translate("Messenger", self.warning_messages['banned']))
self.login_line1.setStyleSheet("border: 1px solid red")
return
self.go_to_chat()
def get_server_commands(self):
"""Sends request to get available server-side commands for user."""
try:
response = post(
f'http://{self.server_IP}/command',
json={"username": self.username, "command": 'help'}, verify=False
)
except exceptions.RequestException as e:
self.clear_user_data()
self.show_server_off_box()
return
if not response.json()['ok']:
self.show_text(response.json()['output'] + "<br>")
self.plain_text_edit.clear()
return
self.server_commands = response.json()['output']
# Connect command name with function.
for cmd in self.server_commands:
if cmd['name'] != 'help': self.run_server_command[f"{cmd['name']}"] = globals()[cmd['name']]
def send(self):
"""Separates and directs messages & commands to relevant function."""
self.plain_text_edit.setFocus()
text = self.plain_text_edit.toPlainText()
text = text.strip()
# Validate text don't execute HTML.
text = text.replace('</', '')
text = text.replace('<', '')
text = text.replace('>', '')
if len(text) > self.max_text_len:
text = text[:self.max_text_len]
if not text:
return
elif text.startswith('/'):
self.send_command(text[1:])
else:
self.send_message(text)
def send_message(self, text):
"""
Stores message on the server.
:param text: text of message
"""
try:
post(
f'http://{self.server_IP}/send_message',
json={"username": self.username, "text": text},
verify=False
)
except exceptions.RequestException as e:
self.clear_user_data()
self.show_server_off_box()
return
self.plain_text_edit.clear()
self.plain_text_edit.repaint()
def send_command(self, cmd_string):
"""
Executes command.
If it's client-side command, executes directly from client.
If it's server-side command, sends command to execute
on the server and processes the output.
:param cmd_string: command with parameters to execute
"""
command = cmd_string.split()[0]
args = cmd_string.split()[1:] if len(cmd_string) > 1 else None
# Run client-side command.
if command in [cmd['name'] for cmd in self.client_commands]:
self.run_client_command.get(command)()
self.plain_text_edit.clear()
return
# Invalid command name.
elif command not in [cmd['name'] for cmd in self.server_commands]:
self.show_text(f"<b>Error:</b> Command '/{command}' not found.<br>"
f"Try '/help' to list all available commands :)<br>")
self.plain_text_edit.clear()
return
# Process 'help' command.
elif command == 'help':
output = help_client(self.client_commands, self.server_commands, args)
self.show_text(output)
self.plain_text_edit.clear()
return
try:
response = post(
f'http://{self.server_IP}/command',
json={"username": self.username, "command": cmd_string}, verify=False
)
except exceptions.RequestException as e:
self.clear_user_data()
self.show_server_off_box()
return
if not response.json()['ok']:
self.show_text("<b>Error:</b> " + response.json()['output'] + "<br>")
self.plain_text_edit.clear()
return
# Assign command function & run it with output from server.
run_command = self.run_server_command.get(command)
output = run_command(response.json()['output'], args)
self.show_text(output)
self.plain_text_edit.clear()
self.plain_text_edit.repaint()
def get_messages(self):
"""Sends request to get new messages and appears them in style."""
if not self.stacked_widget.currentIndex() == 2:
return
try:
response = get(
f'http://{self.server_IP}/get_messages',
params={'after': self.last_message_time},
verify=False
)
data = response.json()
except exceptions.RequestException as e:
self.clear_user_data()
self.show_server_off_box()
return
# Generate message.
for message in data['messages']:
# float -> datetime.
beauty_time = datetime.fromtimestamp(message['time'])
beauty_time = beauty_time.strftime('%d/%m %H:%M:%S')
# User will see his messages from the right side.
if message['username'] == self.username:
self.show_text(self.message_style['begin'] + beauty_time + ' ' + message['username']
+ self.message_style['middle'] + message['text'] + self.message_style['end'])
self.last_message_time = message['time']
else:
self.show_text(message['username'] + ' ' + beauty_time)
self.show_text(message['text'] + "<br>")
self.last_message_time = message['time']
def get_status(self):
"""Sends request to get server status."""
try:
response = get(
f'http://{self.server_IP}/status',
verify=False
)
status = response.json()
# Server is off.
except exceptions.RequestException as e:
self.server_status.setText(self.translate("Messenger", '<p style="font-size:12px">'
'<img src="images/server-is-off.png"> Offline</p>'))
tool_tip = f"Can't connect to the server<br>" \
f"Maybe server isn't run or you've entered an invalid IP address in Preferences"
self.server_status.setToolTip(tool_tip)
return
# Server is on.
self.server_status.setText(self.translate("Messenger", '<p style="font-size:12px">'
'<img src="images/server-is-on.png"> Online</p>'))
tool_tip = f"Server is working<br>" \
f"Users online: {status['users_online']}<br>" \
f"Date and time: {status['time']}<br>" \
f"Registered users: {status['users_count']}<br>" \
f"Written messages: {status['messages_count']}"
self.server_status.setToolTip(tool_tip)
def show_text(self, text):
"""Shows given text in messenger chat."""
self.text_browser.append(text)
self.text_browser.repaint()
app = QtWidgets.QApplication([])
window = Messenger()
app.setStyleSheet(load_stylesheet())
window.show()
app.exec_()
| en | 0.800161 | The messenger object acts as the main object and is managed by client. Shows UI and is responsible for UX. UI is separated on 3 main parts, which have their indexes: 0 - Login form, 1 - Registration form, 2 - Chat. Every 5 seconds requests server status. Every second shows new messages, if user logged in. Under main label "Python Messenger" there is server status, which displays whether server is working, if yes, you can hover on it to see full server status. In case of disconnection from server it'll show server-off message and navigate to login form. It's possible to change server IP address in preferences menu. :param translate: properly shows all content :param password_line1: input line with icons to show/hide password entries on login form :param password_line2: input line with icons to show/hide password entries on registration form :param username: user nickname string :param password: <PASSWORD> :param last_message_time: last time of getting messages, defaults to 0 :param max_text_len: maximum text message length to send in chat, defaults to 250 :param server_IP: server IPv4 string :param message_style: style for messages defined in :func:`get_message_style` :param warning_messages: dict of warning messages defined in :func:`get_warning_messages` :param message_box_text: dict of content for message box defined in :func:`get_message_box_text` :param client_commands: list of dicts with client-side commands defined in :func:`get_client_commands` :param run_client_command: dict, where key is the name of client command and value is the function of this command :param server_commands: list of dicts with server-side commands defined in :func:`get_server_commands` :param run_server_command: dict, where key is the name of server command and value is the function of this command :param timer_get_messages: timer, which every second runs :func:`get_messages` :param timer_get_status: timer, which every 5 seconds runs :func:`get_status` Initialize messenger object. # Connect buttons to the methods. # Connect actions to the methods. # Filter shortcuts and text overflow. # Load client content. # Load commands. Filters Enter key press and message text length. If Enter key pressed, sends user's message. If length of message is above maximum, doesn't allow writing. Shows question message box for acception or ignoring to close the messenger. Asks user does he really wants to close the messenger, if yes, than marks logout of user and closes the messenger. Otherwise, ignores closing messenger event. :param event: event to close the messenger # User closes the messenger and is logged in. # User closes the messenger and is logged out. Shows question message box for acception or ignoring to log out from account. Asks user does he really wants to log out, if yes, than marks logout and navigates to login form. Otherwise, ignores logout event. Modifies and appears password lines. Opens settings window. Clears user data after logout. Reloads all messages and deletes commands output. Navigates to registration menu. Navigates to login menu. Navigates to chat. Clears login and password lines after log in or sign up. Shows message box with content about messenger. Shows message box with contacts information. Shows message box about server off information. Shows message box with shortcuts. Shows message box with available commands. Registers user. Verifies correctness of login and password input. Sends request to sign up user. # Clear registration form. #B8B5B2") # Check that form isn't empty. 
# Process bad request. Allows user to log in. Verifies correctness of login and password input. Sends request to authenticate user. # Clear login form. #B8B5B2") #B8B5B2") # Check that form isn't empty. # Process bad request. Sends request to get available server-side commands for user. # Connect command name with function. Separates and directs messages & commands to relevant function. # Validate text don't execute HTML. Stores message on the server. :param text: text of message Executes command. If it's client-side command, executes directly from client. If it's server-side command, sends command to execute on the server and processes the output. :param cmd_string: command with parameters to execute # Run client-side command. # Invalid command name. # Process 'help' command. # Assign command function & run it with output from server. Sends request to get new messages and appears them in style. # Generate message. # float -> datetime. # User will see his messages from the right side. Sends request to get server status. # Server is off. # Server is on. Shows given text in messenger chat. | 2.515322 | 3 |
oscar/lib/python2.7/site-packages/prompt_toolkit/utils.py | sainjusajan/django-oscar | 0 | 7682 | <gh_stars>0
from __future__ import unicode_literals
import inspect
import os
import signal
import sys
import threading
import weakref
from wcwidth import wcwidth
from six.moves import range
__all__ = (
'Event',
'DummyContext',
'get_cwidth',
'suspend_to_background_supported',
'is_conemu_ansi',
'is_windows',
'in_main_thread',
'take_using_weights',
'test_callable_args',
)
class Event(object):
"""
Simple event to which event handlers can be attached. For instance::
class Cls:
def __init__(self):
# Define event. The first parameter is the sender.
self.event = Event(self)
obj = Cls()
def handler(sender):
pass
# Add event handler by using the += operator.
obj.event += handler
# Fire event.
obj.event()
"""
def __init__(self, sender, handler=None):
self.sender = sender
self._handlers = []
if handler is not None:
self += handler
def __call__(self):
" Fire event. "
for handler in self._handlers:
handler(self.sender)
def fire(self):
" Alias for just calling the event. "
self()
def __iadd__(self, handler):
"""
Add another handler to this callback.
(Handler should be a callable that takes exactly one parameter: the
sender object.)
"""
# Test handler.
assert callable(handler)
if not test_callable_args(handler, [None]):
raise TypeError("%r doesn't take exactly one argument." % handler)
# Add to list of event handlers.
self._handlers.append(handler)
return self
def __isub__(self, handler):
"""
Remove a handler from this callback.
"""
self._handlers.remove(handler)
return self
# Cache of signatures. Improves the performance of `test_callable_args`.
_signatures_cache = weakref.WeakKeyDictionary()
def test_callable_args(func, args):
"""
Return True when this function can be called with the given arguments.
"""
assert isinstance(args, (list, tuple))
signature = getattr(inspect, 'signature', None)
if signature is not None:
# For Python 3, use inspect.signature.
try:
sig = _signatures_cache[func]
except KeyError:
sig = signature(func)
_signatures_cache[func] = sig
try:
sig.bind(*args)
except TypeError:
return False
else:
return True
else:
# For older Python versions, fall back to using getargspec.
spec = inspect.getargspec(func)
# Drop the 'self'
def drop_self(spec):
args, varargs, varkw, defaults = spec
if args[0:1] == ['self']:
args = args[1:]
return inspect.ArgSpec(args, varargs, varkw, defaults)
spec = drop_self(spec)
# When taking *args, always return True.
if spec.varargs is not None:
return True
# Test whether the given amount of args is between the min and max
# accepted argument counts.
return len(spec.args) - len(spec.defaults or []) <= len(args) <= len(spec.args)
class DummyContext(object):
"""
(contextlib.nested is not available on Py3)
"""
def __enter__(self):
pass
def __exit__(self, *a):
pass
class _CharSizesCache(dict):
"""
Cache for wcwidth sizes.
"""
def __missing__(self, string):
# Note: We use the `max(0, ...` because some non printable control
# characters, like e.g. Ctrl-underscore get a -1 wcwidth value.
# It can be possible that these characters end up in the input
# text.
if len(string) == 1:
result = max(0, wcwidth(string))
else:
result = sum(max(0, wcwidth(c)) for c in string)
# Cache for short strings.
# (It's hard to tell what we can consider short...)
if len(string) < 256:
self[string] = result
return result
_CHAR_SIZES_CACHE = _CharSizesCache()
def get_cwidth(string):
"""
Return width of a string. Wrapper around ``wcwidth``.
"""
return _CHAR_SIZES_CACHE[string]
def suspend_to_background_supported():
"""
Returns `True` when the Python implementation supports
suspend-to-background. This is typically `False' on Windows systems.
"""
return hasattr(signal, 'SIGTSTP')
def is_windows():
"""
True when we are using Windows.
"""
return sys.platform.startswith('win') # E.g. 'win32', not 'darwin' or 'linux2'
def is_conemu_ansi():
"""
True when the ConEmu Windows console is used.
"""
return is_windows() and os.environ.get('ConEmuANSI', 'OFF') == 'ON'
def in_main_thread():
"""
True when the current thread is the main thread.
"""
return threading.current_thread().__class__.__name__ == '_MainThread'
def take_using_weights(items, weights):
"""
Generator that keeps yielding items from the items list, in proportion to
their weight. For instance::
# Getting the first 70 items from this generator should have yielded 10
# times A, 20 times B and 40 times C, all distributed equally..
take_using_weights(['A', 'B', 'C'], [5, 10, 20])
:param items: List of items to take from.
:param weights: Integers representing the weight. (Numbers have to be
integers, not floats.)
"""
assert isinstance(items, list)
assert isinstance(weights, list)
assert all(isinstance(i, int) for i in weights)
assert len(items) == len(weights)
assert len(items) > 0
already_taken = [0 for i in items]
item_count = len(items)
max_weight = max(weights)
i = 0
while True:
# Each iteration of this loop, we fill up until by (total_weight/max_weight).
adding = True
while adding:
adding = False
for item_i, item, weight in zip(range(item_count), items, weights):
if already_taken[item_i] < i * weight / float(max_weight):
yield item
already_taken[item_i] += 1
adding = True
i += 1
| from __future__ import unicode_literals
import inspect
import os
import signal
import sys
import threading
import weakref
from wcwidth import wcwidth
from six.moves import range
__all__ = (
'Event',
'DummyContext',
'get_cwidth',
'suspend_to_background_supported',
'is_conemu_ansi',
'is_windows',
'in_main_thread',
'take_using_weights',
'test_callable_args',
)
class Event(object):
"""
Simple event to which event handlers can be attached. For instance::
class Cls:
def __init__(self):
# Define event. The first parameter is the sender.
self.event = Event(self)
obj = Cls()
def handler(sender):
pass
# Add event handler by using the += operator.
obj.event += handler
# Fire event.
obj.event()
"""
def __init__(self, sender, handler=None):
self.sender = sender
self._handlers = []
if handler is not None:
self += handler
def __call__(self):
" Fire event. "
for handler in self._handlers:
handler(self.sender)
def fire(self):
" Alias for just calling the event. "
self()
def __iadd__(self, handler):
"""
Add another handler to this callback.
(Handler should be a callable that takes exactly one parameter: the
sender object.)
"""
# Test handler.
assert callable(handler)
if not test_callable_args(handler, [None]):
raise TypeError("%r doesn't take exactly one argument." % handler)
# Add to list of event handlers.
self._handlers.append(handler)
return self
def __isub__(self, handler):
"""
Remove a handler from this callback.
"""
self._handlers.remove(handler)
return self
# Cache of signatures. Improves the performance of `test_callable_args`.
_signatures_cache = weakref.WeakKeyDictionary()
def test_callable_args(func, args):
"""
Return True when this function can be called with the given arguments.
"""
assert isinstance(args, (list, tuple))
signature = getattr(inspect, 'signature', None)
if signature is not None:
# For Python 3, use inspect.signature.
try:
sig = _signatures_cache[func]
except KeyError:
sig = signature(func)
_signatures_cache[func] = sig
try:
sig.bind(*args)
except TypeError:
return False
else:
return True
else:
# For older Python versions, fall back to using getargspec.
spec = inspect.getargspec(func)
# Drop the 'self'
def drop_self(spec):
args, varargs, varkw, defaults = spec
if args[0:1] == ['self']:
args = args[1:]
return inspect.ArgSpec(args, varargs, varkw, defaults)
spec = drop_self(spec)
# When taking *args, always return True.
if spec.varargs is not None:
return True
# Test whether the given amount of args is between the min and max
# accepted argument counts.
return len(spec.args) - len(spec.defaults or []) <= len(args) <= len(spec.args)
class DummyContext(object):
"""
(contextlib.nested is not available on Py3)
"""
def __enter__(self):
pass
def __exit__(self, *a):
pass
class _CharSizesCache(dict):
"""
Cache for wcwidth sizes.
"""
def __missing__(self, string):
# Note: We use the `max(0, ...` because some non printable control
# characters, like e.g. Ctrl-underscore get a -1 wcwidth value.
# It can be possible that these characters end up in the input
# text.
if len(string) == 1:
result = max(0, wcwidth(string))
else:
result = sum(max(0, wcwidth(c)) for c in string)
# Cache for short strings.
# (It's hard to tell what we can consider short...)
if len(string) < 256:
self[string] = result
return result
_CHAR_SIZES_CACHE = _CharSizesCache()
def get_cwidth(string):
"""
Return width of a string. Wrapper around ``wcwidth``.
"""
return _CHAR_SIZES_CACHE[string]
def suspend_to_background_supported():
"""
Returns `True` when the Python implementation supports
suspend-to-background. This is typically `False' on Windows systems.
"""
return hasattr(signal, 'SIGTSTP')
def is_windows():
"""
True when we are using Windows.
"""
return sys.platform.startswith('win') # E.g. 'win32', not 'darwin' or 'linux2'
def is_conemu_ansi():
"""
True when the ConEmu Windows console is used.
"""
return is_windows() and os.environ.get('ConEmuANSI', 'OFF') == 'ON'
def in_main_thread():
"""
True when the current thread is the main thread.
"""
return threading.current_thread().__class__.__name__ == '_MainThread'
def take_using_weights(items, weights):
"""
Generator that keeps yielding items from the items list, in proportion to
their weight. For instance::
# Getting the first 70 items from this generator should have yielded 10
# times A, 20 times B and 40 times C, all distributed equally..
take_using_weights(['A', 'B', 'C'], [5, 10, 20])
:param items: List of items to take from.
:param weights: Integers representing the weight. (Numbers have to be
integers, not floats.)
"""
assert isinstance(items, list)
assert isinstance(weights, list)
assert all(isinstance(i, int) for i in weights)
assert len(items) == len(weights)
assert len(items) > 0
already_taken = [0 for i in items]
item_count = len(items)
max_weight = max(weights)
i = 0
while True:
        # Each outer iteration, yield items until each item has been taken
        # i * (weight / max_weight) times.
adding = True
while adding:
adding = False
for item_i, item, weight in zip(range(item_count), items, weights):
if already_taken[item_i] < i * weight / float(max_weight):
yield item
already_taken[item_i] += 1
adding = True
i += 1 | en | 0.783349 | Simple event to which event handlers can be attached. For instance::
class Cls:
def __init__(self):
# Define event. The first parameter is the sender.
self.event = Event(self)
obj = Cls()
def handler(sender):
pass
# Add event handler by using the += operator.
obj.event += handler
# Fire event.
obj.event() Add another handler to this callback.
(Handler should be a callable that takes exactly one parameter: the
sender object.) # Test handler. # Add to list of event handlers. Remove a handler from this callback. # Cache of signatures. Improves the performance of `test_callable_args`. Return True when this function can be called with the given arguments. # For Python 3, use inspect.signature. # For older Python versions, fall back to using getargspec. # Drop the 'self' # When taking *args, always return True. # Test whether the given amount of args is between the min and max # accepted argument counts. (contextlib.nested is not available on Py3) Cache for wcwidth sizes. # Note: We use the `max(0, ...` because some non printable control # characters, like e.g. Ctrl-underscore get a -1 wcwidth value. # It can be possible that these characters end up in the input # text. # Cache for short strings. # (It's hard to tell what we can consider short...) Return width of a string. Wrapper around ``wcwidth``. Returns `True` when the Python implementation supports
suspend-to-background. This is typically `False' on Windows systems. True when we are using Windows. # E.g. 'win32', not 'darwin' or 'linux2' True when the ConEmu Windows console is used. True when the current thread is the main thread. Generator that keeps yielding items from the items list, in proportion to
their weight. For instance::
# Getting the first 70 items from this generator should have yielded 10
# times A, 20 times B and 40 times C, all distributed equally..
take_using_weights(['A', 'B', 'C'], [5, 10, 20])
:param items: List of items to take from.
:param weights: Integers representing the weight. (Numbers have to be
integers, not floats.) # Each iteration of this loop, we fill up until by (total_weight/max_weight). | 2.284971 | 2 |
lmdb/cffi.py | hirnimeshrampuresoftware/py-lmdb | 185 | 7683 | <reponame>hirnimeshrampuresoftware/py-lmdb
#
# Copyright 2013 The py-lmdb authors, all rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted only as authorized by the OpenLDAP
# Public License.
#
# A copy of this license is available in the file LICENSE in the
# top-level directory of the distribution or, alternatively, at
# <http://www.OpenLDAP.org/license.html>.
#
# OpenLDAP is a registered trademark of the OpenLDAP Foundation.
#
# Individual files and/or contributed packages may be copyright by
# other parties and/or subject to additional restrictions.
#
# This work also contains materials derived from public sources.
#
# Additional information about OpenLDAP can be obtained at
# <http://www.openldap.org/>.
#
"""
CPython/CFFI wrapper for OpenLDAP's "Lightning" MDB database.
Please see https://lmdb.readthedocs.io/
"""
from __future__ import absolute_import
from __future__ import with_statement
import errno
import inspect
import os
import sys
import threading
is_win32 = sys.platform == 'win32'
if is_win32:
import msvcrt
try:
import __builtin__
except ImportError:
import builtins as __builtin__ # type: ignore
import lmdb
try:
from lmdb import _config
except ImportError:
_config = None # type: ignore
__all__ = [
'Cursor',
'Environment',
'Transaction',
'_Database',
'enable_drop_gil',
'version',
]
__all__ += [
'BadDbiError',
'BadRslotError',
'BadTxnError',
'BadValsizeError',
'CorruptedError',
'CursorFullError',
'DbsFullError',
'DiskError',
'Error',
'IncompatibleError',
'InvalidError',
'InvalidParameterError',
'KeyExistsError',
'LockError',
'MapFullError',
'MapResizedError',
'MemoryError',
'NotFoundError',
'PageFullError',
'PageNotFoundError',
'PanicError',
'ReadersFullError',
'ReadonlyError',
'TlsFullError',
'TxnFullError',
'VersionMismatchError',
]
# Handle moronic Python 3 mess.
UnicodeType = getattr(__builtin__, 'unicode', str)
BytesType = getattr(__builtin__, 'bytes', str)
O_0755 = int('0755', 8)
O_0111 = int('0111', 8)
EMPTY_BYTES = UnicodeType().encode()
# Used to track context across CFFI callbacks.
_callbacks = threading.local()
_CFFI_CDEF = '''
typedef int mode_t;
typedef ... MDB_env;
typedef struct MDB_txn MDB_txn;
typedef struct MDB_cursor MDB_cursor;
typedef unsigned int MDB_dbi;
enum MDB_cursor_op {
MDB_FIRST,
MDB_FIRST_DUP,
MDB_GET_BOTH,
MDB_GET_BOTH_RANGE,
MDB_GET_CURRENT,
MDB_GET_MULTIPLE,
MDB_LAST,
MDB_LAST_DUP,
MDB_NEXT,
MDB_NEXT_DUP,
MDB_NEXT_MULTIPLE,
MDB_NEXT_NODUP,
MDB_PREV,
MDB_PREV_DUP,
MDB_PREV_NODUP,
MDB_SET,
MDB_SET_KEY,
MDB_SET_RANGE,
...
};
typedef enum MDB_cursor_op MDB_cursor_op;
struct MDB_val {
size_t mv_size;
void *mv_data;
...;
};
typedef struct MDB_val MDB_val;
struct MDB_stat {
unsigned int ms_psize;
unsigned int ms_depth;
size_t ms_branch_pages;
size_t ms_leaf_pages;
size_t ms_overflow_pages;
size_t ms_entries;
...;
};
typedef struct MDB_stat MDB_stat;
struct MDB_envinfo {
void *me_mapaddr;
size_t me_mapsize;
size_t me_last_pgno;
size_t me_last_txnid;
unsigned int me_maxreaders;
unsigned int me_numreaders;
...;
};
typedef struct MDB_envinfo MDB_envinfo;
typedef int (*MDB_cmp_func)(const MDB_val *a, const MDB_val *b);
typedef void (*MDB_rel_func)(MDB_val *item, void *oldptr, void *newptr,
void *relctx);
char *mdb_strerror(int err);
int mdb_env_create(MDB_env **env);
int mdb_env_open(MDB_env *env, const char *path, unsigned int flags,
mode_t mode);
int mdb_env_copy2(MDB_env *env, const char *path, int flags);
int mdb_env_copyfd2(MDB_env *env, int fd, int flags);
int mdb_env_stat(MDB_env *env, MDB_stat *stat);
int mdb_env_info(MDB_env *env, MDB_envinfo *stat);
int mdb_env_get_maxkeysize(MDB_env *env);
int mdb_env_sync(MDB_env *env, int force);
void mdb_env_close(MDB_env *env);
int mdb_env_set_flags(MDB_env *env, unsigned int flags, int onoff);
int mdb_env_get_flags(MDB_env *env, unsigned int *flags);
int mdb_env_get_path(MDB_env *env, const char **path);
int mdb_env_set_mapsize(MDB_env *env, size_t size);
int mdb_env_set_maxreaders(MDB_env *env, unsigned int readers);
int mdb_env_get_maxreaders(MDB_env *env, unsigned int *readers);
int mdb_env_set_maxdbs(MDB_env *env, MDB_dbi dbs);
int mdb_txn_begin(MDB_env *env, MDB_txn *parent, unsigned int flags,
MDB_txn **txn);
int mdb_txn_commit(MDB_txn *txn);
void mdb_txn_reset(MDB_txn *txn);
int mdb_txn_renew(MDB_txn *txn);
void mdb_txn_abort(MDB_txn *txn);
size_t mdb_txn_id(MDB_txn *txn);
int mdb_dbi_open(MDB_txn *txn, const char *name, unsigned int flags,
MDB_dbi *dbi);
int mdb_stat(MDB_txn *txn, MDB_dbi dbi, MDB_stat *stat);
int mdb_drop(MDB_txn *txn, MDB_dbi dbi, int del_);
int mdb_get(MDB_txn *txn, MDB_dbi dbi, MDB_val *key, MDB_val *data);
int mdb_cursor_open(MDB_txn *txn, MDB_dbi dbi, MDB_cursor **cursor);
void mdb_cursor_close(MDB_cursor *cursor);
int mdb_cursor_del(MDB_cursor *cursor, unsigned int flags);
int mdb_cursor_count(MDB_cursor *cursor, size_t *countp);
int mdb_cursor_get(MDB_cursor *cursor, MDB_val *key, MDB_val*data, int op);
typedef int (MDB_msg_func)(const char *msg, void *ctx);
int mdb_reader_list(MDB_env *env, MDB_msg_func *func, void *ctx);
int mdb_reader_check(MDB_env *env, int *dead);
int mdb_dbi_flags(MDB_txn *txn, MDB_dbi dbi, unsigned int *flags);
#define MDB_VERSION_MAJOR ...
#define MDB_VERSION_MINOR ...
#define MDB_VERSION_PATCH ...
#define EACCES ...
#define EAGAIN ...
#define EINVAL ...
#define ENOMEM ...
#define ENOSPC ...
#define MDB_BAD_RSLOT ...
#define MDB_BAD_DBI ...
#define MDB_BAD_TXN ...
#define MDB_BAD_VALSIZE ...
#define MDB_CORRUPTED ...
#define MDB_CURSOR_FULL ...
#define MDB_DBS_FULL ...
#define MDB_INCOMPATIBLE ...
#define MDB_INVALID ...
#define MDB_KEYEXIST ...
#define MDB_MAP_FULL ...
#define MDB_MAP_RESIZED ...
#define MDB_NOTFOUND ...
#define MDB_PAGE_FULL ...
#define MDB_PAGE_NOTFOUND ...
#define MDB_PANIC ...
#define MDB_READERS_FULL ...
#define MDB_TLS_FULL ...
#define MDB_TXN_FULL ...
#define MDB_VERSION_MISMATCH ...
#define MDB_APPEND ...
#define MDB_APPENDDUP ...
#define MDB_CP_COMPACT ...
#define MDB_CREATE ...
#define MDB_DUPFIXED ...
#define MDB_DUPSORT ...
#define MDB_INTEGERDUP ...
#define MDB_INTEGERKEY ...
#define MDB_MAPASYNC ...
#define MDB_NODUPDATA ...
#define MDB_NOLOCK ...
#define MDB_NOMEMINIT ...
#define MDB_NOMETASYNC ...
#define MDB_NOOVERWRITE ...
#define MDB_NORDAHEAD ...
#define MDB_NOSUBDIR ...
#define MDB_NOSYNC ...
#define MDB_NOTLS ...
#define MDB_RDONLY ...
#define MDB_REVERSEKEY ...
#define MDB_WRITEMAP ...
// Helpers below inline MDB_vals. Avoids key alloc/dup on CPython, where
// CFFI will use PyString_AS_STRING when passed as an argument.
static int pymdb_del(MDB_txn *txn, MDB_dbi dbi,
char *key_s, size_t keylen,
char *val_s, size_t vallen);
static int pymdb_put(MDB_txn *txn, MDB_dbi dbi,
char *key_s, size_t keylen,
char *val_s, size_t vallen,
unsigned int flags);
static int pymdb_get(MDB_txn *txn, MDB_dbi dbi,
char *key_s, size_t keylen,
MDB_val *val_out);
static int pymdb_cursor_get(MDB_cursor *cursor,
char *key_s, size_t key_len,
char *data_s, size_t data_len,
MDB_val *key, MDB_val *data, int op);
static int pymdb_cursor_put(MDB_cursor *cursor,
char *key_s, size_t keylen,
char *val_s, size_t vallen, int flags);
// Prefaults a range
static void preload(int rc, void *x, size_t size);
'''
_CFFI_CDEF_PATCHED = '''
int mdb_env_copy3(MDB_env *env, const char *path, unsigned int flags, MDB_txn *txn);
int mdb_env_copyfd3(MDB_env *env, int fd, unsigned int flags, MDB_txn *txn);
'''
_CFFI_VERIFY = '''
#include <sys/stat.h>
#include "lmdb.h"
#include "preload.h"
// Helpers below inline MDB_vals. Avoids key alloc/dup on CPython, where
// CFFI will use PyString_AS_STRING when passed as an argument.
static int pymdb_get(MDB_txn *txn, MDB_dbi dbi, char *key_s, size_t keylen,
MDB_val *val_out)
{
MDB_val key = {keylen, key_s};
int rc = mdb_get(txn, dbi, &key, val_out);
return rc;
}
static int pymdb_put(MDB_txn *txn, MDB_dbi dbi, char *key_s, size_t keylen,
char *val_s, size_t vallen, unsigned int flags)
{
MDB_val key = {keylen, key_s};
MDB_val val = {vallen, val_s};
return mdb_put(txn, dbi, &key, &val, flags);
}
static int pymdb_del(MDB_txn *txn, MDB_dbi dbi, char *key_s, size_t keylen,
char *val_s, size_t vallen)
{
MDB_val key = {keylen, key_s};
MDB_val val = {vallen, val_s};
MDB_val *valptr;
if(vallen == 0) {
valptr = NULL;
} else {
valptr = &val;
}
return mdb_del(txn, dbi, &key, valptr);
}
static int pymdb_cursor_get(MDB_cursor *cursor,
char *key_s, size_t key_len,
char *data_s, size_t data_len,
MDB_val *key, MDB_val *data, int op)
{
MDB_val tmp_key = {key_len, key_s};
MDB_val tmp_data = {data_len, data_s};
int rc = mdb_cursor_get(cursor, &tmp_key, &tmp_data, op);
if(! rc) {
*key = tmp_key;
*data = tmp_data;
}
return rc;
}
static int pymdb_cursor_put(MDB_cursor *cursor, char *key_s, size_t keylen,
char *val_s, size_t vallen, int flags)
{
MDB_val tmpkey = {keylen, key_s};
MDB_val tmpval = {vallen, val_s};
return mdb_cursor_put(cursor, &tmpkey, &tmpval, flags);
}
'''
if not lmdb._reading_docs():
import cffi
# Try to use distutils-bundled CFFI configuration to avoid a recompile and
# potential compile errors during first module import.
_config_vars = _config.CONFIG if _config else {
'extra_compile_args': ['-w'],
'extra_sources': ['lib/mdb.c', 'lib/midl.c'],
'extra_include_dirs': ['lib'],
'extra_library_dirs': [],
'libraries': []
}
_have_patched_lmdb = '-DHAVE_PATCHED_LMDB=1' in _config.CONFIG['extra_compile_args'] # type: ignore
if _have_patched_lmdb:
_CFFI_CDEF += _CFFI_CDEF_PATCHED
_ffi = cffi.FFI()
_ffi.cdef(_CFFI_CDEF)
_lib = _ffi.verify(_CFFI_VERIFY,
modulename='lmdb_cffi',
ext_package='lmdb',
sources=_config_vars['extra_sources'],
extra_compile_args=_config_vars['extra_compile_args'],
include_dirs=_config_vars['extra_include_dirs'],
libraries=_config_vars['libraries'],
library_dirs=_config_vars['extra_library_dirs'])
@_ffi.callback("int(char *, void *)")
def _msg_func(s, _):
"""mdb_msg_func() callback. Appends `s` to _callbacks.msg_func list.
"""
_callbacks.msg_func.append(_ffi.string(s).decode())
return 0
class Error(Exception):
"""Raised when an LMDB-related error occurs, and no more specific
:py:class:`lmdb.Error` subclass exists."""
def __init__(self, what, code=0):
self.what = what
self.code = code
self.reason = _ffi.string(_lib.mdb_strerror(code))
msg = what
if code:
msg = '%s: %s' % (what, self.reason)
hint = getattr(self, 'MDB_HINT', None)
if hint:
msg += ' (%s)' % (hint,)
Exception.__init__(self, msg)
class KeyExistsError(Error):
"""Key/data pair already exists."""
MDB_NAME = 'MDB_KEYEXIST'
class NotFoundError(Error):
"""No matching key/data pair found.
Normally py-lmdb indicates a missing key by returning ``None``, or a
    user-supplied default value; however, LMDB may return this error where
py-lmdb does not know to convert it into a non-exceptional return.
"""
MDB_NAME = 'MDB_NOTFOUND'
class PageNotFoundError(Error):
"""Request page not found."""
MDB_NAME = 'MDB_PAGE_NOTFOUND'
class CorruptedError(Error):
"""Located page was of the wrong type."""
MDB_NAME = 'MDB_CORRUPTED'
class PanicError(Error):
"""Update of meta page failed."""
MDB_NAME = 'MDB_PANIC'
class VersionMismatchError(Error):
"""Database environment version mismatch."""
MDB_NAME = 'MDB_VERSION_MISMATCH'
class InvalidError(Error):
"""File is not an MDB file."""
MDB_NAME = 'MDB_INVALID'
class MapFullError(Error):
"""Environment map_size= limit reached."""
MDB_NAME = 'MDB_MAP_FULL'
MDB_HINT = 'Please use a larger Environment(map_size=) parameter'
class DbsFullError(Error):
"""Environment max_dbs= limit reached."""
MDB_NAME = 'MDB_DBS_FULL'
MDB_HINT = 'Please use a larger Environment(max_dbs=) parameter'
class ReadersFullError(Error):
"""Environment max_readers= limit reached."""
MDB_NAME = 'MDB_READERS_FULL'
MDB_HINT = 'Please use a larger Environment(max_readers=) parameter'
class TlsFullError(Error):
"""Thread-local storage keys full - too many environments open."""
MDB_NAME = 'MDB_TLS_FULL'
class TxnFullError(Error):
"""Transaciton has too many dirty pages - transaction too big."""
MDB_NAME = 'MDB_TXN_FULL'
MDB_HINT = 'Please do less work within your transaction'
class CursorFullError(Error):
"""Internal error - cursor stack limit reached."""
MDB_NAME = 'MDB_CURSOR_FULL'
class PageFullError(Error):
"""Internal error - page has no more space."""
MDB_NAME = 'MDB_PAGE_FULL'
class MapResizedError(Error):
"""Database contents grew beyond environment map_size=."""
MDB_NAME = 'MDB_MAP_RESIZED'
class IncompatibleError(Error):
"""Operation and DB incompatible, or DB flags changed."""
MDB_NAME = 'MDB_INCOMPATIBLE'
class BadRslotError(Error):
"""Invalid reuse of reader locktable slot."""
MDB_NAME = 'MDB_BAD_RSLOT'
class BadDbiError(Error):
"""The specified DBI was changed unexpectedly."""
MDB_NAME = 'MDB_BAD_DBI'
class BadTxnError(Error):
"""Transaction cannot recover - it must be aborted."""
MDB_NAME = 'MDB_BAD_TXN'
class BadValsizeError(Error):
"""Too big key/data, key is empty, or wrong DUPFIXED size."""
MDB_NAME = 'MDB_BAD_VALSIZE'
class ReadonlyError(Error):
"""An attempt was made to modify a read-only database."""
MDB_NAME = 'EACCES'
class InvalidParameterError(Error):
"""An invalid parameter was specified."""
MDB_NAME = 'EINVAL'
class LockError(Error):
"""The environment was locked by another process."""
MDB_NAME = 'EAGAIN'
class MemoryError(Error):
"""Out of memory."""
MDB_NAME = 'ENOMEM'
class DiskError(Error):
"""No more disk space."""
MDB_NAME = 'ENOSPC'
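# Example (illustrative sketch): the exception classes above let callers
# handle specific LMDB failures separately from the generic case; `env`,
# `key`, `value` and `new_larger_size` below are hypothetical.
#
#   try:
#       with env.begin(write=True) as txn:
#           txn.put(key, value)
#   except MapFullError:
#       env.set_mapsize(new_larger_size)  # Or reopen with a bigger map_size=.
#   except Error:
#       raise                             # Any other LMDB-related failure.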
# Prepare _error_map, a mapping of integer MDB_ERROR_CODE to exception class.
if not lmdb._reading_docs():
_error_map = {}
for obj in list(globals().values()):
if inspect.isclass(obj) and issubclass(obj, Error) and obj is not Error:
_error_map[getattr(_lib, obj.MDB_NAME)] = obj
del obj
def _error(what, rc):
"""Lookup and instantiate the correct exception class for the error code
`rc`, using :py:class:`Error` if no better class exists."""
return _error_map.get(rc, Error)(what, rc)
class Some_LMDB_Resource_That_Was_Deleted_Or_Closed(object):
"""We need this because CFFI on PyPy treats None as cffi.NULL, instead of
throwing an exception it feeds LMDB null pointers. That means simply
replacing native handles with None during _invalidate() will cause NULL
pointer dereferences. Instead use this class, and its weird name to cause a
TypeError, with a very obvious string in the exception text.
The only alternatives to this are inserting a check around every single use
of a native handle to ensure the handle is still valid prior to calling
LMDB, or doing no crash-safety checking at all.
"""
def __nonzero__(self):
return 0
def __bool__(self):
return False
def __repr__(self):
return "<This used to be a LMDB resource but it was deleted or closed>"
_invalid = Some_LMDB_Resource_That_Was_Deleted_Or_Closed()
def _mvbuf(mv):
"""Convert a MDB_val cdata to a CFFI buffer object."""
return _ffi.buffer(mv.mv_data, mv.mv_size)
def _mvstr(mv):
"""Convert a MDB_val cdata to Python bytes."""
return _ffi.buffer(mv.mv_data, mv.mv_size)[:]
def preload(mv):
_lib.preload(0, mv.mv_data, mv.mv_size)
def enable_drop_gil():
"""Deprecated."""
def version(subpatch=False):
"""
Return a tuple of integers `(major, minor, patch)` describing the LMDB
library version that the binding is linked against. The version of the
binding itself is available from ``lmdb.__version__``.
`subpatch`:
If true, returns a 4 integer tuple consisting of the same plus
an extra integer that represents any patches applied by py-lmdb
itself (0 representing no patches).
"""
if subpatch:
return (_lib.MDB_VERSION_MAJOR,
_lib.MDB_VERSION_MINOR,
_lib.MDB_VERSION_PATCH,
1 if _have_patched_lmdb else 0)
return (_lib.MDB_VERSION_MAJOR,
_lib.MDB_VERSION_MINOR,
_lib.MDB_VERSION_PATCH)
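# Example (illustrative): `version()` returns the LMDB library version the
# binding was linked against, e.g. `(0, 9, 29)`; with `subpatch=True` a fourth
# element reports whether py-lmdb's own LMDB patches are present.
#
#   major, minor, patch = version()
#   major, minor, patch, patched = version(subpatch=True)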
class Environment(object):
"""
Structure for a database environment. An environment may contain multiple
databases, all residing in the same shared-memory map and underlying disk
file.
To write to the environment a :py:class:`Transaction` must be created. One
simultaneous write transaction is allowed, however there is no limit on the
number of read transactions even when a write transaction exists.
This class is aliased to `lmdb.open`.
    It is a serious error to have the same LMDB file open more than once in
    the same process at the same time. Failure to heed this may lead to data
    corruption and interpreter crash.
Equivalent to `mdb_env_open()
<http://lmdb.tech/doc/group__mdb.html#ga1fe2740e25b1689dc412e7b9faadba1b>`_
`path`:
Location of directory (if `subdir=True`) or file prefix to store
the database.
`map_size`:
Maximum size database may grow to; used to size the memory mapping.
If database grows larger than ``map_size``, an exception will be
raised and the user must close and reopen :py:class:`Environment`.
On 64-bit there is no penalty for making this huge (say 1TB). Must
be <2GB on 32-bit.
.. note::
**The default map size is set low to encourage a crash**, so
users can figure out a good value before learning about this
option too late.
`subdir`:
If ``True``, `path` refers to a subdirectory to store the data and
lock files in, otherwise it refers to a filename prefix.
`readonly`:
If ``True``, disallow any write operations. Note the lock file is
still modified. If specified, the ``write`` flag to
:py:meth:`begin` or :py:class:`Transaction` is ignored.
`metasync`:
If ``False``, flush system buffers to disk only once per
transaction, omit the metadata flush. Defer that until the system
flushes files to disk, or next commit or :py:meth:`sync`.
This optimization maintains database integrity, but a system crash
may undo the last committed transaction. I.e. it preserves the ACI
(atomicity, consistency, isolation) but not D (durability) database
property.
`sync`:
If ``False``, don't flush system buffers to disk when committing a
transaction. This optimization means a system crash can corrupt the
database or lose the last transactions if buffers are not yet
flushed to disk.
The risk is governed by how often the system flushes dirty buffers
to disk and how often :py:meth:`sync` is called. However, if the
filesystem preserves write order and `writemap=False`, transactions
exhibit ACI (atomicity, consistency, isolation) properties and only
lose D (durability). I.e. database integrity is maintained, but a
system crash may undo the final transactions.
Note that `sync=False, writemap=True` leaves the system with no
hint for when to write transactions to disk, unless :py:meth:`sync`
is called. `map_async=True, writemap=True` may be preferable.
`mode`:
File creation mode.
`create`:
If ``False``, do not create the directory `path` if it is missing.
`readahead`:
If ``False``, LMDB will disable the OS filesystem readahead
mechanism, which may improve random read performance when a
database is larger than RAM.
`writemap`:
If ``True``, use a writeable memory map unless `readonly=True`.
This is faster and uses fewer mallocs, but loses protection from
application bugs like wild pointer writes and other bad updates
into the database. Incompatible with nested transactions.
Processes with and without `writemap` on the same environment do
not cooperate well.
`meminit`:
If ``False`` LMDB will not zero-initialize buffers prior to writing
them to disk. This improves performance but may cause old heap data
            to be written to the unused portion of the buffer. Do not use
this option if your application manipulates confidential data (e.g.
plaintext passwords) in memory. This option is only meaningful when
`writemap=False`; new pages are always zero-initialized when
`writemap=True`.
`map_async`:
When ``writemap=True``, use asynchronous flushes to disk. As with
``sync=False``, a system crash can then corrupt the database or
lose the last transactions. Calling :py:meth:`sync` ensures
on-disk database integrity until next commit.
`max_readers`:
Maximum number of simultaneous read transactions. Can only be set
by the first process to open an environment, as it affects the size
of the lock file and shared memory area. Attempts to simultaneously
start more than this many *read* transactions will fail.
`max_dbs`:
Maximum number of databases available. If 0, assume environment
will be used as a single database.
`max_spare_txns`:
Read-only transactions to cache after becoming unused. Caching
transactions avoids two allocations, one lock and linear scan
of the shared environment per invocation of :py:meth:`begin`,
:py:class:`Transaction`, :py:meth:`get`, :py:meth:`gets`, or
:py:meth:`cursor`. Should match the process's maximum expected
concurrent transactions (e.g. thread count).
`lock`:
If ``False``, don't do any locking. If concurrent access is
anticipated, the caller must manage all concurrency itself. For
proper operation the caller must enforce single-writer semantics,
and must ensure that no readers are using old transactions while a
writer is active. The simplest approach is to use an exclusive lock
so that no readers may be active at all when a writer begins.
"""
def __init__(self, path, map_size=10485760, subdir=True,
readonly=False, metasync=True, sync=True, map_async=False,
mode=O_0755, create=True, readahead=True, writemap=False,
meminit=True, max_readers=126, max_dbs=0, max_spare_txns=1,
lock=True):
self._max_spare_txns = max_spare_txns
self._spare_txns = []
envpp = _ffi.new('MDB_env **')
rc = _lib.mdb_env_create(envpp)
if rc:
raise _error("mdb_env_create", rc)
self._env = envpp[0]
self._deps = set()
self._creating_db_in_readonly = False
self.set_mapsize(map_size)
rc = _lib.mdb_env_set_maxreaders(self._env, max_readers)
if rc:
raise _error("mdb_env_set_maxreaders", rc)
rc = _lib.mdb_env_set_maxdbs(self._env, max_dbs)
if rc:
raise _error("mdb_env_set_maxdbs", rc)
if create and subdir and not readonly:
try:
os.mkdir(path, mode)
except EnvironmentError as e:
if e.errno != errno.EEXIST:
raise
flags = _lib.MDB_NOTLS
if not subdir:
flags |= _lib.MDB_NOSUBDIR
if readonly:
flags |= _lib.MDB_RDONLY
self.readonly = readonly
if not metasync:
flags |= _lib.MDB_NOMETASYNC
if not sync:
flags |= _lib.MDB_NOSYNC
if map_async:
flags |= _lib.MDB_MAPASYNC
if not readahead:
flags |= _lib.MDB_NORDAHEAD
if writemap:
flags |= _lib.MDB_WRITEMAP
if not meminit:
flags |= _lib.MDB_NOMEMINIT
if not lock:
flags |= _lib.MDB_NOLOCK
if isinstance(path, UnicodeType):
path = path.encode(sys.getfilesystemencoding())
rc = _lib.mdb_env_open(self._env, path, flags, mode & ~O_0111)
if rc:
raise _error(path, rc)
with self.begin(db=object()) as txn:
self._db = _Database(
env=self,
txn=txn,
name=None,
reverse_key=False,
dupsort=False,
create=True,
integerkey=False,
integerdup=False,
dupfixed=False
)
self._dbs = {None: self._db}
def __enter__(self):
return self
def __exit__(self, _1, _2, _3):
self.close()
def __del__(self):
self.close()
_env = None
_deps = None
_spare_txns = None
_dbs = None
def set_mapsize(self, map_size):
"""Change the maximum size of the map file. This function will fail if
any transactions are active in the current process.
`map_size`:
The new size in bytes.
Equivalent to `mdb_env_set_mapsize()
<http://lmdb.tech/doc/group__mdb.html#gaa2506ec8dab3d969b0e609cd82e619e5>`_
Warning:
There's a data race in the underlying library that may cause
catastrophic loss of data if you use this method.
            You are safe if one of the following is true:
* Only one process accessing a particular LMDB file ever calls
this method.
* You use locking external to this library to ensure that only one
process accessing the current LMDB file can be inside this function.
"""
rc = _lib.mdb_env_set_mapsize(self._env, map_size)
if rc:
raise _error("mdb_env_set_mapsize", rc)
def close(self):
"""Close the environment, invalidating any open iterators, cursors, and
transactions. Repeat calls to :py:meth:`close` have no effect.
Equivalent to `mdb_env_close()
<http://lmdb.tech/doc/group__mdb.html#ga4366c43ada8874588b6a62fbda2d1e95>`_
"""
if self._env:
if self._deps:
while self._deps:
self._deps.pop()._invalidate()
self._deps = None
if self._spare_txns:
while self._spare_txns:
_lib.mdb_txn_abort(self._spare_txns.pop())
self._spare_txns = None
if self._dbs:
self._dbs.clear()
self._dbs = None
self._db = None
_lib.mdb_env_close(self._env)
self._env = _invalid
def path(self):
"""Directory path or file name prefix where this environment is
stored.
Equivalent to `mdb_env_get_path()
<http://lmdb.tech/doc/group__mdb.html#gac699fdd8c4f8013577cb933fb6a757fe>`_
"""
path = _ffi.new('char **')
rc = _lib.mdb_env_get_path(self._env, path)
if rc:
raise _error("mdb_env_get_path", rc)
return _ffi.string(path[0]).decode(sys.getfilesystemencoding())
def copy(self, path, compact=False, txn=None):
"""Make a consistent copy of the environment in the given destination
directory.
`compact`:
If ``True``, perform compaction while copying: omit free pages and
sequentially renumber all pages in output. This option consumes
more CPU and runs more slowly than the default, but may produce a
smaller output database.
`txn`:
If provided, the backup will be taken from the database with
respect to that transaction, otherwise a temporary read-only
            transaction will be created. Note: passing a non-None value for
            this parameter is not available if the module was built with
            LMDB_PURE. Note: this parameter may be set only if compact=True.
Equivalent to `mdb_env_copy2() or mdb_env_copy3()
<http://lmdb.tech/doc/group__mdb.html#ga5d51d6130325f7353db0955dbedbc378>`_
"""
flags = _lib.MDB_CP_COMPACT if compact else 0
if txn and not _have_patched_lmdb:
raise TypeError("Non-patched LMDB doesn't support transaction with env.copy")
if txn and not flags:
raise TypeError("txn argument only compatible with compact=True")
encoded = path.encode(sys.getfilesystemencoding())
if _have_patched_lmdb:
rc = _lib.mdb_env_copy3(self._env, encoded, flags, txn._txn if txn else _ffi.NULL)
if rc:
raise _error("mdb_env_copy3", rc)
else:
rc = _lib.mdb_env_copy2(self._env, encoded, flags)
if rc:
raise _error("mdb_env_copy2", rc)
def copyfd(self, fd, compact=False, txn=None):
"""Copy a consistent version of the environment to file descriptor
`fd`.
`compact`:
If ``True``, perform compaction while copying: omit free pages and
sequentially renumber all pages in output. This option consumes
more CPU and runs more slowly than the default, but may produce a
smaller output database.
`txn`:
If provided, the backup will be taken from the database with
respect to that transaction, otherwise a temporary read-only
            transaction will be created. Note: passing a non-None value for
            this parameter is not available if the module was built with
            LMDB_PURE.
Equivalent to `mdb_env_copyfd2() or mdb_env_copyfd3
<http://lmdb.tech/doc/group__mdb.html#ga5d51d6130325f7353db0955dbedbc378>`_
"""
if txn and not _have_patched_lmdb:
raise TypeError("Non-patched LMDB doesn't support transaction with env.copy")
if is_win32:
# Convert C library handle to kernel handle.
fd = msvcrt.get_osfhandle(fd)
flags = _lib.MDB_CP_COMPACT if compact else 0
if txn and not flags:
raise TypeError("txn argument only compatible with compact=True")
if _have_patched_lmdb:
rc = _lib.mdb_env_copyfd3(self._env, fd, flags, txn._txn if txn else _ffi.NULL)
if rc:
raise _error("mdb_env_copyfd3", rc)
else:
rc = _lib.mdb_env_copyfd2(self._env, fd, flags)
if rc:
raise _error("mdb_env_copyfd2", rc)
def sync(self, force=False):
"""Flush the data buffers to disk.
Equivalent to `mdb_env_sync()
<http://lmdb.tech/doc/group__mdb.html#ga85e61f05aa68b520cc6c3b981dba5037>`_
Data is always written to disk when :py:meth:`Transaction.commit` is
called, but the operating system may keep it buffered. MDB always
flushes the OS buffers upon commit as well, unless the environment was
opened with `sync=False` or `metasync=False`.
`force`:
If ``True``, force a synchronous flush. Otherwise if the
environment was opened with `sync=False` the flushes will be
omitted, and with `map_async=True` they will be asynchronous.
"""
rc = _lib.mdb_env_sync(self._env, force)
if rc:
raise _error("mdb_env_sync", rc)
def _convert_stat(self, st):
"""Convert a MDB_stat to a dict.
"""
return {
"psize": st.ms_psize,
"depth": st.ms_depth,
"branch_pages": st.ms_branch_pages,
"leaf_pages": st.ms_leaf_pages,
"overflow_pages": st.ms_overflow_pages,
"entries": st.ms_entries
}
def stat(self):
"""stat()
Return some environment statistics for the default database as a dict:
+--------------------+---------------------------------------+
| ``psize`` | Size of a database page in bytes. |
+--------------------+---------------------------------------+
| ``depth`` | Height of the B-tree. |
+--------------------+---------------------------------------+
| ``branch_pages`` | Number of internal (non-leaf) pages. |
+--------------------+---------------------------------------+
| ``leaf_pages`` | Number of leaf pages. |
+--------------------+---------------------------------------+
| ``overflow_pages`` | Number of overflow pages. |
+--------------------+---------------------------------------+
| ``entries`` | Number of data items. |
+--------------------+---------------------------------------+
Equivalent to `mdb_env_stat()
<http://lmdb.tech/doc/group__mdb.html#gaf881dca452050efbd434cd16e4bae255>`_
"""
st = _ffi.new('MDB_stat *')
rc = _lib.mdb_env_stat(self._env, st)
if rc:
raise _error("mdb_env_stat", rc)
return self._convert_stat(st)
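    # Example (illustrative): a quick way to report how many records the
    # default database currently holds:
    #
    #   st = env.stat()
    #   print('entries=%d depth=%d psize=%d'
    #         % (st['entries'], st['depth'], st['psize']))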
def info(self):
"""Return some nice environment information as a dict:
+--------------------+---------------------------------------------+
| ``map_addr`` | Address of database map in RAM. |
+--------------------+---------------------------------------------+
| ``map_size`` | Size of database map in RAM. |
+--------------------+---------------------------------------------+
| ``last_pgno`` | ID of last used page. |
+--------------------+---------------------------------------------+
| ``last_txnid`` | ID of last committed transaction. |
+--------------------+---------------------------------------------+
| ``max_readers`` | Number of reader slots allocated in the |
| | lock file. Equivalent to the value of |
| | `maxreaders=` specified by the first |
| | process opening the Environment. |
+--------------------+---------------------------------------------+
| ``num_readers`` | Maximum number of reader slots in |
| | simultaneous use since the lock file was |
| | initialized. |
+--------------------+---------------------------------------------+
Equivalent to `mdb_env_info()
<http://lmdb.tech/doc/group__mdb.html#ga18769362c7e7d6cf91889a028a5c5947>`_
"""
info = _ffi.new('MDB_envinfo *')
rc = _lib.mdb_env_info(self._env, info)
if rc:
raise _error("mdb_env_info", rc)
return {
"map_addr": int(_ffi.cast('long', info.me_mapaddr)),
"map_size": info.me_mapsize,
"last_pgno": info.me_last_pgno,
"last_txnid": info.me_last_txnid,
"max_readers": info.me_maxreaders,
"num_readers": info.me_numreaders
}
def flags(self):
"""Return a dict describing Environment constructor flags used to
instantiate this environment."""
flags_ = _ffi.new('unsigned int[]', 1)
rc = _lib.mdb_env_get_flags(self._env, flags_)
if rc:
raise _error("mdb_env_get_flags", rc)
flags = flags_[0]
return {
'subdir': not (flags & _lib.MDB_NOSUBDIR),
'readonly': bool(flags & _lib.MDB_RDONLY),
'metasync': not (flags & _lib.MDB_NOMETASYNC),
'sync': not (flags & _lib.MDB_NOSYNC),
'map_async': bool(flags & _lib.MDB_MAPASYNC),
'readahead': not (flags & _lib.MDB_NORDAHEAD),
'writemap': bool(flags & _lib.MDB_WRITEMAP),
'meminit': not (flags & _lib.MDB_NOMEMINIT),
'lock': not (flags & _lib.MDB_NOLOCK),
}
def max_key_size(self):
"""Return the maximum size in bytes of a record's key part. This
matches the ``MDB_MAXKEYSIZE`` constant set at compile time."""
return _lib.mdb_env_get_maxkeysize(self._env)
def max_readers(self):
"""Return the maximum number of readers specified during open of the
environment by the first process. This is the same as `max_readers=`
specified to the constructor if this process was the first to open the
environment."""
readers_ = _ffi.new('unsigned int[]', 1)
rc = _lib.mdb_env_get_maxreaders(self._env, readers_)
if rc:
raise _error("mdb_env_get_maxreaders", rc)
return readers_[0]
def readers(self):
"""Return a multi line Unicode string describing the current state of
the reader lock table."""
_callbacks.msg_func = []
try:
rc = _lib.mdb_reader_list(self._env, _msg_func, _ffi.NULL)
if rc:
raise _error("mdb_reader_list", rc)
return UnicodeType().join(_callbacks.msg_func)
finally:
del _callbacks.msg_func
def reader_check(self):
"""Search the reader lock table for stale entries, for example due to a
crashed process. Returns the number of stale entries that were cleared.
"""
reaped = _ffi.new('int[]', 1)
rc = _lib.mdb_reader_check(self._env, reaped)
if rc:
raise _error('mdb_reader_check', rc)
return reaped[0]
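    # Example (illustrative): `readers()` and `reader_check()` are typically
    # used together for diagnostics after another process died while holding
    # a read transaction:
    #
    #   print(env.readers())        # Human-readable reader lock table.
    #   stale = env.reader_check()  # Clear slots left behind by dead readers.
    #   print('%d stale reader(s) cleared' % stale)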
def open_db(self, key=None, txn=None, reverse_key=False, dupsort=False,
create=True, integerkey=False, integerdup=False,
dupfixed=False):
"""
Open a database, returning an instance of :py:class:`_Database`. Repeat
:py:meth:`Environment.open_db` calls for the same name will return the
same handle. As a special case, the main database is always open.
Equivalent to `mdb_dbi_open()
<http://lmdb.tech/doc/group__mdb.html#gac08cad5b096925642ca359a6d6f0562a>`_
Named databases are implemented by *storing a special descriptor in the
main database*. All databases in an environment *share the same file*.
Because the descriptor is present in the main database, attempts to
create a named database will fail if a key matching the database's name
already exists. Furthermore *the key is visible to lookups and
enumerations*. If your main database keyspace conflicts with the names
you use for named databases, then move the contents of your main
database to another named database.
::
>>> env = lmdb.open('/tmp/test', max_dbs=2)
            >>> with env.begin(write=True) as txn:
... txn.put('somename', 'somedata')
>>> # Error: database cannot share name of existing key!
>>> subdb = env.open_db('somename')
A newly created database will not exist if the transaction that created
it aborted, nor if another process deleted it. The handle resides in
the shared environment, it is not owned by the current transaction or
process. Only one thread should call this function; it is not
mutex-protected in a read-only transaction.
The `dupsort`, `integerkey`, `integerdup`, and `dupfixed` parameters are
ignored if the database already exists. The state of those settings are
persistent and immutable per database. See :py:meth:`_Database.flags`
to view the state of those options for an opened database. A consequence
of the immutability of these flags is that the default non-named database
will never have these flags set.
Preexisting transactions, other than the current transaction and any
parents, must not use the new handle, nor must their children.
`key`:
Bytestring database name. If ``None``, indicates the main
database should be returned, otherwise indicates a named
database should be created inside the main database.
In other words, *a key representing the database will be
visible in the main database, and the database name cannot
conflict with any existing key.*
`txn`:
Transaction used to create the database if it does not exist.
            If unspecified, a temporary write transaction is used. Do not
call :py:meth:`open_db` from inside an existing transaction
without supplying it here. Note the passed transaction must
have `write=True`.
`reverse_key`:
If ``True``, keys are compared from right to left (e.g. DNS
names).
`dupsort`:
Duplicate keys may be used in the database. (Or, from another
perspective, keys may have multiple data items, stored in
sorted order.) By default keys must be unique and may have only
a single data item.
`create`:
If ``True``, create the database if it doesn't exist, otherwise
raise an exception.
`integerkey`:
If ``True``, indicates keys in the database are C unsigned
or ``size_t`` integers encoded in native byte order. Keys must
            all be either unsigned or ``size_t``; they cannot be mixed in a
single database.
`integerdup`:
If ``True``, values in the
            database are C unsigned or ``size_t`` integers encoded in
native byte order. Implies `dupsort` and `dupfixed` are
``True``.
`dupfixed`:
If ``True``, values for each key
            in the database are of fixed size, allowing each additional
duplicate value for a key to be stored without a header
indicating its size. Implies `dupsort` is ``True``.
"""
if isinstance(key, UnicodeType):
raise TypeError('key must be bytes')
if key is None and (reverse_key or dupsort or integerkey or integerdup
or dupfixed):
raise ValueError('May not set flags on the main database')
db = self._dbs.get(key)
if db:
return db
if integerdup:
dupfixed = True
if dupfixed:
dupsort = True
if txn:
db = _Database(self, txn, key, reverse_key, dupsort, create,
integerkey, integerdup, dupfixed)
else:
try:
self._creating_db_in_readonly = True
with self.begin(write=not self.readonly) as txn:
db = _Database(self, txn, key, reverse_key, dupsort, create,
integerkey, integerdup, dupfixed)
finally:
self._creating_db_in_readonly = False
self._dbs[key] = db
return db
def begin(self, db=None, parent=None, write=False, buffers=False):
"""Shortcut for :py:class:`lmdb.Transaction`"""
return Transaction(self, db, parent, write, buffers)
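# Example (illustrative sketch of typical Environment usage; the path,
# database name and keys below are hypothetical):
#
#   env = Environment('/tmp/example-env', max_dbs=2, map_size=2 ** 30)
#   people = env.open_db(b'people')             # Named sub-database.
#   with env.begin(write=True, db=people) as txn:
#       txn.put(b'alice', b'1')
#   with env.begin(db=people) as txn:
#       assert txn.get(b'alice') == b'1'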
class _Database(object):
"""
Internal database handle. This class is opaque, save a single method.
Should not be constructed directly. Use :py:meth:`Environment.open_db`
instead.
"""
def __init__(self, env, txn, name, reverse_key, dupsort, create,
integerkey, integerdup, dupfixed):
env._deps.add(self)
self._deps = set()
self._name = name
flags = 0
if reverse_key:
flags |= _lib.MDB_REVERSEKEY
if dupsort:
flags |= _lib.MDB_DUPSORT
if create:
flags |= _lib.MDB_CREATE
if integerkey:
flags |= _lib.MDB_INTEGERKEY
if integerdup:
flags |= _lib.MDB_INTEGERDUP
if dupfixed:
flags |= _lib.MDB_DUPFIXED
dbipp = _ffi.new('MDB_dbi *')
self._dbi = None
rc = _lib.mdb_dbi_open(txn._txn, name or _ffi.NULL, flags, dbipp)
if rc:
raise _error("mdb_dbi_open", rc)
self._dbi = dbipp[0]
self._load_flags(txn)
def _load_flags(self, txn):
"""Load MDB's notion of the database flags."""
flags_ = _ffi.new('unsigned int[]', 1)
rc = _lib.mdb_dbi_flags(txn._txn, self._dbi, flags_)
if rc:
raise _error("mdb_dbi_flags", rc)
self._flags = flags_[0]
def flags(self, *args):
"""Return the database's associated flags as a dict of _Database
constructor kwargs."""
if len(args) > 1:
raise TypeError('flags takes 0 or 1 arguments')
return {
'reverse_key': bool(self._flags & _lib.MDB_REVERSEKEY),
'dupsort': bool(self._flags & _lib.MDB_DUPSORT),
'integerkey': bool(self._flags & _lib.MDB_INTEGERKEY),
'integerdup': bool(self._flags & _lib.MDB_INTEGERDUP),
'dupfixed': bool(self._flags & _lib.MDB_DUPFIXED),
}
def _invalidate(self):
self._dbi = _invalid
open = Environment
class Transaction(object):
"""
A transaction object. All operations require a transaction handle,
transactions may be read-only or read-write. Write transactions may not
span threads. Transaction objects implement the context manager protocol,
so that reliable release of the transaction happens even in the face of
unhandled exceptions:
.. code-block:: python
# Transaction aborts correctly:
with env.begin(write=True) as txn:
crash()
# Transaction commits automatically:
with env.begin(write=True) as txn:
txn.put('a', 'b')
Equivalent to `mdb_txn_begin()
<http://lmdb.tech/doc/group__mdb.html#gad7ea55da06b77513609efebd44b26920>`_
`env`:
Environment the transaction should be on.
`db`:
Default named database to operate on. If unspecified, defaults to
the environment's main database. Can be overridden on a per-call
basis below.
`parent`:
``None``, or a parent transaction (see lmdb.h).
`write`:
Transactions are read-only by default. To modify the database, you
must pass `write=True`. This flag is ignored if
:py:class:`Environment` was opened with ``readonly=True``.
`buffers`:
If ``True``, indicates :py:func:`buffer` objects should be yielded
instead of bytestrings. This setting applies to the
:py:class:`Transaction` instance itself and any :py:class:`Cursors
<Cursor>` created within the transaction.
This feature significantly improves performance, since MDB has a
zero-copy design, but it requires care when manipulating the
returned buffer objects. The benefit of this facility is diminished
when using small keys and values.
"""
# If constructor fails, then __del__ will attempt to access these
# attributes.
_env = _invalid
_txn = _invalid
_parent = None
_write = False
# Mutations occurred since transaction start. Required to know when Cursor
# key/value must be refreshed.
_mutations = 0
def __init__(self, env, db=None, parent=None, write=False, buffers=False):
env._deps.add(self)
self.env = env # hold ref
self._db = db or env._db
self._env = env._env
self._key = _ffi.new('MDB_val *')
self._val = _ffi.new('MDB_val *')
self._to_py = _mvbuf if buffers else _mvstr
self._deps = set()
if parent:
self._parent = parent
parent_txn = parent._txn
parent._deps.add(self)
else:
parent_txn = _ffi.NULL
if write:
if env.readonly:
msg = 'Cannot start write transaction with read-only env'
raise _error(msg, _lib.EACCES)
txnpp = _ffi.new('MDB_txn **')
rc = _lib.mdb_txn_begin(self._env, parent_txn, 0, txnpp)
if rc:
raise _error("mdb_txn_begin", rc)
self._txn = txnpp[0]
self._write = True
else:
try: # Exception catch in order to avoid racy 'if txns:' test
if env._creating_db_in_readonly: # Don't use spare txns for creating a DB when read-only
raise IndexError
self._txn = env._spare_txns.pop()
env._max_spare_txns += 1
rc = _lib.mdb_txn_renew(self._txn)
if rc:
while self._deps:
self._deps.pop()._invalidate()
_lib.mdb_txn_abort(self._txn)
self._txn = _invalid
self._invalidate()
raise _error("mdb_txn_renew", rc)
except IndexError:
txnpp = _ffi.new('MDB_txn **')
flags = _lib.MDB_RDONLY
rc = _lib.mdb_txn_begin(self._env, parent_txn, flags, txnpp)
if rc:
raise _error("mdb_txn_begin", rc)
self._txn = txnpp[0]
def _invalidate(self):
if self._txn:
self.abort()
self.env._deps.discard(self)
self._parent = None
self._env = _invalid
def __del__(self):
self.abort()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type:
self.abort()
else:
self.commit()
def id(self):
"""id()
Return the transaction's ID.
This returns the identifier associated with this transaction. For a
read-only transaction, this corresponds to the snapshot being read;
concurrent readers will frequently have the same transaction ID.
"""
return _lib.mdb_txn_id(self._txn)
def stat(self, db):
"""stat(db)
Return statistics like :py:meth:`Environment.stat`, except for a single
DBI. `db` must be a database handle returned by :py:meth:`open_db`.
"""
st = _ffi.new('MDB_stat *')
rc = _lib.mdb_stat(self._txn, db._dbi, st)
if rc:
raise _error('mdb_stat', rc)
return self.env._convert_stat(st)
def drop(self, db, delete=True):
"""Delete all keys in a named database and optionally delete the named
database itself. Deleting the named database causes it to become
unavailable, and invalidates existing cursors.
Equivalent to `mdb_drop()
<http://lmdb.tech/doc/group__mdb.html#gab966fab3840fc54a6571dfb32b00f2db>`_
"""
while db._deps:
db._deps.pop()._invalidate()
rc = _lib.mdb_drop(self._txn, db._dbi, delete)
self._mutations += 1
if rc:
raise _error("mdb_drop", rc)
if db._name in self.env._dbs:
del self.env._dbs[db._name]
def _cache_spare(self):
# In order to avoid taking and maintaining a lock, a race is allowed
# below which may result in more spare txns than desired. It seems
# unlikely the race could ever result in a large amount of spare txns,
# and in any case a correctly configured program should not be opening
# more read-only transactions than there are configured spares.
if self.env._max_spare_txns > 0:
_lib.mdb_txn_reset(self._txn)
self.env._spare_txns.append(self._txn)
self.env._max_spare_txns -= 1
self._txn = _invalid
self._invalidate()
return True
return False
def commit(self):
"""Commit the pending transaction.
Equivalent to `mdb_txn_commit()
<http://lmdb.tech/doc/group__mdb.html#ga846fbd6f46105617ac9f4d76476f6597>`_
"""
while self._deps:
self._deps.pop()._invalidate()
if self._write or not self._cache_spare():
rc = _lib.mdb_txn_commit(self._txn)
self._txn = _invalid
if rc:
raise _error("mdb_txn_commit", rc)
self._invalidate()
def abort(self):
"""Abort the pending transaction. Repeat calls to :py:meth:`abort` have
no effect after a previously successful :py:meth:`commit` or
:py:meth:`abort`, or after the associated :py:class:`Environment` has
been closed.
Equivalent to `mdb_txn_abort()
<http://lmdb.tech/doc/group__mdb.html#ga73a5938ae4c3239ee11efa07eb22b882>`_
"""
if self._txn:
while self._deps:
self._deps.pop()._invalidate()
if self._write or not self._cache_spare():
rc = _lib.mdb_txn_abort(self._txn)
self._txn = _invalid
if rc:
raise _error("mdb_txn_abort", rc)
self._invalidate()
def get(self, key, default=None, db=None):
"""Fetch the first value matching `key`, returning `default` if `key`
does not exist. A cursor must be used to fetch all values for a key in
a `dupsort=True` database.
Equivalent to `mdb_get()
<http://lmdb.tech/doc/group__mdb.html#ga8bf10cd91d3f3a83a34d04ce6b07992d>`_
"""
rc = _lib.pymdb_get(self._txn, (db or self._db)._dbi,
key, len(key), self._val)
if rc:
if rc == _lib.MDB_NOTFOUND:
return default
raise _error("mdb_cursor_get", rc)
preload(self._val)
return self._to_py(self._val)
def put(self, key, value, dupdata=True, overwrite=True, append=False,
db=None):
"""Store a record, returning ``True`` if it was written, or ``False``
to indicate the key was already present and `overwrite=False`.
On success, the cursor is positioned on the new record.
Equivalent to `mdb_put()
<http://lmdb.tech/doc/group__mdb.html#ga4fa8573d9236d54687c61827ebf8cac0>`_
`key`:
Bytestring key to store.
`value`:
Bytestring value to store.
`dupdata`:
If ``False`` and database was opened with `dupsort=True`, will return
``False`` if the key already has that value. In other words, this only
affects the return value.
`overwrite`:
If ``False``, do not overwrite any existing matching key. If
False and writing to a dupsort=True database, this will not add a value
to the key and this function will return ``False``.
`append`:
If ``True``, append the pair to the end of the database without
comparing its order first. Appending a key that is not greater
than the highest existing key will fail and return ``False``.
`db`:
Named database to operate on. If unspecified, defaults to the
database given to the :py:class:`Transaction` constructor.
"""
flags = 0
if not dupdata:
flags |= _lib.MDB_NODUPDATA
if not overwrite:
flags |= _lib.MDB_NOOVERWRITE
if append:
flags |= _lib.MDB_APPEND
rc = _lib.pymdb_put(self._txn, (db or self._db)._dbi,
key, len(key), value, len(value), flags)
self._mutations += 1
if rc:
if rc == _lib.MDB_KEYEXIST:
return False
raise _error("mdb_put", rc)
return True
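    # Example (illustrative): how `overwrite` and `append` affect the return
    # value; `txn` is a hypothetical write transaction on a non-dupsort
    # database:
    #
    #   txn.put(b'k', b'v1')                   # True: stored.
    #   txn.put(b'k', b'v2', overwrite=False)  # False: key already present.
    #   txn.put(b'zzz', b'v', append=True)     # True only if b'zzz' sorts
    #                                          # after every existing key.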
def replace(self, key, value, db=None):
"""Use a temporary cursor to invoke :py:meth:`Cursor.replace`.
`db`:
Named database to operate on. If unspecified, defaults to the
database given to the :py:class:`Transaction` constructor.
"""
with Cursor(db or self._db, self) as curs:
return curs.replace(key, value)
def pop(self, key, db=None):
"""Use a temporary cursor to invoke :py:meth:`Cursor.pop`.
`db`:
Named database to operate on. If unspecified, defaults to the
database given to the :py:class:`Transaction` constructor.
"""
with Cursor(db or self._db, self) as curs:
return curs.pop(key)
def delete(self, key, value=EMPTY_BYTES, db=None):
"""Delete a key from the database.
Equivalent to `mdb_del()
<http://lmdb.tech/doc/group__mdb.html#gab8182f9360ea69ac0afd4a4eaab1ddb0>`_
`key`:
The key to delete.
value:
If the database was opened with dupsort=True and value is not
the empty bytestring, then delete elements matching only this
`(key, value)` pair, otherwise all values for key are deleted.
Returns True if at least one key was deleted.
"""
if value is None: # for bug-compatibility with cpython impl
value = EMPTY_BYTES
rc = _lib.pymdb_del(self._txn, (db or self._db)._dbi,
key, len(key), value, len(value))
self._mutations += 1
if rc:
if rc == _lib.MDB_NOTFOUND:
return False
raise _error("mdb_del", rc)
return True
def cursor(self, db=None):
"""Shortcut for ``lmdb.Cursor(db, self)``"""
return Cursor(db or self._db, self)
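# Example (illustrative): `Transaction.replace` and `Transaction.pop` provide
# read-modify-write semantics through a temporary cursor; `txn` below is a
# hypothetical write transaction on a non-dupsort database:
#
#   old = txn.replace(b'counter', b'1')  # Store new value, return previous.
#   val = txn.pop(b'counter')            # Fetch and delete in one step.
#   assert txn.get(b'counter') is None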
class Cursor(object):
"""
Structure for navigating a database.
Equivalent to `mdb_cursor_open()
<http://lmdb.tech/doc/group__mdb.html#ga9ff5d7bd42557fd5ee235dc1d62613aa>`_
`db`:
:py:class:`_Database` to navigate.
`txn`:
:py:class:`Transaction` to navigate.
As a convenience, :py:meth:`Transaction.cursor` can be used to quickly
return a cursor:
::
>>> env = lmdb.open('/tmp/foo')
>>> child_db = env.open_db('child_db')
>>> with env.begin() as txn:
... cursor = txn.cursor() # Cursor on main database.
... cursor2 = txn.cursor(child_db) # Cursor on child database.
Cursors start in an unpositioned state. If :py:meth:`iternext` or
:py:meth:`iterprev` are used in this state, iteration proceeds from the
start or end respectively. Iterators directly position using the cursor,
meaning strange behavior results when multiple iterators exist on the same
cursor.
.. note::
From the perspective of the Python binding, cursors return to an
'unpositioned' state once any scanning or seeking method (e.g.
:py:meth:`next`, :py:meth:`prev_nodup`, :py:meth:`set_range`) returns
``False`` or raises an exception. This is primarily to ensure safe,
consistent semantics in the face of any error condition.
When the Cursor returns to an unpositioned state, its :py:meth:`key`
and :py:meth:`value` return empty strings to indicate there is no
active position, although internally the LMDB cursor may still have a
valid position.
This may lead to slightly surprising behaviour when iterating the
values for a `dupsort=True` database's keys, since methods such as
:py:meth:`iternext_dup` will cause Cursor to appear unpositioned,
despite it returning ``False`` only to indicate there are no more
values for the current key. In that case, simply calling
:py:meth:`next` would cause iteration to resume at the next available
key.
This behaviour may change in future.
Iterator methods such as :py:meth:`iternext` and :py:meth:`iterprev` accept
`keys` and `values` arguments. If both are ``True``, then the value of
:py:meth:`item` is yielded on each iteration. If only `keys` is ``True``,
:py:meth:`key` is yielded, otherwise only :py:meth:`value` is yielded.
Prior to iteration, a cursor can be positioned anywhere in the database:
::
>>> with env.begin() as txn:
... cursor = txn.cursor()
... if not cursor.set_range('5'): # Position at first key >= '5'.
... print('Not found!')
... else:
... for key, value in cursor: # Iterate from first key >= '5'.
... print((key, value))
Iteration is not required to navigate, and sometimes results in ugly or
inefficient code. In cases where the iteration order is not obvious, or is
related to the data being read, use of :py:meth:`set_key`,
:py:meth:`set_range`, :py:meth:`key`, :py:meth:`value`, and :py:meth:`item`
may be preferable:
::
>>> # Record the path from a child to the root of a tree.
>>> path = ['child14123']
>>> while path[-1] != 'root':
... assert cursor.set_key(path[-1]), \\
... 'Tree is broken! Path: %s' % (path,)
... path.append(cursor.value())
"""
def __init__(self, db, txn):
db._deps.add(self)
txn._deps.add(self)
self.db = db # hold ref
self.txn = txn # hold ref
self._dbi = db._dbi
self._txn = txn._txn
self._key = _ffi.new('MDB_val *')
self._val = _ffi.new('MDB_val *')
self._valid = False
self._to_py = txn._to_py
curpp = _ffi.new('MDB_cursor **')
self._cur = None
rc = _lib.mdb_cursor_open(self._txn, self._dbi, curpp)
if rc:
raise _error("mdb_cursor_open", rc)
self._cur = curpp[0]
# If Transaction.mutations!=last_mutation, must MDB_GET_CURRENT to
# refresh `key' and `val'.
self._last_mutation = txn._mutations
def _invalidate(self):
if self._cur:
_lib.mdb_cursor_close(self._cur)
self.db._deps.discard(self)
self.txn._deps.discard(self)
self._cur = _invalid
self._dbi = _invalid
self._txn = _invalid
def __del__(self):
self._invalidate()
def close(self):
"""Close the cursor, freeing its associated resources."""
self._invalidate()
def __enter__(self):
return self
def __exit__(self, _1, _2, _3):
self._invalidate()
def key(self):
"""Return the current key."""
# Must refresh `key` and `val` following mutation.
if self._last_mutation != self.txn._mutations:
self._cursor_get(_lib.MDB_GET_CURRENT)
return self._to_py(self._key)
def value(self):
"""Return the current value."""
# Must refresh `key` and `val` following mutation.
if self._last_mutation != self.txn._mutations:
self._cursor_get(_lib.MDB_GET_CURRENT)
preload(self._val)
return self._to_py(self._val)
def item(self):
"""Return the current `(key, value)` pair."""
# Must refresh `key` and `val` following mutation.
if self._last_mutation != self.txn._mutations:
self._cursor_get(_lib.MDB_GET_CURRENT)
preload(self._val)
return self._to_py(self._key), self._to_py(self._val)
def _iter(self, op, keys, values):
if not values:
get = self.key
elif not keys:
get = self.value
else:
get = self.item
cur = self._cur
key = self._key
val = self._val
rc = 0
while self._valid:
yield get()
rc = _lib.mdb_cursor_get(cur, key, val, op)
self._valid = not rc
if rc:
self._key.mv_size = 0
self._val.mv_size = 0
if rc != _lib.MDB_NOTFOUND:
raise _error("mdb_cursor_get", rc)
def iternext(self, keys=True, values=True):
"""Return a forward iterator that yields the current element before
calling :py:meth:`next`, repeating until the end of the database is
reached. As a convenience, :py:class:`Cursor` implements the iterator
protocol by automatically returning a forward iterator when invoked:
::
>>> # Equivalent:
>>> it = iter(cursor)
>>> it = cursor.iternext(keys=True, values=True)
If the cursor is not yet positioned, it is moved to the first key in
the database, otherwise iteration proceeds from the current position.
"""
if not self._valid:
self.first()
return self._iter(_lib.MDB_NEXT, keys, values)
__iter__ = iternext
def iternext_dup(self, keys=False, values=True):
"""Return a forward iterator that yields the current value
("duplicate") of the current key before calling :py:meth:`next_dup`,
repeating until the last value of the current key is reached.
Only meaningful for databases opened with `dupsort=True`.
.. code-block:: python
if not cursor.set_key("foo"):
print("No values found for 'foo'")
else:
for idx, data in enumerate(cursor.iternext_dup()):
print("%d'th value for 'foo': %s" % (idx, data))
"""
return self._iter(_lib.MDB_NEXT_DUP, keys, values)
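    # Example (illustrative): enumerating every value stored under a single
    # key of a hypothetical `dupsort=True` database `dupdb`:
    #
    #   with env.begin(db=dupdb) as txn:
    #       cur = txn.cursor()
    #       if cur.set_key(b'tag'):
    #           values = list(cur.iternext_dup())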
def iternext_nodup(self, keys=True, values=False):
"""Return a forward iterator that yields the current value
("duplicate") of the current key before calling :py:meth:`next_nodup`,
repeating until the end of the database is reached.
Only meaningful for databases opened with `dupsort=True`.
If the cursor is not yet positioned, it is moved to the first key in
the database, otherwise iteration proceeds from the current position.
.. code-block:: python
for key in cursor.iternext_nodup():
print("Key '%s' has %d values" % (key, cursor.count()))
"""
if not self._valid:
self.first()
return self._iter(_lib.MDB_NEXT_NODUP, keys, values)
def iterprev(self, keys=True, values=True):
"""Return a reverse iterator that yields the current element before
calling :py:meth:`prev`, until the start of the database is reached.
If the cursor is not yet positioned, it is moved to the last key in
the database, otherwise iteration proceeds from the current position.
::
>>> with env.begin() as txn:
... for i, (key, value) in enumerate(txn.cursor().iterprev()):
... print('%dth last item is (%r, %r)' % (1+i, key, value))
"""
if not self._valid:
self.last()
return self._iter(_lib.MDB_PREV, keys, values)
def iterprev_dup(self, keys=False, values=True):
"""Return a reverse iterator that yields the current value
("duplicate") of the current key before calling :py:meth:`prev_dup`,
repeating until the first value of the current key is reached.
Only meaningful for databases opened with `dupsort=True`.
"""
return self._iter(_lib.MDB_PREV_DUP, keys, values)
def iterprev_nodup(self, keys=True, values=False):
"""Return a reverse iterator that yields the current value
("duplicate") of the current key before calling :py:meth:`prev_nodup`,
repeating until the start of the database is reached.
If the cursor is not yet positioned, it is moved to the last key in
the database, otherwise iteration proceeds from the current position.
Only meaningful for databases opened with `dupsort=True`.
"""
if not self._valid:
self.last()
return self._iter(_lib.MDB_PREV_NODUP, keys, values)
def _cursor_get(self, op):
rc = _lib.mdb_cursor_get(self._cur, self._key, self._val, op)
self._valid = v = not rc
self._last_mutation = self.txn._mutations
if rc:
self._key.mv_size = 0
self._val.mv_size = 0
if rc != _lib.MDB_NOTFOUND:
if not (rc == _lib.EINVAL and op == _lib.MDB_GET_CURRENT):
raise _error("mdb_cursor_get", rc)
return v
def _cursor_get_kv(self, op, k, v):
rc = _lib.pymdb_cursor_get(self._cur, k, len(k), v, len(v),
self._key, self._val, op)
self._valid = v = not rc
if rc:
self._key.mv_size = 0
self._val.mv_size = 0
if rc != _lib.MDB_NOTFOUND:
if not (rc == _lib.EINVAL and op == _lib.MDB_GET_CURRENT):
raise _error("mdb_cursor_get", rc)
return v
def first(self):
"""Move to the first key in the database, returning ``True`` on success
or ``False`` if the database is empty.
If the database was opened with `dupsort=True` and the key contains
duplicates, the cursor is positioned on the first value ("duplicate").
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_FIRST
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_FIRST)
def first_dup(self):
"""Move to the first value ("duplicate") for the current key, returning
``True`` on success or ``False`` if the database is empty.
Only meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_FIRST_DUP
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_FIRST_DUP)
def last(self):
"""Move to the last key in the database, returning ``True`` on success
or ``False`` if the database is empty.
If the database was opened with `dupsort=True` and the key contains
duplicates, the cursor is positioned on the last value ("duplicate").
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_LAST
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_LAST)
def last_dup(self):
"""Move to the last value ("duplicate") for the current key, returning
``True`` on success or ``False`` if the database is empty.
Only meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_LAST_DUP
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_LAST_DUP)
def prev(self):
"""Move to the previous element, returning ``True`` on success or
``False`` if there is no previous item.
For databases opened with `dupsort=True`, moves to the previous data
item ("duplicate") for the current key if one exists, otherwise moves
to the previous key.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_PREV
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_PREV)
def prev_dup(self):
"""Move to the previous value ("duplicate") of the current key,
returning ``True`` on success or ``False`` if there is no previous
value.
Only meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_PREV_DUP
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_PREV_DUP)
def prev_nodup(self):
"""Move to the last value ("duplicate") of the previous key, returning
``True`` on success or ``False`` if there is no previous key.
Only meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_PREV_NODUP
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_PREV_NODUP)
def next(self):
"""Move to the next element, returning ``True`` on success or ``False``
if there is no next element.
For databases opened with `dupsort=True`, moves to the next value
("duplicate") for the current key if one exists, otherwise moves to the
first value of the next key.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_NEXT
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_NEXT)
def next_dup(self):
"""Move to the next value ("duplicate") of the current key, returning
``True`` on success or ``False`` if there is no next value.
Only meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_NEXT_DUP
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_NEXT_DUP)
def next_nodup(self):
"""Move to the first value ("duplicate") of the next key, returning
``True`` on success or ``False`` if there is no next key.
Only meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_NEXT_NODUP
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_NEXT_NODUP)
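# Navigation sketch (not from the original source; assumes an Environment
# `env` and a handle `db` for a database opened with dupsort=True): first()
# positions on the first key, next_nodup() jumps to the next distinct key,
# and count() reports how many duplicates the current key holds.
#
#   with env.begin(db=db) as txn:
#       cur = txn.cursor()
#       moved = cur.first()
#       while moved:
#           print('key', cur.key(), 'has', cur.count(), 'values')
#           moved = cur.next_nodup()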
def set_key(self, key):
"""Seek exactly to `key`, returning ``True`` on success or ``False`` if
the exact key was not found. It is an error to :py:meth:`set_key` the
empty bytestring.
For databases opened with `dupsort=True`, moves to the first value
("duplicate") for the key.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_SET_KEY
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get_kv(_lib.MDB_SET_KEY, key, EMPTY_BYTES)
def set_key_dup(self, key, value):
"""Seek exactly to `(key, value)`, returning ``True`` on success or
``False`` if the exact key and value were not found. It is an error
to :py:meth:`set_key` the empty bytestring.
Only meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_GET_BOTH
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get_kv(_lib.MDB_GET_BOTH, key, value)
def get(self, key, default=None):
"""Equivalent to :py:meth:`set_key()`, except :py:meth:`value` is
returned when `key` is found, otherwise `default`.
"""
if self._cursor_get_kv(_lib.MDB_SET_KEY, key, EMPTY_BYTES):
return self.value()
return default
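# Lookup sketch (not part of the original source; assumes a cursor `cur` on
# an open database and a hypothetical key b'config'): set_key() positions
# exactly on a key, while get() folds in a default value.
#
#   if cur.set_key(b'config'):
#       raw = cur.value()
#   else:
#       raw = b'{}'
#   # ...or, equivalently:
#   raw = cur.get(b'config', default=b'{}')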
def getmulti(self, keys, dupdata=False, dupfixed_bytes=None, keyfixed=False):
"""Returns an iterable of `(key, value)` 2-tuples containing results
for each key in the iterable `keys`.
`keys`:
Iterable to read keys from.
`dupdata`:
If ``True`` and database was opened with `dupsort=True`, read
all duplicate values for each matching key.
`dupfixed_bytes`:
If database was opened with `dupsort=True` and `dupfixed=True`,
accepts the size of each value, in bytes, and applies an
optimization reducing the number of database lookups.
`keyfixed`:
If `dupfixed_bytes` is set and database key size is fixed,
setting keyfixed=True will result in this function returning
a memoryview to the results as a structured array of bytes.
The structured array can be instantiated by passing the
memoryview buffer to NumPy:
.. code-block:: python
key_bytes, val_bytes = 4, 8
dtype = np.dtype([(f'S{key_bytes}', f'S{val_bytes}')])
arr = np.frombuffer(
cur.getmulti(keys, dupdata=True, dupfixed_bytes=val_bytes, keyfixed=True)
)
"""
if dupfixed_bytes and dupfixed_bytes < 0:
raise _error("dupfixed_bytes must be a positive integer.")
elif (dupfixed_bytes or keyfixed) and not dupdata:
raise _error("dupdata is required for dupfixed_bytes/key_bytes.")
elif keyfixed and not dupfixed_bytes:
raise _error("dupfixed_bytes is required for key_bytes.")
if dupfixed_bytes:
get_op = _lib.MDB_GET_MULTIPLE
next_op = _lib.MDB_NEXT_MULTIPLE
else:
get_op = _lib.MDB_GET_CURRENT
next_op = _lib.MDB_NEXT_DUP
a = bytearray()
lst = list()
for key in keys:
if self.set_key(key):
while self._valid:
self._cursor_get(get_op)
preload(self._val)
key = self._to_py(self._key)
val = self._to_py(self._val)
if dupfixed_bytes:
gen = (
(key, val[i:i + dupfixed_bytes])
for i in range(0, len(val), dupfixed_bytes))
if keyfixed:
for k, v in gen:
a.extend(k + v)
else:
for k, v in gen:
lst.append((k, v))
else:
lst.append((key, val))
if dupdata:
self._cursor_get(next_op)
else:
break
if keyfixed:
return memoryview(a)
else:
return lst
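# Batch-read sketch (not part of the original source; assumes a cursor `cur`
# on a database opened with dupsort=True and dupfixed=True, 8-byte values,
# and hypothetical keys): getmulti() returns (key, value) pairs for every
# key in the iterable.
#
#   wanted = [b'alpha', b'beta', b'gamma']
#   pairs = cur.getmulti(wanted, dupdata=True, dupfixed_bytes=8)
#   for key, value in pairs:
#       print(key, value)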
def set_range(self, key):
"""Seek to the first key greater than or equal to `key`, returning
``True`` on success, or ``False`` to indicate key was past end of
database. Behaves like :py:meth:`first` if `key` is the empty
bytestring.
For databases opened with `dupsort=True`, moves to the first value
("duplicate") for the key.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_SET_RANGE
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
if not key:
return self.first()
return self._cursor_get_kv(_lib.MDB_SET_RANGE, key, EMPTY_BYTES)
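# Prefix-scan sketch (not from the original source; assumes a cursor `cur`
# on a database with bytestring keys, default buffers=False): set_range()
# seeks to the first key >= the prefix, then forward iteration stops once
# keys no longer share it.
#
#   prefix = b'user:'
#   if cur.set_range(prefix):
#       for key, value in cur.iternext():
#           if not key.startswith(prefix):
#               break
#           print(key, value)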
def set_range_dup(self, key, value):
"""Seek to the first key/value pair greater than or equal to `key`,
returning ``True`` on success, or ``False`` to indicate that `value` was past the
last value of `key` or that `(key, value)` was past the end of the database.
Only meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_GET_BOTH_RANGE
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
rc = self._cursor_get_kv(_lib.MDB_GET_BOTH_RANGE, key, value)
# issue #126: MDB_GET_BOTH_RANGE does not satisfy its documentation,
# and fails to update `key` and `value` on success. Therefore
# explicitly call MDB_GET_CURRENT after MDB_GET_BOTH_RANGE.
self._cursor_get(_lib.MDB_GET_CURRENT)
return rc
def delete(self, dupdata=False):
"""Delete the current element and move to the next, returning ``True``
on success or ``False`` if the database was empty.
If `dupdata` is ``True``, delete all values ("duplicates") for the
current key, otherwise delete only the currently positioned value. Only
meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_del()
<http://lmdb.tech/doc/group__mdb.html#ga26a52d3efcfd72e5bf6bd6960bf75f95>`_
"""
v = self._valid
if v:
flags = _lib.MDB_NODUPDATA if dupdata else 0
rc = _lib.mdb_cursor_del(self._cur, flags)
self.txn._mutations += 1
if rc:
raise _error("mdb_cursor_del", rc)
self._cursor_get(_lib.MDB_GET_CURRENT)
v = rc == 0
return v
def count(self):
"""Return the number of values ("duplicates") for the current key.
Only meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_count()
<http://lmdb.tech/doc/group__mdb.html#ga4041fd1e1862c6b7d5f10590b86ffbe2>`_
"""
countp = _ffi.new('size_t *')
rc = _lib.mdb_cursor_count(self._cur, countp)
if rc:
raise _error("mdb_cursor_count", rc)
return countp[0]
def put(self, key, val, dupdata=True, overwrite=True, append=False):
"""Store a record, returning ``True`` if it was written, or ``False``
to indicate the key was already present and `overwrite=False`. On
success, the cursor is positioned on the key.
Equivalent to `mdb_cursor_put()
<http://lmdb.tech/doc/group__mdb.html#ga1f83ccb40011837ff37cc32be01ad91e>`_
`key`:
Bytestring key to store.
`val`:
Bytestring value to store.
`dupdata`:
If ``False`` and database was opened with `dupsort=True`, will return
``False`` if the key already has that value. In other words, this only
affects the return value.
`overwrite`:
If ``False``, do not overwrite the value for the key if it
exists, just return ``False``. For databases opened with
`dupsort=True`, ``False`` will always be returned if a
duplicate key/value pair is inserted, regardless of the setting
for `overwrite`.
`append`:
If ``True``, append the pair to the end of the database without
comparing its order first. Appending a key that is not greater
than the highest existing key will fail and return ``False``.
"""
flags = 0
if not dupdata:
flags |= _lib.MDB_NODUPDATA
if not overwrite:
flags |= _lib.MDB_NOOVERWRITE
if append:
if self.txn._db._flags & _lib.MDB_DUPSORT:
flags |= _lib.MDB_APPENDDUP
else:
flags |= _lib.MDB_APPEND
rc = _lib.pymdb_cursor_put(self._cur, key, len(key), val, len(val), flags)
self.txn._mutations += 1
if rc:
if rc == _lib.MDB_KEYEXIST:
return False
raise _error("mdb_cursor_put", rc)
self._cursor_get(_lib.MDB_GET_CURRENT)
return True
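# Write sketch (not part of the original source; assumes a hypothetical
# write transaction `txn`): with overwrite=False, put() reports whether the
# record was actually written.
#
#   with txn.cursor() as cur:
#       written = cur.put(b'counter', b'1', overwrite=False)
#       if not written:
#           print('key already present; existing value left untouched')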
def putmulti(self, items, dupdata=True, overwrite=True, append=False):
"""Invoke :py:meth:`put` for each `(key, value)` 2-tuple from the
iterable `items`. Elements must be exactly 2-tuples; they may not be of
any other type, nor a tuple subclass.
Returns a tuple `(consumed, added)`, where `consumed` is the number of
elements read from the iterable, and `added` is the number of new
entries added to the database. `added` may be less than `consumed` when
`overwrite=False`.
`items`:
Iterable to read records from.
`dupdata`:
If ``True`` and database was opened with `dupsort=True`, add
pair as a duplicate if the given key already exists. Otherwise
overwrite any existing matching key.
`overwrite`:
If ``False``, do not overwrite the value for the key if it
exists, just return ``False``. For databases opened with
`dupsort=True`, ``False`` will always be returned if a
duplicate key/value pair is inserted, regardless of the setting
for `overwrite`.
`append`:
If ``True``, append records to the end of the database without
comparing their order first. Appending a key that is not
greater than the highest existing key will cause corruption.
"""
flags = 0
if not dupdata:
flags |= _lib.MDB_NODUPDATA
if not overwrite:
flags |= _lib.MDB_NOOVERWRITE
if append:
if self.txn._db._flags & _lib.MDB_DUPSORT:
flags |= _lib.MDB_APPENDDUP
else:
flags |= _lib.MDB_APPEND
added = 0
skipped = 0
for key, value in items:
rc = _lib.pymdb_cursor_put(self._cur, key, len(key),
value, len(value), flags)
self.txn._mutations += 1
added += 1
if rc:
if rc == _lib.MDB_KEYEXIST:
skipped += 1
else:
raise _error("mdb_cursor_put", rc)
self._cursor_get(_lib.MDB_GET_CURRENT)
return added, added - skipped
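# Bulk-load sketch (not from the original source; assumes a hypothetical
# write transaction `txn` on an empty database, so the sorted keys satisfy
# the append=True ordering requirement): putmulti() consumes an iterable of
# exact 2-tuples and reports how many were consumed and how many were added.
#
#   items = [(b'k%03d' % i, b'v%03d' % i) for i in range(100)]
#   with txn.cursor() as cur:
#       consumed, added = cur.putmulti(items, append=True)
#       assert consumed == 100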
def replace(self, key, val):
"""Store a record, returning its previous value if one existed. Returns
``None`` if no previous value existed. This uses the best available
mechanism to minimize the cost of a `set-and-return-previous`
operation.
For databases opened with `dupsort=True`, only the first data element
("duplicate") is returned if it existed, all data elements are removed
and the new `(key, data)` pair is inserted.
`key`:
Bytestring key to store.
`val`:
Bytestring value to store.
"""
if self.db._flags & _lib.MDB_DUPSORT:
if self._cursor_get_kv(_lib.MDB_SET_KEY, key, EMPTY_BYTES):
preload(self._val)
old = _mvstr(self._val)
self.delete(True)
else:
old = None
self.put(key, val)
return old
flags = _lib.MDB_NOOVERWRITE
keylen = len(key)
rc = _lib.pymdb_cursor_put(self._cur, key, keylen, val, len(val), flags)
self.txn._mutations += 1
if not rc:
return
if rc != _lib.MDB_KEYEXIST:
raise _error("mdb_cursor_put", rc)
self._cursor_get(_lib.MDB_GET_CURRENT)
preload(self._val)
old = _mvstr(self._val)
rc = _lib.pymdb_cursor_put(self._cur, key, keylen, val, len(val), 0)
self.txn._mutations += 1
if rc:
raise _error("mdb_cursor_put", rc)
self._cursor_get(_lib.MDB_GET_CURRENT)
return old
def pop(self, key):
"""Fetch a record's value then delete it. Returns ``None`` if no
previous value existed. This uses the best available mechanism to
minimize the cost of a `delete-and-return-previous` operation.
For databases opened with `dupsort=True`, the first data element
("duplicate") for the key will be popped.
`key`:
Bytestring key to delete.
"""
if self._cursor_get_kv(_lib.MDB_SET_KEY, key, EMPTY_BYTES):
preload(self._val)
old = _mvstr(self._val)
rc = _lib.mdb_cursor_del(self._cur, 0)
self.txn._mutations += 1
if rc:
raise _error("mdb_cursor_del", rc)
self._cursor_get(_lib.MDB_GET_CURRENT)
return old
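# Read-modify sketch (not part of the original source; assumes a cursor
# `cur` in a write transaction and hypothetical keys): replace() swaps in a
# new value and returns the old one, while pop() deletes a key and returns
# what it held.
#
#   previous = cur.replace(b'session', b'new-token')
#   removed = cur.pop(b'stale-key')    # None if the key was absent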
def _iter_from(self, k, reverse):
"""Helper for centidb. Please do not rely on this interface, it may be
removed in future.
"""
if not k and not reverse:
found = self.first()
else:
found = self.set_range(k)
if reverse:
if not found:
self.last()
return self.iterprev()
else:
if not found:
return iter(())
return self.iternext()
| #
# Copyright 2013 The py-lmdb authors, all rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted only as authorized by the OpenLDAP
# Public License.
#
# A copy of this license is available in the file LICENSE in the
# top-level directory of the distribution or, alternatively, at
# <http://www.OpenLDAP.org/license.html>.
#
# OpenLDAP is a registered trademark of the OpenLDAP Foundation.
#
# Individual files and/or contributed packages may be copyright by
# other parties and/or subject to additional restrictions.
#
# This work also contains materials derived from public sources.
#
# Additional information about OpenLDAP can be obtained at
# <http://www.openldap.org/>.
#
"""
CPython/CFFI wrapper for OpenLDAP's "Lightning" MDB database.
Please see https://lmdb.readthedocs.io/
"""
from __future__ import absolute_import
from __future__ import with_statement
import errno
import inspect
import os
import sys
import threading
is_win32 = sys.platform == 'win32'
if is_win32:
import msvcrt
try:
import __builtin__
except ImportError:
import builtins as __builtin__ # type: ignore
import lmdb
try:
from lmdb import _config
except ImportError:
_config = None # type: ignore
__all__ = [
'Cursor',
'Environment',
'Transaction',
'_Database',
'enable_drop_gil',
'version',
]
__all__ += [
'BadDbiError',
'BadRslotError',
'BadTxnError',
'BadValsizeError',
'CorruptedError',
'CursorFullError',
'DbsFullError',
'DiskError',
'Error',
'IncompatibleError',
'InvalidError',
'InvalidParameterError',
'KeyExistsError',
'LockError',
'MapFullError',
'MapResizedError',
'MemoryError',
'NotFoundError',
'PageFullError',
'PageNotFoundError',
'PanicError',
'ReadersFullError',
'ReadonlyError',
'TlsFullError',
'TxnFullError',
'VersionMismatchError',
]
# Handle moronic Python 3 mess.
UnicodeType = getattr(__builtin__, 'unicode', str)
BytesType = getattr(__builtin__, 'bytes', str)
O_0755 = int('0755', 8)
O_0111 = int('0111', 8)
EMPTY_BYTES = UnicodeType().encode()
# Used to track context across CFFI callbacks.
_callbacks = threading.local()
_CFFI_CDEF = '''
typedef int mode_t;
typedef ... MDB_env;
typedef struct MDB_txn MDB_txn;
typedef struct MDB_cursor MDB_cursor;
typedef unsigned int MDB_dbi;
enum MDB_cursor_op {
MDB_FIRST,
MDB_FIRST_DUP,
MDB_GET_BOTH,
MDB_GET_BOTH_RANGE,
MDB_GET_CURRENT,
MDB_GET_MULTIPLE,
MDB_LAST,
MDB_LAST_DUP,
MDB_NEXT,
MDB_NEXT_DUP,
MDB_NEXT_MULTIPLE,
MDB_NEXT_NODUP,
MDB_PREV,
MDB_PREV_DUP,
MDB_PREV_NODUP,
MDB_SET,
MDB_SET_KEY,
MDB_SET_RANGE,
...
};
typedef enum MDB_cursor_op MDB_cursor_op;
struct MDB_val {
size_t mv_size;
void *mv_data;
...;
};
typedef struct MDB_val MDB_val;
struct MDB_stat {
unsigned int ms_psize;
unsigned int ms_depth;
size_t ms_branch_pages;
size_t ms_leaf_pages;
size_t ms_overflow_pages;
size_t ms_entries;
...;
};
typedef struct MDB_stat MDB_stat;
struct MDB_envinfo {
void *me_mapaddr;
size_t me_mapsize;
size_t me_last_pgno;
size_t me_last_txnid;
unsigned int me_maxreaders;
unsigned int me_numreaders;
...;
};
typedef struct MDB_envinfo MDB_envinfo;
typedef int (*MDB_cmp_func)(const MDB_val *a, const MDB_val *b);
typedef void (*MDB_rel_func)(MDB_val *item, void *oldptr, void *newptr,
void *relctx);
char *mdb_strerror(int err);
int mdb_env_create(MDB_env **env);
int mdb_env_open(MDB_env *env, const char *path, unsigned int flags,
mode_t mode);
int mdb_env_copy2(MDB_env *env, const char *path, int flags);
int mdb_env_copyfd2(MDB_env *env, int fd, int flags);
int mdb_env_stat(MDB_env *env, MDB_stat *stat);
int mdb_env_info(MDB_env *env, MDB_envinfo *stat);
int mdb_env_get_maxkeysize(MDB_env *env);
int mdb_env_sync(MDB_env *env, int force);
void mdb_env_close(MDB_env *env);
int mdb_env_set_flags(MDB_env *env, unsigned int flags, int onoff);
int mdb_env_get_flags(MDB_env *env, unsigned int *flags);
int mdb_env_get_path(MDB_env *env, const char **path);
int mdb_env_set_mapsize(MDB_env *env, size_t size);
int mdb_env_set_maxreaders(MDB_env *env, unsigned int readers);
int mdb_env_get_maxreaders(MDB_env *env, unsigned int *readers);
int mdb_env_set_maxdbs(MDB_env *env, MDB_dbi dbs);
int mdb_txn_begin(MDB_env *env, MDB_txn *parent, unsigned int flags,
MDB_txn **txn);
int mdb_txn_commit(MDB_txn *txn);
void mdb_txn_reset(MDB_txn *txn);
int mdb_txn_renew(MDB_txn *txn);
void mdb_txn_abort(MDB_txn *txn);
size_t mdb_txn_id(MDB_txn *txn);
int mdb_dbi_open(MDB_txn *txn, const char *name, unsigned int flags,
MDB_dbi *dbi);
int mdb_stat(MDB_txn *txn, MDB_dbi dbi, MDB_stat *stat);
int mdb_drop(MDB_txn *txn, MDB_dbi dbi, int del_);
int mdb_get(MDB_txn *txn, MDB_dbi dbi, MDB_val *key, MDB_val *data);
int mdb_cursor_open(MDB_txn *txn, MDB_dbi dbi, MDB_cursor **cursor);
void mdb_cursor_close(MDB_cursor *cursor);
int mdb_cursor_del(MDB_cursor *cursor, unsigned int flags);
int mdb_cursor_count(MDB_cursor *cursor, size_t *countp);
int mdb_cursor_get(MDB_cursor *cursor, MDB_val *key, MDB_val*data, int op);
typedef int (MDB_msg_func)(const char *msg, void *ctx);
int mdb_reader_list(MDB_env *env, MDB_msg_func *func, void *ctx);
int mdb_reader_check(MDB_env *env, int *dead);
int mdb_dbi_flags(MDB_txn *txn, MDB_dbi dbi, unsigned int *flags);
#define MDB_VERSION_MAJOR ...
#define MDB_VERSION_MINOR ...
#define MDB_VERSION_PATCH ...
#define EACCES ...
#define EAGAIN ...
#define EINVAL ...
#define ENOMEM ...
#define ENOSPC ...
#define MDB_BAD_RSLOT ...
#define MDB_BAD_DBI ...
#define MDB_BAD_TXN ...
#define MDB_BAD_VALSIZE ...
#define MDB_CORRUPTED ...
#define MDB_CURSOR_FULL ...
#define MDB_DBS_FULL ...
#define MDB_INCOMPATIBLE ...
#define MDB_INVALID ...
#define MDB_KEYEXIST ...
#define MDB_MAP_FULL ...
#define MDB_MAP_RESIZED ...
#define MDB_NOTFOUND ...
#define MDB_PAGE_FULL ...
#define MDB_PAGE_NOTFOUND ...
#define MDB_PANIC ...
#define MDB_READERS_FULL ...
#define MDB_TLS_FULL ...
#define MDB_TXN_FULL ...
#define MDB_VERSION_MISMATCH ...
#define MDB_APPEND ...
#define MDB_APPENDDUP ...
#define MDB_CP_COMPACT ...
#define MDB_CREATE ...
#define MDB_DUPFIXED ...
#define MDB_DUPSORT ...
#define MDB_INTEGERDUP ...
#define MDB_INTEGERKEY ...
#define MDB_MAPASYNC ...
#define MDB_NODUPDATA ...
#define MDB_NOLOCK ...
#define MDB_NOMEMINIT ...
#define MDB_NOMETASYNC ...
#define MDB_NOOVERWRITE ...
#define MDB_NORDAHEAD ...
#define MDB_NOSUBDIR ...
#define MDB_NOSYNC ...
#define MDB_NOTLS ...
#define MDB_RDONLY ...
#define MDB_REVERSEKEY ...
#define MDB_WRITEMAP ...
// Helpers below inline MDB_vals. Avoids key alloc/dup on CPython, where
// CFFI will use PyString_AS_STRING when passed as an argument.
static int pymdb_del(MDB_txn *txn, MDB_dbi dbi,
char *key_s, size_t keylen,
char *val_s, size_t vallen);
static int pymdb_put(MDB_txn *txn, MDB_dbi dbi,
char *key_s, size_t keylen,
char *val_s, size_t vallen,
unsigned int flags);
static int pymdb_get(MDB_txn *txn, MDB_dbi dbi,
char *key_s, size_t keylen,
MDB_val *val_out);
static int pymdb_cursor_get(MDB_cursor *cursor,
char *key_s, size_t key_len,
char *data_s, size_t data_len,
MDB_val *key, MDB_val *data, int op);
static int pymdb_cursor_put(MDB_cursor *cursor,
char *key_s, size_t keylen,
char *val_s, size_t vallen, int flags);
// Prefaults a range
static void preload(int rc, void *x, size_t size);
'''
_CFFI_CDEF_PATCHED = '''
int mdb_env_copy3(MDB_env *env, const char *path, unsigned int flags, MDB_txn *txn);
int mdb_env_copyfd3(MDB_env *env, int fd, unsigned int flags, MDB_txn *txn);
'''
_CFFI_VERIFY = '''
#include <sys/stat.h>
#include "lmdb.h"
#include "preload.h"
// Helpers below inline MDB_vals. Avoids key alloc/dup on CPython, where
// CFFI will use PyString_AS_STRING when passed as an argument.
static int pymdb_get(MDB_txn *txn, MDB_dbi dbi, char *key_s, size_t keylen,
MDB_val *val_out)
{
MDB_val key = {keylen, key_s};
int rc = mdb_get(txn, dbi, &key, val_out);
return rc;
}
static int pymdb_put(MDB_txn *txn, MDB_dbi dbi, char *key_s, size_t keylen,
char *val_s, size_t vallen, unsigned int flags)
{
MDB_val key = {keylen, key_s};
MDB_val val = {vallen, val_s};
return mdb_put(txn, dbi, &key, &val, flags);
}
static int pymdb_del(MDB_txn *txn, MDB_dbi dbi, char *key_s, size_t keylen,
char *val_s, size_t vallen)
{
MDB_val key = {keylen, key_s};
MDB_val val = {vallen, val_s};
MDB_val *valptr;
if(vallen == 0) {
valptr = NULL;
} else {
valptr = &val;
}
return mdb_del(txn, dbi, &key, valptr);
}
static int pymdb_cursor_get(MDB_cursor *cursor,
char *key_s, size_t key_len,
char *data_s, size_t data_len,
MDB_val *key, MDB_val *data, int op)
{
MDB_val tmp_key = {key_len, key_s};
MDB_val tmp_data = {data_len, data_s};
int rc = mdb_cursor_get(cursor, &tmp_key, &tmp_data, op);
if(! rc) {
*key = tmp_key;
*data = tmp_data;
}
return rc;
}
static int pymdb_cursor_put(MDB_cursor *cursor, char *key_s, size_t keylen,
char *val_s, size_t vallen, int flags)
{
MDB_val tmpkey = {keylen, key_s};
MDB_val tmpval = {vallen, val_s};
return mdb_cursor_put(cursor, &tmpkey, &tmpval, flags);
}
'''
if not lmdb._reading_docs():
import cffi
# Try to use distutils-bundled CFFI configuration to avoid a recompile and
# potential compile errors during first module import.
_config_vars = _config.CONFIG if _config else {
'extra_compile_args': ['-w'],
'extra_sources': ['lib/mdb.c', 'lib/midl.c'],
'extra_include_dirs': ['lib'],
'extra_library_dirs': [],
'libraries': []
}
_have_patched_lmdb = '-DHAVE_PATCHED_LMDB=1' in _config.CONFIG['extra_compile_args'] # type: ignore
if _have_patched_lmdb:
_CFFI_CDEF += _CFFI_CDEF_PATCHED
_ffi = cffi.FFI()
_ffi.cdef(_CFFI_CDEF)
_lib = _ffi.verify(_CFFI_VERIFY,
modulename='lmdb_cffi',
ext_package='lmdb',
sources=_config_vars['extra_sources'],
extra_compile_args=_config_vars['extra_compile_args'],
include_dirs=_config_vars['extra_include_dirs'],
libraries=_config_vars['libraries'],
library_dirs=_config_vars['extra_library_dirs'])
@_ffi.callback("int(char *, void *)")
def _msg_func(s, _):
"""mdb_msg_func() callback. Appends `s` to _callbacks.msg_func list.
"""
_callbacks.msg_func.append(_ffi.string(s).decode())
return 0
class Error(Exception):
"""Raised when an LMDB-related error occurs, and no more specific
:py:class:`lmdb.Error` subclass exists."""
def __init__(self, what, code=0):
self.what = what
self.code = code
self.reason = _ffi.string(_lib.mdb_strerror(code))
msg = what
if code:
msg = '%s: %s' % (what, self.reason)
hint = getattr(self, 'MDB_HINT', None)
if hint:
msg += ' (%s)' % (hint,)
Exception.__init__(self, msg)
class KeyExistsError(Error):
"""Key/data pair already exists."""
MDB_NAME = 'MDB_KEYEXIST'
class NotFoundError(Error):
"""No matching key/data pair found.
Normally py-lmdb indicates a missing key by returning ``None``, or a
user-supplied default value, however LMDB may return this error where
py-lmdb does not know to convert it into a non-exceptional return.
"""
MDB_NAME = 'MDB_NOTFOUND'
class PageNotFoundError(Error):
"""Request page not found."""
MDB_NAME = 'MDB_PAGE_NOTFOUND'
class CorruptedError(Error):
"""Located page was of the wrong type."""
MDB_NAME = 'MDB_CORRUPTED'
class PanicError(Error):
"""Update of meta page failed."""
MDB_NAME = 'MDB_PANIC'
class VersionMismatchError(Error):
"""Database environment version mismatch."""
MDB_NAME = 'MDB_VERSION_MISMATCH'
class InvalidError(Error):
"""File is not an MDB file."""
MDB_NAME = 'MDB_INVALID'
class MapFullError(Error):
"""Environment map_size= limit reached."""
MDB_NAME = 'MDB_MAP_FULL'
MDB_HINT = 'Please use a larger Environment(map_size=) parameter'
class DbsFullError(Error):
"""Environment max_dbs= limit reached."""
MDB_NAME = 'MDB_DBS_FULL'
MDB_HINT = 'Please use a larger Environment(max_dbs=) parameter'
class ReadersFullError(Error):
"""Environment max_readers= limit reached."""
MDB_NAME = 'MDB_READERS_FULL'
MDB_HINT = 'Please use a larger Environment(max_readers=) parameter'
class TlsFullError(Error):
"""Thread-local storage keys full - too many environments open."""
MDB_NAME = 'MDB_TLS_FULL'
class TxnFullError(Error):
"""Transaciton has too many dirty pages - transaction too big."""
MDB_NAME = 'MDB_TXN_FULL'
MDB_HINT = 'Please do less work within your transaction'
class CursorFullError(Error):
"""Internal error - cursor stack limit reached."""
MDB_NAME = 'MDB_CURSOR_FULL'
class PageFullError(Error):
"""Internal error - page has no more space."""
MDB_NAME = 'MDB_PAGE_FULL'
class MapResizedError(Error):
"""Database contents grew beyond environment map_size=."""
MDB_NAME = 'MDB_MAP_RESIZED'
class IncompatibleError(Error):
"""Operation and DB incompatible, or DB flags changed."""
MDB_NAME = 'MDB_INCOMPATIBLE'
class BadRslotError(Error):
"""Invalid reuse of reader locktable slot."""
MDB_NAME = 'MDB_BAD_RSLOT'
class BadDbiError(Error):
"""The specified DBI was changed unexpectedly."""
MDB_NAME = 'MDB_BAD_DBI'
class BadTxnError(Error):
"""Transaction cannot recover - it must be aborted."""
MDB_NAME = 'MDB_BAD_TXN'
class BadValsizeError(Error):
"""Too big key/data, key is empty, or wrong DUPFIXED size."""
MDB_NAME = 'MDB_BAD_VALSIZE'
class ReadonlyError(Error):
"""An attempt was made to modify a read-only database."""
MDB_NAME = 'EACCES'
class InvalidParameterError(Error):
"""An invalid parameter was specified."""
MDB_NAME = 'EINVAL'
class LockError(Error):
"""The environment was locked by another process."""
MDB_NAME = 'EAGAIN'
class MemoryError(Error):
"""Out of memory."""
MDB_NAME = 'ENOMEM'
class DiskError(Error):
"""No more disk space."""
MDB_NAME = 'ENOSPC'
# Prepare _error_map, a mapping of integer MDB_ERROR_CODE to exception class.
if not lmdb._reading_docs():
_error_map = {}
for obj in list(globals().values()):
if inspect.isclass(obj) and issubclass(obj, Error) and obj is not Error:
_error_map[getattr(_lib, obj.MDB_NAME)] = obj
del obj
def _error(what, rc):
"""Lookup and instantiate the correct exception class for the error code
`rc`, using :py:class:`Error` if no better class exists."""
return _error_map.get(rc, Error)(what, rc)
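# Error-handling sketch (not from the original source): LMDB status codes
# are mapped to the exception classes above, so callers can catch specific
# failures. Assumes a hypothetical Environment `env` opened with the small
# default map_size.
#
#   try:
#       with env.begin(write=True) as txn:
#           txn.put(b'key', b'x' * 10**9)
#   except lmdb.MapFullError:
#       print('map_size exhausted; reopen with a larger map_size')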
class Some_LMDB_Resource_That_Was_Deleted_Or_Closed(object):
"""We need this because CFFI on PyPy treats None as cffi.NULL, instead of
throwing an exception it feeds LMDB null pointers. That means simply
replacing native handles with None during _invalidate() will cause NULL
pointer dereferences. Instead use this class, and its weird name to cause a
TypeError, with a very obvious string in the exception text.
The only alternatives to this are inserting a check around every single use
of a native handle to ensure the handle is still valid prior to calling
LMDB, or doing no crash-safety checking at all.
"""
def __nonzero__(self):
return 0
def __bool__(self):
return False
def __repr__(self):
return "<This used to be a LMDB resource but it was deleted or closed>"
_invalid = Some_LMDB_Resource_That_Was_Deleted_Or_Closed()
def _mvbuf(mv):
"""Convert a MDB_val cdata to a CFFI buffer object."""
return _ffi.buffer(mv.mv_data, mv.mv_size)
def _mvstr(mv):
"""Convert a MDB_val cdata to Python bytes."""
return _ffi.buffer(mv.mv_data, mv.mv_size)[:]
def preload(mv):
_lib.preload(0, mv.mv_data, mv.mv_size)
def enable_drop_gil():
"""Deprecated."""
def version(subpatch=False):
"""
Return a tuple of integers `(major, minor, patch)` describing the LMDB
library version that the binding is linked against. The version of the
binding itself is available from ``lmdb.__version__``.
`subpatch`:
If true, returns a 4-integer tuple consisting of the same plus
an extra integer that represents any patches applied by py-lmdb
itself (0 representing no patches).
"""
if subpatch:
return (_lib.MDB_VERSION_MAJOR,
_lib.MDB_VERSION_MINOR,
_lib.MDB_VERSION_PATCH,
1 if _have_patched_lmdb else 0)
return (_lib.MDB_VERSION_MAJOR,
_lib.MDB_VERSION_MINOR,
_lib.MDB_VERSION_PATCH)
class Environment(object):
"""
Structure for a database environment. An environment may contain multiple
databases, all residing in the same shared-memory map and underlying disk
file.
To write to the environment a :py:class:`Transaction` must be created. One
simultaneous write transaction is allowed; however, there is no limit on the
number of read transactions even when a write transaction exists.
This class is aliased to `lmdb.open`.
It is a serious error to have the same LMDB file open more than once in the same process at
the same time. Failure to heed this may lead to data corruption and
interpreter crash.
Equivalent to `mdb_env_open()
<http://lmdb.tech/doc/group__mdb.html#ga1fe2740e25b1689dc412e7b9faadba1b>`_
`path`:
Location of directory (if `subdir=True`) or file prefix to store
the database.
`map_size`:
Maximum size database may grow to; used to size the memory mapping.
If database grows larger than ``map_size``, an exception will be
raised and the user must close and reopen :py:class:`Environment`.
On 64-bit there is no penalty for making this huge (say 1TB). Must
be <2GB on 32-bit.
.. note::
**The default map size is set low to encourage a crash**, so
users can figure out a good value before learning about this
option too late.
`subdir`:
If ``True``, `path` refers to a subdirectory to store the data and
lock files in, otherwise it refers to a filename prefix.
`readonly`:
If ``True``, disallow any write operations. Note the lock file is
still modified. If specified, the ``write`` flag to
:py:meth:`begin` or :py:class:`Transaction` is ignored.
`metasync`:
If ``False``, flush system buffers to disk only once per
transaction, omit the metadata flush. Defer that until the system
flushes files to disk, or next commit or :py:meth:`sync`.
This optimization maintains database integrity, but a system crash
may undo the last committed transaction. I.e. it preserves the ACI
(atomicity, consistency, isolation) but not D (durability) database
property.
`sync`:
If ``False``, don't flush system buffers to disk when committing a
transaction. This optimization means a system crash can corrupt the
database or lose the last transactions if buffers are not yet
flushed to disk.
The risk is governed by how often the system flushes dirty buffers
to disk and how often :py:meth:`sync` is called. However, if the
filesystem preserves write order and `writemap=False`, transactions
exhibit ACI (atomicity, consistency, isolation) properties and only
lose D (durability). I.e. database integrity is maintained, but a
system crash may undo the final transactions.
Note that `sync=False, writemap=True` leaves the system with no
hint for when to write transactions to disk, unless :py:meth:`sync`
is called. `map_async=True, writemap=True` may be preferable.
`mode`:
File creation mode.
`create`:
If ``False``, do not create the directory `path` if it is missing.
`readahead`:
If ``False``, LMDB will disable the OS filesystem readahead
mechanism, which may improve random read performance when a
database is larger than RAM.
`writemap`:
If ``True``, use a writeable memory map unless `readonly=True`.
This is faster and uses fewer mallocs, but loses protection from
application bugs like wild pointer writes and other bad updates
into the database. Incompatible with nested transactions.
Processes with and without `writemap` on the same environment do
not cooperate well.
`meminit`:
If ``False`` LMDB will not zero-initialize buffers prior to writing
them to disk. This improves performance but may cause old heap data
to be saved in the unused portion of the buffer. Do not use
this option if your application manipulates confidential data (e.g.
plaintext passwords) in memory. This option is only meaningful when
`writemap=False`; new pages are always zero-initialized when
`writemap=True`.
`map_async`:
When ``writemap=True``, use asynchronous flushes to disk. As with
``sync=False``, a system crash can then corrupt the database or
lose the last transactions. Calling :py:meth:`sync` ensures
on-disk database integrity until next commit.
`max_readers`:
Maximum number of simultaneous read transactions. Can only be set
by the first process to open an environment, as it affects the size
of the lock file and shared memory area. Attempts to simultaneously
start more than this many *read* transactions will fail.
`max_dbs`:
Maximum number of databases available. If 0, assume environment
will be used as a single database.
`max_spare_txns`:
Read-only transactions to cache after becoming unused. Caching
transactions avoids two allocations, one lock and linear scan
of the shared environment per invocation of :py:meth:`begin`,
:py:class:`Transaction`, :py:meth:`get`, :py:meth:`gets`, or
:py:meth:`cursor`. Should match the process's maximum expected
concurrent transactions (e.g. thread count).
`lock`:
If ``False``, don't do any locking. If concurrent access is
anticipated, the caller must manage all concurrency itself. For
proper operation the caller must enforce single-writer semantics,
and must ensure that no readers are using old transactions while a
writer is active. The simplest approach is to use an exclusive lock
so that no readers may be active at all when a writer begins.
"""
def __init__(self, path, map_size=10485760, subdir=True,
readonly=False, metasync=True, sync=True, map_async=False,
mode=O_0755, create=True, readahead=True, writemap=False,
meminit=True, max_readers=126, max_dbs=0, max_spare_txns=1,
lock=True):
self._max_spare_txns = max_spare_txns
self._spare_txns = []
envpp = _ffi.new('MDB_env **')
rc = _lib.mdb_env_create(envpp)
if rc:
raise _error("mdb_env_create", rc)
self._env = envpp[0]
self._deps = set()
self._creating_db_in_readonly = False
self.set_mapsize(map_size)
rc = _lib.mdb_env_set_maxreaders(self._env, max_readers)
if rc:
raise _error("mdb_env_set_maxreaders", rc)
rc = _lib.mdb_env_set_maxdbs(self._env, max_dbs)
if rc:
raise _error("mdb_env_set_maxdbs", rc)
if create and subdir and not readonly:
try:
os.mkdir(path, mode)
except EnvironmentError as e:
if e.errno != errno.EEXIST:
raise
flags = _lib.MDB_NOTLS
if not subdir:
flags |= _lib.MDB_NOSUBDIR
if readonly:
flags |= _lib.MDB_RDONLY
self.readonly = readonly
if not metasync:
flags |= _lib.MDB_NOMETASYNC
if not sync:
flags |= _lib.MDB_NOSYNC
if map_async:
flags |= _lib.MDB_MAPASYNC
if not readahead:
flags |= _lib.MDB_NORDAHEAD
if writemap:
flags |= _lib.MDB_WRITEMAP
if not meminit:
flags |= _lib.MDB_NOMEMINIT
if not lock:
flags |= _lib.MDB_NOLOCK
if isinstance(path, UnicodeType):
path = path.encode(sys.getfilesystemencoding())
rc = _lib.mdb_env_open(self._env, path, flags, mode & ~O_0111)
if rc:
raise _error(path, rc)
with self.begin(db=object()) as txn:
self._db = _Database(
env=self,
txn=txn,
name=None,
reverse_key=False,
dupsort=False,
create=True,
integerkey=False,
integerdup=False,
dupfixed=False
)
self._dbs = {None: self._db}
def __enter__(self):
return self
def __exit__(self, _1, _2, _3):
self.close()
def __del__(self):
self.close()
_env = None
_deps = None
_spare_txns = None
_dbs = None
def set_mapsize(self, map_size):
"""Change the maximum size of the map file. This function will fail if
any transactions are active in the current process.
`map_size`:
The new size in bytes.
Equivalent to `mdb_env_set_mapsize()
<http://lmdb.tech/doc/group__mdb.html#gaa2506ec8dab3d969b0e609cd82e619e5>`_
Warning:
There's a data race in the underlying library that may cause
catastrophic loss of data if you use this method.
You are safe if one of the following is true:
* Only one process accessing a particular LMDB file ever calls
this method.
* You use locking external to this library to ensure that only one
process accessing the current LMDB file can be inside this function.
"""
rc = _lib.mdb_env_set_mapsize(self._env, map_size)
if rc:
raise _error("mdb_env_set_mapsize", rc)
def close(self):
"""Close the environment, invalidating any open iterators, cursors, and
transactions. Repeat calls to :py:meth:`close` have no effect.
Equivalent to `mdb_env_close()
<http://lmdb.tech/doc/group__mdb.html#ga4366c43ada8874588b6a62fbda2d1e95>`_
"""
if self._env:
if self._deps:
while self._deps:
self._deps.pop()._invalidate()
self._deps = None
if self._spare_txns:
while self._spare_txns:
_lib.mdb_txn_abort(self._spare_txns.pop())
self._spare_txns = None
if self._dbs:
self._dbs.clear()
self._dbs = None
self._db = None
_lib.mdb_env_close(self._env)
self._env = _invalid
def path(self):
"""Directory path or file name prefix where this environment is
stored.
Equivalent to `mdb_env_get_path()
<http://lmdb.tech/doc/group__mdb.html#gac699fdd8c4f8013577cb933fb6a757fe>`_
"""
path = _ffi.new('char **')
rc = _lib.mdb_env_get_path(self._env, path)
if rc:
raise _error("mdb_env_get_path", rc)
return _ffi.string(path[0]).decode(sys.getfilesystemencoding())
def copy(self, path, compact=False, txn=None):
"""Make a consistent copy of the environment in the given destination
directory.
`compact`:
If ``True``, perform compaction while copying: omit free pages and
sequentially renumber all pages in output. This option consumes
more CPU and runs more slowly than the default, but may produce a
smaller output database.
`txn`:
If provided, the backup will be taken from the database with
respect to that transaction, otherwise a temporary read-only
transaction will be created. Note: passing a transaction here is
not supported if the module was built with LMDB_PURE. Note:
this parameter may be set only if compact=True.
Equivalent to `mdb_env_copy2() or mdb_env_copy3()
<http://lmdb.tech/doc/group__mdb.html#ga5d51d6130325f7353db0955dbedbc378>`_
"""
flags = _lib.MDB_CP_COMPACT if compact else 0
if txn and not _have_patched_lmdb:
raise TypeError("Non-patched LMDB doesn't support transaction with env.copy")
if txn and not flags:
raise TypeError("txn argument only compatible with compact=True")
encoded = path.encode(sys.getfilesystemencoding())
if _have_patched_lmdb:
rc = _lib.mdb_env_copy3(self._env, encoded, flags, txn._txn if txn else _ffi.NULL)
if rc:
raise _error("mdb_env_copy3", rc)
else:
rc = _lib.mdb_env_copy2(self._env, encoded, flags)
if rc:
raise _error("mdb_env_copy2", rc)
def copyfd(self, fd, compact=False, txn=None):
"""Copy a consistent version of the environment to file descriptor
`fd`.
`compact`:
If ``True``, perform compaction while copying: omit free pages and
sequentially renumber all pages in output. This option consumes
more CPU and runs more slowly than the default, but may produce a
smaller output database.
`txn`:
If provided, the backup will be taken from the database with
respect to that transaction, otherwise a temporary read-only
transaction will be created. Note: this parameter being non-None
is not available if the module was built with LMDB_PURE.
Equivalent to `mdb_env_copyfd2() or mdb_env_copyfd3
<http://lmdb.tech/doc/group__mdb.html#ga5d51d6130325f7353db0955dbedbc378>`_
"""
if txn and not _have_patched_lmdb:
raise TypeError("Non-patched LMDB doesn't support transaction with env.copy")
if is_win32:
# Convert C library handle to kernel handle.
fd = msvcrt.get_osfhandle(fd)
flags = _lib.MDB_CP_COMPACT if compact else 0
if txn and not flags:
raise TypeError("txn argument only compatible with compact=True")
if _have_patched_lmdb:
rc = _lib.mdb_env_copyfd3(self._env, fd, flags, txn._txn if txn else _ffi.NULL)
if rc:
raise _error("mdb_env_copyfd3", rc)
else:
rc = _lib.mdb_env_copyfd2(self._env, fd, flags)
if rc:
raise _error("mdb_env_copyfd2", rc)
def sync(self, force=False):
"""Flush the data buffers to disk.
Equivalent to `mdb_env_sync()
<http://lmdb.tech/doc/group__mdb.html#ga85e61f05aa68b520cc6c3b981dba5037>`_
Data is always written to disk when :py:meth:`Transaction.commit` is
called, but the operating system may keep it buffered. MDB always
flushes the OS buffers upon commit as well, unless the environment was
opened with `sync=False` or `metasync=False`.
`force`:
If ``True``, force a synchronous flush. Otherwise if the
environment was opened with `sync=False` the flushes will be
omitted, and with `map_async=True` they will be asynchronous.
"""
rc = _lib.mdb_env_sync(self._env, force)
if rc:
raise _error("mdb_env_sync", rc)
def _convert_stat(self, st):
"""Convert a MDB_stat to a dict.
"""
return {
"psize": st.ms_psize,
"depth": st.ms_depth,
"branch_pages": st.ms_branch_pages,
"leaf_pages": st.ms_leaf_pages,
"overflow_pages": st.ms_overflow_pages,
"entries": st.ms_entries
}
def stat(self):
"""stat()
Return some environment statistics for the default database as a dict:
+--------------------+---------------------------------------+
| ``psize`` | Size of a database page in bytes. |
+--------------------+---------------------------------------+
| ``depth`` | Height of the B-tree. |
+--------------------+---------------------------------------+
| ``branch_pages`` | Number of internal (non-leaf) pages. |
+--------------------+---------------------------------------+
| ``leaf_pages`` | Number of leaf pages. |
+--------------------+---------------------------------------+
| ``overflow_pages`` | Number of overflow pages. |
+--------------------+---------------------------------------+
| ``entries`` | Number of data items. |
+--------------------+---------------------------------------+
Equivalent to `mdb_env_stat()
<http://lmdb.tech/doc/group__mdb.html#gaf881dca452050efbd434cd16e4bae255>`_
"""
st = _ffi.new('MDB_stat *')
rc = _lib.mdb_env_stat(self._env, st)
if rc:
raise _error("mdb_env_stat", rc)
return self._convert_stat(st)
def info(self):
"""Return some nice environment information as a dict:
+--------------------+---------------------------------------------+
| ``map_addr`` | Address of database map in RAM. |
+--------------------+---------------------------------------------+
| ``map_size`` | Size of database map in RAM. |
+--------------------+---------------------------------------------+
| ``last_pgno`` | ID of last used page. |
+--------------------+---------------------------------------------+
| ``last_txnid`` | ID of last committed transaction. |
+--------------------+---------------------------------------------+
| ``max_readers`` | Number of reader slots allocated in the |
| | lock file. Equivalent to the value of |
| | `maxreaders=` specified by the first |
| | process opening the Environment. |
+--------------------+---------------------------------------------+
| ``num_readers`` | Maximum number of reader slots in |
| | simultaneous use since the lock file was |
| | initialized. |
+--------------------+---------------------------------------------+
Equivalent to `mdb_env_info()
<http://lmdb.tech/doc/group__mdb.html#ga18769362c7e7d6cf91889a028a5c5947>`_
"""
info = _ffi.new('MDB_envinfo *')
rc = _lib.mdb_env_info(self._env, info)
if rc:
raise _error("mdb_env_info", rc)
return {
"map_addr": int(_ffi.cast('long', info.me_mapaddr)),
"map_size": info.me_mapsize,
"last_pgno": info.me_last_pgno,
"last_txnid": info.me_last_txnid,
"max_readers": info.me_maxreaders,
"num_readers": info.me_numreaders
}
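# Introspection sketch (not part of the original source; assumes an open
# Environment `env`): stat() and info() return plain dicts, so they can be
# logged or compared directly. The byte count below is only an estimate.
#
#   st = env.stat()
#   inf = env.info()
#   print('entries:', st['entries'])
#   print('approx. bytes used:', (inf['last_pgno'] + 1) * st['psize'])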
def flags(self):
"""Return a dict describing Environment constructor flags used to
instantiate this environment."""
flags_ = _ffi.new('unsigned int[]', 1)
rc = _lib.mdb_env_get_flags(self._env, flags_)
if rc:
raise _error("mdb_env_get_flags", rc)
flags = flags_[0]
return {
'subdir': not (flags & _lib.MDB_NOSUBDIR),
'readonly': bool(flags & _lib.MDB_RDONLY),
'metasync': not (flags & _lib.MDB_NOMETASYNC),
'sync': not (flags & _lib.MDB_NOSYNC),
'map_async': bool(flags & _lib.MDB_MAPASYNC),
'readahead': not (flags & _lib.MDB_NORDAHEAD),
'writemap': bool(flags & _lib.MDB_WRITEMAP),
'meminit': not (flags & _lib.MDB_NOMEMINIT),
'lock': not (flags & _lib.MDB_NOLOCK),
}
def max_key_size(self):
"""Return the maximum size in bytes of a record's key part. This
matches the ``MDB_MAXKEYSIZE`` constant set at compile time."""
return _lib.mdb_env_get_maxkeysize(self._env)
def max_readers(self):
"""Return the maximum number of readers specified during open of the
environment by the first process. This is the same as `max_readers=`
specified to the constructor if this process was the first to open the
environment."""
readers_ = _ffi.new('unsigned int[]', 1)
rc = _lib.mdb_env_get_maxreaders(self._env, readers_)
if rc:
raise _error("mdb_env_get_maxreaders", rc)
return readers_[0]
def readers(self):
"""Return a multi line Unicode string describing the current state of
the reader lock table."""
_callbacks.msg_func = []
try:
rc = _lib.mdb_reader_list(self._env, _msg_func, _ffi.NULL)
if rc:
raise _error("mdb_reader_list", rc)
return UnicodeType().join(_callbacks.msg_func)
finally:
del _callbacks.msg_func
def reader_check(self):
"""Search the reader lock table for stale entries, for example due to a
crashed process. Returns the number of stale entries that were cleared.
"""
reaped = _ffi.new('int[]', 1)
rc = _lib.mdb_reader_check(self._env, reaped)
if rc:
raise _error('mdb_reader_check', rc)
return reaped[0]
def open_db(self, key=None, txn=None, reverse_key=False, dupsort=False,
create=True, integerkey=False, integerdup=False,
dupfixed=False):
"""
Open a database, returning an instance of :py:class:`_Database`. Repeat
:py:meth:`Environment.open_db` calls for the same name will return the
same handle. As a special case, the main database is always open.
Equivalent to `mdb_dbi_open()
<http://lmdb.tech/doc/group__mdb.html#gac08cad5b096925642ca359a6d6f0562a>`_
Named databases are implemented by *storing a special descriptor in the
main database*. All databases in an environment *share the same file*.
Because the descriptor is present in the main database, attempts to
create a named database will fail if a key matching the database's name
already exists. Furthermore *the key is visible to lookups and
enumerations*. If your main database keyspace conflicts with the names
you use for named databases, then move the contents of your main
database to another named database.
::
>>> env = lmdb.open('/tmp/test', max_dbs=2)
>>> with env.begin(write=True) as txn:
... txn.put('somename', 'somedata')
>>> # Error: database cannot share name of existing key!
>>> subdb = env.open_db('somename')
A newly created database will not exist if the transaction that created
it aborted, nor if another process deleted it. The handle resides in
the shared environment, it is not owned by the current transaction or
process. Only one thread should call this function; it is not
mutex-protected in a read-only transaction.
The `dupsort`, `integerkey`, `integerdup`, and `dupfixed` parameters are
ignored if the database already exists. The state of those settings is
persistent and immutable per database. See :py:meth:`_Database.flags`
to view the state of those options for an opened database. A consequence
of the immutability of these flags is that the default non-named database
will never have these flags set.
Preexisting transactions, other than the current transaction and any
parents, must not use the new handle, nor must their children.
`key`:
Bytestring database name. If ``None``, indicates the main
database should be returned, otherwise indicates a named
database should be created inside the main database.
In other words, *a key representing the database will be
visible in the main database, and the database name cannot
conflict with any existing key.*
`txn`:
Transaction used to create the database if it does not exist.
If unspecified, a temporary write transaction is used. Do not
call :py:meth:`open_db` from inside an existing transaction
without supplying it here. Note the passed transaction must
have `write=True`.
`reverse_key`:
If ``True``, keys are compared from right to left (e.g. DNS
names).
`dupsort`:
Duplicate keys may be used in the database. (Or, from another
perspective, keys may have multiple data items, stored in
sorted order.) By default keys must be unique and may have only
a single data item.
`create`:
If ``True``, create the database if it doesn't exist, otherwise
raise an exception.
`integerkey`:
If ``True``, indicates keys in the database are C unsigned
or ``size_t`` integers encoded in native byte order. Keys must
all be either unsigned or ``size_t``, they cannot be mixed in a
single database.
`integerdup`:
If ``True``, values in the
database are C unsigned or ``size_t`` integers encoded in
native byte order. Implies `dupsort` and `dupfixed` are
``True``.
`dupfixed`:
If ``True``, values for each key
in the database are of fixed size, allowing each additional
duplicate value for a key to be stored without a header
indicating its size. Implies `dupsort` is ``True``.
"""
if isinstance(key, UnicodeType):
raise TypeError('key must be bytes')
if key is None and (reverse_key or dupsort or integerkey or integerdup
or dupfixed):
raise ValueError('May not set flags on the main database')
db = self._dbs.get(key)
if db:
return db
if integerdup:
dupfixed = True
if dupfixed:
dupsort = True
if txn:
db = _Database(self, txn, key, reverse_key, dupsort, create,
integerkey, integerdup, dupfixed)
else:
try:
self._creating_db_in_readonly = True
with self.begin(write=not self.readonly) as txn:
db = _Database(self, txn, key, reverse_key, dupsort, create,
integerkey, integerdup, dupfixed)
finally:
self._creating_db_in_readonly = False
self._dbs[key] = db
return db
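# Named-database sketch (not from the original source; the names are
# hypothetical): each named database is opened once per Environment, and the
# returned handle is passed via `db=` when starting transactions or cursors.
#
#   users = env.open_db(b'users')
#   events = env.open_db(b'events', dupsort=True)
#   with env.begin(write=True, db=users) as txn:
#       txn.put(b'alice', b'42')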
def begin(self, db=None, parent=None, write=False, buffers=False):
"""Shortcut for :py:class:`lmdb.Transaction`"""
return Transaction(self, db, parent, write, buffers)
class _Database(object):
"""
Internal database handle. This class is opaque, save a single method.
Should not be constructed directly. Use :py:meth:`Environment.open_db`
instead.
"""
def __init__(self, env, txn, name, reverse_key, dupsort, create,
integerkey, integerdup, dupfixed):
env._deps.add(self)
self._deps = set()
self._name = name
flags = 0
if reverse_key:
flags |= _lib.MDB_REVERSEKEY
if dupsort:
flags |= _lib.MDB_DUPSORT
if create:
flags |= _lib.MDB_CREATE
if integerkey:
flags |= _lib.MDB_INTEGERKEY
if integerdup:
flags |= _lib.MDB_INTEGERDUP
if dupfixed:
flags |= _lib.MDB_DUPFIXED
dbipp = _ffi.new('MDB_dbi *')
self._dbi = None
rc = _lib.mdb_dbi_open(txn._txn, name or _ffi.NULL, flags, dbipp)
if rc:
raise _error("mdb_dbi_open", rc)
self._dbi = dbipp[0]
self._load_flags(txn)
def _load_flags(self, txn):
"""Load MDB's notion of the database flags."""
flags_ = _ffi.new('unsigned int[]', 1)
rc = _lib.mdb_dbi_flags(txn._txn, self._dbi, flags_)
if rc:
raise _error("mdb_dbi_flags", rc)
self._flags = flags_[0]
def flags(self, *args):
"""Return the database's associated flags as a dict of _Database
constructor kwargs."""
if len(args) > 1:
raise TypeError('flags takes 0 or 1 arguments')
return {
'reverse_key': bool(self._flags & _lib.MDB_REVERSEKEY),
'dupsort': bool(self._flags & _lib.MDB_DUPSORT),
'integerkey': bool(self._flags & _lib.MDB_INTEGERKEY),
'integerdup': bool(self._flags & _lib.MDB_INTEGERDUP),
'dupfixed': bool(self._flags & _lib.MDB_DUPFIXED),
}
def _invalidate(self):
self._dbi = _invalid
open = Environment
class Transaction(object):
"""
A transaction object. All operations require a transaction handle,
transactions may be read-only or read-write. Write transactions may not
span threads. Transaction objects implement the context manager protocol,
so that reliable release of the transaction happens even in the face of
unhandled exceptions:
.. code-block:: python
# Transaction aborts correctly:
with env.begin(write=True) as txn:
crash()
# Transaction commits automatically:
with env.begin(write=True) as txn:
txn.put(b'a', b'b')
Equivalent to `mdb_txn_begin()
<http://lmdb.tech/doc/group__mdb.html#gad7ea55da06b77513609efebd44b26920>`_
`env`:
Environment the transaction should be on.
`db`:
Default named database to operate on. If unspecified, defaults to
the environment's main database. Can be overridden on a per-call
basis below.
`parent`:
``None``, or a parent transaction (see lmdb.h).
`write`:
Transactions are read-only by default. To modify the database, you
must pass `write=True`. This flag is ignored if
:py:class:`Environment` was opened with ``readonly=True``.
`buffers`:
If ``True``, indicates :py:func:`buffer` objects should be yielded
instead of bytestrings. This setting applies to the
:py:class:`Transaction` instance itself and any :py:class:`Cursors
<Cursor>` created within the transaction.
This feature significantly improves performance, since MDB has a
zero-copy design, but it requires care when manipulating the
returned buffer objects. The benefit of this facility is diminished
when using small keys and values.
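A rough sketch of the `buffers` behaviour (the key below is
hypothetical). The returned buffers are only valid until the next
write operation or the end of the transaction, so copy them out if
they must live longer:

.. code-block:: python

    with env.begin(buffers=True) as txn:
        buf = txn.get(b'some-key')
        if buf is not None:
            data = bytes(buf)  # copy before the transaction ends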
"""
# If constructor fails, then __del__ will attempt to access these
# attributes.
_env = _invalid
_txn = _invalid
_parent = None
_write = False
# Mutations occurred since transaction start. Required to know when Cursor
# key/value must be refreshed.
_mutations = 0
def __init__(self, env, db=None, parent=None, write=False, buffers=False):
env._deps.add(self)
self.env = env # hold ref
self._db = db or env._db
self._env = env._env
self._key = _ffi.new('MDB_val *')
self._val = _ffi.new('MDB_val *')
self._to_py = _mvbuf if buffers else _mvstr
self._deps = set()
if parent:
self._parent = parent
parent_txn = parent._txn
parent._deps.add(self)
else:
parent_txn = _ffi.NULL
if write:
if env.readonly:
msg = 'Cannot start write transaction with read-only env'
raise _error(msg, _lib.EACCES)
txnpp = _ffi.new('MDB_txn **')
rc = _lib.mdb_txn_begin(self._env, parent_txn, 0, txnpp)
if rc:
raise _error("mdb_txn_begin", rc)
self._txn = txnpp[0]
self._write = True
else:
try: # Exception catch in order to avoid racy 'if txns:' test
if env._creating_db_in_readonly: # Don't use spare txns for creating a DB when read-only
raise IndexError
self._txn = env._spare_txns.pop()
env._max_spare_txns += 1
rc = _lib.mdb_txn_renew(self._txn)
if rc:
while self._deps:
self._deps.pop()._invalidate()
_lib.mdb_txn_abort(self._txn)
self._txn = _invalid
self._invalidate()
raise _error("mdb_txn_renew", rc)
except IndexError:
txnpp = _ffi.new('MDB_txn **')
flags = _lib.MDB_RDONLY
rc = _lib.mdb_txn_begin(self._env, parent_txn, flags, txnpp)
if rc:
raise _error("mdb_txn_begin", rc)
self._txn = txnpp[0]
def _invalidate(self):
if self._txn:
self.abort()
self.env._deps.discard(self)
self._parent = None
self._env = _invalid
def __del__(self):
self.abort()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type:
self.abort()
else:
self.commit()
def id(self):
"""id()
Return the transaction's ID.
This returns the identifier associated with this transaction. For a
read-only transaction, this corresponds to the snapshot being read;
concurrent readers will frequently have the same transaction ID.
"""
return _lib.mdb_txn_id(self._txn)
def stat(self, db):
"""stat(db)
Return statistics like :py:meth:`Environment.stat`, except for a single
DBI. `db` must be a database handle returned by :py:meth:`open_db`.
"""
st = _ffi.new('MDB_stat *')
rc = _lib.mdb_stat(self._txn, db._dbi, st)
if rc:
raise _error('mdb_stat', rc)
return self.env._convert_stat(st)
def drop(self, db, delete=True):
"""Delete all keys in a named database and optionally delete the named
database itself. Deleting the named database causes it to become
unavailable, and invalidates existing cursors.
Equivalent to `mdb_drop()
<http://lmdb.tech/doc/group__mdb.html#gab966fab3840fc54a6571dfb32b00f2db>`_
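A short sketch of dropping a hypothetical named database (assumes the
environment was opened with `max_dbs` > 0):

.. code-block:: python

    scratch = env.open_db(b'scratch')
    with env.begin(write=True) as txn:
        txn.drop(scratch, delete=True)  # remove the database itself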
"""
while db._deps:
db._deps.pop()._invalidate()
rc = _lib.mdb_drop(self._txn, db._dbi, delete)
self._mutations += 1
if rc:
raise _error("mdb_drop", rc)
if db._name in self.env._dbs:
del self.env._dbs[db._name]
def _cache_spare(self):
# In order to avoid taking and maintaining a lock, a race is allowed
# below which may result in more spare txns than desired. It seems
# unlikely the race could ever result in a large amount of spare txns,
# and in any case a correctly configured program should not be opening
# more read-only transactions than there are configured spares.
if self.env._max_spare_txns > 0:
_lib.mdb_txn_reset(self._txn)
self.env._spare_txns.append(self._txn)
self.env._max_spare_txns -= 1
self._txn = _invalid
self._invalidate()
return True
return False
def commit(self):
"""Commit the pending transaction.
Equivalent to `mdb_txn_commit()
<http://lmdb.tech/doc/group__mdb.html#ga846fbd6f46105617ac9f4d76476f6597>`_
"""
while self._deps:
self._deps.pop()._invalidate()
if self._write or not self._cache_spare():
rc = _lib.mdb_txn_commit(self._txn)
self._txn = _invalid
if rc:
raise _error("mdb_txn_commit", rc)
self._invalidate()
def abort(self):
"""Abort the pending transaction. Repeat calls to :py:meth:`abort` have
no effect after a previously successful :py:meth:`commit` or
:py:meth:`abort`, or after the associated :py:class:`Environment` has
been closed.
Equivalent to `mdb_txn_abort()
<http://lmdb.tech/doc/group__mdb.html#ga73a5938ae4c3239ee11efa07eb22b882>`_
"""
if self._txn:
while self._deps:
self._deps.pop()._invalidate()
if self._write or not self._cache_spare():
rc = _lib.mdb_txn_abort(self._txn)
self._txn = _invalid
if rc:
raise _error("mdb_txn_abort", rc)
self._invalidate()
def get(self, key, default=None, db=None):
"""Fetch the first value matching `key`, returning `default` if `key`
does not exist. A cursor must be used to fetch all values for a key in
a `dupsort=True` database.
Equivalent to `mdb_get()
<http://lmdb.tech/doc/group__mdb.html#ga8bf10cd91d3f3a83a34d04ce6b07992d>`_
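For example (sketch; the key and default below are hypothetical):

.. code-block:: python

    with env.begin() as txn:
        value = txn.get(b'missing-key', default=b'')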
"""
rc = _lib.pymdb_get(self._txn, (db or self._db)._dbi,
key, len(key), self._val)
if rc:
if rc == _lib.MDB_NOTFOUND:
return default
raise _error("mdb_cursor_get", rc)
preload(self._val)
return self._to_py(self._val)
def put(self, key, value, dupdata=True, overwrite=True, append=False,
db=None):
"""Store a record, returning ``True`` if it was written, or ``False``
to indicate the key was already present and `overwrite=False`.
On success, the cursor is positioned on the new record.
Equivalent to `mdb_put()
<http://lmdb.tech/doc/group__mdb.html#ga4fa8573d9236d54687c61827ebf8cac0>`_
`key`:
Bytestring key to store.
`value`:
Bytestring value to store.
`dupdata`:
If ``False`` and database was opened with `dupsort=True`, will return
``False`` if the key already has that value. In other words, this only
affects the return value.
`overwrite`:
If ``False``, do not overwrite any existing matching key. If
``False`` and writing to a `dupsort=True` database, this will not add
a value to the key and this function will return ``False``.
`append`:
If ``True``, append the pair to the end of the database without
comparing its order first. Appending a key that is not greater
than the highest existing key will fail and return ``False``.
`db`:
Named database to operate on. If unspecified, defaults to the
database given to the :py:class:`Transaction` constructor.
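A small sketch of the `overwrite` behaviour (keys and values below are
hypothetical):

.. code-block:: python

    with env.begin(write=True) as txn:
        txn.put(b'k', b'v1')                            # returns True
        wrote = txn.put(b'k', b'v2', overwrite=False)   # returns False
        assert wrote is False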
"""
flags = 0
if not dupdata:
flags |= _lib.MDB_NODUPDATA
if not overwrite:
flags |= _lib.MDB_NOOVERWRITE
if append:
flags |= _lib.MDB_APPEND
rc = _lib.pymdb_put(self._txn, (db or self._db)._dbi,
key, len(key), value, len(value), flags)
self._mutations += 1
if rc:
if rc == _lib.MDB_KEYEXIST:
return False
raise _error("mdb_put", rc)
return True
def replace(self, key, value, db=None):
"""Use a temporary cursor to invoke :py:meth:`Cursor.replace`.
`db`:
Named database to operate on. If unspecified, defaults to the
database given to the :py:class:`Transaction` constructor.
"""
with Cursor(db or self._db, self) as curs:
return curs.replace(key, value)
def pop(self, key, db=None):
"""Use a temporary cursor to invoke :py:meth:`Cursor.pop`.
`db`:
Named database to operate on. If unspecified, defaults to the
database given to the :py:class:`Transaction` constructor.
"""
with Cursor(db or self._db, self) as curs:
return curs.pop(key)
def delete(self, key, value=EMPTY_BYTES, db=None):
"""Delete a key from the database.
Equivalent to `mdb_del()
<http://lmdb.tech/doc/group__mdb.html#gab8182f9360ea69ac0afd4a4eaab1ddb0>`_
`key`:
The key to delete.
`value`:
If the database was opened with `dupsort=True` and `value` is not
the empty bytestring, then delete elements matching only this
`(key, value)` pair, otherwise all values for `key` are deleted.
Returns ``True`` if at least one key was deleted.
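A sketch for a hypothetical `dupsort=True` database handle `dupdb`:

.. code-block:: python

    with env.begin(write=True, db=dupdb) as txn:
        txn.delete(b'k', b'v1')  # remove only the (b'k', b'v1') pair
        txn.delete(b'k')         # remove every remaining value of b'k'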
"""
if value is None: # for bug-compatibility with cpython impl
value = EMPTY_BYTES
rc = _lib.pymdb_del(self._txn, (db or self._db)._dbi,
key, len(key), value, len(value))
self._mutations += 1
if rc:
if rc == _lib.MDB_NOTFOUND:
return False
raise _error("mdb_del", rc)
return True
def cursor(self, db=None):
"""Shortcut for ``lmdb.Cursor(db, self)``"""
return Cursor(db or self._db, self)
class Cursor(object):
"""
Structure for navigating a database.
Equivalent to `mdb_cursor_open()
<http://lmdb.tech/doc/group__mdb.html#ga9ff5d7bd42557fd5ee235dc1d62613aa>`_
`db`:
:py:class:`_Database` to navigate.
`txn`:
:py:class:`Transaction` to navigate.
As a convenience, :py:meth:`Transaction.cursor` can be used to quickly
return a cursor:
::
>>> env = lmdb.open('/tmp/foo')
>>> child_db = env.open_db(b'child_db')
>>> with env.begin() as txn:
... cursor = txn.cursor() # Cursor on main database.
... cursor2 = txn.cursor(child_db) # Cursor on child database.
Cursors start in an unpositioned state. If :py:meth:`iternext` or
:py:meth:`iterprev` are used in this state, iteration proceeds from the
start or end respectively. Iterators directly position using the cursor,
meaning strange behavior results when multiple iterators exist on the same
cursor.
.. note::
From the perspective of the Python binding, cursors return to an
'unpositioned' state once any scanning or seeking method (e.g.
:py:meth:`next`, :py:meth:`prev_nodup`, :py:meth:`set_range`) returns
``False`` or raises an exception. This is primarily to ensure safe,
consistent semantics in the face of any error condition.
When the Cursor returns to an unpositioned state, its :py:meth:`key`
and :py:meth:`value` return empty strings to indicate there is no
active position, although internally the LMDB cursor may still have a
valid position.
This may lead to slightly surprising behaviour when iterating the
values for a `dupsort=True` database's keys, since methods such as
:py:meth:`iternext_dup` will cause Cursor to appear unpositioned,
despite it returning ``False`` only to indicate there are no more
values for the current key. In that case, simply calling
:py:meth:`next` would cause iteration to resume at the next available
key.
This behaviour may change in future.
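For example (sketch, assuming a `dupsort=True` database and a
hypothetical key):

.. code-block:: python

    if cursor.set_key(b'foo'):
        values = list(cursor.iternext_dup())  # cursor now appears unpositioned
        if cursor.next():                     # resumes at the next key
            print(cursor.key())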
Iterator methods such as :py:meth:`iternext` and :py:meth:`iterprev` accept
`keys` and `values` arguments. If both are ``True``, then the value of
:py:meth:`item` is yielded on each iteration. If only `keys` is ``True``,
:py:meth:`key` is yielded, otherwise only :py:meth:`value` is yielded.
Prior to iteration, a cursor can be positioned anywhere in the database:
::
>>> with env.begin() as txn:
... cursor = txn.cursor()
... if not cursor.set_range(b'5'): # Position at first key >= b'5'.
... print('Not found!')
... else:
... for key, value in cursor: # Iterate from first key >= b'5'.
... print((key, value))
Iteration is not required to navigate, and sometimes results in ugly or
inefficient code. In cases where the iteration order is not obvious, or is
related to the data being read, use of :py:meth:`set_key`,
:py:meth:`set_range`, :py:meth:`key`, :py:meth:`value`, and :py:meth:`item`
may be preferable:
::
>>> # Record the path from a child to the root of a tree.
>>> path = [b'child14123']
>>> while path[-1] != b'root':
... assert cursor.set_key(path[-1]), \\
... 'Tree is broken! Path: %s' % (path,)
... path.append(cursor.value())
"""
def __init__(self, db, txn):
db._deps.add(self)
txn._deps.add(self)
self.db = db # hold ref
self.txn = txn # hold ref
self._dbi = db._dbi
self._txn = txn._txn
self._key = _ffi.new('MDB_val *')
self._val = _ffi.new('MDB_val *')
self._valid = False
self._to_py = txn._to_py
curpp = _ffi.new('MDB_cursor **')
self._cur = None
rc = _lib.mdb_cursor_open(self._txn, self._dbi, curpp)
if rc:
raise _error("mdb_cursor_open", rc)
self._cur = curpp[0]
# If Transaction.mutations!=last_mutation, must MDB_GET_CURRENT to
# refresh `key' and `val'.
self._last_mutation = txn._mutations
def _invalidate(self):
if self._cur:
_lib.mdb_cursor_close(self._cur)
self.db._deps.discard(self)
self.txn._deps.discard(self)
self._cur = _invalid
self._dbi = _invalid
self._txn = _invalid
def __del__(self):
self._invalidate()
def close(self):
"""Close the cursor, freeing its associated resources."""
self._invalidate()
def __enter__(self):
return self
def __exit__(self, _1, _2, _3):
self._invalidate()
def key(self):
"""Return the current key."""
# Must refresh `key` and `val` following mutation.
if self._last_mutation != self.txn._mutations:
self._cursor_get(_lib.MDB_GET_CURRENT)
return self._to_py(self._key)
def value(self):
"""Return the current value."""
# Must refresh `key` and `val` following mutation.
if self._last_mutation != self.txn._mutations:
self._cursor_get(_lib.MDB_GET_CURRENT)
preload(self._val)
return self._to_py(self._val)
def item(self):
"""Return the current `(key, value)` pair."""
# Must refresh `key` and `val` following mutation.
if self._last_mutation != self.txn._mutations:
self._cursor_get(_lib.MDB_GET_CURRENT)
preload(self._val)
return self._to_py(self._key), self._to_py(self._val)
def _iter(self, op, keys, values):
if not values:
get = self.key
elif not keys:
get = self.value
else:
get = self.item
cur = self._cur
key = self._key
val = self._val
rc = 0
while self._valid:
yield get()
rc = _lib.mdb_cursor_get(cur, key, val, op)
self._valid = not rc
if rc:
self._key.mv_size = 0
self._val.mv_size = 0
if rc != _lib.MDB_NOTFOUND:
raise _error("mdb_cursor_get", rc)
def iternext(self, keys=True, values=True):
"""Return a forward iterator that yields the current element before
calling :py:meth:`next`, repeating until the end of the database is
reached. As a convenience, :py:class:`Cursor` implements the iterator
protocol by automatically returning a forward iterator when invoked:
::
>>> # Equivalent:
>>> it = iter(cursor)
>>> it = cursor.iternext(keys=True, values=True)
If the cursor is not yet positioned, it is moved to the first key in
the database, otherwise iteration proceeds from the current position.
"""
if not self._valid:
self.first()
return self._iter(_lib.MDB_NEXT, keys, values)
__iter__ = iternext
def iternext_dup(self, keys=False, values=True):
"""Return a forward iterator that yields the current value
("duplicate") of the current key before calling :py:meth:`next_dup`,
repeating until the last value of the current key is reached.
Only meaningful for databases opened with `dupsort=True`.
.. code-block:: python
if not cursor.set_key(b"foo"):
print("No values found for 'foo'")
else:
for idx, data in enumerate(cursor.iternext_dup()):
print("%d'th value for 'foo': %s" % (idx, data))
"""
return self._iter(_lib.MDB_NEXT_DUP, keys, values)
def iternext_nodup(self, keys=True, values=False):
"""Return a forward iterator that yields the current value
("duplicate") of the current key before calling :py:meth:`next_nodup`,
repeating until the end of the database is reached.
Only meaningful for databases opened with `dupsort=True`.
If the cursor is not yet positioned, it is moved to the first key in
the database, otherwise iteration proceeds from the current position.
.. code-block:: python
for key in cursor.iternext_nodup():
print("Key '%s' has %d values" % (key, cursor.count()))
"""
if not self._valid:
self.first()
return self._iter(_lib.MDB_NEXT_NODUP, keys, values)
def iterprev(self, keys=True, values=True):
"""Return a reverse iterator that yields the current element before
calling :py:meth:`prev`, until the start of the database is reached.
If the cursor is not yet positioned, it is moved to the last key in
the database, otherwise iteration proceeds from the current position.
::
>>> with env.begin() as txn:
... for i, (key, value) in enumerate(txn.cursor().iterprev()):
... print('%dth last item is (%r, %r)' % (1+i, key, value))
"""
if not self._valid:
self.last()
return self._iter(_lib.MDB_PREV, keys, values)
def iterprev_dup(self, keys=False, values=True):
"""Return a reverse iterator that yields the current value
("duplicate") of the current key before calling :py:meth:`prev_dup`,
repeating until the first value of the current key is reached.
Only meaningful for databases opened with `dupsort=True`.
"""
return self._iter(_lib.MDB_PREV_DUP, keys, values)
def iterprev_nodup(self, keys=True, values=False):
"""Return a reverse iterator that yields the current value
("duplicate") of the current key before calling :py:meth:`prev_nodup`,
repeating until the start of the database is reached.
If the cursor is not yet positioned, it is moved to the last key in
the database, otherwise iteration proceeds from the current position.
Only meaningful for databases opened with `dupsort=True`.
"""
if not self._valid:
self.last()
return self._iter(_lib.MDB_PREV_NODUP, keys, values)
def _cursor_get(self, op):
rc = _lib.mdb_cursor_get(self._cur, self._key, self._val, op)
self._valid = v = not rc
self._last_mutation = self.txn._mutations
if rc:
self._key.mv_size = 0
self._val.mv_size = 0
if rc != _lib.MDB_NOTFOUND:
if not (rc == _lib.EINVAL and op == _lib.MDB_GET_CURRENT):
raise _error("mdb_cursor_get", rc)
return v
def _cursor_get_kv(self, op, k, v):
rc = _lib.pymdb_cursor_get(self._cur, k, len(k), v, len(v),
self._key, self._val, op)
self._valid = v = not rc
if rc:
self._key.mv_size = 0
self._val.mv_size = 0
if rc != _lib.MDB_NOTFOUND:
if not (rc == _lib.EINVAL and op == _lib.MDB_GET_CURRENT):
raise _error("mdb_cursor_get", rc)
return v
def first(self):
"""Move to the first key in the database, returning ``True`` on success
or ``False`` if the database is empty.
If the database was opened with `dupsort=True` and the key contains
duplicates, the cursor is positioned on the first value ("duplicate").
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_FIRST
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_FIRST)
def first_dup(self):
"""Move to the first value ("duplicate") for the current key, returning
``True`` on success or ``False`` if the database is empty.
Only meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_FIRST_DUP
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_FIRST_DUP)
def last(self):
"""Move to the last key in the database, returning ``True`` on success
or ``False`` if the database is empty.
If the database was opened with `dupsort=True` and the key contains
duplicates, the cursor is positioned on the last value ("duplicate").
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_LAST
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_LAST)
def last_dup(self):
"""Move to the last value ("duplicate") for the current key, returning
``True`` on success or ``False`` if the database is empty.
Only meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_LAST_DUP
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_LAST_DUP)
def prev(self):
"""Move to the previous element, returning ``True`` on success or
``False`` if there is no previous item.
For databases opened with `dupsort=True`, moves to the previous data
item ("duplicate") for the current key if one exists, otherwise moves
to the previous key.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_PREV
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_PREV)
def prev_dup(self):
"""Move to the previous value ("duplicate") of the current key,
returning ``True`` on success or ``False`` if there is no previous
value.
Only meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_PREV_DUP
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_PREV_DUP)
def prev_nodup(self):
"""Move to the last value ("duplicate") of the previous key, returning
``True`` on success or ``False`` if there is no previous key.
Only meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_PREV_NODUP
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_PREV_NODUP)
def next(self):
"""Move to the next element, returning ``True`` on success or ``False``
if there is no next element.
For databases opened with `dupsort=True`, moves to the next value
("duplicate") for the current key if one exists, otherwise moves to the
first value of the next key.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_NEXT
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_NEXT)
def next_dup(self):
"""Move to the next value ("duplicate") of the current key, returning
``True`` on success or ``False`` if there is no next value.
Only meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_NEXT_DUP
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_NEXT_DUP)
def next_nodup(self):
"""Move to the first value ("duplicate") of the next key, returning
``True`` on success or ``False`` if there is no next key.
Only meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_NEXT_NODUP
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_NEXT_NODUP)
def set_key(self, key):
"""Seek exactly to `key`, returning ``True`` on success or ``False`` if
the exact key was not found. It is an error to :py:meth:`set_key` the
empty bytestring.
For databases opened with `dupsort=True`, moves to the first value
("duplicate") for the key.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_SET_KEY
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get_kv(_lib.MDB_SET_KEY, key, EMPTY_BYTES)
def set_key_dup(self, key, value):
"""Seek exactly to `(key, value)`, returning ``True`` on success or
``False`` if the exact key and value were not found. It is an error
to :py:meth:`set_key` the empty bytestring.
Only meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_GET_BOTH
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get_kv(_lib.MDB_GET_BOTH, key, value)
def get(self, key, default=None):
"""Equivalent to :py:meth:`set_key()`, except :py:meth:`value` is
returned when `key` is found, otherwise `default`.
"""
if self._cursor_get_kv(_lib.MDB_SET_KEY, key, EMPTY_BYTES):
return self.value()
return default
def getmulti(self, keys, dupdata=False, dupfixed_bytes=None, keyfixed=False):
"""Returns an iterable of `(key, value)` 2-tuples containing results
for each key in the iterable `keys`.
`keys`:
Iterable to read keys from.
`dupdata`:
If ``True`` and database was opened with `dupsort=True`, read
all duplicate values for each matching key.
`dupfixed_bytes`:
If database was opened with `dupsort=True` and `dupfixed=True`,
accepts the size of each value, in bytes, and applies an
optimization reducing the number of database lookups.
`keyfixed`:
If `dupfixed_bytes` is set and database key size is fixed,
setting keyfixed=True will result in this function returning
a memoryview to the results as a structured array of bytes.
The structured array can be instantiated by passing the
memoryview buffer to NumPy:
.. code-block:: python
key_bytes, val_bytes = 4, 8
dtype = np.dtype([('key', f'S{key_bytes}'), ('value', f'S{val_bytes}')])
arr = np.frombuffer(
    cur.getmulti(keys, dupdata=True, dupfixed_bytes=val_bytes, keyfixed=True),
    dtype=dtype,
)
"""
if dupfixed_bytes and dupfixed_bytes < 0:
raise _error("dupfixed_bytes must be a positive integer.")
elif (dupfixed_bytes or keyfixed) and not dupdata:
raise _error("dupdata is required for dupfixed_bytes/key_bytes.")
elif keyfixed and not dupfixed_bytes:
raise _error("dupfixed_bytes is required for key_bytes.")
if dupfixed_bytes:
get_op = _lib.MDB_GET_MULTIPLE
next_op = _lib.MDB_NEXT_MULTIPLE
else:
get_op = _lib.MDB_GET_CURRENT
next_op = _lib.MDB_NEXT_DUP
a = bytearray()
lst = list()
for key in keys:
if self.set_key(key):
while self._valid:
self._cursor_get(get_op)
preload(self._val)
key = self._to_py(self._key)
val = self._to_py(self._val)
if dupfixed_bytes:
gen = (
(key, val[i:i + dupfixed_bytes])
for i in range(0, len(val), dupfixed_bytes))
if keyfixed:
for k, v in gen:
a.extend(k + v)
else:
for k, v in gen:
lst.append((k, v))
else:
lst.append((key, val))
if dupdata:
self._cursor_get(next_op)
else:
break
if keyfixed:
return memoryview(a)
else:
return lst
def set_range(self, key):
"""Seek to the first key greater than or equal to `key`, returning
``True`` on success, or ``False`` to indicate the key was past the end of the
database. Behaves like :py:meth:`first` if `key` is the empty
bytestring.
For databases opened with `dupsort=True`, moves to the first value
("duplicate") for the key.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_SET_RANGE
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
if not key:
return self.first()
return self._cursor_get_kv(_lib.MDB_SET_RANGE, key, EMPTY_BYTES)
def set_range_dup(self, key, value):
"""Seek to the first key/value pair greater than or equal to `key`,
returning ``True`` on success, or ``False`` to indicate that `value` was past the
last value of `key` or that `(key, value)` was past the end of the database.
Only meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_GET_BOTH_RANGE
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
rc = self._cursor_get_kv(_lib.MDB_GET_BOTH_RANGE, key, value)
# issue #126: MDB_GET_BOTH_RANGE does not satisfy its documentation,
# and fails to update `key` and `value` on success. Therefore
# explicitly call MDB_GET_CURRENT after MDB_GET_BOTH_RANGE.
self._cursor_get(_lib.MDB_GET_CURRENT)
return rc
def delete(self, dupdata=False):
"""Delete the current element and move to the next, returning ``True``
on success or ``False`` if the database was empty.
If `dupdata` is ``True``, delete all values ("duplicates") for the
current key, otherwise delete only the currently positioned value. Only
meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_del()
<http://lmdb.tech/doc/group__mdb.html#ga26a52d3efcfd72e5bf6bd6960bf75f95>`_
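For example (sketch, assuming a hypothetical `dupsort=True` database):

.. code-block:: python

    if cursor.set_key(b'foo'):
        cursor.delete(dupdata=True)  # drop every value stored under b'foo'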
"""
v = self._valid
if v:
flags = _lib.MDB_NODUPDATA if dupdata else 0
rc = _lib.mdb_cursor_del(self._cur, flags)
self.txn._mutations += 1
if rc:
raise _error("mdb_cursor_del", rc)
self._cursor_get(_lib.MDB_GET_CURRENT)
v = rc == 0
return v
def count(self):
"""Return the number of values ("duplicates") for the current key.
Only meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_count()
<http://lmdb.tech/doc/group__mdb.html#ga4041fd1e1862c6b7d5f10590b86ffbe2>`_
"""
countp = _ffi.new('size_t *')
rc = _lib.mdb_cursor_count(self._cur, countp)
if rc:
raise _error("mdb_cursor_count", rc)
return countp[0]
def put(self, key, val, dupdata=True, overwrite=True, append=False):
"""Store a record, returning ``True`` if it was written, or ``False``
to indicate the key was already present and `overwrite=False`. On
success, the cursor is positioned on the key.
Equivalent to `mdb_cursor_put()
<http://lmdb.tech/doc/group__mdb.html#ga1f83ccb40011837ff37cc32be01ad91e>`_
`key`:
Bytestring key to store.
`val`:
Bytestring value to store.
`dupdata`:
If ``False`` and database was opened with `dupsort=True`, will return
``False`` if the key already has that value. In other words, this only
affects the return value.
`overwrite`:
If ``False``, do not overwrite the value for the key if it
exists, just return ``False``. For databases opened with
`dupsort=True`, ``False`` will always be returned if a
duplicate key/value pair is inserted, regardless of the setting
for `overwrite`.
`append`:
If ``True``, append the pair to the end of the database without
comparing its order first. Appending a key that is not greater
than the highest existing key will fail and return ``False``.
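A small sketch of bulk-loading keys that are already in ascending
order (the key format below is hypothetical):

.. code-block:: python

    with env.begin(write=True) as txn:
        cur = txn.cursor()
        for i in range(1000):
            # Keys are generated in sorted order, so append is safe.
            cur.put(b'%08d' % i, b'value', append=True)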
"""
flags = 0
if not dupdata:
flags |= _lib.MDB_NODUPDATA
if not overwrite:
flags |= _lib.MDB_NOOVERWRITE
if append:
if self.txn._db._flags & _lib.MDB_DUPSORT:
flags |= _lib.MDB_APPENDDUP
else:
flags |= _lib.MDB_APPEND
rc = _lib.pymdb_cursor_put(self._cur, key, len(key), val, len(val), flags)
self.txn._mutations += 1
if rc:
if rc == _lib.MDB_KEYEXIST:
return False
raise _error("mdb_cursor_put", rc)
self._cursor_get(_lib.MDB_GET_CURRENT)
return True
def putmulti(self, items, dupdata=True, overwrite=True, append=False):
"""Invoke :py:meth:`put` for each `(key, value)` 2-tuple from the
iterable `items`. Elements must be exactly 2-tuples, they may not be of
any other type, or tuple subclass.
Returns a tuple `(consumed, added)`, where `consumed` is the number of
elements read from the iterable, and `added` is the number of new
entries added to the database. `added` may be less than `consumed` when
`overwrite=False`.
`items`:
Iterable to read records from.
`dupdata`:
If ``True`` and database was opened with `dupsort=True`, add
pair as a duplicate if the given key already exists. Otherwise
overwrite any existing matching key.
`overwrite`:
If ``False``, do not overwrite the value for the key if it
exists, just return ``False``. For databases opened with
`dupsort=True`, ``False`` will always be returned if a
duplicate key/value pair is inserted, regardless of the setting
for `overwrite`.
`append`:
If ``True``, append records to the end of the database without
comparing their order first. Appending a key that is not
greater than the highest existing key will cause corruption.
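For example (sketch, assuming a database opened without `dupsort`):

.. code-block:: python

    items = [(b'a', b'1'), (b'b', b'2'), (b'a', b'1')]
    with env.begin(write=True) as txn:
        consumed, added = txn.cursor().putmulti(items, overwrite=False)
        # consumed == 3; added == 2, since the second b'a' already existed.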
"""
flags = 0
if not dupdata:
flags |= _lib.MDB_NODUPDATA
if not overwrite:
flags |= _lib.MDB_NOOVERWRITE
if append:
if self.txn._db._flags & _lib.MDB_DUPSORT:
flags |= _lib.MDB_APPENDDUP
else:
flags |= _lib.MDB_APPEND
added = 0
skipped = 0
for key, value in items:
rc = _lib.pymdb_cursor_put(self._cur, key, len(key),
value, len(value), flags)
self.txn._mutations += 1
added += 1
if rc:
if rc == _lib.MDB_KEYEXIST:
skipped += 1
else:
raise _error("mdb_cursor_put", rc)
self._cursor_get(_lib.MDB_GET_CURRENT)
return added, added - skipped
def replace(self, key, val):
"""Store a record, returning its previous value if one existed. Returns
``None`` if no previous value existed. This uses the best available
mechanism to minimize the cost of a `set-and-return-previous`
operation.
For databases opened with `dupsort=True`, only the first data element
("duplicate") is returned if it existed, all data elements are removed
and the new `(key, data)` pair is inserted.
`key`:
Bytestring key to store.
`val`:
Bytestring value to store.
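For example (sketch with hypothetical keys and values):

.. code-block:: python

    with env.begin(write=True) as txn:
        cur = txn.cursor()
        cur.put(b'k', b'old')
        previous = cur.replace(b'k', b'new')    # returns b'old'
        missing = cur.replace(b'absent', b'x')  # returns None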
"""
if self.db._flags & _lib.MDB_DUPSORT:
if self._cursor_get_kv(_lib.MDB_SET_KEY, key, EMPTY_BYTES):
preload(self._val)
old = _mvstr(self._val)
self.delete(True)
else:
old = None
self.put(key, val)
return old
flags = _lib.MDB_NOOVERWRITE
keylen = len(key)
rc = _lib.pymdb_cursor_put(self._cur, key, keylen, val, len(val), flags)
self.txn._mutations += 1
if not rc:
return
if rc != _lib.MDB_KEYEXIST:
raise _error("mdb_cursor_put", rc)
self._cursor_get(_lib.MDB_GET_CURRENT)
preload(self._val)
old = _mvstr(self._val)
rc = _lib.pymdb_cursor_put(self._cur, key, keylen, val, len(val), 0)
self.txn._mutations += 1
if rc:
raise _error("mdb_cursor_put", rc)
self._cursor_get(_lib.MDB_GET_CURRENT)
return old
def pop(self, key):
"""Fetch a record's value then delete it. Returns ``None`` if no
previous value existed. This uses the best available mechanism to
minimize the cost of a `delete-and-return-previous` operation.
For databases opened with `dupsort=True`, the first data element
("duplicate") for the key will be popped.
`key`:
Bytestring key to delete.
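For example (sketch with a hypothetical key):

.. code-block:: python

    with env.begin(write=True) as txn:
        cur = txn.cursor()
        cur.put(b'k', b'v')
        assert cur.pop(b'k') == b'v'
        assert cur.pop(b'k') is None  # already deleted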
"""
if self._cursor_get_kv(_lib.MDB_SET_KEY, key, EMPTY_BYTES):
preload(self._val)
old = _mvstr(self._val)
rc = _lib.mdb_cursor_del(self._cur, 0)
self.txn._mutations += 1
if rc:
raise _error("mdb_cursor_del", rc)
self._cursor_get(_lib.MDB_GET_CURRENT)
return old
def _iter_from(self, k, reverse):
"""Helper for centidb. Please do not rely on this interface, it may be
removed in future.
"""
if not k and not reverse:
found = self.first()
else:
found = self.set_range(k)
if reverse:
if not found:
self.last()
return self.iterprev()
else:
if not found:
return iter(())
return self.iternext() | en | 0.697407 | # # Copyright 2013 The py-lmdb authors, all rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted only as authorized by the OpenLDAP # Public License. # # A copy of this license is available in the file LICENSE in the # top-level directory of the distribution or, alternatively, at # <http://www.OpenLDAP.org/license.html>. # # OpenLDAP is a registered trademark of the OpenLDAP Foundation. # # Individual files and/or contributed packages may be copyright by # other parties and/or subject to additional restrictions. # # This work also contains materials derived from public sources. # # Additional information about OpenLDAP can be obtained at # <http://www.openldap.org/>. # CPython/CFFI wrapper for OpenLDAP's "Lightning" MDB database. Please see https://lmdb.readthedocs.io/ # type: ignore # type: ignore # Handle moronic Python 3 mess. # Used to track context across CFFI callbacks. typedef int mode_t; typedef ... MDB_env; typedef struct MDB_txn MDB_txn; typedef struct MDB_cursor MDB_cursor; typedef unsigned int MDB_dbi; enum MDB_cursor_op { MDB_FIRST, MDB_FIRST_DUP, MDB_GET_BOTH, MDB_GET_BOTH_RANGE, MDB_GET_CURRENT, MDB_GET_MULTIPLE, MDB_LAST, MDB_LAST_DUP, MDB_NEXT, MDB_NEXT_DUP, MDB_NEXT_MULTIPLE, MDB_NEXT_NODUP, MDB_PREV, MDB_PREV_DUP, MDB_PREV_NODUP, MDB_SET, MDB_SET_KEY, MDB_SET_RANGE, ... }; typedef enum MDB_cursor_op MDB_cursor_op; struct MDB_val { size_t mv_size; void *mv_data; ...; }; typedef struct MDB_val MDB_val; struct MDB_stat { unsigned int ms_psize; unsigned int ms_depth; size_t ms_branch_pages; size_t ms_leaf_pages; size_t ms_overflow_pages; size_t ms_entries; ...; }; typedef struct MDB_stat MDB_stat; struct MDB_envinfo { void *me_mapaddr; size_t me_mapsize; size_t me_last_pgno; size_t me_last_txnid; unsigned int me_maxreaders; unsigned int me_numreaders; ...; }; typedef struct MDB_envinfo MDB_envinfo; typedef int (*MDB_cmp_func)(const MDB_val *a, const MDB_val *b); typedef void (*MDB_rel_func)(MDB_val *item, void *oldptr, void *newptr, void *relctx); char *mdb_strerror(int err); int mdb_env_create(MDB_env **env); int mdb_env_open(MDB_env *env, const char *path, unsigned int flags, mode_t mode); int mdb_env_copy2(MDB_env *env, const char *path, int flags); int mdb_env_copyfd2(MDB_env *env, int fd, int flags); int mdb_env_stat(MDB_env *env, MDB_stat *stat); int mdb_env_info(MDB_env *env, MDB_envinfo *stat); int mdb_env_get_maxkeysize(MDB_env *env); int mdb_env_sync(MDB_env *env, int force); void mdb_env_close(MDB_env *env); int mdb_env_set_flags(MDB_env *env, unsigned int flags, int onoff); int mdb_env_get_flags(MDB_env *env, unsigned int *flags); int mdb_env_get_path(MDB_env *env, const char **path); int mdb_env_set_mapsize(MDB_env *env, size_t size); int mdb_env_set_maxreaders(MDB_env *env, unsigned int readers); int mdb_env_get_maxreaders(MDB_env *env, unsigned int *readers); int mdb_env_set_maxdbs(MDB_env *env, MDB_dbi dbs); int mdb_txn_begin(MDB_env *env, MDB_txn *parent, unsigned int flags, MDB_txn **txn); int mdb_txn_commit(MDB_txn *txn); void mdb_txn_reset(MDB_txn *txn); int mdb_txn_renew(MDB_txn *txn); void mdb_txn_abort(MDB_txn *txn); size_t mdb_txn_id(MDB_txn *txn); int mdb_dbi_open(MDB_txn *txn, const char *name, unsigned int flags, MDB_dbi *dbi); int mdb_stat(MDB_txn *txn, MDB_dbi dbi, MDB_stat *stat); int mdb_drop(MDB_txn *txn, MDB_dbi dbi, int del_); int mdb_get(MDB_txn *txn, MDB_dbi dbi, MDB_val *key, MDB_val *data); int mdb_cursor_open(MDB_txn *txn, MDB_dbi 
dbi, MDB_cursor **cursor); void mdb_cursor_close(MDB_cursor *cursor); int mdb_cursor_del(MDB_cursor *cursor, unsigned int flags); int mdb_cursor_count(MDB_cursor *cursor, size_t *countp); int mdb_cursor_get(MDB_cursor *cursor, MDB_val *key, MDB_val*data, int op); typedef int (MDB_msg_func)(const char *msg, void *ctx); int mdb_reader_list(MDB_env *env, MDB_msg_func *func, void *ctx); int mdb_reader_check(MDB_env *env, int *dead); int mdb_dbi_flags(MDB_txn *txn, MDB_dbi dbi, unsigned int *flags); #define MDB_VERSION_MAJOR ... #define MDB_VERSION_MINOR ... #define MDB_VERSION_PATCH ... #define EACCES ... #define EAGAIN ... #define EINVAL ... #define ENOMEM ... #define ENOSPC ... #define MDB_BAD_RSLOT ... #define MDB_BAD_DBI ... #define MDB_BAD_TXN ... #define MDB_BAD_VALSIZE ... #define MDB_CORRUPTED ... #define MDB_CURSOR_FULL ... #define MDB_DBS_FULL ... #define MDB_INCOMPATIBLE ... #define MDB_INVALID ... #define MDB_KEYEXIST ... #define MDB_MAP_FULL ... #define MDB_MAP_RESIZED ... #define MDB_NOTFOUND ... #define MDB_PAGE_FULL ... #define MDB_PAGE_NOTFOUND ... #define MDB_PANIC ... #define MDB_READERS_FULL ... #define MDB_TLS_FULL ... #define MDB_TXN_FULL ... #define MDB_VERSION_MISMATCH ... #define MDB_APPEND ... #define MDB_APPENDDUP ... #define MDB_CP_COMPACT ... #define MDB_CREATE ... #define MDB_DUPFIXED ... #define MDB_DUPSORT ... #define MDB_INTEGERDUP ... #define MDB_INTEGERKEY ... #define MDB_MAPASYNC ... #define MDB_NODUPDATA ... #define MDB_NOLOCK ... #define MDB_NOMEMINIT ... #define MDB_NOMETASYNC ... #define MDB_NOOVERWRITE ... #define MDB_NORDAHEAD ... #define MDB_NOSUBDIR ... #define MDB_NOSYNC ... #define MDB_NOTLS ... #define MDB_RDONLY ... #define MDB_REVERSEKEY ... #define MDB_WRITEMAP ... // Helpers below inline MDB_vals. Avoids key alloc/dup on CPython, where // CFFI will use PyString_AS_STRING when passed as an argument. static int pymdb_del(MDB_txn *txn, MDB_dbi dbi, char *key_s, size_t keylen, char *val_s, size_t vallen); static int pymdb_put(MDB_txn *txn, MDB_dbi dbi, char *key_s, size_t keylen, char *val_s, size_t vallen, unsigned int flags); static int pymdb_get(MDB_txn *txn, MDB_dbi dbi, char *key_s, size_t keylen, MDB_val *val_out); static int pymdb_cursor_get(MDB_cursor *cursor, char *key_s, size_t key_len, char *data_s, size_t data_len, MDB_val *key, MDB_val *data, int op); static int pymdb_cursor_put(MDB_cursor *cursor, char *key_s, size_t keylen, char *val_s, size_t vallen, int flags); // Prefaults a range static void preload(int rc, void *x, size_t size); int mdb_env_copy3(MDB_env *env, const char *path, unsigned int flags, MDB_txn *txn); int mdb_env_copyfd3(MDB_env *env, int fd, unsigned int flags, MDB_txn *txn); #include <sys/stat.h> #include "lmdb.h" #include "preload.h" // Helpers below inline MDB_vals. Avoids key alloc/dup on CPython, where // CFFI will use PyString_AS_STRING when passed as an argument. 
static int pymdb_get(MDB_txn *txn, MDB_dbi dbi, char *key_s, size_t keylen, MDB_val *val_out) { MDB_val key = {keylen, key_s}; int rc = mdb_get(txn, dbi, &key, val_out); return rc; } static int pymdb_put(MDB_txn *txn, MDB_dbi dbi, char *key_s, size_t keylen, char *val_s, size_t vallen, unsigned int flags) { MDB_val key = {keylen, key_s}; MDB_val val = {vallen, val_s}; return mdb_put(txn, dbi, &key, &val, flags); } static int pymdb_del(MDB_txn *txn, MDB_dbi dbi, char *key_s, size_t keylen, char *val_s, size_t vallen) { MDB_val key = {keylen, key_s}; MDB_val val = {vallen, val_s}; MDB_val *valptr; if(vallen == 0) { valptr = NULL; } else { valptr = &val; } return mdb_del(txn, dbi, &key, valptr); } static int pymdb_cursor_get(MDB_cursor *cursor, char *key_s, size_t key_len, char *data_s, size_t data_len, MDB_val *key, MDB_val *data, int op) { MDB_val tmp_key = {key_len, key_s}; MDB_val tmp_data = {data_len, data_s}; int rc = mdb_cursor_get(cursor, &tmp_key, &tmp_data, op); if(! rc) { *key = tmp_key; *data = tmp_data; } return rc; } static int pymdb_cursor_put(MDB_cursor *cursor, char *key_s, size_t keylen, char *val_s, size_t vallen, int flags) { MDB_val tmpkey = {keylen, key_s}; MDB_val tmpval = {vallen, val_s}; return mdb_cursor_put(cursor, &tmpkey, &tmpval, flags); } # Try to use distutils-bundled CFFI configuration to avoid a recompile and # potential compile errors during first module import. # type: ignore mdb_msg_func() callback. Appends `s` to _callbacks.msg_func list. Raised when an LMDB-related error occurs, and no more specific :py:class:`lmdb.Error` subclass exists. Key/data pair already exists. No matching key/data pair found. Normally py-lmdb indicates a missing key by returning ``None``, or a user-supplied default value, however LMDB may return this error where py-lmdb does not know to convert it into a non-exceptional return. Request page not found. Located page was of the wrong type. Update of meta page failed. Database environment version mismatch. File is not an MDB file. Environment map_size= limit reached. Environment max_dbs= limit reached. Environment max_readers= limit reached. Thread-local storage keys full - too many environments open. Transaciton has too many dirty pages - transaction too big. Internal error - cursor stack limit reached. Internal error - page has no more space. Database contents grew beyond environment map_size=. Operation and DB incompatible, or DB flags changed. Invalid reuse of reader locktable slot. The specified DBI was changed unexpectedly. Transaction cannot recover - it must be aborted. Too big key/data, key is empty, or wrong DUPFIXED size. An attempt was made to modify a read-only database. An invalid parameter was specified. The environment was locked by another process. Out of memory. No more disk space. # Prepare _error_map, a mapping of integer MDB_ERROR_CODE to exception class. Lookup and instantiate the correct exception class for the error code `rc`, using :py:class:`Error` if no better class exists. We need this because CFFI on PyPy treats None as cffi.NULL, instead of throwing an exception it feeds LMDB null pointers. That means simply replacing native handles with None during _invalidate() will cause NULL pointer dereferences. Instead use this class, and its weird name to cause a TypeError, with a very obvious string in the exception text. 
The only alternatives to this are inserting a check around every single use of a native handle to ensure the handle is still valid prior to calling LMDB, or doing no crash-safety checking at all. Convert a MDB_val cdata to a CFFI buffer object. Convert a MDB_val cdata to Python bytes. Deprecated. Return a tuple of integers `(major, minor, patch)` describing the LMDB library version that the binding is linked against. The version of the binding itself is available from ``lmdb.__version__``. `subpatch`: If true, returns a 4 integer tuple consisting of the same plus an extra integer that represents any patches applied by py-lmdb itself (0 representing no patches). Structure for a database environment. An environment may contain multiple databases, all residing in the same shared-memory map and underlying disk file. To write to the environment a :py:class:`Transaction` must be created. One simultaneous write transaction is allowed, however there is no limit on the number of read transactions even when a write transaction exists. This class is aliased to `lmdb.open`. It is a serious error to have open the same LMDB file in the same process at the same time. Failure to heed this may lead to data corruption and interpreter crash. Equivalent to `mdb_env_open() <http://lmdb.tech/doc/group__mdb.html#ga1fe2740e25b1689dc412e7b9faadba1b>`_ `path`: Location of directory (if `subdir=True`) or file prefix to store the database. `map_size`: Maximum size database may grow to; used to size the memory mapping. If database grows larger than ``map_size``, an exception will be raised and the user must close and reopen :py:class:`Environment`. On 64-bit there is no penalty for making this huge (say 1TB). Must be <2GB on 32-bit. .. note:: **The default map size is set low to encourage a crash**, so users can figure out a good value before learning about this option too late. `subdir`: If ``True``, `path` refers to a subdirectory to store the data and lock files in, otherwise it refers to a filename prefix. `readonly`: If ``True``, disallow any write operations. Note the lock file is still modified. If specified, the ``write`` flag to :py:meth:`begin` or :py:class:`Transaction` is ignored. `metasync`: If ``False``, flush system buffers to disk only once per transaction, omit the metadata flush. Defer that until the system flushes files to disk, or next commit or :py:meth:`sync`. This optimization maintains database integrity, but a system crash may undo the last committed transaction. I.e. it preserves the ACI (atomicity, consistency, isolation) but not D (durability) database property. `sync`: If ``False``, don't flush system buffers to disk when committing a transaction. This optimization means a system crash can corrupt the database or lose the last transactions if buffers are not yet flushed to disk. The risk is governed by how often the system flushes dirty buffers to disk and how often :py:meth:`sync` is called. However, if the filesystem preserves write order and `writemap=False`, transactions exhibit ACI (atomicity, consistency, isolation) properties and only lose D (durability). I.e. database integrity is maintained, but a system crash may undo the final transactions. Note that `sync=False, writemap=True` leaves the system with no hint for when to write transactions to disk, unless :py:meth:`sync` is called. `map_async=True, writemap=True` may be preferable. `mode`: File creation mode. `create`: If ``False``, do not create the directory `path` if it is missing. 
`readahead`: If ``False``, LMDB will disable the OS filesystem readahead mechanism, which may improve random read performance when a database is larger than RAM. `writemap`: If ``True``, use a writeable memory map unless `readonly=True`. This is faster and uses fewer mallocs, but loses protection from application bugs like wild pointer writes and other bad updates into the database. Incompatible with nested transactions. Processes with and without `writemap` on the same environment do not cooperate well. `meminit`: If ``False`` LMDB will not zero-initialize buffers prior to writing them to disk. This improves performance but may cause old heap data to be written saved in the unused portion of the buffer. Do not use this option if your application manipulates confidential data (e.g. plaintext passwords) in memory. This option is only meaningful when `writemap=False`; new pages are always zero-initialized when `writemap=True`. `map_async`: When ``writemap=True``, use asynchronous flushes to disk. As with ``sync=False``, a system crash can then corrupt the database or lose the last transactions. Calling :py:meth:`sync` ensures on-disk database integrity until next commit. `max_readers`: Maximum number of simultaneous read transactions. Can only be set by the first process to open an environment, as it affects the size of the lock file and shared memory area. Attempts to simultaneously start more than this many *read* transactions will fail. `max_dbs`: Maximum number of databases available. If 0, assume environment will be used as a single database. `max_spare_txns`: Read-only transactions to cache after becoming unused. Caching transactions avoids two allocations, one lock and linear scan of the shared environment per invocation of :py:meth:`begin`, :py:class:`Transaction`, :py:meth:`get`, :py:meth:`gets`, or :py:meth:`cursor`. Should match the process's maximum expected concurrent transactions (e.g. thread count). `lock`: If ``False``, don't do any locking. If concurrent access is anticipated, the caller must manage all concurrency itself. For proper operation the caller must enforce single-writer semantics, and must ensure that no readers are using old transactions while a writer is active. The simplest approach is to use an exclusive lock so that no readers may be active at all when a writer begins. Change the maximum size of the map file. This function will fail if any transactions are active in the current process. `map_size`: The new size in bytes. Equivalent to `mdb_env_set_mapsize() <http://lmdb.tech/doc/group__mdb.html#gaa2506ec8dab3d969b0e609cd82e619e5>`_ Warning: There's a data race in the underlying library that may cause catastrophic loss of data if you use this method. You are safe if one of the following are true: * Only one process accessing a particular LMDB file ever calls this method. * You use locking external to this library to ensure that only one process accessing the current LMDB file can be inside this function. Close the environment, invalidating any open iterators, cursors, and transactions. Repeat calls to :py:meth:`close` have no effect. Equivalent to `mdb_env_close() <http://lmdb.tech/doc/group__mdb.html#ga4366c43ada8874588b6a62fbda2d1e95>`_ Directory path or file name prefix where this environment is stored. Equivalent to `mdb_env_get_path() <http://lmdb.tech/doc/group__mdb.html#gac699fdd8c4f8013577cb933fb6a757fe>`_ Make a consistent copy of the environment in the given destination directory. 
`compact`: If ``True``, perform compaction while copying: omit free pages and sequentially renumber all pages in output. This option consumes more CPU and runs more slowly than the default, but may produce a smaller output database. `txn`: If provided, the backup will be taken from the database with respect to that transaction, otherwise a temporary read-only transaction will be created. Note: this parameter being non-None is not available if the module was built with LMDB_PURE. Note: this parameter may be set only if compact=True. Equivalent to `mdb_env_copy2() or mdb_env_copy3() <http://lmdb.tech/doc/group__mdb.html#ga5d51d6130325f7353db0955dbedbc378>`_ Copy a consistent version of the environment to file descriptor `fd`. `compact`: If ``True``, perform compaction while copying: omit free pages and sequentially renumber all pages in output. This option consumes more CPU and runs more slowly than the default, but may produce a smaller output database. `txn`: If provided, the backup will be taken from the database with respect to that transaction, otherwise a temporary read-only transaction will be created. Note: this parameter being non-None is not available if the module was built with LMDB_PURE. Equivalent to `mdb_env_copyfd2() or mdb_env_copyfd3 <http://lmdb.tech/doc/group__mdb.html#ga5d51d6130325f7353db0955dbedbc378>`_ # Convert C library handle to kernel handle. Flush the data buffers to disk. Equivalent to `mdb_env_sync() <http://lmdb.tech/doc/group__mdb.html#ga85e61f05aa68b520cc6c3b981dba5037>`_ Data is always written to disk when :py:meth:`Transaction.commit` is called, but the operating system may keep it buffered. MDB always flushes the OS buffers upon commit as well, unless the environment was opened with `sync=False` or `metasync=False`. `force`: If ``True``, force a synchronous flush. Otherwise if the environment was opened with `sync=False` the flushes will be omitted, and with `map_async=True` they will be asynchronous. Convert a MDB_stat to a dict. stat() Return some environment statistics for the default database as a dict: +--------------------+---------------------------------------+ | ``psize`` | Size of a database page in bytes. | +--------------------+---------------------------------------+ | ``depth`` | Height of the B-tree. | +--------------------+---------------------------------------+ | ``branch_pages`` | Number of internal (non-leaf) pages. | +--------------------+---------------------------------------+ | ``leaf_pages`` | Number of leaf pages. | +--------------------+---------------------------------------+ | ``overflow_pages`` | Number of overflow pages. | +--------------------+---------------------------------------+ | ``entries`` | Number of data items. | +--------------------+---------------------------------------+ Equivalent to `mdb_env_stat() <http://lmdb.tech/doc/group__mdb.html#gaf881dca452050efbd434cd16e4bae255>`_ Return some nice environment information as a dict: +--------------------+---------------------------------------------+ | ``map_addr`` | Address of database map in RAM. | +--------------------+---------------------------------------------+ | ``map_size`` | Size of database map in RAM. | +--------------------+---------------------------------------------+ | ``last_pgno`` | ID of last used page. | +--------------------+---------------------------------------------+ | ``last_txnid`` | ID of last committed transaction. 
| +--------------------+---------------------------------------------+ | ``max_readers`` | Number of reader slots allocated in the | | | lock file. Equivalent to the value of | | | `maxreaders=` specified by the first | | | process opening the Environment. | +--------------------+---------------------------------------------+ | ``num_readers`` | Maximum number of reader slots in | | | simultaneous use since the lock file was | | | initialized. | +--------------------+---------------------------------------------+ Equivalent to `mdb_env_info() <http://lmdb.tech/doc/group__mdb.html#ga18769362c7e7d6cf91889a028a5c5947>`_ Return a dict describing Environment constructor flags used to instantiate this environment. Return the maximum size in bytes of a record's key part. This matches the ``MDB_MAXKEYSIZE`` constant set at compile time. Return the maximum number of readers specified during open of the environment by the first process. This is the same as `max_readers=` specified to the constructor if this process was the first to open the environment. Return a multi line Unicode string describing the current state of the reader lock table. Search the reader lock table for stale entries, for example due to a crashed process. Returns the number of stale entries that were cleared. Open a database, returning an instance of :py:class:`_Database`. Repeat :py:meth:`Environment.open_db` calls for the same name will return the same handle. As a special case, the main database is always open. Equivalent to `mdb_dbi_open() <http://lmdb.tech/doc/group__mdb.html#gac08cad5b096925642ca359a6d6f0562a>`_ Named databases are implemented by *storing a special descriptor in the main database*. All databases in an environment *share the same file*. Because the descriptor is present in the main database, attempts to create a named database will fail if a key matching the database's name already exists. Furthermore *the key is visible to lookups and enumerations*. If your main database keyspace conflicts with the names you use for named databases, then move the contents of your main database to another named database. :: >>> env = lmdb.open('/tmp/test', max_dbs=2) >>> with env.begin(write=True) as txn ... txn.put('somename', 'somedata') >>> # Error: database cannot share name of existing key! >>> subdb = env.open_db('somename') A newly created database will not exist if the transaction that created it aborted, nor if another process deleted it. The handle resides in the shared environment, it is not owned by the current transaction or process. Only one thread should call this function; it is not mutex-protected in a read-only transaction. The `dupsort`, `integerkey`, `integerdup`, and `dupfixed` parameters are ignored if the database already exists. The state of those settings are persistent and immutable per database. See :py:meth:`_Database.flags` to view the state of those options for an opened database. A consequence of the immutability of these flags is that the default non-named database will never have these flags set. Preexisting transactions, other than the current transaction and any parents, must not use the new handle, nor must their children. `key`: Bytestring database name. If ``None``, indicates the main database should be returned, otherwise indicates a named database should be created inside the main database. 
In other words, *a key representing the database will be visible in the main database, and the database name cannot conflict with any existing key.* `txn`: Transaction used to create the database if it does not exist. If unspecified, a temporarily write transaction is used. Do not call :py:meth:`open_db` from inside an existing transaction without supplying it here. Note the passed transaction must have `write=True`. `reverse_key`: If ``True``, keys are compared from right to left (e.g. DNS names). `dupsort`: Duplicate keys may be used in the database. (Or, from another perspective, keys may have multiple data items, stored in sorted order.) By default keys must be unique and may have only a single data item. `create`: If ``True``, create the database if it doesn't exist, otherwise raise an exception. `integerkey`: If ``True``, indicates keys in the database are C unsigned or ``size_t`` integers encoded in native byte order. Keys must all be either unsigned or ``size_t``, they cannot be mixed in a single database. `integerdup`: If ``True``, values in the database are C unsigned or ``size_t`` integers encode din native byte order. Implies `dupsort` and `dupfixed` are ``True``. `dupfixed`: If ``True``, values for each key in database are of fixed size, allowing each additional duplicate value for a key to be stored without a header indicating its size. Implies `dupsort` is ``True``. Shortcut for :py:class:`lmdb.Transaction` Internal database handle. This class is opaque, save a single method. Should not be constructed directly. Use :py:meth:`Environment.open_db` instead. Load MDB's notion of the database flags. Return the database's associated flags as a dict of _Database constructor kwargs. A transaction object. All operations require a transaction handle, transactions may be read-only or read-write. Write transactions may not span threads. Transaction objects implement the context manager protocol, so that reliable release of the transaction happens even in the face of unhandled exceptions: .. code-block:: python # Transaction aborts correctly: with env.begin(write=True) as txn: crash() # Transaction commits automatically: with env.begin(write=True) as txn: txn.put('a', 'b') Equivalent to `mdb_txn_begin() <http://lmdb.tech/doc/group__mdb.html#gad7ea55da06b77513609efebd44b26920>`_ `env`: Environment the transaction should be on. `db`: Default named database to operate on. If unspecified, defaults to the environment's main database. Can be overridden on a per-call basis below. `parent`: ``None``, or a parent transaction (see lmdb.h). `write`: Transactions are read-only by default. To modify the database, you must pass `write=True`. This flag is ignored if :py:class:`Environment` was opened with ``readonly=True``. `buffers`: If ``True``, indicates :py:func:`buffer` objects should be yielded instead of bytestrings. This setting applies to the :py:class:`Transaction` instance itself and any :py:class:`Cursors <Cursor>` created within the transaction. This feature significantly improves performance, since MDB has a zero-copy design, but it requires care when manipulating the returned buffer objects. The benefit of this facility is diminished when using small keys and values. # If constructor fails, then __del__ will attempt to access these # attributes. # Mutations occurred since transaction start. Required to know when Cursor # key/value must be refreshed. 
# hold ref # Exception catch in order to avoid racy 'if txns:' test # Don't use spare txns for creating a DB when read-only id() Return the transaction's ID. This returns the identifier associated with this transaction. For a read-only transaction, this corresponds to the snapshot being read; concurrent readers will frequently have the same transaction ID. stat(db) Return statistics like :py:meth:`Environment.stat`, except for a single DBI. `db` must be a database handle returned by :py:meth:`open_db`. Delete all keys in a named database and optionally delete the named database itself. Deleting the named database causes it to become unavailable, and invalidates existing cursors. Equivalent to `mdb_drop() <http://lmdb.tech/doc/group__mdb.html#gab966fab3840fc54a6571dfb32b00f2db>`_ # In order to avoid taking and maintaining a lock, a race is allowed # below which may result in more spare txns than desired. It seems # unlikely the race could ever result in a large amount of spare txns, # and in any case a correctly configured program should not be opening # more read-only transactions than there are configured spares. Commit the pending transaction. Equivalent to `mdb_txn_commit() <http://lmdb.tech/doc/group__mdb.html#ga846fbd6f46105617ac9f4d76476f6597>`_ Abort the pending transaction. Repeat calls to :py:meth:`abort` have no effect after a previously successful :py:meth:`commit` or :py:meth:`abort`, or after the associated :py:class:`Environment` has been closed. Equivalent to `mdb_txn_abort() <http://lmdb.tech/doc/group__mdb.html#ga73a5938ae4c3239ee11efa07eb22b882>`_ Fetch the first value matching `key`, returning `default` if `key` does not exist. A cursor must be used to fetch all values for a key in a `dupsort=True` database. Equivalent to `mdb_get() <http://lmdb.tech/doc/group__mdb.html#ga8bf10cd91d3f3a83a34d04ce6b07992d>`_ Store a record, returning ``True`` if it was written, or ``False`` to indicate the key was already present and `overwrite=False`. On success, the cursor is positioned on the new record. Equivalent to `mdb_put() <http://lmdb.tech/doc/group__mdb.html#ga4fa8573d9236d54687c61827ebf8cac0>`_ `key`: Bytestring key to store. `value`: Bytestring value to store. `dupdata`: If ``False`` and database was opened with `dupsort=True`, will return ``False`` if the key already has that value. In other words, this only affects the return value. `overwrite`: If ``False``, do not overwrite any existing matching key. If False and writing to a dupsort=True database, this will not add a value to the key and this function will return ``False``. `append`: If ``True``, append the pair to the end of the database without comparing its order first. Appending a key that is not greater than the highest existing key will fail and return ``False``. `db`: Named database to operate on. If unspecified, defaults to the database given to the :py:class:`Transaction` constructor. Use a temporary cursor to invoke :py:meth:`Cursor.replace`. `db`: Named database to operate on. If unspecified, defaults to the database given to the :py:class:`Transaction` constructor. Use a temporary cursor to invoke :py:meth:`Cursor.pop`. `db`: Named database to operate on. If unspecified, defaults to the database given to the :py:class:`Transaction` constructor. Delete a key from the database. Equivalent to `mdb_del() <http://lmdb.tech/doc/group__mdb.html#gab8182f9360ea69ac0afd4a4eaab1ddb0>`_ `key`: The key to delete. 
value: If the database was opened with dupsort=True and value is not the empty bytestring, then delete elements matching only this `(key, value)` pair, otherwise all values for key are deleted. Returns True if at least one key was deleted. # for bug-compatibility with cpython impl Shortcut for ``lmdb.Cursor(db, self)`` Structure for navigating a database. Equivalent to `mdb_cursor_open() <http://lmdb.tech/doc/group__mdb.html#ga9ff5d7bd42557fd5ee235dc1d62613aa>`_ `db`: :py:class:`_Database` to navigate. `txn`: :py:class:`Transaction` to navigate. As a convenience, :py:meth:`Transaction.cursor` can be used to quickly return a cursor: :: >>> env = lmdb.open('/tmp/foo') >>> child_db = env.open_db('child_db') >>> with env.begin() as txn: ... cursor = txn.cursor() # Cursor on main database. ... cursor2 = txn.cursor(child_db) # Cursor on child database. Cursors start in an unpositioned state. If :py:meth:`iternext` or :py:meth:`iterprev` are used in this state, iteration proceeds from the start or end respectively. Iterators directly position using the cursor, meaning strange behavior results when multiple iterators exist on the same cursor. .. note:: From the perspective of the Python binding, cursors return to an 'unpositioned' state once any scanning or seeking method (e.g. :py:meth:`next`, :py:meth:`prev_nodup`, :py:meth:`set_range`) returns ``False`` or raises an exception. This is primarily to ensure safe, consistent semantics in the face of any error condition. When the Cursor returns to an unpositioned state, its :py:meth:`key` and :py:meth:`value` return empty strings to indicate there is no active position, although internally the LMDB cursor may still have a valid position. This may lead to slightly surprising behaviour when iterating the values for a `dupsort=True` database's keys, since methods such as :py:meth:`iternext_dup` will cause Cursor to appear unpositioned, despite it returning ``False`` only to indicate there are no more values for the current key. In that case, simply calling :py:meth:`next` would cause iteration to resume at the next available key. This behaviour may change in future. Iterator methods such as :py:meth:`iternext` and :py:meth:`iterprev` accept `keys` and `values` arguments. If both are ``True``, then the value of :py:meth:`item` is yielded on each iteration. If only `keys` is ``True``, :py:meth:`key` is yielded, otherwise only :py:meth:`value` is yielded. Prior to iteration, a cursor can be positioned anywhere in the database: :: >>> with env.begin() as txn: ... cursor = txn.cursor() ... if not cursor.set_range('5'): # Position at first key >= '5'. ... print('Not found!') ... else: ... for key, value in cursor: # Iterate from first key >= '5'. ... print((key, value)) Iteration is not required to navigate, and sometimes results in ugly or inefficient code. In cases where the iteration order is not obvious, or is related to the data being read, use of :py:meth:`set_key`, :py:meth:`set_range`, :py:meth:`key`, :py:meth:`value`, and :py:meth:`item` may be preferable: :: >>> # Record the path from a child to the root of a tree. >>> path = ['child14123'] >>> while path[-1] != 'root': ... assert cursor.set_key(path[-1]), \\ ... 'Tree is broken! Path: %s' % (path,) ... path.append(cursor.value()) # hold ref # hold ref # If Transaction.mutations!=last_mutation, must MDB_GET_CURRENT to # refresh `key' and `val'. Close the cursor, freeing its associated resources. Return the current key. # Must refresh `key` and `val` following mutation. Return the current value. 
# Must refresh `key` and `val` following mutation. Return the current `(key, value)` pair. # Must refresh `key` and `val` following mutation. Return a forward iterator that yields the current element before calling :py:meth:`next`, repeating until the end of the database is reached. As a convenience, :py:class:`Cursor` implements the iterator protocol by automatically returning a forward iterator when invoked: :: >>> # Equivalent: >>> it = iter(cursor) >>> it = cursor.iternext(keys=True, values=True) If the cursor is not yet positioned, it is moved to the first key in the database, otherwise iteration proceeds from the current position. Return a forward iterator that yields the current value ("duplicate") of the current key before calling :py:meth:`next_dup`, repeating until the last value of the current key is reached. Only meaningful for databases opened with `dupsort=True`. .. code-block:: python if not cursor.set_key("foo"): print("No values found for 'foo'") else: for idx, data in enumerate(cursor.iternext_dup()): print("%d'th value for 'foo': %s" % (idx, data)) Return a forward iterator that yields the current value ("duplicate") of the current key before calling :py:meth:`next_nodup`, repeating until the end of the database is reached. Only meaningful for databases opened with `dupsort=True`. If the cursor is not yet positioned, it is moved to the first key in the database, otherwise iteration proceeds from the current position. .. code-block:: python for key in cursor.iternext_nodup(): print("Key '%s' has %d values" % (key, cursor.count())) Return a reverse iterator that yields the current element before calling :py:meth:`prev`, until the start of the database is reached. If the cursor is not yet positioned, it is moved to the last key in the database, otherwise iteration proceeds from the current position. :: >>> with env.begin() as txn: ... for i, (key, value) in enumerate(txn.cursor().iterprev()): ... print('%dth last item is (%r, %r)' % (1+i, key, value)) Return a reverse iterator that yields the current value ("duplicate") of the current key before calling :py:meth:`prev_dup`, repeating until the first value of the current key is reached. Only meaningful for databases opened with `dupsort=True`. Return a reverse iterator that yields the current value ("duplicate") of the current key before calling :py:meth:`prev_nodup`, repeating until the start of the database is reached. If the cursor is not yet positioned, it is moved to the last key in the database, otherwise iteration proceeds from the current position. Only meaningful for databases opened with `dupsort=True`. Move to the first key in the database, returning ``True`` on success or ``False`` if the database is empty. If the database was opened with `dupsort=True` and the key contains duplicates, the cursor is positioned on the first value ("duplicate"). Equivalent to `mdb_cursor_get() <http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_ with `MDB_FIRST <http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_ Move to the first value ("duplicate") for the current key, returning ``True`` on success or ``False`` if the database is empty. Only meaningful for databases opened with `dupsort=True`. 
Equivalent to `mdb_cursor_get() <http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_ with `MDB_FIRST_DUP <http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_ Move to the last key in the database, returning ``True`` on success or ``False`` if the database is empty. If the database was opened with `dupsort=True` and the key contains duplicates, the cursor is positioned on the last value ("duplicate"). Equivalent to `mdb_cursor_get() <http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_ with `MDB_LAST <http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_ Move to the last value ("duplicate") for the current key, returning ``True`` on success or ``False`` if the database is empty. Only meaningful for databases opened with `dupsort=True`. Equivalent to `mdb_cursor_get() <http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_ with `MDB_LAST_DUP <http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_ Move to the previous element, returning ``True`` on success or ``False`` if there is no previous item. For databases opened with `dupsort=True`, moves to the previous data item ("duplicate") for the current key if one exists, otherwise moves to the previous key. Equivalent to `mdb_cursor_get() <http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_ with `MDB_PREV <http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_ Move to the previous value ("duplicate") of the current key, returning ``True`` on success or ``False`` if there is no previous value. Only meaningful for databases opened with `dupsort=True`. Equivalent to `mdb_cursor_get() <http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_ with `MDB_PREV_DUP <http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_ Move to the last value ("duplicate") of the previous key, returning ``True`` on success or ``False`` if there is no previous key. Only meaningful for databases opened with `dupsort=True`. Equivalent to `mdb_cursor_get() <http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_ with `MDB_PREV_NODUP <http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_ Move to the next element, returning ``True`` on success or ``False`` if there is no next element. For databases opened with `dupsort=True`, moves to the next value ("duplicate") for the current key if one exists, otherwise moves to the first value of the next key. Equivalent to `mdb_cursor_get() <http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_ with `MDB_NEXT <http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_ Move to the next value ("duplicate") of the current key, returning ``True`` on success or ``False`` if there is no next value. Only meaningful for databases opened with `dupsort=True`. Equivalent to `mdb_cursor_get() <http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_ with `MDB_NEXT_DUP <http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_ Move to the first value ("duplicate") of the next key, returning ``True`` on success or ``False`` if there is no next key. Only meaningful for databases opened with `dupsort=True`. 
Equivalent to `mdb_cursor_get() <http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_ with `MDB_NEXT_NODUP <http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_ Seek exactly to `key`, returning ``True`` on success or ``False`` if the exact key was not found. It is an error to :py:meth:`set_key` the empty bytestring. For databases opened with `dupsort=True`, moves to the first value ("duplicate") for the key. Equivalent to `mdb_cursor_get() <http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_ with `MDB_SET_KEY <http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_ Seek exactly to `(key, value)`, returning ``True`` on success or ``False`` if the exact key and value was not found. It is an error to :py:meth:`set_key` the empty bytestring. Only meaningful for databases opened with `dupsort=True`. Equivalent to `mdb_cursor_get() <http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_ with `MDB_GET_BOTH <http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_ Equivalent to :py:meth:`set_key()`, except :py:meth:`value` is returned when `key` is found, otherwise `default`. Returns an iterable of `(key, value)` 2-tuples containing results for each key in the iterable `keys`. `keys`: Iterable to read keys from. `dupdata`: If ``True`` and database was opened with `dupsort=True`, read all duplicate values for each matching key. `dupfixed_bytes`: If database was opened with `dupsort=True` and `dupfixed=True`, accepts the size of each value, in bytes, and applies an optimization reducing the number of database lookups. `keyfixed`: If `dupfixed_bytes` is set and database key size is fixed, setting keyfixed=True will result in this function returning a memoryview to the results as a structured array of bytes. The structured array can be instantiated by passing the memoryview buffer to NumPy: .. code-block:: python key_bytes, val_bytes = 4, 8 dtype = np.dtype([(f'S{key_bytes}', f'S{val_bytes}}')]) arr = np.frombuffer( cur.getmulti(keys, dupdata=True, dupfixed_bytes=val_bytes, keyfixed=True) ) Seek to the first key greater than or equal to `key`, returning ``True`` on success, or ``False`` to indicate key was past end of database. Behaves like :py:meth:`first` if `key` is the empty bytestring. For databases opened with `dupsort=True`, moves to the first value ("duplicate") for the key. Equivalent to `mdb_cursor_get() <http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_ with `MDB_SET_RANGE <http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_ Seek to the first key/value pair greater than or equal to `key`, returning ``True`` on success, or ``False`` to indicate that `value` was past the last value of `key` or that `(key, value)` was past the end end of database. Only meaningful for databases opened with `dupsort=True`. Equivalent to `mdb_cursor_get() <http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_ with `MDB_GET_BOTH_RANGE <http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_ # issue #126: MDB_GET_BOTH_RANGE does not satisfy its documentation, # and fails to update `key` and `value` on success. Therefore # explicitly call MDB_GET_CURRENT after MDB_GET_BOTH_RANGE. Delete the current element and move to the next, returning ``True`` on success or ``False`` if the database was empty. 
If `dupdata` is ``True``, delete all values ("duplicates") for the current key, otherwise delete only the currently positioned value. Only meaningful for databases opened with `dupsort=True`. Equivalent to `mdb_cursor_del() <http://lmdb.tech/doc/group__mdb.html#ga26a52d3efcfd72e5bf6bd6960bf75f95>`_ Return the number of values ("duplicates") for the current key. Only meaningful for databases opened with `dupsort=True`. Equivalent to `mdb_cursor_count() <http://lmdb.tech/doc/group__mdb.html#ga4041fd1e1862c6b7d5f10590b86ffbe2>`_ Store a record, returning ``True`` if it was written, or ``False`` to indicate the key was already present and `overwrite=False`. On success, the cursor is positioned on the key. Equivalent to `mdb_cursor_put() <http://lmdb.tech/doc/group__mdb.html#ga1f83ccb40011837ff37cc32be01ad91e>`_ `key`: Bytestring key to store. `val`: Bytestring value to store. `dupdata`: If ``False`` and database was opened with `dupsort=True`, will return ``False`` if the key already has that value. In other words, this only affects the return value. `overwrite`: If ``False``, do not overwrite the value for the key if it exists, just return ``False``. For databases opened with `dupsort=True`, ``False`` will always be returned if a duplicate key/value pair is inserted, regardless of the setting for `overwrite`. `append`: If ``True``, append the pair to the end of the database without comparing its order first. Appending a key that is not greater than the highest existing key will fail and return ``False``. Invoke :py:meth:`put` for each `(key, value)` 2-tuple from the iterable `items`. Elements must be exactly 2-tuples, they may not be of any other type, or tuple subclass. Returns a tuple `(consumed, added)`, where `consumed` is the number of elements read from the iterable, and `added` is the number of new entries added to the database. `added` may be less than `consumed` when `overwrite=False`. `items`: Iterable to read records from. `dupdata`: If ``True`` and database was opened with `dupsort=True`, add pair as a duplicate if the given key already exists. Otherwise overwrite any existing matching key. `overwrite`: If ``False``, do not overwrite the value for the key if it exists, just return ``False``. For databases opened with `dupsort=True`, ``False`` will always be returned if a duplicate key/value pair is inserted, regardless of the setting for `overwrite`. `append`: If ``True``, append records to the end of the database without comparing their order first. Appending a key that is not greater than the highest existing key will cause corruption. Store a record, returning its previous value if one existed. Returns ``None`` if no previous value existed. This uses the best available mechanism to minimize the cost of a `set-and-return-previous` operation. For databases opened with `dupsort=True`, only the first data element ("duplicate") is returned if it existed, all data elements are removed and the new `(key, data)` pair is inserted. `key`: Bytestring key to store. `value`: Bytestring value to store. Fetch a record's value then delete it. Returns ``None`` if no previous value existed. This uses the best available mechanism to minimize the cost of a `delete-and-return-previous` operation. For databases opened with `dupsort=True`, the first data element ("duplicate") for the key will be popped. `key`: Bytestring key to delete. Helper for centidb. Please do not rely on this interface, it may be removed in future. | 1.481447 | 1 |
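The LMDB binding documented above (Environment.copy and sync, open_db, Transaction, Cursor positioning) fits together as in the following sketch. It assumes the standard py-lmdb package and illustrative paths; nothing here is taken from the row itself, and the copy target directory must already exist.

import lmdb

# Environment with room for one named database (paths and sizes are illustrative).
env = lmdb.open('/tmp/example-env', max_dbs=1, map_size=2 ** 24)
books = env.open_db(b'books')

# Write under a transaction; a clean exit from the context manager commits.
with env.begin(write=True, db=books) as txn:
    txn.put(b'k1', b'first')
    txn.put(b'k2', b'second')

# Read back with a cursor positioned at the first key >= b'k1'.
with env.begin(db=books) as txn:
    cur = txn.cursor()
    if cur.set_range(b'k1'):
        for key, value in cur:
            print(key, value)

print(env.stat())                             # psize, depth, entries, ...
env.sync(True)                                # force the OS buffers to disk
env.copy('/tmp/example-backup', compact=True) # compacting backup; directory must exist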
.virtual_documents/00_core.ipynb.py | AtomScott/image_folder_datasets | 0 | 7684 | <gh_stars>0
# default_exp core
#hide
from nbdev.showdoc import *
from fastcore.test import *
# export
import os
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
import warnings
import torchvision
from torchvision.datasets import MNIST, ImageFolder
from torchvision.transforms import ToTensor, Resize, Compose, CenterCrop, Normalize
import pytorch_lightning as pl
# from pytorch_lightning.metrics.functional import classification, f1
from pytorch_lightning.loggers import TensorBoardLogger
import fastai.vision.augment
import fastai.vision.data
# from fastai.vision.data import ImageDataLoaders
# from fastai.vision.augment import Resize
#export
class ImageFolderDataModule(pl.LightningDataModule):
def __init__(self, data_dir, batch_size, transform):
super().__init__()
self.data_dir = data_dir
self.batch_size = batch_size
self.transform = transform
# Compose([
# Resize(256, interpolation=2),
# CenterCrop(224),
# ToTensor(),
# # TODO: check whether normalize is the same for imagenet and fractalDB
# Normalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225])
# ])
def prepare_data(self, stage=None):
pass
def setup(self, stage=None):
data_dir = self.data_dir
transform = self.transform
self.dls = fastai.vision.data.ImageDataLoaders.from_folder(data_dir, item_tfms=fastai.vision.augment.Resize(224))
self.trainset = ImageFolder(os.path.join(data_dir, 'train'), transform)
self.valset = ImageFolder(os.path.join(data_dir, 'valid'), transform)
def train_dataloader(self):
return DataLoader(self.trainset, batch_size=self.batch_size, shuffle=True)
def val_dataloader(self):
return DataLoader(self.valset, batch_size=self.batch_size, shuffle=False)
def test_dataloader(self):
pass
data_dir = 'Datasets/cifar10'
transform = Compose([
Resize(256, interpolation=2),
CenterCrop(224),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
dm = ImageFolderDataModule(data_dir, 128, transform)
dm.setup()
for x,y in dm.train_dataloader():
test_eq(type(x), torch.Tensor)
test_eq(type(y), torch.Tensor)
break
#export
class CNNModule(pl.LightningModule):
def __init__(self, model=None, pretrained=False, freeze_extractor=False, log_level=10, num_classes=None, weight_path=None):
super().__init__()
self.num_classes = num_classes
self.pretrained = pretrained
self.freeze_extractor = freeze_extractor
assert model is not None, 'Select model from torchvision'
assert num_classes is not None, 'Must configure number of classes with num_classes'
if not model.startswith('resnet'):
warnings.warn('models other than resnet variants may need different setup for finetuning to work.')
# Prepare model for finetuning
if weight_path is not None:
param = torch.load(weight_path)
backbone = eval(f'torchvision.models.{model}(pretrained={False})')
backbone.load_state_dict(param)
else:
backbone = eval(f'torchvision.models.{model}(pretrained={pretrained})')
num_filters = backbone.fc.in_features
layers = list(backbone.children())[:-1]
self.feature_extractor = torch.nn.Sequential(*layers)
self.classifier = nn.Linear(num_filters, num_classes)
def forward(self, x):
if self.freeze_extractor:
self.feature_extractor.eval()
with torch.no_grad():
representations = self.feature_extractor(x).flatten(1)
else:
representations = self.feature_extractor(x).flatten(1)
y = self.classifier(representations)
return y
def training_step(self, batch, batch_idx):
x, y = batch
y_hat = self(x)
outputs = self.calculate_metrics(y_hat=y_hat, y=y)
return outputs
def training_epoch_end(self, outputs):
avg_metrics = {}
for metric in outputs[0].keys():
val = torch.stack([x[metric] for x in outputs]).mean()
self.logger.experiment.add_scalar(f"{metric}/train", val, self.current_epoch)
avg_metrics[metric] = val
# epoch_dictionary = {'loss': avg_metrics['loss']}
def validation_step(self, batch, batch_idx):
x, y = batch
y_hat = self(x)
outputs = self.calculate_metrics(y_hat=y_hat, y=y)
return outputs
def validation_epoch_end(self, outputs):
avg_metrics = {}
for metric in outputs[0].keys():
val = torch.stack([x[metric] for x in outputs]).mean()
self.logger.experiment.add_scalar(f"{metric}/validation", val, self.current_epoch)
avg_metrics[metric] = val
def configure_optimizers(self):
return torch.optim.Adam(self.parameters(), lr=0.02, weight_decay=1e-04)
        # Alternative: torch.optim.SGD(self.parameters(), lr=0.02, weight_decay=1e-04)
def calculate_metrics(self, y, y_hat):
loss = F.cross_entropy(y_hat, y)
y_pred = y_hat.argmax(dim=1)
acc = classification.accuracy(y_pred, y)
f1_score = f1(y_pred, y, self.num_classes)
return {
"loss":loss,
"acc": acc,
"f1": f1_score
}
def on_sanity_check_start(self):
self.logger.disable()
def on_sanity_check_end(self):
self.logger.enable()
modelname = 'resnet18'
logger = TensorBoardLogger('tb_logs', name=modelname)
trainer = pl.Trainer(gpus=1, checkpoint_callback=False, logger=logger, fast_dev_run=5)
model = CNNModule(modelname, pretrained=True, num_classes=len(dm.trainset.classes))
test_eq(trainer.fit(model, dm), 1)
weight_path = 'FractalDB-1000_resnet50_epoch90.pth'
modelname = 'resnet50'
logger = TensorBoardLogger('tb_logs', name=modelname)
trainer = pl.Trainer(gpus=1, checkpoint_callback=False, logger=logger, fast_dev_run=5)
model = CNNModule(modelname, pretrained=True, num_classes=len(dm.trainset.classes), weight_path=weight_path)
test_eq(trainer.fit(model, dm), 1)
| # default_exp core
#hide
from nbdev.showdoc import *
from fastcore.test import *
# export
import os
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
import warnings
import torchvision
from torchvision.datasets import MNIST, ImageFolder
from torchvision.transforms import ToTensor, Resize, Compose, CenterCrop, Normalize
import pytorch_lightning as pl
# from pytorch_lightning.metrics.functional import classification, f1
from pytorch_lightning.loggers import TensorBoardLogger
import fastai.vision.augment
import fastai.vision.data
# from fastai.vision.data import ImageDataLoaders
# from fastai.vision.augment import Resize
#export
class ImageFolderDataModule(pl.LightningDataModule):
def __init__(self, data_dir, batch_size, transform):
super().__init__()
self.data_dir = data_dir
self.batch_size = batch_size
self.transform = transform
# Compose([
# Resize(256, interpolation=2),
# CenterCrop(224),
# ToTensor(),
# # TODO: check whether normalize is the same for imagenet and fractalDB
# Normalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225])
# ])
def prepare_data(self, stage=None):
pass
def setup(self, stage=None):
data_dir = self.data_dir
transform = self.transform
self.dls = fastai.vision.data.ImageDataLoaders.from_folder(data_dir, item_tfms=fastai.vision.augment.Resize(224))
self.trainset = ImageFolder(os.path.join(data_dir, 'train'), transform)
self.valset = ImageFolder(os.path.join(data_dir, 'valid'), transform)
def train_dataloader(self):
return DataLoader(self.trainset, batch_size=self.batch_size, shuffle=True)
def val_dataloader(self):
return DataLoader(self.valset, batch_size=self.batch_size, shuffle=False)
def test_dataloader(self):
pass
data_dir = 'Datasets/cifar10'
transform = Compose([
Resize(256, interpolation=2),
CenterCrop(224),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
dm = ImageFolderDataModule(data_dir, 128, transform)
dm.setup()
for x,y in dm.train_dataloader():
test_eq(type(x), torch.Tensor)
test_eq(type(y), torch.Tensor)
break
#export
class CNNModule(pl.LightningModule):
def __init__(self, model=None, pretrained=False, freeze_extractor=False, log_level=10, num_classes=None, weight_path=None):
super().__init__()
self.num_classes = num_classes
self.pretrained = pretrained
self.freeze_extractor = freeze_extractor
assert model is not None, 'Select model from torchvision'
assert num_classes is not None, 'Must configure number of classes with num_classes'
if not model.startswith('resnet'):
warnings.warn('models other than resnet variants may need different setup for finetuning to work.')
# Prepare model for finetuning
if weight_path is not None:
param = torch.load(weight_path)
backbone = eval(f'torchvision.models.{model}(pretrained={False})')
backbone.load_state_dict(param)
else:
backbone = eval(f'torchvision.models.{model}(pretrained={pretrained})')
num_filters = backbone.fc.in_features
layers = list(backbone.children())[:-1]
self.feature_extractor = torch.nn.Sequential(*layers)
self.classifier = nn.Linear(num_filters, num_classes)
def forward(self, x):
if self.freeze_extractor:
self.feature_extractor.eval()
with torch.no_grad():
representations = self.feature_extractor(x).flatten(1)
else:
representations = self.feature_extractor(x).flatten(1)
y = self.classifier(representations)
return y
def training_step(self, batch, batch_idx):
x, y = batch
y_hat = self(x)
outputs = self.calculate_metrics(y_hat=y_hat, y=y)
return outputs
def training_epoch_end(self, outputs):
avg_metrics = {}
for metric in outputs[0].keys():
val = torch.stack([x[metric] for x in outputs]).mean()
self.logger.experiment.add_scalar(f"{metric}/train", val, self.current_epoch)
avg_metrics[metric] = val
# epoch_dictionary = {'loss': avg_metrics['loss']}
def validation_step(self, batch, batch_idx):
x, y = batch
y_hat = self(x)
outputs = self.calculate_metrics(y_hat=y_hat, y=y)
return outputs
def validation_epoch_end(self, outputs):
avg_metrics = {}
for metric in outputs[0].keys():
val = torch.stack([x[metric] for x in outputs]).mean()
self.logger.experiment.add_scalar(f"{metric}/validation", val, self.current_epoch)
avg_metrics[metric] = val
def configure_optimizers(self):
return torch.optim.Adam(self.parameters(), lr=0.02, weight_decay=1e-04)
        # Alternative: torch.optim.SGD(self.parameters(), lr=0.02, weight_decay=1e-04)
def calculate_metrics(self, y, y_hat):
loss = F.cross_entropy(y_hat, y)
y_pred = y_hat.argmax(dim=1)
acc = classification.accuracy(y_pred, y)
f1_score = f1(y_pred, y, self.num_classes)
return {
"loss":loss,
"acc": acc,
"f1": f1_score
}
def on_sanity_check_start(self):
self.logger.disable()
def on_sanity_check_end(self):
self.logger.enable()
modelname = 'resnet18'
logger = TensorBoardLogger('tb_logs', name=modelname)
trainer = pl.Trainer(gpus=1, checkpoint_callback=False, logger=logger, fast_dev_run=5)
model = CNNModule(modelname, pretrained=True, num_classes=len(dm.trainset.classes))
test_eq(trainer.fit(model, dm), 1)
weight_path = 'FractalDB-1000_resnet50_epoch90.pth'
modelname = 'resnet50'
logger = TensorBoardLogger('tb_logs', name=modelname)
trainer = pl.Trainer(gpus=1, checkpoint_callback=False, logger=logger, fast_dev_run=5)
model = CNNModule(modelname, pretrained=True, num_classes=len(dm.trainset.classes), weight_path=weight_path)
test_eq(trainer.fit(model, dm), 1) | en | 0.348988 | # default_exp core #hide # export # from pytorch_lightning.metrics.functional import classification, f1 # from fastai.vision.data import ImageDataLoaders # from fastai.vision.augment import Resize #export # Compose([ # Resize(256, interpolation=2), # CenterCrop(224), # ToTensor(), # # TODO: check whether normalize is the same for imagenet and fractalDB # Normalize(mean=[0.485, 0.456, 0.406], # std=[0.229, 0.224, 0.225]) # ]) #export # Prepare model for finetuning # epoch_dictionary = {'loss': avg_metrics['loss']} # > return torch.optim.SGF(self.parameters(), lr=self.lr, aldsfk'a) | 1.991793 | 2 |
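The CNNModule above calls classification.accuracy and f1 from pytorch_lightning.metrics.functional, but that import is commented out and the module was later moved into the separate torchmetrics package. A minimal replacement sketch, assuming torchmetrics >= 0.11 is installed (the version pin and function choice are assumptions, not part of the row):

import torch
import torch.nn.functional as F
from torchmetrics.functional import accuracy, f1_score

def calculate_metrics(y, y_hat, num_classes):
    # Mirrors CNNModule.calculate_metrics using torchmetrics instead of the
    # removed pytorch_lightning.metrics.functional helpers.
    loss = F.cross_entropy(y_hat, y)
    y_pred = y_hat.argmax(dim=1)
    acc = accuracy(y_pred, y, task="multiclass", num_classes=num_classes)
    f1 = f1_score(y_pred, y, task="multiclass", num_classes=num_classes)
    return {"loss": loss, "acc": acc, "f1": f1}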
src/modules/iam/module.py | pgorecki/python-ddd | 10 | 7685 | from seedwork.application.modules import BusinessModule
from modules.iam.application.services import AuthenticationService
class IdentityAndAccessModule(BusinessModule):
def __init__(self, authentication_service: AuthenticationService):
self.authentication_service = authentication_service
# @staticmethod
# def create(container):
# assert False
# """Factory method for creating a module by using dependencies from a DI container"""
# return IdentityAndAccessModule(
# logger=container.logger(),
# authentication_service=container.authentication_service(),
# )
| from seedwork.application.modules import BusinessModule
from modules.iam.application.services import AuthenticationService
class IdentityAndAccessModule(BusinessModule):
def __init__(self, authentication_service: AuthenticationService):
self.authentication_service = authentication_service
# @staticmethod
# def create(container):
# assert False
# """Factory method for creating a module by using dependencies from a DI container"""
# return IdentityAndAccessModule(
# logger=container.logger(),
# authentication_service=container.authentication_service(),
# )
| en | 0.401272 | # @staticmethod # def create(container): # assert False # """Factory method for creating a module by using dependencies from a DI container""" # return IdentityAndAccessModule( # logger=container.logger(), # authentication_service=container.authentication_service(), # ) | 2.350945 | 2 |
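The commented-out create() factory above hints at construction through a DI container. Wired by hand it reduces to the sketch below; the no-argument AuthenticationService construction is an assumption for illustration, since its real dependencies are defined elsewhere in the project:

from modules.iam.application.services import AuthenticationService
from modules.iam.module import IdentityAndAccessModule

# Hand-wired composition root (sketch). A real application would let the DI
# container build AuthenticationService and inject it, as the commented-out
# create() factory suggests.
auth_service = AuthenticationService()          # assumed no-arg constructor
iam = IdentityAndAccessModule(authentication_service=auth_service)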
test_scripts/pyfora2/containerTests.py | ufora/ufora | 571 | 7686 | # Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import pyfora
import ufora.config.Setup as Setup
import ufora.FORA.python.PurePython.DictTestCases as DictTestCases
import ufora.FORA.python.PurePython.ListTestCases as ListTestCases
import ufora.FORA.python.PurePython.TupleTestCases as TupleTestCases
import ufora.FORA.python.PurePython.ExecutorTestCommon as ExecutorTestCommon
import ufora.test.ClusterSimulation as ClusterSimulation
class ExecutorSimulationTest(
unittest.TestCase,
ExecutorTestCommon.ExecutorTestCommon,
DictTestCases.DictTestCases,
ListTestCases.ListTestCases,
TupleTestCases.TupleTestCases):
@classmethod
def setUpClass(cls):
cls.config = Setup.config()
cls.executor = None
cls.simulation = ClusterSimulation.Simulator.createGlobalSimulator()
cls.simulation.startService()
cls.simulation.getDesirePublisher().desireNumberOfWorkers(1)
@classmethod
def tearDownClass(cls):
cls.simulation.stopService()
@classmethod
def create_executor(cls, allowCached=True):
if not allowCached:
return pyfora.connect('http://localhost:30000')
if cls.executor is None:
cls.executor = pyfora.connect('http://localhost:30000')
cls.executor.stayOpenOnExit = True
return cls.executor
if __name__ == '__main__':
import ufora.config.Mainline as Mainline
Mainline.UnitTestMainline()
| # Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import pyfora
import ufora.config.Setup as Setup
import ufora.FORA.python.PurePython.DictTestCases as DictTestCases
import ufora.FORA.python.PurePython.ListTestCases as ListTestCases
import ufora.FORA.python.PurePython.TupleTestCases as TupleTestCases
import ufora.FORA.python.PurePython.ExecutorTestCommon as ExecutorTestCommon
import ufora.test.ClusterSimulation as ClusterSimulation
class ExecutorSimulationTest(
unittest.TestCase,
ExecutorTestCommon.ExecutorTestCommon,
DictTestCases.DictTestCases,
ListTestCases.ListTestCases,
TupleTestCases.TupleTestCases):
@classmethod
def setUpClass(cls):
cls.config = Setup.config()
cls.executor = None
cls.simulation = ClusterSimulation.Simulator.createGlobalSimulator()
cls.simulation.startService()
cls.simulation.getDesirePublisher().desireNumberOfWorkers(1)
@classmethod
def tearDownClass(cls):
cls.simulation.stopService()
@classmethod
def create_executor(cls, allowCached=True):
if not allowCached:
return pyfora.connect('http://localhost:30000')
if cls.executor is None:
cls.executor = pyfora.connect('http://localhost:30000')
cls.executor.stayOpenOnExit = True
return cls.executor
if __name__ == '__main__':
import ufora.config.Mainline as Mainline
Mainline.UnitTestMainline()
| en | 0.848067 | # Copyright 2015 Ufora Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. | 1.684021 | 2 |
src/utils/torch_common.py | quochungto/SIIM-COVID19-Detection | 0 | 7687 | import os
import gc
import random
import numpy as np
import torch
def seed_everything(seed):
os.environ['PYTHONHASHSEED'] = str(seed)
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
def memory_cleanup():
"""
Cleans up GPU memory
https://github.com/huggingface/transformers/issues/1742
"""
for obj in gc.get_objects():
if torch.is_tensor(obj):
del obj
gc.collect()
torch.cuda.empty_cache()
| import os
import gc
import random
import numpy as np
import torch
def seed_everything(seed):
os.environ['PYTHONHASHSEED'] = str(seed)
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
def memory_cleanup():
"""
Cleans up GPU memory
https://github.com/huggingface/transformers/issues/1742
"""
for obj in gc.get_objects():
if torch.is_tensor(obj):
del obj
gc.collect()
torch.cuda.empty_cache()
| en | 0.715796 | Cleans up GPU memory https://github.com/huggingface/transformers/issues/1742 | 2.35495 | 2 |
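A typical call pattern for the two helpers above is to seed once before any data or model objects are built and to release GPU memory between independent runs. Note that seed_everything also sets torch.backends.cudnn.benchmark = True, which favours speed over bit-identical reproducibility. The fold loop and import path below are illustrative.

import torch

from src.utils.torch_common import seed_everything, memory_cleanup  # path assumed from the repo layout

seed_everything(42)                  # fix Python, NumPy and torch RNGs up front

for fold in range(5):                # e.g. cross-validation folds
    model = torch.nn.Linear(10, 2).cuda()   # requires a CUDA device
    # ... train and evaluate this fold ...
    del model
    memory_cleanup()                 # drop lingering tensors and empty the CUDA cache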
recipes/freeimage/all/conanfile.py | marsven/conan-center-index | 0 | 7688 | from conans import ConanFile, CMake, tools
import os
import shutil
required_conan_version = ">=1.43.0"
class FreeImageConan(ConanFile):
name = "freeimage"
description = "Open Source library project for developers who would like to support popular graphics image formats"\
"like PNG, BMP, JPEG, TIFF and others as needed by today's multimedia applications."
homepage = "https://freeimage.sourceforge.io"
url = "https://github.com/conan-io/conan-center-index"
license = "FreeImage", "GPL-3.0-or-later", "GPL-2.0-or-later"
topics = ("freeimage", "image", "decoding", "graphics")
generators = "cmake", "cmake_find_package"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"with_jpeg": [False, "libjpeg", "libjpeg-turbo"],
"with_png": [True, False],
"with_tiff": [True, False],
"with_jpeg2000": [True, False],
"with_openexr": [True, False],
"with_eigen": [True, False],
"with_webp": [True, False],
"with_raw": [True, False],
"with_jxr": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"with_jpeg": "libjpeg",
"with_png": True,
"with_tiff": True,
"with_jpeg2000": True,
"with_openexr": True,
"with_eigen": True,
"with_webp": True,
"with_raw": True,
"with_jxr": True,
}
_cmake = None
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _build_subfolder(self):
return "build_subfolder"
def export_sources(self):
self.copy("CMakeLists.txt")
for patch in self.conan_data.get("patches", {}).get(self.version, []):
self.copy(patch["patch_file"])
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
tools.check_min_cppstd(self, "11")
if self.options.shared:
del self.options.fPIC
self.output.warn("G3 plugin and JPEGTransform are disabled.")
if self.options.with_jpeg is not None:
if self.options.with_tiff:
self.options["libtiff"].jpeg = self.options.with_jpeg
def requirements(self):
self.requires("zlib/1.2.11")
if self.options.with_jpeg == "libjpeg":
self.requires("libjpeg/9d")
elif self.options.with_jpeg == "libjpeg-turbo":
self.requires("libjpeg-turbo/2.1.2")
if self.options.with_jpeg2000:
self.requires("openjpeg/2.4.0")
if self.options.with_png:
self.requires("libpng/1.6.37")
if self.options.with_webp:
self.requires("libwebp/1.2.2")
if self.options.with_openexr:
self.requires("openexr/2.5.7")
if self.options.with_raw:
self.requires("libraw/0.20.2")
if self.options.with_jxr:
self.requires("jxrlib/cci.20170615")
if self.options.with_tiff:
self.requires("libtiff/4.3.0")
def source(self):
tools.get(**self.conan_data["sources"][self.version],
destination=self._source_subfolder, strip_root=True)
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
self._cmake.definitions["WITH_JPEG"] = self.options.with_jpeg != False
self._cmake.definitions["WITH_OPENJPEG"] = self.options.with_jpeg2000
self._cmake.definitions["WITH_PNG"] = self.options.with_png
self._cmake.definitions["WITH_WEBP"] = self.options.with_webp
self._cmake.definitions["WITH_OPENEXR"] = self.options.with_openexr
self._cmake.definitions["WITH_RAW"] = self.options.with_raw
self._cmake.definitions["WITH_JXR"] = self.options.with_jxr
self._cmake.definitions["WITH_TIFF"] = self.options.with_tiff
self._cmake.configure(build_dir=self._build_subfolder)
return self._cmake
def build(self):
tools.rmdir(os.path.join(self._source_subfolder, "Source", "LibPNG"))
tools.rmdir(os.path.join(self._source_subfolder, "Source", "LibTIFF4"))
tools.rmdir(os.path.join(self._source_subfolder, "Source", "LibOpenJPEG"))
tools.rmdir(os.path.join(self._source_subfolder, "Source", "LibJXR"))
tools.rmdir(os.path.join(self._source_subfolder, "Source", "LibWebP"))
tools.rmdir(os.path.join(self._source_subfolder, "Source", "LibRawLite"))
tools.rmdir(os.path.join(self._source_subfolder, "Source", "OpenEXR"))
for patch in self.conan_data.get("patches", {}).get(self.version, {}):
tools.patch(**patch)
cmake = self._configure_cmake()
cmake.build()
def package(self):
cmake = self._configure_cmake()
cmake.install()
self.copy("license-fi.txt", dst="licenses", src=self._source_subfolder)
self.copy("license-gplv3.txt", dst="licenses", src=self._source_subfolder)
self.copy("license-gplv2.txt", dst="licenses", src=self._source_subfolder)
def package_info(self):
def imageformats_deps():
components = []
components.append("zlib::zlib")
if self.options.with_jpeg:
components.append("{0}::{0}".format(self.options.with_jpeg))
if self.options.with_jpeg2000:
components.append("openjpeg::openjpeg")
if self.options.with_png:
components.append("libpng::libpng")
if self.options.with_webp:
components.append("libwebp::libwebp")
if self.options.with_openexr:
components.append("openexr::openexr")
if self.options.with_raw:
components.append("libraw::libraw")
if self.options.with_jxr:
components.append("jxrlib::jxrlib")
if self.options.with_tiff:
components.append("libtiff::libtiff")
return components
self.cpp_info.names["pkg_config"] = "freeimage"
self.cpp_info.names["cmake_find_package"] = "FreeImage"
self.cpp_info.names["cmake_find_package_multi"] = "FreeImage"
self.cpp_info.components["FreeImage"].libs = ["freeimage"]
self.cpp_info.components["FreeImage"].requires = imageformats_deps()
self.cpp_info.components["FreeImagePlus"].libs = ["freeimageplus"]
self.cpp_info.components["FreeImagePlus"].requires = ["FreeImage"]
if not self.options.shared:
self.cpp_info.components["FreeImage"].defines.append("FREEIMAGE_LIB")
| from conans import ConanFile, CMake, tools
import os
import shutil
required_conan_version = ">=1.43.0"
class FreeImageConan(ConanFile):
name = "freeimage"
description = "Open Source library project for developers who would like to support popular graphics image formats"\
"like PNG, BMP, JPEG, TIFF and others as needed by today's multimedia applications."
homepage = "https://freeimage.sourceforge.io"
url = "https://github.com/conan-io/conan-center-index"
license = "FreeImage", "GPL-3.0-or-later", "GPL-2.0-or-later"
topics = ("freeimage", "image", "decoding", "graphics")
generators = "cmake", "cmake_find_package"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"with_jpeg": [False, "libjpeg", "libjpeg-turbo"],
"with_png": [True, False],
"with_tiff": [True, False],
"with_jpeg2000": [True, False],
"with_openexr": [True, False],
"with_eigen": [True, False],
"with_webp": [True, False],
"with_raw": [True, False],
"with_jxr": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"with_jpeg": "libjpeg",
"with_png": True,
"with_tiff": True,
"with_jpeg2000": True,
"with_openexr": True,
"with_eigen": True,
"with_webp": True,
"with_raw": True,
"with_jxr": True,
}
_cmake = None
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _build_subfolder(self):
return "build_subfolder"
def export_sources(self):
self.copy("CMakeLists.txt")
for patch in self.conan_data.get("patches", {}).get(self.version, []):
self.copy(patch["patch_file"])
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
tools.check_min_cppstd(self, "11")
if self.options.shared:
del self.options.fPIC
self.output.warn("G3 plugin and JPEGTransform are disabled.")
if self.options.with_jpeg is not None:
if self.options.with_tiff:
self.options["libtiff"].jpeg = self.options.with_jpeg
def requirements(self):
self.requires("zlib/1.2.11")
if self.options.with_jpeg == "libjpeg":
self.requires("libjpeg/9d")
elif self.options.with_jpeg == "libjpeg-turbo":
self.requires("libjpeg-turbo/2.1.2")
if self.options.with_jpeg2000:
self.requires("openjpeg/2.4.0")
if self.options.with_png:
self.requires("libpng/1.6.37")
if self.options.with_webp:
self.requires("libwebp/1.2.2")
if self.options.with_openexr:
self.requires("openexr/2.5.7")
if self.options.with_raw:
self.requires("libraw/0.20.2")
if self.options.with_jxr:
self.requires("jxrlib/cci.20170615")
if self.options.with_tiff:
self.requires("libtiff/4.3.0")
def source(self):
tools.get(**self.conan_data["sources"][self.version],
destination=self._source_subfolder, strip_root=True)
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
self._cmake.definitions["WITH_JPEG"] = self.options.with_jpeg != False
self._cmake.definitions["WITH_OPENJPEG"] = self.options.with_jpeg2000
self._cmake.definitions["WITH_PNG"] = self.options.with_png
self._cmake.definitions["WITH_WEBP"] = self.options.with_webp
self._cmake.definitions["WITH_OPENEXR"] = self.options.with_openexr
self._cmake.definitions["WITH_RAW"] = self.options.with_raw
self._cmake.definitions["WITH_JXR"] = self.options.with_jxr
self._cmake.definitions["WITH_TIFF"] = self.options.with_tiff
self._cmake.configure(build_dir=self._build_subfolder)
return self._cmake
def build(self):
tools.rmdir(os.path.join(self._source_subfolder, "Source", "LibPNG"))
tools.rmdir(os.path.join(self._source_subfolder, "Source", "LibTIFF4"))
tools.rmdir(os.path.join(self._source_subfolder, "Source", "LibOpenJPEG"))
tools.rmdir(os.path.join(self._source_subfolder, "Source", "LibJXR"))
tools.rmdir(os.path.join(self._source_subfolder, "Source", "LibWebP"))
tools.rmdir(os.path.join(self._source_subfolder, "Source", "LibRawLite"))
tools.rmdir(os.path.join(self._source_subfolder, "Source", "OpenEXR"))
for patch in self.conan_data.get("patches", {}).get(self.version, {}):
tools.patch(**patch)
cmake = self._configure_cmake()
cmake.build()
def package(self):
cmake = self._configure_cmake()
cmake.install()
self.copy("license-fi.txt", dst="licenses", src=self._source_subfolder)
self.copy("license-gplv3.txt", dst="licenses", src=self._source_subfolder)
self.copy("license-gplv2.txt", dst="licenses", src=self._source_subfolder)
def package_info(self):
def imageformats_deps():
components = []
components.append("zlib::zlib")
if self.options.with_jpeg:
components.append("{0}::{0}".format(self.options.with_jpeg))
if self.options.with_jpeg2000:
components.append("openjpeg::openjpeg")
if self.options.with_png:
components.append("libpng::libpng")
if self.options.with_webp:
components.append("libwebp::libwebp")
if self.options.with_openexr:
components.append("openexr::openexr")
if self.options.with_raw:
components.append("libraw::libraw")
if self.options.with_jxr:
components.append("jxrlib::jxrlib")
if self.options.with_tiff:
components.append("libtiff::libtiff")
return components
self.cpp_info.names["pkg_config"] = "freeimage"
self.cpp_info.names["cmake_find_package"] = "FreeImage"
self.cpp_info.names["cmake_find_package_multi"] = "FreeImage"
self.cpp_info.components["FreeImage"].libs = ["freeimage"]
self.cpp_info.components["FreeImage"].requires = imageformats_deps()
self.cpp_info.components["FreeImagePlus"].libs = ["freeimageplus"]
self.cpp_info.components["FreeImagePlus"].requires = ["FreeImage"]
if not self.options.shared:
self.cpp_info.components["FreeImage"].defines.append("FREEIMAGE_LIB")
| none | 1 | 1.867886 | 2 |
|
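A consumer can smoke-test the FreeImage recipe above with a conventional Conan 1.x test_package. The sketch below assumes the usual layout with a small CMake-built executable called test_package; names and layout are assumptions, not taken from the row.

import os
from conans import ConanFile, CMake, tools

class TestPackageConan(ConanFile):
    settings = "os", "arch", "compiler", "build_type"
    generators = "cmake", "cmake_find_package_multi"

    def build(self):
        cmake = CMake(self)
        cmake.configure()
        cmake.build()

    def test(self):
        if not tools.cross_building(self):
            bin_path = os.path.join("bin", "test_package")
            self.run(bin_path, run_environment=True)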
src/google/appengine/datastore/datastore_query.py | myelin/appengine-python-standard | 0 | 7689 | <reponame>myelin/appengine-python-standard<gh_stars>0
#!/usr/bin/env python
#
# Copyright 2007 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A thin wrapper around datastore query RPC calls.
This provides wrappers around the internal only datastore_pb library and is
designed to be the lowest-level API to be used by all Python datastore client
libraries for executing queries. It provides a layer of protection so the actual
RPC syntax can change without affecting client libraries.
Any class, function, field or argument starting with an '_' is for INTERNAL use
only and should not be used by developers!
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import base64
import collections
import functools
import pickle
import six
from google.appengine.api import cmp_compat
from google.appengine.api import datastore_errors
from google.appengine.api import datastore_types
from google.appengine.datastore import datastore_index
from google.appengine.datastore import datastore_pb
from google.appengine.datastore import datastore_pbs
from google.appengine.datastore import datastore_rpc
from google.protobuf import message
from google.appengine.datastore import entity_bytes_pb2 as entity_pb2
__all__ = ['Batch',
'Batcher',
'CompositeFilter',
'CompositeOrder',
'CorrelationFilter',
'Cursor',
'FetchOptions',
'FilterPredicate',
'Order',
'PropertyFilter',
'PropertyOrder',
'Query',
'QueryOptions',
'ResultsIterator',
'make_filter',
'apply_query',
'inject_results']
if datastore_pbs._CLOUD_DATASTORE_ENABLED:
from google.appengine.datastore.datastore_pbs import googledatastore
class _BaseComponent(object):
"""A base class for query components.
Currently just implements basic == and != functions.
"""
def __eq__(self, other):
if self.__class__ is not other.__class__:
return NotImplemented
return self is other or self.__dict__ == other.__dict__
def __ne__(self, other):
equal = self.__eq__(other)
if equal is NotImplemented:
return equal
return not equal
def make_filter(name, op, values):
"""Constructs a FilterPredicate from the given name, op and values.
Args:
name: A non-empty string, the name of the property to filter.
op: One of PropertyFilter._OPERATORS.keys(), the operator to use.
values: A supported value, the value to compare against.
Returns:
if values is a list, a CompositeFilter that uses AND to combine all
values, otherwise a PropertyFilter for the single value.
Raises:
datastore_errors.BadPropertyError: if the property name is invalid.
datastore_errors.BadValueError: if the property did not validate correctly
or the value was an empty list.
Other exception types (like OverflowError): if the property value does not
meet type-specific criteria.
"""
datastore_types.ValidateProperty(name, values)
properties = datastore_types.ToPropertyPb(name, values)
if isinstance(properties, list):
filters = [PropertyFilter(op, prop) for prop in properties]
return CompositeFilter(CompositeFilter.AND, filters)
else:
return PropertyFilter(op, properties)
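# Illustrative sketch only (not part of the module API): how make_filter
# behaves for a single value versus a list of values. The property names and
# values below are hypothetical.
def _example_make_filter():
  single = make_filter('height', '>', 100)
  multi = make_filter('tag', '=', ['blue', 'tall'])
  return single, multi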
def _make_key_value_map(entity, property_names):
"""Extracts key values from the given entity.
Args:
entity: The entity_pb2.EntityProto to extract values from.
property_names: The names of the properties from which to extract values.
Returns:
A dict mapping property names to a lists of key values.
"""
value_map = dict((six.ensure_text(name), []) for name in property_names)
for prop in entity.property:
prop_name = six.ensure_text(prop.name)
if prop_name in value_map:
value_map[prop_name].append(
datastore_types.PropertyValueToKeyValue(prop.value))
key_prop = six.ensure_text(datastore_types.KEY_SPECIAL_PROPERTY)
if key_prop in value_map:
value_map[key_prop] = [datastore_types.ReferenceToKeyValue(entity.key)]
return value_map
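# Illustrative sketch only (not part of the module API): what
# _make_key_value_map extracts from an entity. The entity and property name
# below are hypothetical and built purely for illustration.
def _example_key_value_map():
  entity = entity_pb2.EntityProto()
  entity.property.add().CopyFrom(datastore_types.ToPropertyPb('color', 'blue'))
  return _make_key_value_map(entity, ['color'])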
class _PropertyComponent(_BaseComponent):
"""A component that operates on a specific set of properties."""
def _get_prop_names(self):
"""Returns a set of property names used by the filter."""
raise NotImplementedError
class FilterPredicate(_PropertyComponent):
"""An abstract base class for all query filters.
All sub-classes must be immutable as these are often stored without creating a
defensive copy.
"""
def __call__(self, entity):
"""Applies the filter predicate to the given entity.
Args:
entity: the datastore_pb.EntityProto to test.
Returns:
True if the given entity matches the filter, False otherwise.
"""
return self._apply(_make_key_value_map(entity, self._get_prop_names()))
def _apply(self, key_value_map):
"""Apply the given component to the comparable value map.
A filter matches a list of values if at least one value in the list
matches the filter, for example:
'prop: [1, 2]' matches both 'prop = 1' and 'prop = 2' but not 'prop = 3'
Note: the values are actually represented as tuples whose first item
encodes the type; see datastore_types.PropertyValueToKeyValue().
Args:
key_value_map: A dict mapping property names to a list of
comparable values.
    Returns:
A boolean indicating if the given map matches the filter.
"""
raise NotImplementedError
def _prune(self, key_value_map):
"""Removes values from the given map that do not match the filter.
When doing a scan in the datastore, only index values that match the filters
are seen. When multiple values that point to the same entity are seen, the
entity only appears where the first value is found. This function removes
all values that don't match the query so that the first value in the map
is the same one the datastore would see first.
Args:
key_value_map: the comparable value map from which to remove
values. Does not need to contain values for all filtered properties.
Returns:
A value that evaluates to False if every value in a single list was
completely removed. This effectively applies the filter but is less
efficient than _apply().
"""
raise NotImplementedError
def _to_pb(self):
"""Internal only function to generate a pb."""
raise NotImplementedError(
'This filter only supports in memory operations (%r)' % self)
def _to_pbs(self):
"""Internal only function to generate a list of pbs."""
return [self._to_pb()]
def _to_pb_v1(self, adapter):
"""Internal only function to generate a v1 pb.
Args:
adapter: A datastore_rpc.AbstractAdapter
"""
raise NotImplementedError(
'This filter only supports in memory operations (%r)' % self)
class _SinglePropertyFilter(FilterPredicate):
"""Base class for a filter that operates on a single property."""
def _get_prop_name(self):
"""Returns the name of the property being filtered."""
raise NotImplementedError
def _apply_to_value(self, value):
"""Apply the filter to the given value.
Args:
value: The comparable value to check.
Returns:
A boolean indicating if the given value matches the filter.
"""
raise NotImplementedError
def _get_prop_names(self):
return set([self._get_prop_name()])
def _apply(self, value_map):
for other_value in value_map[self._get_prop_name()]:
if self._apply_to_value(other_value):
return True
return False
def _prune(self, value_map):
if self._get_prop_name() not in value_map:
return True
values = [value for value in value_map[self._get_prop_name()]
if self._apply_to_value(value)]
value_map[self._get_prop_name()] = values
return bool(values)
class PropertyFilter(_SinglePropertyFilter):
"""An immutable filter predicate that constrains a single property."""
_OPERATORS = {
'<': datastore_pb.Query.Filter.LESS_THAN,
'<=': datastore_pb.Query.Filter.LESS_THAN_OR_EQUAL,
'>': datastore_pb.Query.Filter.GREATER_THAN,
'>=': datastore_pb.Query.Filter.GREATER_THAN_OR_EQUAL,
'=': datastore_pb.Query.Filter.EQUAL,
}
_OPERATORS_INVERSE = dict((value, key)
for key, value in _OPERATORS.items())
_OPERATORS_TO_PYTHON_OPERATOR = {
datastore_pb.Query.Filter.LESS_THAN: '<',
datastore_pb.Query.Filter.LESS_THAN_OR_EQUAL: '<=',
datastore_pb.Query.Filter.GREATER_THAN: '>',
datastore_pb.Query.Filter.GREATER_THAN_OR_EQUAL: '>=',
datastore_pb.Query.Filter.EQUAL: '==',
}
_INEQUALITY_OPERATORS = frozenset(['<', '<=', '>', '>='])
_INEQUALITY_OPERATORS_ENUM = frozenset([
datastore_pb.Query.Filter.LESS_THAN,
datastore_pb.Query.Filter.LESS_THAN_OR_EQUAL,
datastore_pb.Query.Filter.GREATER_THAN,
datastore_pb.Query.Filter.GREATER_THAN_OR_EQUAL,
])
_UPPERBOUND_INEQUALITY_OPERATORS = frozenset(['<', '<='])
def __init__(self, op, value):
"""Constructor.
Args:
op: A string representing the operator to use.
value: A entity_pb2.Property, the property and value to compare against.
Raises:
datastore_errors.BadArgumentError if op has an unsupported value or value
is not an entity_pb2.Property.
"""
if op not in self._OPERATORS:
raise datastore_errors.BadArgumentError('unknown operator: %r' % (op,))
if not isinstance(value, entity_pb2.Property):
raise datastore_errors.BadArgumentError(
'value argument should be entity_pb2.Property (%r)' % (value,))
super(PropertyFilter, self).__init__()
self._filter = datastore_pb.Query.Filter()
self._filter.op = self._OPERATORS[op]
self._filter.property.add().CopyFrom(value)
@property
def op(self):
raw_op = self._filter.op
return self._OPERATORS_INVERSE.get(raw_op, str(raw_op))
@property
def value(self):
return self._filter.property[0]
def __repr__(self):
prop = self.value
name = prop.name
value = datastore_types.FromPropertyPb(prop)
if six.PY2 and isinstance(value, long):
value = int(value)
return '%s(%r, <%r, %r>)' % (self.__class__.__name__, six.ensure_str(
self.op), six.ensure_str(name), value)
def _get_prop_name(self):
return self._filter.property[0].name
def _apply_to_value(self, value):
if not hasattr(self, '_cmp_value'):
if self._filter.op == datastore_pb.Query.Filter.EXISTS:
return True
self._cmp_value = datastore_types.PropertyValueToKeyValue(
self._filter.property[0].value)
self._condition = ('value %s self._cmp_value' %
self._OPERATORS_TO_PYTHON_OPERATOR[self._filter.op])
return eval(self._condition)
def _has_inequality(self):
"""Returns True if the filter predicate contains inequalities filters."""
return self._filter.op in self._INEQUALITY_OPERATORS_ENUM
@classmethod
def _from_pb(cls, filter_pb):
self = cls.__new__(cls)
self._filter = filter_pb
return self
def _to_pb(self):
"""Returns the internal only pb representation."""
return self._filter
def _to_pb_v1(self, adapter):
"""Returns a googledatastore.Filter representation of the filter.
Args:
adapter: A datastore_rpc.AbstractAdapter
"""
filter_pb = googledatastore.Filter()
prop_filter_pb = filter_pb.property_filter
adapter.get_query_converter()._v3_filter_to_v1_property_filter(
self._filter, prop_filter_pb)
return filter_pb
def __getstate__(self):
raise pickle.PicklingError(
'Pickling of datastore_query.PropertyFilter is unsupported.')
def __eq__(self, other):
if self.__class__ is not other.__class__:
if other.__class__ is _PropertyRangeFilter:
return [self._filter] == other._to_pbs()
return NotImplemented
return self._filter == other._filter
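# Illustrative sketch only (not part of the module API): constructing a
# PropertyFilter directly from a property pb produced by
# datastore_types.ToPropertyPb. The name and value are hypothetical;
# make_filter() above is the more common entry point.
def _example_property_filter():
  prop_pb = datastore_types.ToPropertyPb('height', 100)
  return PropertyFilter('>=', prop_pb)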
class _PropertyRangeFilter(_SinglePropertyFilter):
"""A filter predicate that represents a range of values.
Since we allow multi-valued properties there is a large difference between
"x > 0 AND x < 1" and "0 < x < 1." An entity with x = [-1, 2] will match the
first but not the second.
Since the datastore only allows a single inequality filter, multiple
  inequality filters are merged into a single range filter in the
datastore (unlike equality filters). This class is used by
datastore_query.CompositeFilter to implement the same logic.
"""
_start_key_value = None
_end_key_value = None
@datastore_rpc._positional(1)
def __init__(self, start=None, start_incl=True, end=None, end_incl=True):
"""Constructs a range filter using start and end properties.
Args:
start: A entity_pb2.Property to use as a lower bound or None to indicate
no lower bound.
start_incl: A boolean that indicates if the lower bound is inclusive.
end: A entity_pb2.Property to use as an upper bound or None to indicate
no upper bound.
end_incl: A boolean that indicates if the upper bound is inclusive.
"""
if start is not None and not isinstance(start, entity_pb2.Property):
raise datastore_errors.BadArgumentError(
'start argument should be entity_pb2.Property (%r)' % (start,))
if end is not None and not isinstance(end, entity_pb2.Property):
raise datastore_errors.BadArgumentError(
          'end argument should be entity_pb2.Property (%r)' % (end,))
if start and end and start.name != end.name:
raise datastore_errors.BadArgumentError(
'start and end arguments must be on the same property (%s != %s)' %
(start.name, end.name))
if not start and not end:
raise datastore_errors.BadArgumentError(
'Unbounded ranges are not supported.')
super(_PropertyRangeFilter, self).__init__()
self._start = start
self._start_incl = start_incl
self._end = end
self._end_incl = end_incl
@classmethod
def from_property_filter(cls, prop_filter):
op = prop_filter._filter.op
if op == datastore_pb.Query.Filter.GREATER_THAN:
return cls(start=prop_filter._filter.property[0], start_incl=False)
elif op == datastore_pb.Query.Filter.GREATER_THAN_OR_EQUAL:
return cls(start=prop_filter._filter.property[0])
elif op == datastore_pb.Query.Filter.LESS_THAN:
return cls(end=prop_filter._filter.property[0], end_incl=False)
elif op == datastore_pb.Query.Filter.LESS_THAN_OR_EQUAL:
return cls(end=prop_filter._filter.property[0])
else:
raise datastore_errors.BadArgumentError(
'Unsupported operator (%s)' % (op,))
def intersect(self, other):
"""Returns a filter representing the intersection of self and other."""
if isinstance(other, PropertyFilter):
other = self.from_property_filter(other)
elif not isinstance(other, _PropertyRangeFilter):
raise datastore_errors.BadArgumentError(
'other argument should be a _PropertyRangeFilter (%r)' % (other,))
if other._get_prop_name() != self._get_prop_name():
raise datastore_errors.BadArgumentError(
'other argument must be on the same property (%s != %s)' %
(other._get_prop_name(), self._get_prop_name()))
start_source = None
if other._start:
if self._start:
result = cmp_compat.cmp(
self._get_start_key_value(), other._get_start_key_value())
if result == 0:
result = cmp_compat.cmp(other._start_incl, self._start_incl)
if result > 0:
start_source = self
elif result < 0:
start_source = other
else:
start_source = other
elif self._start:
start_source = self
end_source = None
if other._end:
if self._end:
result = cmp_compat.cmp(
self._get_end_key_value(), other._get_end_key_value())
if result == 0:
result = cmp_compat.cmp(self._end_incl, other._end_incl)
if result < 0:
end_source = self
elif result > 0:
end_source = other
else:
end_source = other
elif self._end:
end_source = self
if start_source:
if end_source in (start_source, None):
return start_source
result = _PropertyRangeFilter(start=start_source._start,
start_incl=start_source._start_incl,
end=end_source._end,
end_incl=end_source._end_incl)
result._start_key_value = start_source._start_key_value
result._end_key_value = end_source._end_key_value
return result
else:
return end_source or self
def _get_start_key_value(self):
if self._start_key_value is None:
self._start_key_value = datastore_types.PropertyValueToKeyValue(
self._start.value)
return self._start_key_value
def _get_end_key_value(self):
if self._end_key_value is None:
self._end_key_value = datastore_types.PropertyValueToKeyValue(
self._end.value)
return self._end_key_value
def _apply_to_value(self, value):
"""Apply the filter to the given value.
Args:
value: The comparable value to check.
Returns:
A boolean indicating if the given value matches the filter.
"""
if self._start:
result = cmp_compat.cmp(self._get_start_key_value(), value)
if result > 0 or (result == 0 and not self._start_incl):
return False
if self._end:
result = cmp_compat.cmp(self._get_end_key_value(), value)
if result < 0 or (result == 0 and not self._end_incl):
return False
return True
def _get_prop_name(self):
if self._start:
return self._start.name
if self._end:
return self._end.name
assert False
def _to_pbs(self):
pbs = []
if self._start:
if self._start_incl:
op = datastore_pb.Query.Filter.GREATER_THAN_OR_EQUAL
else:
op = datastore_pb.Query.Filter.GREATER_THAN
pb = datastore_pb.Query.Filter()
pb.op = op
pb.property.add().CopyFrom(self._start)
pbs.append(pb)
if self._end:
if self._end_incl:
op = datastore_pb.Query.Filter.LESS_THAN_OR_EQUAL
else:
op = datastore_pb.Query.Filter.LESS_THAN
pb = datastore_pb.Query.Filter()
pb.op = op
pb.property.add().CopyFrom(self._end)
pbs.append(pb)
return pbs
def _to_pb_v1(self, adapter):
"""Returns a googledatastore.Filter representation of the filter.
Args:
adapter: A datastore_rpc.AbstractAdapter.
"""
filter_pb = googledatastore.Filter()
composite_filter = filter_pb.composite_filter
composite_filter.op = googledatastore.CompositeFilter.AND
if self._start:
if self._start_incl:
op = googledatastore.PropertyFilter.GREATER_THAN_OR_EQUAL
else:
op = googledatastore.PropertyFilter.GREATER_THAN
pb = composite_filter.filters.add().property_filter
pb.op = op
pb.property.name = self._start.name
adapter.get_entity_converter().v3_property_to_v1_value(
self._start, True, pb.value)
if self._end:
if self._end_incl:
op = googledatastore.PropertyFilter.LESS_THAN_OR_EQUAL
else:
op = googledatastore.PropertyFilter.LESS_THAN
pb = composite_filter.filters.add().property_filter
pb.op = op
pb.property.name = self._end.name
adapter.get_entity_converter().v3_property_to_v1_value(
self._end, True, pb.value)
return filter_pb
def __getstate__(self):
raise pickle.PicklingError(
'Pickling of %r is unsupported.' % self)
def __eq__(self, other):
if self.__class__ is not other.__class__:
return NotImplemented
return (self._start == other._start and
self._end == other._end and
(self._start_incl == other._start_incl or self._start is None) and
(self._end_incl == other._end_incl or self._end is None))
class _PropertyExistsFilter(FilterPredicate):
"""A FilterPredicate that matches entities containing specific properties.
Only works as an in-memory filter. Used internally to filter out entities
that don't have all properties in a given Order.
"""
def __init__(self, names):
super(_PropertyExistsFilter, self).__init__()
self._names = frozenset(names)
def _apply(self, value_map):
for name in self._names:
if not value_map.get(name):
return False
return True
def _get_prop_names(self):
return self._names
def _prune(self, _):
raise NotImplementedError
def __getstate__(self):
raise pickle.PicklingError(
'Pickling of %r is unsupported.' % self)
class CorrelationFilter(FilterPredicate):
"""A filter that isolates correlated values and applies a sub-filter on them.
This filter assumes that every property used by the sub-filter should be
grouped before being passed to the sub-filter. The default grouping puts
each value in its own group. Consider:
e = {a: [1, 2], b: [2, 1, 3], c: 4}
A correlation filter with a sub-filter that operates on (a, b) will be tested
against the following 3 sets of values:
{a: 1, b: 2}
{a: 2, b: 1}
{b: 3}
In this case CorrelationFilter('a = 2 AND b = 2') won't match this entity but
CorrelationFilter('a = 2 AND b = 1') will. To apply an uncorrelated filter on
c, the filter must be applied in parallel to the correlation filter. For
example:
CompositeFilter(AND, [CorrelationFilter('a = 2 AND b = 1'), 'c = 3'])
If 'c = 3' was included in the correlation filter, c would be grouped as well.
This would result in the following values:
  {a: 1, b: 2, c: 4}
{a: 2, b: 1}
{b: 3}
If any set of correlated values match the sub-filter then the entity matches
the correlation filter.
"""
def __init__(self, subfilter):
"""Constructor.
Args:
subfilter: A FilterPredicate to apply to the correlated values
"""
self._subfilter = subfilter
@property
def subfilter(self):
return self._subfilter
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.subfilter)
def _apply(self, value_map):
base_map = dict((prop, []) for prop in self._get_prop_names())
value_maps = []
for prop in base_map:
grouped = self._group_values(prop, value_map[prop])
while len(value_maps) < len(grouped):
value_maps.append(base_map.copy())
for value, m in six.moves.zip(grouped, value_maps):
m[prop] = value
return self._apply_correlated(value_maps)
def _apply_correlated(self, value_maps):
"""Applies sub-filter to the correlated value maps.
The default implementation matches when any value_map in value_maps
matches the sub-filter.
Args:
value_maps: A list of correlated value_maps.
Returns:
      True if any of the value_maps matches the sub-filter.
"""
for map in value_maps:
if self._subfilter._apply(map):
return True
return False
def _group_values(self, prop, values):
"""A function that groups the given values.
Override this function to introduce custom grouping logic. The default
implementation assumes each value belongs in its own group.
Args:
      prop: The name of the property whose values are being grouped.
values: A list of opaque values.
Returns:
A list of lists of grouped values.
"""
return [[value] for value in values]
def _get_prop_names(self):
return self._subfilter._get_prop_names()
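# Illustrative sketch only (not part of the module API): a CorrelationFilter
# mirroring the scenario in the class docstring ('a = 2 AND b = 1' is tested
# against each group of correlated values). Property names and values are
# hypothetical.
def _example_correlation_filter():
  subfilter = CompositeFilter(
      CompositeFilter.AND,
      [make_filter('a', '=', 2), make_filter('b', '=', 1)])
  return CorrelationFilter(subfilter)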
class CompositeFilter(FilterPredicate):
"""An immutable filter predicate that combines other predicates.
This class proactively merges sub-filters that are combined using the same
operator. For example:
CompositeFilter(AND, [f1, f2, CompositeFilter(AND, [f3, f4]), f5, f6])
is equivalent to:
CompositeFilter(AND, [f1, f2, f3, f4, f5, f6])
Currently filters can only be combined using an AND operator.
"""
AND = 'and'
_OPERATORS = frozenset([AND])
def __init__(self, op, filters):
"""Constructor.
Args:
op: The operator to use to combine the given filters
filters: A list of one or more filters to combine
Raises:
      datastore_errors.BadArgumentError if op is not in CompositeFilter._OPERATORS
or filters is not a non-empty list containing only FilterPredicates.
"""
if not op in self._OPERATORS:
raise datastore_errors.BadArgumentError('unknown operator (%s)' % (op,))
if not filters or not isinstance(filters, (list, tuple)):
raise datastore_errors.BadArgumentError(
'filters argument should be a non-empty list (%r)' % (filters,))
super(CompositeFilter, self).__init__()
self._op = op
flattened = []
for f in filters:
if isinstance(f, CompositeFilter) and f._op == self._op:
flattened.extend(f._filters)
elif isinstance(f, FilterPredicate):
flattened.append(f)
else:
raise datastore_errors.BadArgumentError(
'filters argument must be a list of FilterPredicates, found (%r)' %
(f,))
if op == self.AND:
filters = flattened
flattened = []
ineq_map = {}
for f in filters:
if (isinstance(f, _PropertyRangeFilter) or
(isinstance(f, PropertyFilter) and f._has_inequality())):
name = f._get_prop_name()
index = ineq_map.get(name)
if index is not None:
range_filter = flattened[index]
flattened[index] = range_filter.intersect(f)
else:
if isinstance(f, PropertyFilter):
range_filter = _PropertyRangeFilter.from_property_filter(f)
else:
range_filter = f
ineq_map[name] = len(flattened)
flattened.append(range_filter)
else:
flattened.append(f)
self._filters = tuple(flattened)
@property
def op(self):
return self._op
@property
def filters(self):
return self._filters
def __repr__(self):
op = self.op
if op == self.AND:
op = 'AND'
else:
op = str(op)
return '%s(%s, %r)' % (self.__class__.__name__, op, list(self.filters))
def _get_prop_names(self):
names = set()
for f in self._filters:
names |= f._get_prop_names()
return names
def _apply(self, value_map):
if self._op == self.AND:
for f in self._filters:
if not f._apply(value_map):
return False
return True
raise NotImplementedError
def _prune(self, value_map):
if self._op == self.AND:
matches = collections.defaultdict(set)
for f in self._filters:
props = f._get_prop_names()
local_value_map = dict((k, v) for k, v in value_map.items()
if k in props)
if not f._prune(local_value_map):
return False
for (prop, values) in local_value_map.items():
matches[prop].update(values)
for prop, value_set in matches.items():
value_map[prop] = sorted(value_set)
return True
raise NotImplementedError
def _to_pbs(self):
"""Returns the internal only pb representation."""
pbs = []
for f in self._filters:
pbs.extend(f._to_pbs())
return pbs
def _to_pb_v1(self, adapter):
"""Returns a googledatastore.Filter.
Args:
adapter: A datastore_rpc.AbstractAdapter
"""
if not self._filters:
return None
if len(self._filters) == 1:
return self._filters[0]._to_pb_v1(adapter)
pb = googledatastore.Filter()
comp_pb = pb.composite_filter
if self.op == self.AND:
comp_pb.op = googledatastore.CompositeFilter.AND
else:
raise datastore_errors.BadArgumentError(
'Datastore V4 only supports CompositeFilter with AND operator.')
for f in self._filters:
comp_pb.filters.add().CopyFrom(f._to_pb_v1(adapter))
return pb
def __eq__(self, other):
if self.__class__ is other.__class__:
return super(CompositeFilter, self).__eq__(other)
if len(self._filters) == 1:
result = self._filters[0].__eq__(other)
if result is NotImplemented and hasattr(other, '__eq__'):
return other.__eq__(self._filters[0])
return result
return NotImplemented
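# Illustrative sketch only (not part of the module API): CompositeFilter
# merges AND-ed inequality filters on the same property into a single range,
# so the two filters below behave like '0 < x <= 5'. The property name and
# bounds are hypothetical.
def _example_composite_filter():
  lower = make_filter('x', '>', 0)
  upper = make_filter('x', '<=', 5)
  return CompositeFilter(CompositeFilter.AND, [lower, upper])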
class _IgnoreFilter(_SinglePropertyFilter):
"""A filter that removes all entities with the given keys."""
def __init__(self, key_value_set):
super(_IgnoreFilter, self).__init__()
self._keys = key_value_set
def _get_prop_name(self):
return datastore_types.KEY_SPECIAL_PROPERTY
def _apply_to_value(self, value):
return value not in self._keys
class _DedupingFilter(_IgnoreFilter):
"""A filter that removes duplicate keys."""
def __init__(self, key_value_set=None):
super(_DedupingFilter, self).__init__(key_value_set or set())
def _apply_to_value(self, value):
if super(_DedupingFilter, self)._apply_to_value(value):
self._keys.add(value)
return True
return False
class Order(_PropertyComponent):
"""A base class that represents a sort order on a query.
  All sub-classes must be immutable as these are often stored without creating a
  defensive copy.
  This class can be used as either the cmp or key arg in sorted() or
  list.sort(). To provide a stable ordering, a trailing ascending order on the
  key is always used.
"""
@datastore_rpc._positional(1)
def reversed(self, group_by=None):
"""Constructs an order representing the reverse of the current order.
This function takes into account the effects of orders on properties not in
the group_by clause of a query. For example, consider:
SELECT A, First(B) ... GROUP BY A ORDER BY A, B
Changing the order of B would effect which value is listed in the 'First(B)'
column which would actually change the results instead of just reversing
them.
Args:
group_by: If specified, only orders on properties in group_by will be
reversed.
Returns:
A new order representing the reverse direction.
"""
raise NotImplementedError
def _key(self, lhs_value_map):
"""Creates a key for the given value map."""
raise NotImplementedError
def _cmp(self, lhs_value_map, rhs_value_map):
"""Compares the given value maps."""
raise NotImplementedError
def _to_pb(self):
"""Internal only function to generate a filter pb."""
raise NotImplementedError
def _to_pb_v1(self, adapter):
"""Internal only function to generate a v1 filter pb.
Args:
adapter: A datastore_rpc.AbstractAdapter
"""
raise NotImplementedError
def key_for_filter(self, filter_predicate):
if filter_predicate:
return lambda x: self.key(x, filter_predicate)
return self.key
def cmp_for_filter(self, filter_predicate):
if filter_predicate:
return lambda x, y: self.cmp(x, y, filter_predicate)
return self.cmp
def key(self, entity, filter_predicate=None):
"""Constructs a "key" value for the given entity based on the current order.
This function can be used as the key argument for list.sort() and sorted().
Args:
entity: The entity_pb2.EntityProto to convert
filter_predicate: A FilterPredicate used to prune values before comparing
entities or None.
Returns:
A key value that identifies the position of the entity when sorted by
the current order.
"""
names = self._get_prop_names()
names.add(datastore_types.KEY_SPECIAL_PROPERTY)
if filter_predicate is not None:
names |= filter_predicate._get_prop_names()
value_map = _make_key_value_map(entity, names)
if filter_predicate is not None:
filter_predicate._prune(value_map)
return (self._key(value_map),
value_map[datastore_types.KEY_SPECIAL_PROPERTY])
def cmp(self, lhs, rhs, filter_predicate=None):
"""Compares the given values taking into account any filters.
This function can be used as the cmp argument for list.sort() and sorted().
    This function is slightly more efficient than Order.key when comparing two
    entities; however, it is much less efficient when sorting a list of entities.
Args:
lhs: An entity_pb2.EntityProto
rhs: An entity_pb2.EntityProto
filter_predicate: A FilterPredicate used to prune values before comparing
entities or None.
Returns:
      An integer <, = or > 0 representing the operator that goes between lhs
      and rhs to create a true statement.
"""
names = self._get_prop_names()
if filter_predicate is not None:
names |= filter_predicate._get_prop_names()
lhs_value_map = _make_key_value_map(lhs, names)
rhs_value_map = _make_key_value_map(rhs, names)
if filter_predicate is not None:
filter_predicate._prune(lhs_value_map)
filter_predicate._prune(rhs_value_map)
result = self._cmp(lhs_value_map, rhs_value_map)
if result:
return result
if not lhs.HasField('key') and not rhs.HasField('key'):
return 0
lhs_key = (lhs_value_map.get(datastore_types.KEY_SPECIAL_PROPERTY) or
datastore_types.ReferenceToKeyValue(lhs.key))
rhs_key = (rhs_value_map.get(datastore_types.KEY_SPECIAL_PROPERTY) or
datastore_types.ReferenceToKeyValue(rhs.key))
return cmp_compat.cmp(lhs_key, rhs_key)
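# Illustrative sketch only (not part of the module API): using an Order as the
# key function for sorted(). 'entities' is a hypothetical list of
# entity_pb2.EntityProto, each assumed to have a 'height' property.
def _example_sort_entities(entities):
  order = PropertyOrder('height', PropertyOrder.DESCENDING)
  return sorted(entities, key=order.key)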
@cmp_compat.total_ordering_from_cmp
class _ReverseOrder(_BaseComponent):
"""Reverses the comparison for the given object."""
def __init__(self, obj):
"""Constructor for _ReverseOrder.
Args:
obj: Any comparable and hashable object.
"""
super(_ReverseOrder, self).__init__()
self._obj = obj
def __hash__(self):
return hash(self._obj)
def __cmp__(self, other):
assert self.__class__ == other.__class__, (
'A datastore_query._ReverseOrder object can only be compared to '
'an object of the same type.')
return -cmp_compat.cmp(self._obj, other._obj)
class PropertyOrder(Order):
"""An immutable class that represents a sort order for a single property."""
ASCENDING = datastore_pb.Query.Order.ASCENDING
DESCENDING = datastore_pb.Query.Order.DESCENDING
_DIRECTIONS = frozenset([ASCENDING, DESCENDING])
def __init__(self, prop, direction=ASCENDING):
"""Constructor.
Args:
prop: the name of the prop by which to sort.
direction: the direction in which to sort the given prop.
Raises:
datastore_errors.BadArgumentError if the prop name or direction is
invalid.
"""
datastore_types.ValidateString(prop,
'prop',
datastore_errors.BadArgumentError)
if not direction in self._DIRECTIONS:
raise datastore_errors.BadArgumentError('unknown direction: %r' %
(direction,))
super(PropertyOrder, self).__init__()
self.__order = datastore_pb.Query.Order()
self.__order.property = six.ensure_binary(prop, 'utf-8')
self.__order.direction = direction
@property
def prop(self):
return self.__order.property
@property
def direction(self):
return self.__order.direction
def __repr__(self):
extra = ''
if self.direction == self.DESCENDING:
extra = ', DESCENDING'
name = repr(six.ensure_str(self.prop))[1:-1]
return '%s(<%s>%s)' % (self.__class__.__name__, name, extra)
@datastore_rpc._positional(1)
def reversed(self, group_by=None):
if group_by and self.__order.property not in group_by:
return self
if self.__order.direction == self.ASCENDING:
return PropertyOrder(
six.ensure_text(self.__order.property), self.DESCENDING)
else:
return PropertyOrder(
six.ensure_text(self.__order.property), self.ASCENDING)
def _get_prop_names(self):
return set([self.__order.property])
def _key(self, lhs_value_map):
lhs_values = lhs_value_map[self.__order.property]
if not lhs_values:
raise datastore_errors.BadArgumentError(
'Missing value for property (%s)' % self.__order.property)
if self.__order.direction == self.ASCENDING:
return min(lhs_values)
else:
return _ReverseOrder(max(lhs_values))
def _cmp(self, lhs_value_map, rhs_value_map):
lhs_values = lhs_value_map[self.__order.property]
rhs_values = rhs_value_map[self.__order.property]
if not lhs_values and not rhs_values:
return 0
if not lhs_values:
raise datastore_errors.BadArgumentError(
'LHS missing value for property (%s)' % self.__order.property)
if not rhs_values:
raise datastore_errors.BadArgumentError(
'RHS missing value for property (%s)' % self.__order.property)
if self.__order.direction == self.ASCENDING:
return cmp_compat.cmp(min(lhs_values), min(rhs_values))
else:
return cmp_compat.cmp(max(rhs_values), max(lhs_values))
@classmethod
def _from_pb(cls, order_pb):
self = cls.__new__(cls)
self.__order = order_pb
return self
def _to_pb(self):
"""Returns the internal only pb representation."""
return self.__order
def _to_pb_v1(self, adapter):
"""Returns a googledatastore.PropertyOrder representation of the order.
Args:
adapter: A datastore_rpc.AbstractAdapter.
"""
v1_order = googledatastore.PropertyOrder()
adapter.get_query_converter().v3_order_to_v1_order(self.__order, v1_order)
return v1_order
def __getstate__(self):
raise pickle.PicklingError(
'Pickling of datastore_query.PropertyOrder is unsupported.')
class CompositeOrder(Order):
"""An immutable class that represents a sequence of Orders.
This class proactively flattens sub-orders that are of type CompositeOrder.
For example:
CompositeOrder([O1, CompositeOrder([02, 03]), O4])
is equivalent to:
CompositeOrder([O1, 02, 03, O4])
"""
def __init__(self, orders):
"""Constructor.
Args:
orders: A list of Orders which are applied in order.
"""
if not isinstance(orders, (list, tuple)):
raise datastore_errors.BadArgumentError(
'orders argument should be list or tuple (%r)' % (orders,))
super(CompositeOrder, self).__init__()
flattened = []
for order in orders:
if isinstance(order, CompositeOrder):
flattened.extend(order._orders)
elif isinstance(order, Order):
flattened.append(order)
else:
raise datastore_errors.BadArgumentError(
'orders argument should only contain Order (%r)' % (order,))
self._orders = tuple(flattened)
@property
def orders(self):
return self._orders
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, list(self.orders))
@datastore_rpc._positional(1)
def reversed(self, group_by=None):
return CompositeOrder([order.reversed(group_by=group_by)
for order in self._orders])
def _get_prop_names(self):
names = set()
for order in self._orders:
names |= order._get_prop_names()
return names
def _key(self, lhs_value_map):
result = []
for order in self._orders:
result.append(order._key(lhs_value_map))
return tuple(result)
def _cmp(self, lhs_value_map, rhs_value_map):
for order in self._orders:
result = order._cmp(lhs_value_map, rhs_value_map)
if result != 0:
return result
return 0
def size(self):
"""Returns the number of sub-orders the instance contains."""
return len(self._orders)
def _to_pbs(self):
"""Returns an ordered list of internal only pb representations."""
return [order._to_pb() for order in self._orders]
def _to_pb_v1(self, adapter):
"""Returns an ordered list of googledatastore.PropertyOrder.
Args:
adapter: A datastore_rpc.AbstractAdapter
"""
return [order._to_pb_v1(adapter) for order in self._orders]
def __eq__(self, other):
if self.__class__ is other.__class__:
return super(CompositeOrder, self).__eq__(other)
if len(self._orders) == 1:
result = self._orders[0].__eq__(other)
if result is NotImplemented and hasattr(other, '__eq__'):
return other.__eq__(self._orders[0])
return result
return NotImplemented
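# Illustrative sketch only (not part of the module API): combining property
# orders and reversing the combination. The property names are hypothetical.
def _example_composite_order():
  order = CompositeOrder([PropertyOrder('height', PropertyOrder.DESCENDING),
                          PropertyOrder('name')])
  return order, order.reversed()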
class FetchOptions(datastore_rpc.Configuration):
"""An immutable class that contains all options for fetching results.
These options apply to any request that pulls results from a query.
This class reserves the right to define configuration options of any name
except those that start with 'user_'. External subclasses should only define
  functions or variables with names that start with 'user_'.
Options are set by passing keyword arguments to the constructor corresponding
to the configuration options defined below and in datastore_rpc.Configuration.
This object can be used as the default config for a datastore_rpc.Connection
but in that case some options will be ignored, see option documentation below
for details.
"""
@datastore_rpc.ConfigOption
def produce_cursors(value):
"""If a Cursor should be returned with the fetched results.
Raises:
datastore_errors.BadArgumentError if value is not a bool.
"""
if not isinstance(value, bool):
raise datastore_errors.BadArgumentError(
'produce_cursors argument should be bool (%r)' % (value,))
return value
@datastore_rpc.ConfigOption
def offset(value):
"""The number of results to skip before returning the first result.
Only applies to the first request it is used with and is ignored if present
on datastore_rpc.Connection.config.
Raises:
      datastore_errors.BadArgumentError if value is not an integer or is less
than zero.
"""
datastore_types.ValidateInteger(value,
'offset',
datastore_errors.BadArgumentError,
zero_ok=True)
return value
@datastore_rpc.ConfigOption
def batch_size(value):
"""The number of results to attempt to retrieve in a batch.
Raises:
      datastore_errors.BadArgumentError if value is not an integer or is not
greater than zero.
"""
datastore_types.ValidateInteger(value,
'batch_size',
datastore_errors.BadArgumentError)
return value
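# Illustrative sketch only (not part of the module API): typical FetchOptions
# values. The numbers are hypothetical.
def _example_fetch_options():
  return FetchOptions(offset=20, batch_size=100, produce_cursors=True)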
class QueryOptions(FetchOptions):
"""An immutable class that contains all options for running a query.
  This class contains options that control the execution process (deadline,
  batch_size, read_policy, etc.) and what part of the query results are returned
  (keys_only, projection, offset, limit, etc.). Options that control the contents
of the query results are specified on the datastore_query.Query directly.
This class reserves the right to define configuration options of any name
except those that start with 'user_'. External subclasses should only define
  functions or variables with names that start with 'user_'.
Options are set by passing keyword arguments to the constructor corresponding
to the configuration options defined below and in FetchOptions and
datastore_rpc.Configuration.
This object can be used as the default config for a datastore_rpc.Connection
but in that case some options will be ignored, see below for details.
"""
ORDER_FIRST = datastore_pb.Query.ORDER_FIRST
ANCESTOR_FIRST = datastore_pb.Query.ANCESTOR_FIRST
FILTER_FIRST = datastore_pb.Query.FILTER_FIRST
_HINTS = frozenset([ORDER_FIRST, ANCESTOR_FIRST, FILTER_FIRST])
@datastore_rpc.ConfigOption
def keys_only(value):
"""If the query should only return keys.
Raises:
datastore_errors.BadArgumentError if value is not a bool.
"""
if not isinstance(value, bool):
raise datastore_errors.BadArgumentError(
'keys_only argument should be bool (%r)' % (value,))
return value
@datastore_rpc.ConfigOption
def projection(value):
"""A list or tuple of property names to project.
If None, the entire entity is returned.
Specifying a projection:
- may change the index requirements for the given query;
- will cause a partial entity to be returned;
- will cause only entities that contain those properties to be returned;
    Partial entities only contain the property name and value for properties
    in the projection (meaning and multiple will not be set). They will also
    only contain a single value for any multi-valued property. However, if a
    multi-valued property is specified in the order, an inequality property, or
    the projected properties, the entity will be returned multiple times, once
    for each unique combination of values.
However, projection queries are significantly faster than normal queries.
Raises:
datastore_errors.BadArgumentError if value is empty or not a list or tuple
of strings.
"""
if isinstance(value, list):
value = tuple(value)
elif not isinstance(value, tuple):
raise datastore_errors.BadArgumentError(
'projection argument should be a list or tuple (%r)' % (value,))
if not value:
raise datastore_errors.BadArgumentError(
'projection argument cannot be empty')
for prop in value:
if not isinstance(prop, six.string_types + (six.binary_type,)):
raise datastore_errors.BadArgumentError(
'projection argument should contain only strings (%r)' % (prop,))
return value
@datastore_rpc.ConfigOption
def limit(value):
"""Limit on the number of results to return.
Raises:
datastore_errors.BadArgumentError if value is not an integer or is less
than zero.
"""
datastore_types.ValidateInteger(value,
'limit',
datastore_errors.BadArgumentError,
zero_ok=True)
return value
@datastore_rpc.ConfigOption
def prefetch_size(value):
"""Number of results to attempt to return on the initial request.
Raises:
datastore_errors.BadArgumentError if value is not an integer or is not
greater than zero.
"""
datastore_types.ValidateInteger(value,
'prefetch_size',
datastore_errors.BadArgumentError,
zero_ok=True)
return value
@datastore_rpc.ConfigOption
def start_cursor(value):
"""Cursor to use a start position.
Ignored if present on datastore_rpc.Connection.config.
Raises:
datastore_errors.BadArgumentError if value is not a Cursor.
"""
if not isinstance(value, Cursor):
raise datastore_errors.BadArgumentError(
'start_cursor argument should be datastore_query.Cursor (%r)' %
(value,))
return value
@datastore_rpc.ConfigOption
def end_cursor(value):
"""Cursor to use as an end position.
Ignored if present on datastore_rpc.Connection.config.
Raises:
datastore_errors.BadArgumentError if value is not a Cursor.
"""
if not isinstance(value, Cursor):
raise datastore_errors.BadArgumentError(
'end_cursor argument should be datastore_query.Cursor (%r)' %
(value,))
return value
@datastore_rpc.ConfigOption
def hint(value):
"""Hint on how the datastore should plan the query.
Raises:
datastore_errors.BadArgumentError if value is not a known hint.
"""
if value not in QueryOptions._HINTS:
raise datastore_errors.BadArgumentError('Unknown query hint (%r)' %
(value,))
return value
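# Illustrative sketch only (not part of the module API): a QueryOptions that
# combines fetch and query options. The projection properties and limits are
# hypothetical.
def _example_query_options():
  return QueryOptions(projection=('height', 'name'), limit=50, batch_size=100)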
class Cursor(_BaseComponent):
"""An immutable class that represents a relative position in a query.
  The position denoted by a Cursor is relative to a result in a query even
  if the result has been removed from the given query. It is usually used to
  position immediately after the last result returned by a batch.
A cursor should only be used on a query with an identical signature to the
one that produced it or on a query with its sort order reversed.
"""
@datastore_rpc._positional(1)
def __init__(self, urlsafe=None, _cursor_bytes=None):
"""Constructor.
    A Cursor constructed with no arguments points to the first result of any
query. If such a Cursor is used as an end_cursor no results will ever be
returned.
"""
super(Cursor, self).__init__()
if urlsafe is not None:
if _cursor_bytes is not None:
raise datastore_errors.BadArgumentError(
'Can only specify one of urlsafe and _cursor_bytes')
_cursor_bytes = self._urlsafe_to_bytes(urlsafe)
if _cursor_bytes is not None:
self.__cursor_bytes = _cursor_bytes
else:
self.__cursor_bytes = six.binary_type()
def __repr__(self):
arg = six.ensure_str(self.to_websafe_string())
if arg:
arg = '<%s>' % arg
return '%s(%s)' % (self.__class__.__name__, arg)
def reversed(self):
"""DEPRECATED. It is no longer necessary to call reversed() on cursors.
A cursor returned by a query may also be used in a query whose sort order
has been reversed. This method returns a copy of the original cursor.
"""
return Cursor(_cursor_bytes=self.__cursor_bytes)
def to_bytes(self):
"""Serialize cursor as a byte string."""
return self.__cursor_bytes
@staticmethod
def from_bytes(cursor):
"""Gets a Cursor given its byte string serialized form.
The serialized form of a cursor may change in a non-backwards compatible
way. In this case cursors must be regenerated from a new Query request.
Args:
cursor: A serialized cursor as returned by .to_bytes.
Returns:
A Cursor.
Raises:
datastore_errors.BadValueError if the cursor argument does not represent a
serialized cursor.
"""
return Cursor(_cursor_bytes=cursor)
def urlsafe(self):
"""Serialize cursor as a websafe string.
Returns:
A base64-encoded serialized cursor.
"""
return base64.urlsafe_b64encode(self.to_bytes())
to_websafe_string = urlsafe
@staticmethod
def from_websafe_string(cursor):
"""Gets a Cursor given its websafe serialized form.
The serialized form of a cursor may change in a non-backwards compatible
way. In this case cursors must be regenerated from a new Query request.
Args:
cursor: A serialized cursor as returned by .to_websafe_string.
Returns:
A Cursor.
Raises:
datastore_errors.BadValueError if the cursor argument is not a string
      type or does not represent a serialized cursor.
"""
decoded_bytes = Cursor._urlsafe_to_bytes(cursor)
return Cursor.from_bytes(decoded_bytes)
@staticmethod
def _urlsafe_to_bytes(cursor):
if not isinstance(cursor, six.string_types + (six.binary_type,)):
raise datastore_errors.BadValueError(
'cursor argument should be str or unicode (%r)' % (cursor,))
try:
decoded_bytes = base64.urlsafe_b64decode(
six.ensure_binary(cursor, 'ascii'))
except (ValueError, TypeError) as e:
raise datastore_errors.BadValueError(
'Invalid cursor %s. Details: %s' % (cursor, e))
return decoded_bytes
def advance(self, offset, query, conn):
"""Advances a Cursor by the given offset.
Args:
offset: The amount to advance the current query.
query: A Query identical to the one this cursor was created from.
conn: The datastore_rpc.Connection to use.
Returns:
A new cursor that is advanced by offset using the given query.
"""
datastore_types.ValidateInteger(offset,
'offset',
datastore_errors.BadArgumentError)
if not isinstance(query, Query):
raise datastore_errors.BadArgumentError(
'query argument should be datastore_query.Query (%r)' % (query,))
query_options = QueryOptions(
start_cursor=self, offset=offset, limit=0, produce_cursors=True)
return query.run(conn, query_options).next_batch(
Batcher.AT_LEAST_OFFSET).cursor(0)
def __setstate__(self, state):
if '_Cursor__compiled_cursor' in state:
self.__cursor_bytes = state['_Cursor__compiled_cursor'].SerializeToString()
else:
self.__dict__ = state
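# Illustrative sketch only (not part of the module API): round-tripping a
# cursor through its websafe and byte forms. 'batch' is a hypothetical Batch
# obtained from running a query with produce_cursors=True.
def _example_cursor_round_trip(batch):
  cursor = batch.cursor(0)
  websafe = cursor.to_websafe_string()
  restored = Cursor.from_websafe_string(websafe)
  return restored == Cursor.from_bytes(cursor.to_bytes())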
class _QueryKeyFilter(_BaseComponent):
"""A class that implements the key filters available on a Query."""
@datastore_rpc._positional(1)
def __init__(self, app=None, namespace=None, kind=None, ancestor=None):
"""Constructs a _QueryKeyFilter.
If app/namespace and ancestor are not defined, the app/namespace set in the
environment is used.
Args:
app: a string representing the required app id or None.
namespace: a string representing the required namespace or None.
kind: a string representing the required kind or None.
ancestor: a entity_pb2.Reference representing the required ancestor or
None.
Raises:
      datastore_errors.BadArgumentError if app and ancestor.app do not match or
an unexpected type is passed in for any argument.
"""
if kind is not None:
datastore_types.ValidateString(
kind, 'kind', datastore_errors.BadArgumentError)
if ancestor is not None:
if not isinstance(ancestor, entity_pb2.Reference):
raise datastore_errors.BadArgumentError(
'ancestor argument should be entity_pb2.Reference (%r)' %
(ancestor,))
ancestor_app = six.ensure_binary(ancestor.app)
if app is None:
app = ancestor_app
elif six.ensure_binary(app) != ancestor_app:
raise datastore_errors.BadArgumentError(
'ancestor argument should match app ("%r" != "%r")' %
(ancestor.app, app))
ancestor_namespace = six.ensure_binary(ancestor.name_space)
if namespace is None:
namespace = ancestor_namespace
elif six.ensure_binary(namespace) != ancestor_namespace:
raise datastore_errors.BadArgumentError(
'ancestor argument should match namespace ("%r" != "%r")' %
(six.ensure_binary(namespace), ancestor_namespace))
pb = entity_pb2.Reference()
pb.CopyFrom(ancestor)
ancestor = pb
self.__ancestor = ancestor
self.__path = list(ancestor.path.element)
else:
self.__ancestor = None
self.__path = None
super(_QueryKeyFilter, self).__init__()
self.__app = six.ensure_text(datastore_types.ResolveAppId(app), 'utf-8')
self.__namespace = (
six.ensure_text(datastore_types.ResolveNamespace(namespace), 'utf-8'))
self.__kind = kind
@property
def app(self):
return self.__app
@property
def namespace(self):
return self.__namespace
@property
def kind(self):
return self.__kind
@property
def ancestor(self):
return self.__ancestor
def __call__(self, entity_or_reference):
"""Apply the filter.
Accepts either an entity or a reference to avoid the need to extract keys
from entities when we have a list of entities (which is a common case).
Args:
entity_or_reference: Either an entity_pb2.EntityProto or
entity_pb2.Reference.
"""
if isinstance(entity_or_reference, entity_pb2.Reference):
key = entity_or_reference
elif isinstance(entity_or_reference, entity_pb2.EntityProto):
key = entity_or_reference.key
else:
raise datastore_errors.BadArgumentError(
'entity_or_reference argument must be an entity_pb2.EntityProto ' +
six.ensure_str('or entity_pb2.Reference (%r)' %
(entity_or_reference), 'utf-8'))
return (six.ensure_text(key.app, 'utf-8') == self.__app and
six.ensure_text(key.name_space, 'utf-8') == self.__namespace and
(not self.__kind or key.path.element[-1].type == self.__kind) and
(not self.__path or
key.path.element[0:len(self.__path)] == self.__path))
def _to_pb(self):
"""Returns an internal pb representation."""
pb = datastore_pb.Query()
pb.app = self.__app
datastore_types.SetNamespace(pb, self.__namespace)
if self.__kind is not None:
pb.kind = self.__kind
if self.__ancestor:
ancestor = pb.ancestor
ancestor.CopyFrom(self.__ancestor)
return pb
def _to_pb_v1(self, adapter):
"""Returns a v1 internal proto representation of the query key filter.
Args:
adapter: A datastore_rpc.AbstractAdapter.
Returns:
A tuple (googledatastore.RunQueryRequest, googledatastore.Filter).
The second tuple value is a Filter representing the ancestor portion of the
    query. If there is no ancestor constraint, this value will be None.
"""
pb = googledatastore.RunQueryRequest()
partition_id = pb.partition_id
partition_id.project_id = (
adapter.get_entity_converter().app_to_project_id(self.__app))
if self.__namespace:
partition_id.namespace_id = self.__namespace
if self.__kind is not None:
pb.query.kind.add().name = self.__kind
ancestor_filter = None
if self.__ancestor:
ancestor_filter = googledatastore.Filter()
ancestor_prop_filter = ancestor_filter.property_filter
ancestor_prop_filter.op = (
googledatastore.PropertyFilter.HAS_ANCESTOR)
prop_pb = ancestor_prop_filter.property
prop_pb.name = datastore_types.KEY_SPECIAL_PROPERTY
adapter.get_entity_converter().v3_to_v1_key(
self.ancestor,
ancestor_prop_filter.value.key_value)
return pb, ancestor_filter
class _BaseQuery(_BaseComponent):
"""A base class for query implementations."""
def run(self, conn, query_options=None):
"""Runs the query using provided datastore_rpc.Connection.
Args:
conn: The datastore_rpc.Connection to use
query_options: Optional query options to use
Returns:
A Batcher that implicitly fetches query results asynchronously.
Raises:
datastore_errors.BadArgumentError if any of the arguments are invalid.
"""
return Batcher(query_options, self.run_async(conn, query_options))
def run_async(self, conn, query_options=None):
"""Runs the query using the provided datastore_rpc.Connection.
Args:
conn: the datastore_rpc.Connection on which to run the query.
query_options: Optional QueryOptions with which to run the query.
Returns:
An async object that can be used to grab the first Batch. Additional
batches can be retrieved by calling Batch.next_batch/next_batch_async.
Raises:
datastore_errors.BadArgumentError if any of the arguments are invalid.
"""
raise NotImplementedError
def __getstate__(self):
raise pickle.PicklingError(
'Pickling of %r is unsupported.' % self)
class Query(_BaseQuery):
"""An immutable class that represents a query signature.
A query signature consists of a source of entities (specified as app,
namespace and optionally kind and ancestor) as well as a FilterPredicate,
grouping and a desired ordering.
"""
@datastore_rpc._positional(1)
def __init__(self, app=None, namespace=None, kind=None, ancestor=None,
filter_predicate=None, group_by=None, order=None,
read_time_us=None):
"""Constructor.
Args:
app: Optional app to query, derived from the environment if not specified.
namespace: Optional namespace to query, derived from the environment if
not specified.
kind: Optional kind to query.
ancestor: Optional ancestor to query, an entity_pb2.Reference.
filter_predicate: Optional FilterPredicate by which to restrict the query.
group_by: Optional list of properties to group the results by.
order: Optional Order in which to return results.
read_time_us: Optional timestamp to read the storage from. Internal use
only.
Raises:
datastore_errors.BadArgumentError if any argument is invalid.
"""
super(Query, self).__init__()
if filter_predicate is not None and not isinstance(filter_predicate,
FilterPredicate):
raise datastore_errors.BadArgumentError(
'filter_predicate should be datastore_query.FilterPredicate (%r)' %
(filter_predicate,))
if isinstance(order, CompositeOrder):
if order.size() == 0:
order = None
elif isinstance(order, Order):
order = CompositeOrder([order])
elif order is not None:
raise datastore_errors.BadArgumentError(
'order should be Order (%r)' % (order,))
if group_by is not None:
if isinstance(group_by, list):
group_by = tuple(group_by)
elif not isinstance(group_by, tuple):
raise datastore_errors.BadArgumentError(
'group_by argument should be a list or tuple (%r)' % (group_by,))
if not group_by:
raise datastore_errors.BadArgumentError(
'group_by argument cannot be empty')
for prop in group_by:
if not isinstance(prop, six.string_types + (six.binary_type,)):
raise datastore_errors.BadArgumentError(
'group_by argument should contain only strings (%r)' % (prop,))
self._key_filter = _QueryKeyFilter(app=app, namespace=namespace, kind=kind,
ancestor=ancestor)
self._order = order
self._filter_predicate = filter_predicate
self._group_by = group_by
self._read_time_us = read_time_us
@property
def app(self):
return self._key_filter.app
@property
def namespace(self):
return self._key_filter.namespace
@property
def kind(self):
return self._key_filter.kind
@property
def ancestor(self):
return self._key_filter.ancestor
@property
def filter_predicate(self):
return self._filter_predicate
@property
def order(self):
return self._order
@property
def group_by(self):
return self._group_by
@property
def read_time_us(self):
return self._read_time_us
def __repr__(self):
args = []
args.append('app=%r' % six.ensure_str(self.app))
ns = self.namespace
if ns:
args.append('namespace=%r' % six.ensure_str(ns))
kind = self.kind
if kind is not None:
args.append('kind=%r' % six.ensure_str(kind))
ancestor = self.ancestor
if ancestor is not None:
websafe = base64.urlsafe_b64encode(ancestor.SerializeToString())
args.append('ancestor=<%s>' % six.ensure_str(websafe))
filter_predicate = self.filter_predicate
if filter_predicate is not None:
args.append('filter_predicate=%r' % filter_predicate)
order = self.order
if order is not None:
args.append('order=%r' % order)
group_by = self.group_by
if group_by is not None:
args.append('group_by=%r' % (tuple(six.ensure_str(x) for x in group_by),))
read_time_us = self.read_time_us
if read_time_us is not None:
args.append('read_time_us=%r' % (read_time_us,))
return '%s(%s)' % (self.__class__.__name__, ', '.join(args))
def run_async(self, conn, query_options=None):
if not isinstance(conn, datastore_rpc.BaseConnection):
raise datastore_errors.BadArgumentError(
'conn should be a datastore_rpc.BaseConnection (%r)' % (conn,))
if not QueryOptions.is_configuration(query_options):
query_options = QueryOptions(config=query_options)
start_cursor = query_options.start_cursor
if not start_cursor and query_options.produce_cursors:
start_cursor = Cursor()
if conn._api_version == datastore_rpc._CLOUD_DATASTORE_V1:
req = self._to_pb_v1(conn, query_options)
else:
req = self._to_pb(conn, query_options)
return Batch.create_async(self, query_options, conn, req,
start_cursor=start_cursor)
@classmethod
def _from_pb(cls, query_pb):
kind = query_pb.HasField('kind') and query_pb.kind or None
ancestor = query_pb.HasField('ancestor') and query_pb.ancestor or None
filter_predicate = None
if query_pb.filter:
filter_predicate = CompositeFilter(
CompositeFilter.AND,
[PropertyFilter._from_pb(filter_pb) for filter_pb in query_pb.filter])
order = None
if query_pb.order:
order = CompositeOrder(
[PropertyOrder._from_pb(order_pb) for order_pb in query_pb.order])
group_by = None
if query_pb.group_by_property_name:
group_by = tuple(
six.ensure_text(name) for name in query_pb.group_by_property_name)
read_time_us = None
if query_pb.HasField('read_time_us'):
read_time_us = query_pb.read_time_us
return Query(
app=query_pb.app,
namespace=query_pb.name_space,
kind=kind,
ancestor=ancestor,
filter_predicate=filter_predicate,
order=order,
group_by=group_by,
read_time_us=read_time_us)
def _to_pb_v1(self, conn, query_options):
"""Returns a googledatastore.RunQueryRequest."""
v1_req, v1_ancestor_filter = self._key_filter._to_pb_v1(conn.adapter)
v1_query = v1_req.query
if self.filter_predicate:
filter_predicate_pb = self._filter_predicate._to_pb_v1(conn.adapter)
if self.filter_predicate and v1_ancestor_filter:
comp_filter_pb = v1_query.filter.composite_filter
comp_filter_pb.op = googledatastore.CompositeFilter.AND
comp_filter_pb.filters.add().CopyFrom(filter_predicate_pb)
comp_filter_pb.filters.add().CopyFrom(v1_ancestor_filter)
elif self.filter_predicate:
v1_query.filter.CopyFrom(filter_predicate_pb)
elif v1_ancestor_filter:
v1_query.filter.CopyFrom(v1_ancestor_filter)
if self._order:
for order in self._order._to_pb_v1(conn.adapter):
v1_query.order.add().CopyFrom(order)
if QueryOptions.keys_only(query_options, conn.config):
prop_ref_pb = v1_query.projection.add().property
prop_ref_pb.name = datastore_pbs.PROPERTY_NAME_KEY
projection = QueryOptions.projection(query_options, conn.config)
self._validate_projection_and_group_by(projection, self._group_by)
if projection:
for prop in projection:
prop_ref_pb = v1_query.projection.add().property
prop_ref_pb.name = prop
if self._group_by:
for group_by in self._group_by:
v1_query.distinct_on.add().name = group_by
limit = QueryOptions.limit(query_options, conn.config)
if limit is not None:
v1_query.limit.value = limit
count = QueryOptions.batch_size(query_options, conn.config)
if count is None:
count = QueryOptions.prefetch_size(query_options, conn.config)
if count is not None:
      # The Cloud Datastore v1 query proto has no equivalent of the v3 batch
      # count, so the computed count is intentionally left unused here.
      pass
if query_options.offset:
v1_query.offset = query_options.offset
if query_options.start_cursor is not None:
v1_query.start_cursor = query_options.start_cursor.to_bytes()
if query_options.end_cursor is not None:
v1_query.end_cursor = query_options.end_cursor.to_bytes()
conn._set_request_read_policy(v1_req, query_options)
conn._set_request_transaction(v1_req)
return v1_req
def _to_pb(self, conn, query_options):
"""Returns the internal only pb representation."""
pb = self._key_filter._to_pb()
if self._filter_predicate:
for f in self._filter_predicate._to_pbs():
pb.filter.add().CopyFrom(f)
if self._order:
for order in self._order._to_pbs():
pb.order.add().CopyFrom(order)
if QueryOptions.keys_only(query_options, conn.config):
pb.keys_only = True
projection = QueryOptions.projection(query_options, conn.config)
self._validate_projection_and_group_by(projection, self._group_by)
if projection:
pb.property_name.extend(projection)
if self._group_by:
pb.group_by_property_name.extend(self._group_by)
if QueryOptions.produce_cursors(query_options, conn.config):
pb.compile = True
limit = QueryOptions.limit(query_options, conn.config)
if limit is not None:
pb.limit = limit
count = QueryOptions.prefetch_size(query_options, conn.config)
if count is None:
count = QueryOptions.batch_size(query_options, conn.config)
if count is not None:
pb.count = count
if query_options.offset:
pb.offset = query_options.offset
if query_options.start_cursor is not None:
try:
pb.compiled_cursor.ParseFromString(
query_options.start_cursor.to_bytes())
except message.DecodeError:
raise datastore_errors.BadValueError('invalid cursor')
if query_options.end_cursor is not None:
try:
pb.end_compiled_cursor.ParseFromString(
query_options.end_cursor.to_bytes())
except message.DecodeError:
raise datastore_errors.BadValueError('invalid cursor')
if ((query_options.hint == QueryOptions.ORDER_FIRST and len(pb.order)) or
(query_options.hint == QueryOptions.ANCESTOR_FIRST and
pb.HasField('ancestor')) or
(query_options.hint == QueryOptions.FILTER_FIRST and pb.filter)):
pb.hint = query_options.hint
if self.read_time_us is not None:
pb.read_time_us = self.read_time_us
conn._set_request_read_policy(pb, query_options)
conn._set_request_transaction(pb)
return pb
def _validate_projection_and_group_by(self, projection, group_by):
"""Validates that a query's projection and group by match.
Args:
projection: A set of string property names in the projection.
group_by: A set of string property names in the group by.
Raises:
datastore_errors.BadRequestError: if the projection and group
by sets are not equal.
"""
if projection:
if group_by:
extra = set(projection) - set(group_by)
if extra:
raise datastore_errors.BadRequestError(
'projection includes properties not in the group_by argument: %s'
% extra)
elif group_by:
raise datastore_errors.BadRequestError(
'cannot specify group_by without a projection')
def apply_query(query, entities, _key=None):
"""Performs the given query on a set of in-memory results.
This function can perform queries impossible in the datastore (e.g. a query
with multiple inequality filters on different properties) because all
operations are done in memory. For queries that can also be executed on the
datastore, the results produced by this function may not use the same
implicit ordering as the datastore. To ensure compatibility, explicit
ordering must be used (e.g. 'ORDER BY ineq_prop, ..., __key__').
Order by __key__ should always be used when a consistent result is desired
(unless there is a sort order on another globally unique property).
Args:
query: a datastore_query.Query to apply
entities: a list of results, of arbitrary type, on which to apply the query.
_key: a function that takes an element of the result array as an argument
and must return an entity_pb2.EntityProto. If not specified, the
identity function is used (and entities must be a list of
entity_pb2.EntityProto).
Returns:
A subset of entities, filtered and ordered according to the query.
"""
if not isinstance(query, Query):
raise datastore_errors.BadArgumentError(
'query argument must be a datastore_query.Query (%r)' % (query,))
if not isinstance(entities, list):
raise datastore_errors.BadArgumentError(
'entities argument must be a list (%r)' % (entities,))
key = _key or (lambda x: x)
filtered_results = [r for r in entities if query._key_filter(key(r))]
if not query._order:
if query._filter_predicate:
return [r for r in filtered_results if query._filter_predicate(key(r))]
return filtered_results
names = query._order._get_prop_names()
if query._filter_predicate:
names |= query._filter_predicate._get_prop_names()
exists_filter = _PropertyExistsFilter(names)
value_maps = []
for result in filtered_results:
value_map = _make_key_value_map(key(result), names)
if exists_filter._apply(value_map) and (
not query._filter_predicate or
query._filter_predicate._prune(value_map)):
value_map['__result__'] = result
value_maps.append(value_map)
value_maps.sort(key=functools.cmp_to_key(query._order._cmp))
return [value_map['__result__'] for value_map in value_maps]
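# Illustrative sketch (editor addition, not part of the original module):
# post-filtering entities that are already in memory with apply_query().
# `load_person_protos` is a hypothetical callable returning a list of
# entity_pb2.EntityProto objects that have an 'age' property.
def _example_apply_query_in_memory(load_person_protos):
  adults_query = Query(kind='Person',
                       filter_predicate=make_filter('age', '>=', 18))
  return apply_query(adults_query, load_person_protos())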
class _AugmentedQuery(_BaseQuery):
"""A query that combines a datastore query with in-memory filters/results."""
@datastore_rpc._positional(2)
def __init__(self, query, in_memory_results=None, in_memory_filter=None,
max_filtered_count=None):
"""Constructor for _AugmentedQuery.
Do not call directly. Use the utility functions instead (e.g.
datastore_query.inject_results)
Args:
query: A datastore_query.Query object to augment.
in_memory_results: a list of pre-sorted and filtered results to add to the
stream of datastore results, or None.
in_memory_filter: a set of in-memory filters to apply to the datastore
results, or None.
max_filtered_count: the maximum number of datastore entities that will be
filtered out by in_memory_filter, if known.
"""
if not isinstance(query, Query):
raise datastore_errors.BadArgumentError(
'query argument should be datastore_query.Query (%r)' % (query,))
if (in_memory_filter is not None and
not isinstance(in_memory_filter, FilterPredicate)):
raise datastore_errors.BadArgumentError(
'in_memory_filter argument should be ' + six.ensure_str(
'datastore_query.FilterPredicate (%r)' %
(in_memory_filter,), 'utf-8'))
if (in_memory_results is not None and
not isinstance(in_memory_results, list)):
raise datastore_errors.BadArgumentError(
'in_memory_results argument should be a list of' +
six.ensure_str('datastore_pb.EntityProto (%r)' %
(in_memory_results,), 'utf-8'))
datastore_types.ValidateInteger(max_filtered_count,
'max_filtered_count',
empty_ok=True,
zero_ok=True)
self._query = query
self._max_filtered_count = max_filtered_count
self._in_memory_filter = in_memory_filter
self._in_memory_results = in_memory_results
@property
def app(self):
return self._query._key_filter.app
@property
def namespace(self):
return self._query._key_filter.namespace
@property
def kind(self):
return self._query._key_filter.kind
@property
def ancestor(self):
return self._query._key_filter.ancestor
@property
def filter_predicate(self):
return self._query._filter_predicate
@property
def order(self):
return self._query._order
@property
def group_by(self):
return self._query._group_by
def run_async(self, conn, query_options=None):
if not isinstance(conn, datastore_rpc.BaseConnection):
raise datastore_errors.BadArgumentError(
'conn should be a datastore_rpc.BaseConnection (%r)' % (conn,))
if not QueryOptions.is_configuration(query_options):
query_options = QueryOptions(config=query_options)
if self._query._order:
# In-memory ordering needs full entities, so keys-only fetching is disabled.
changes = {'keys_only': False}
else:
changes = {}
if self._in_memory_filter or self._in_memory_results:
in_memory_offset = query_options.offset
in_memory_limit = query_options.limit
if in_memory_limit is not None:
if self._in_memory_filter is None:
changes['limit'] = in_memory_limit
elif self._max_filtered_count is not None:
changes['limit'] = in_memory_limit + self._max_filtered_count
else:
changes['limit'] = None
if in_memory_offset:
changes['offset'] = None
if changes.get('limit', None) is not None:
changes['limit'] += in_memory_offset
else:
in_memory_offset = None
else:
in_memory_offset = None
in_memory_limit = None
modified_query_options = QueryOptions(config=query_options, **changes)
if conn._api_version == datastore_rpc._CLOUD_DATASTORE_V1:
req = self._query._to_pb_v1(conn, modified_query_options)
else:
req = self._query._to_pb(conn, modified_query_options)
start_cursor = query_options.start_cursor
if not start_cursor and query_options.produce_cursors:
start_cursor = Cursor()
return _AugmentedBatch.create_async(self, modified_query_options, conn, req,
in_memory_offset=in_memory_offset,
in_memory_limit=in_memory_limit,
start_cursor=start_cursor)
@datastore_rpc._positional(1)
def inject_results(query, updated_entities=None, deleted_keys=None):
"""Creates a query object that will inject changes into results.
Args:
query: The datastore_query.Query to augment
updated_entities: A list of entity_pb2.EntityProto's that have been updated
and should take priority over any values returned by query.
deleted_keys: A list of entity_pb2.Reference's for entities that have been
deleted and should be removed from query results.
Returns:
A datastore_query._AugmentedQuery if in-memory filtering is required,
the original query otherwise.
"""
if not isinstance(query, Query):
raise datastore_errors.BadArgumentError(
'query argument should be datastore_query.Query (%r)' % (query,))
overridden_keys = set()
if deleted_keys is not None:
if not isinstance(deleted_keys, list):
raise datastore_errors.BadArgumentError(
'deleted_keys argument must be a list (%r)' % (deleted_keys,))
deleted_keys = list(six.moves.filter(query._key_filter, deleted_keys))
for key in deleted_keys:
overridden_keys.add(datastore_types.ReferenceToKeyValue(key))
if updated_entities is not None:
if not isinstance(updated_entities, list):
raise datastore_errors.BadArgumentError(
'updated_entities argument must be a list (%r)' % (updated_entities,))
updated_entities = list(
six.moves.filter(query._key_filter, updated_entities))
for entity in updated_entities:
overridden_keys.add(datastore_types.ReferenceToKeyValue(entity.key))
updated_entities = apply_query(query, updated_entities)
else:
updated_entities = []
if not overridden_keys:
return query
return _AugmentedQuery(query,
in_memory_filter=_IgnoreFilter(overridden_keys),
in_memory_results=updated_entities,
max_filtered_count=len(overridden_keys))
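# Illustrative sketch (editor addition): layering locally modified entities on
# top of a datastore query with inject_results(). `updated` (a list of
# entity_pb2.EntityProto) and `deleted` (a list of entity_pb2.Reference keys)
# are hypothetical and would typically come from an uncommitted write set.
def _example_inject_results(query, updated, deleted):
  # Returns `query` unchanged when nothing is overridden, otherwise an
  # _AugmentedQuery that drops stale rows and splices in `updated`.
  return inject_results(query, updated_entities=updated, deleted_keys=deleted)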
class _BatchShared(object):
"""Data shared among the batches of a query."""
def __init__(self, query, query_options, conn,
augmented_query=None, initial_offset=None):
self.__query = query
self.__query_options = query_options
self.__conn = conn
self.__augmented_query = augmented_query
self.__was_first_result_processed = False
if initial_offset is None:
initial_offset = query_options.offset or 0
self.__expected_offset = initial_offset
self.__remaining_limit = query_options.limit
@property
def query(self):
return self.__query
@property
def query_options(self):
return self.__query_options
@property
def conn(self):
return self.__conn
@property
def augmented_query(self):
return self.__augmented_query
@property
def keys_only(self):
return self.__keys_only
@property
def compiled_query(self):
return self.__compiled_query
@property
def expected_offset(self):
return self.__expected_offset
@property
def remaining_limit(self):
return self.__remaining_limit
@property
def index_list(self):
"""Returns the list of indexes used by the query.
Possibly None when the adapter does not implement pb_to_index.
"""
return self.__index_list
def process_batch(self, batch):
if self.conn._api_version == datastore_rpc._CLOUD_DATASTORE_V1:
skipped_results = batch.skipped_results
num_results = len(batch.entity_results)
else:
skipped_results = batch.skipped_results
num_results = len(batch.result)
self.__expected_offset -= skipped_results
if self.__remaining_limit is not None:
self.__remaining_limit -= num_results
if not self.__was_first_result_processed:
self.__was_first_result_processed = True
if self.conn._api_version == datastore_rpc._CLOUD_DATASTORE_V1:
result_type = batch.entity_result_type
self.__keys_only = result_type == googledatastore.EntityResult.KEY_ONLY
self.__compiled_query = None
self.__index_list = None
else:
self.__keys_only = batch.keys_only
if batch.HasField('compiled_query'):
self.__compiled_query = batch.compiled_query
else:
self.__compiled_query = None
try:
self.__index_list = [
self.__conn.adapter.pb_to_index(index_pb)
for index_pb in batch.index
]
except NotImplementedError:
self.__index_list = None
class Batch(object):
"""A batch of results returned by a query.
This class contains a batch of results returned from the datastore and
relevant metadata. This metadata includes:
query: The query that produced this batch
query_options: The QueryOptions used to run the query. This does not
contain any options passed to the .next_batch() call that created the
current batch.
start_cursor, end_cursor: These are the cursors that can be used
with a query to re-fetch this batch. They can also be used to
find all entities before or after the given batch (by using start_cursor as
an end cursor or vice versa). start_cursor can also be advanced to
point to a position within the batch using Cursor.advance().
skipped_results: the number of results skipped because of the offset
given to the request that generated it. This can be set either on
the original Query.run() request or in subsequent .next_batch() calls.
more_results: If this is true there are more results that can be retrieved
either by .next_batch() or Batcher.next().
This class is also able to fetch the next batch of the query using
.next_batch(). As batches of results must be fetched serially, .next_batch()
can only be called once. Additional calls to .next_batch() will return None.
When there are no more batches .next_batch() will return None as well. Note
that batches returned by iterating over Batcher will always return None for
.next_batch() as the Batcher handles fetching the next batch automatically.
A Batch typically represents the result of a single RPC request. The datastore
operates on a "best effort" basis so the batch returned by .next_batch()
or Query.run_async().get_result() may not have satisfied the requested offset
or number of results (specified through FetchOptions.offset and
FetchOptions.batch_size respectively). To satisfy these restrictions
additional batches may be needed (with FetchOptions that specify the remaining
offset or results needed). The Batcher class hides these limitations.
"""
__skipped_cursor = None
__end_cursor = None
@classmethod
@datastore_rpc._positional(5)
def create_async(cls, query, query_options, conn, req,
start_cursor):
batch_shared = _BatchShared(query, query_options, conn)
batch0 = cls(batch_shared, start_cursor=start_cursor)
return batch0._make_query_rpc_call(query_options, req)
@datastore_rpc._positional(2)
def __init__(self, batch_shared, start_cursor=Cursor()):
"""Constructor.
This class is constructed in stages (one when an RPC is sent and another
when an RPC is completed) and should not be constructed directly.
Use Query.run_async().get_result() to create a Batch or Query.run()
to use a batcher.
This constructor does not perform verification.
Args:
batch_shared: Data shared between batches for a single query run.
start_cursor: Optional cursor pointing before this batch.
"""
self._batch_shared = batch_shared
self.__start_cursor = start_cursor
@property
def query_options(self):
"""The QueryOptions used to retrieve the first batch."""
return self._batch_shared.query_options
@property
def query(self):
"""The query the current batch came from."""
return self._batch_shared.query
@property
def results(self):
"""A list of entities in this batch."""
return self.__results
@property
def keys_only(self):
"""Whether the entities in this batch only contain keys."""
return self._batch_shared.keys_only
@property
def index_list(self):
"""Returns the list of indexes used to peform this batch's query.
Possibly None when the adapter does not implement pb_to_index.
"""
return self._batch_shared.index_list
@property
def start_cursor(self):
"""A cursor that points to the position just before the current batch."""
return self.__start_cursor
@property
def end_cursor(self):
"""A cursor that points to the position just after the current batch."""
return self.__end_cursor
@property
def skipped_results(self):
"""The number of results skipped because of an offset in the request.
An offset is satisfied before any results are returned. The start_cursor
points to the position in the query before the skipped results.
"""
return self._skipped_results
@property
def more_results(self):
"""Whether more results can be retrieved from the query."""
return self.__more_results
def next_batch(self, fetch_options=None):
"""Synchronously get the next batch or None if there are no more batches.
Args:
fetch_options: Optional fetch options to use when fetching the next batch.
Merged with both the fetch options on the original call and the
connection.
Returns:
A new Batch of results or None if either the next batch has already been
fetched or there are no more results.
"""
async_ = self.next_batch_async(fetch_options)
if async_ is None:
return None
return async_.get_result()
def _compiled_query(self):
return self._batch_shared.compiled_query
def cursor(self, index):
"""Gets the cursor that points just after the result at index - 1.
The index is relative to first result in .results. Since start_cursor
points to the position before the first skipped result, the range of
indexes this function supports is limited to
[-skipped_results, len(results)].
For example, using start_cursor=batch.cursor(i) and
end_cursor=batch.cursor(j) will return the results found in
batch.results[i:j]. Note that any result added in the range (i-1, j]
will appear in the new query's results.
Warning: Any index in the range (-skipped_results, 0) may cause
continuation to miss or duplicate results if outside a transaction.
Args:
index: An int, the index relative to the first result before which the
cursor should point.
Returns:
A Cursor that points to a position just after the result index - 1,
which if used as a start_cursor will cause the first result to be
batch.results[index].
"""
if not isinstance(index, six.integer_types):
raise datastore_errors.BadArgumentError(
'index argument should be an integer (%r)' % (index,))
if not -self._skipped_results <= index <= len(self.__results):
raise datastore_errors.BadArgumentError(
'index argument must be in the inclusive range [%d, %d]' %
(-self._skipped_results, len(self.__results)))
if index == -self._skipped_results:
return self.__start_cursor
elif (index == 0 and
self.__skipped_cursor):
return self.__skipped_cursor
elif index > 0 and self.__result_cursors:
return self.__result_cursors[index - 1]
elif index == len(self.__results):
return self.__end_cursor
else:
return self.__start_cursor.advance(index + self._skipped_results,
self._batch_shared.query,
self._batch_shared.conn)
def next_batch_async(self, fetch_options=None):
"""Asynchronously get the next batch or None if there are no more batches.
Args:
fetch_options: Optional fetch options to use when fetching the next batch.
Merged with both the fetch options on the original call and the
connection.
Returns:
An async object that can be used to get the next Batch or None if either
the next batch has already been fetched or there are no more results.
"""
if not self.__datastore_cursor:
return None
fetch_options, next_batch = self._make_next_batch(fetch_options)
if (fetch_options is not None and
not FetchOptions.is_configuration(fetch_options)):
raise datastore_errors.BadArgumentError('Invalid fetch options.')
config = self._batch_shared.query_options.merge(fetch_options)
conn = next_batch._batch_shared.conn
requested_offset = 0
if fetch_options is not None and fetch_options.offset is not None:
requested_offset = fetch_options.offset
if conn._api_version == datastore_rpc._CLOUD_DATASTORE_V1:
if self._batch_shared.expected_offset != requested_offset:
raise datastore_errors.BadArgumentError(
'Cannot request the next batch with a different offset than '
'expected. Expected: %s, Got: %s.'
% (self._batch_shared.expected_offset, requested_offset))
limit = self._batch_shared.remaining_limit
next_options = QueryOptions(offset=self._batch_shared.expected_offset,
limit=limit,
start_cursor=self.__datastore_cursor)
config = config.merge(next_options)
result = next_batch._make_query_rpc_call(
config,
self._batch_shared.query._to_pb_v1(conn, config))
else:
result = next_batch._make_next_rpc_call(config,
self._to_pb(fetch_options))
self.__datastore_cursor = None
return result
def _to_pb(self, fetch_options=None):
req = datastore_pb.NextRequest()
if FetchOptions.produce_cursors(fetch_options,
self._batch_shared.query_options,
self._batch_shared.conn.config):
req.compile = True
count = FetchOptions.batch_size(fetch_options,
self._batch_shared.query_options,
self._batch_shared.conn.config)
if count is not None:
req.count = count
if fetch_options is not None and fetch_options.offset:
req.offset = fetch_options.offset
req.cursor.CopyFrom(self.__datastore_cursor)
return req
def _extend(self, next_batch):
"""Combines the current batch with the next one. Called by batcher."""
self.__datastore_cursor = next_batch.__datastore_cursor
next_batch.__datastore_cursor = None
self.__more_results = next_batch.__more_results
if not self.__results:
self.__skipped_cursor = next_batch.__skipped_cursor
self.__results.extend(next_batch.__results)
self.__result_cursors.extend(next_batch.__result_cursors)
self.__end_cursor = next_batch.__end_cursor
self._skipped_results += next_batch._skipped_results
def _make_query_rpc_call(self, config, req):
"""Makes a RunQuery call that will modify the instance.
Args:
config: The datastore_rpc.Configuration to use for the call.
req: The request to send with the call.
Returns:
A UserRPC object that can be used to fetch the result of the RPC.
"""
_api_version = self._batch_shared.conn._api_version
if _api_version == datastore_rpc._CLOUD_DATASTORE_V1:
return self._batch_shared.conn._make_rpc_call(
config, 'RunQuery', req, googledatastore.RunQueryResponse(),
self.__v1_run_query_response_hook)
return self._batch_shared.conn._make_rpc_call(config, 'RunQuery', req,
datastore_pb.QueryResult(),
self.__query_result_hook)
def _make_next_rpc_call(self, config, req):
"""Makes a Next call that will modify the instance.
Args:
config: The datastore_rpc.Configuration to use for the call.
req: The request to send with the call.
Returns:
A UserRPC object that can be used to fetch the result of the RPC.
"""
return self._batch_shared.conn._make_rpc_call(config, 'Next', req,
datastore_pb.QueryResult(),
self.__query_result_hook)
_need_index_header = 'The suggested index for this query is:'
def __v1_run_query_response_hook(self, rpc):
try:
self._batch_shared.conn.check_rpc_success(rpc)
except datastore_errors.NeedIndexError:
raise
batch = rpc.response.batch
self._batch_shared.process_batch(batch)
if batch.skipped_cursor:
self.__skipped_cursor = Cursor(_cursor_bytes=batch.skipped_cursor)
self.__result_cursors = [Cursor(_cursor_bytes=result.cursor)
for result in batch.entity_results
if result.cursor]
if batch.end_cursor:
self.__end_cursor = Cursor(_cursor_bytes=batch.end_cursor)
self._skipped_results = batch.skipped_results
if batch.more_results == googledatastore.QueryResultBatch.NOT_FINISHED:
self.__more_results = True
self.__datastore_cursor = self.__end_cursor or self.__skipped_cursor
if self.__datastore_cursor == self.__start_cursor:
raise datastore_errors.Timeout(
'The query was not able to make progress.')
else:
self._end()
self.__results = self._process_v1_results(batch.entity_results)
return self
def __query_result_hook(self, rpc):
"""Internal method used as get_result_hook for RunQuery/Next operation."""
try:
self._batch_shared.conn.check_rpc_success(rpc)
except datastore_errors.NeedIndexError as exc:
if isinstance(rpc.request, datastore_pb.Query):
_, kind, ancestor, props = datastore_index.CompositeIndexForQuery(
rpc.request)
props = datastore_index.GetRecommendedIndexProperties(props)
yaml = datastore_index.IndexYamlForQuery(kind, ancestor, props)
xml = datastore_index.IndexXmlForQuery(kind, ancestor, props)
raise datastore_errors.NeedIndexError(
'\n'.join([str(exc), self._need_index_header, yaml]),
original_message=str(exc), header=self._need_index_header,
yaml_index=yaml, xml_index=xml)
raise
query_result = rpc.response
self._batch_shared.process_batch(query_result)
if query_result.HasField('skipped_results_compiled_cursor'):
self.__skipped_cursor = Cursor(
_cursor_bytes=query_result.skipped_results_compiled_cursor
.SerializeToString())
self.__result_cursors = [
Cursor(_cursor_bytes=result.SerializeToString())
for result in query_result.result_compiled_cursor
]
if query_result.HasField('compiled_cursor'):
self.__end_cursor = Cursor(
_cursor_bytes=query_result.compiled_cursor.SerializeToString())
self._skipped_results = query_result.skipped_results
if query_result.more_results:
self.__datastore_cursor = query_result.cursor
self.__more_results = True
else:
self._end()
self.__results = self._process_results(query_result.result)
return self
def _end(self):
"""Changes the internal state so that no more batches can be produced."""
self.__datastore_cursor = None
self.__more_results = False
def _make_next_batch(self, fetch_options):
"""Creates the object to store the next batch.
Args:
fetch_options: The datastore_query.FetchOptions passed in by the user or
None.
Returns:
A tuple containing the fetch options that should be used internally and
the object that should be used to contain the next batch.
"""
return fetch_options, Batch(self._batch_shared,
start_cursor=self.__end_cursor)
def _process_results(self, results):
"""Converts the datastore results into results returned to the user.
Args:
results: A list of entity_pb2.EntityProto's returned by the datastore
Returns:
A list of results that should be returned to the user.
"""
converter = self._batch_shared.conn.adapter.pb_to_query_result
return [converter(result, self._batch_shared.query_options)
for result in results]
def _process_v1_results(self, results):
"""Converts the datastore results into results returned to the user.
Args:
results: A list of googledatastore.EntityResults.
Returns:
A list of results that should be returned to the user.
"""
converter = self._batch_shared.conn.adapter.pb_v1_to_query_result
return [converter(result.entity, self._batch_shared.query_options)
for result in results]
def __getstate__(self):
raise pickle.PicklingError(
'Pickling of datastore_query.Batch is unsupported.')
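# Illustrative sketch (editor addition): re-fetching a sub-range of a
# previously retrieved Batch by turning Batch.cursor() values into query
# cursors, as described in the cursor() docstring. That QueryOptions accepts
# an end_cursor keyword is an assumption mirroring the start_cursor usage
# elsewhere in this module.
def _example_refetch_batch_slice(conn, query, batch, i, j):
  options = QueryOptions(start_cursor=batch.cursor(i),
                         end_cursor=batch.cursor(j))
  return query.run(conn, options)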
class _AugmentedBatch(Batch):
"""A batch produced by a datastore_query._AugmentedQuery."""
@classmethod
@datastore_rpc._positional(5)
def create_async(cls, augmented_query, query_options, conn, req,
in_memory_offset, in_memory_limit, start_cursor):
initial_offset = 0 if in_memory_offset is not None else None
batch_shared = _BatchShared(augmented_query._query,
query_options,
conn,
augmented_query,
initial_offset=initial_offset)
batch0 = cls(batch_shared,
in_memory_offset=in_memory_offset,
in_memory_limit=in_memory_limit,
start_cursor=start_cursor)
return batch0._make_query_rpc_call(query_options, req)
@datastore_rpc._positional(2)
def __init__(self, batch_shared,
in_memory_offset=None,
in_memory_limit=None,
next_index=0,
start_cursor=Cursor()):
"""A Constructor for datastore_query._AugmentedBatch.
Constructed by datastore_query._AugmentedQuery. Should not be called
directly.
"""
super(_AugmentedBatch, self).__init__(batch_shared,
start_cursor=start_cursor)
self.__in_memory_offset = in_memory_offset
self.__in_memory_limit = in_memory_limit
self.__next_index = next_index
@property
def query(self):
"""The query the current batch came from."""
return self._batch_shared.augmented_query
def cursor(self, index):
raise NotImplementedError
def _extend(self, next_batch):
super(_AugmentedBatch, self)._extend(next_batch)
self.__in_memory_limit = next_batch.__in_memory_limit
self.__in_memory_offset = next_batch.__in_memory_offset
self.__next_index = next_batch.__next_index
def _process_v1_results(self, results):
"""Process V4 results by converting to V3 and calling _process_results."""
v3_results = []
is_projection = bool(self.query_options.projection)
for v1_result in results:
v3_entity = entity_pb2.EntityProto()
self._batch_shared.conn.adapter.get_entity_converter().v1_to_v3_entity(
v1_result.entity, v3_entity, is_projection)
v3_results.append(v3_entity)
return self._process_results(v3_results)
def _process_results(self, results):
in_memory_filter = self._batch_shared.augmented_query._in_memory_filter
if in_memory_filter:
results = list(filter(in_memory_filter, results))
in_memory_results = self._batch_shared.augmented_query._in_memory_results
if in_memory_results and self.__next_index < len(in_memory_results):
original_query = super(_AugmentedBatch, self).query
if original_query._order:
if results:
next_result = in_memory_results[self.__next_index]
next_key = original_query._order.key(next_result)
i = 0
while i < len(results):
result = results[i]
result_key = original_query._order.key(result)
while next_key <= result_key:
results.insert(i, next_result)
i += 1
self.__next_index += 1
if self.__next_index >= len(in_memory_results):
break
next_result = in_memory_results[self.__next_index]
next_key = original_query._order.key(next_result)
i += 1
elif results or not super(_AugmentedBatch, self).more_results:
results = in_memory_results + results
self.__next_index = len(in_memory_results)
if self.__in_memory_offset:
assert not self._skipped_results
offset = min(self.__in_memory_offset, len(results))
if offset:
self._skipped_results += offset
self.__in_memory_offset -= offset
results = results[offset:]
if self.__in_memory_limit is not None:
results = results[:self.__in_memory_limit]
self.__in_memory_limit -= len(results)
if self.__in_memory_limit <= 0:
self._end()
return super(_AugmentedBatch, self)._process_results(results)
def _make_next_batch(self, fetch_options):
in_memory_offset = FetchOptions.offset(fetch_options)
augmented_query = self._batch_shared.augmented_query
if in_memory_offset and (augmented_query._in_memory_filter or
augmented_query._in_memory_results):
fetch_options = FetchOptions(offset=0)
else:
in_memory_offset = None
return (fetch_options,
_AugmentedBatch(self._batch_shared,
in_memory_offset=in_memory_offset,
in_memory_limit=self.__in_memory_limit,
start_cursor=self.end_cursor,
next_index=self.__next_index))
class Batcher(object):
"""A class that implements the Iterator interface for Batches.
Typically constructed by a call to Query.run().
The class hides the "best effort" nature of the datastore by potentially
making multiple requests to the datastore and merging the resulting batches.
This is accomplished efficiently by prefetching results and mixing both
non-blocking and blocking calls to the datastore as needed.
Iterating through batches is almost always more efficient than pulling all
results at once as RPC latency is hidden by asynchronously prefetching
results.
The batches produced by this class cannot be used to fetch the next batch
(through Batch.next_batch()) because the request for the next batch has
already been sent before the current batch is returned.
"""
ASYNC_ONLY = None
AT_LEAST_OFFSET = 0
AT_LEAST_ONE = object()
def __init__(self, query_options, first_async_batch):
"""Constructor.
Although this class can be manually constructed, it is preferable to use
Query.run(query_options).
Args:
query_options: The QueryOptions used to create the first batch.
first_async_batch: The first batch produced by
Query.run_async(query_options).
"""
self.__next_batch = first_async_batch
self.__initial_offset = QueryOptions.offset(query_options) or 0
self.__skipped_results = 0
def next(self):
"""Get the next batch. See .next_batch()."""
return self.next_batch(self.AT_LEAST_ONE)
def __next__(self):
return self.next()
def next_batch(self, min_batch_size):
"""Get the next batch.
The batch returned by this function cannot be used to fetch the next batch
(through Batch.next_batch()); Batch.next_batch() will always return None for
batches obtained this way. To retrieve the next batch use .next() or
.next_batch(N).
This function may return a batch larger than min_batch_size, but will never
return a smaller one unless there are no more results.
Special values can be used for min_batch_size:
ASYNC_ONLY - Do not perform any synchronous fetches from the datastore
even if this produces a batch with no results.
AT_LEAST_OFFSET - Only pull enough results to satisfy the offset.
AT_LEAST_ONE - Pull batches until at least one result is returned.
Args:
min_batch_size: The minimum number of results to retrieve or one of
(ASYNC_ONLY, AT_LEAST_OFFSET, AT_LEAST_ONE)
Returns:
The next Batch of results.
"""
if min_batch_size in (Batcher.ASYNC_ONLY, Batcher.AT_LEAST_OFFSET,
Batcher.AT_LEAST_ONE):
exact = False
else:
exact = True
datastore_types.ValidateInteger(min_batch_size,
'min_batch_size',
datastore_errors.BadArgumentError)
if not self.__next_batch:
raise StopIteration
batch = self.__next_batch.get_result()
self.__next_batch = None
self.__skipped_results += batch.skipped_results
if min_batch_size is not Batcher.ASYNC_ONLY:
if min_batch_size is Batcher.AT_LEAST_ONE:
min_batch_size = 1
needed_results = min_batch_size - len(batch.results)
while (batch.more_results and
(self.__skipped_results < self.__initial_offset or
needed_results > 0)):
if batch.query_options.batch_size:
batch_size = max(batch.query_options.batch_size, needed_results)
elif exact:
batch_size = needed_results
else:
batch_size = None
self.__next_batch = batch.next_batch_async(FetchOptions(
offset=max(0, self.__initial_offset - self.__skipped_results),
batch_size=batch_size))
next_batch = self.__next_batch.get_result()
self.__next_batch = None
self.__skipped_results += next_batch.skipped_results
needed_results = max(0, needed_results - len(next_batch.results))
batch._extend(next_batch)
self.__next_batch = batch.next_batch_async()
return batch
def __getstate__(self):
raise pickle.PicklingError(
'Pickling of datastore_query.Batcher is unsupported.')
def __iter__(self):
return self
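# Illustrative sketch (editor addition, not part of the original module):
# draining a query by iterating over the Batcher returned by Query.run().
# `conn` is assumed to be a datastore_rpc.BaseConnection and `query` a
# datastore_query.Query; the batch_size value is only a hint.
def _example_drain_query(conn, query):
  results = []
  for batch in query.run(conn, QueryOptions(batch_size=100)):
    results.extend(batch.results)
  return results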
class ResultsIterator(six.Iterator):
"""An iterator over the results from Batches obtained from a Batcher.
ResultsIterator implements Python's iterator protocol, so results can be
accessed with the for-statement:
> it = ResultsIterator(Query(kind='Person').run())
> for person in it:
> print('Hi, %s!' % person['name'])
At any time ResultsIterator.cursor() can be used to grab the Cursor that
points just after the last result returned by the iterator.
"""
__current_batch = None
__current_pos = 0
__last_cursor = None
def __init__(self, batcher):
"""Constructor.
Args:
batcher: A datastore_query.Batcher
"""
if not isinstance(batcher, Batcher):
raise datastore_errors.BadArgumentError(
'batcher argument should be datastore_query.Batcher (%r)' %
(batcher,))
self.__batcher = batcher
def index_list(self):
"""Returns the list of indexes used to perform the query.
Possibly None when the adapter does not implement pb_to_index.
"""
return self._ensure_current_batch().index_list
def cursor(self):
"""Returns a cursor that points just after the last result returned.
If next() throws an exception, this function returns the end_cursor from
the last successful batch or throws the same exception if no batch was
successful.
"""
return (self.__last_cursor or
self._ensure_current_batch().cursor(self.__current_pos))
def _ensure_current_batch(self):
if not self.__current_batch:
self.__current_batch = self.__batcher.next_batch(Batcher.AT_LEAST_OFFSET)
self.__current_pos = 0
return self.__current_batch
def _compiled_query(self):
"""Returns the compiled query associated with the iterator.
Internal only; do not use.
"""
return self._ensure_current_batch()._compiled_query()
def __next__(self):
"""Returns the next query result."""
while (not self.__current_batch or
self.__current_pos >= len(self.__current_batch.results)):
try:
next_batch = self.__batcher.next_batch(Batcher.AT_LEAST_OFFSET)
except:
if self.__current_batch:
self.__last_cursor = self.__current_batch.end_cursor
raise
self.__current_pos = 0
self.__current_batch = next_batch
result = self.__current_batch.results[self.__current_pos]
self.__current_pos += 1
return result
def __iter__(self):
return self
def next(self):
return self.__next__()
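# Illustrative sketch (editor addition): walking individual results with
# ResultsIterator and grabbing a cursor afterwards to resume the query later.
# `conn` is assumed to be a datastore_rpc.BaseConnection.
def _example_iterate_results(conn, query):
  it = ResultsIterator(query.run(conn))
  results = [entity for entity in it]
  resume_cursor = it.cursor()
  return results, resume_cursor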
| #!/usr/bin/env python
#
# Copyright 2007 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A thin wrapper around datastore query RPC calls.
This provides wrappers around the internal only datastore_pb library and is
designed to be the lowest-level API to be used by all Python datastore client
libraries for executing queries. It provides a layer of protection so the actual
RPC syntax can change without affecting client libraries.
Any class, function, field or argument starting with an '_' is for INTERNAL use
only and should not be used by developers!
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import base64
import collections
import functools
import pickle
import six
from google.appengine.api import cmp_compat
from google.appengine.api import datastore_errors
from google.appengine.api import datastore_types
from google.appengine.datastore import datastore_index
from google.appengine.datastore import datastore_pb
from google.appengine.datastore import datastore_pbs
from google.appengine.datastore import datastore_rpc
from google.protobuf import message
from google.appengine.datastore import entity_bytes_pb2 as entity_pb2
__all__ = ['Batch',
'Batcher',
'CompositeFilter',
'CompositeOrder',
'CorrelationFilter',
'Cursor',
'FetchOptions',
'FilterPredicate',
'Order',
'PropertyFilter',
'PropertyOrder',
'Query',
'QueryOptions',
'ResultsIterator',
'make_filter',
'apply_query',
'inject_results']
if datastore_pbs._CLOUD_DATASTORE_ENABLED:
from google.appengine.datastore.datastore_pbs import googledatastore
class _BaseComponent(object):
"""A base class for query components.
Currently just implements basic == and != functions.
"""
def __eq__(self, other):
if self.__class__ is not other.__class__:
return NotImplemented
return self is other or self.__dict__ == other.__dict__
def __ne__(self, other):
equal = self.__eq__(other)
if equal is NotImplemented:
return equal
return not equal
def make_filter(name, op, values):
"""Constructs a FilterPredicate from the given name, op and values.
Args:
name: A non-empty string, the name of the property to filter.
op: One of PropertyFilter._OPERATORS.keys(), the operator to use.
values: A supported value, the value to compare against.
Returns:
if values is a list, a CompositeFilter that uses AND to combine all
values, otherwise a PropertyFilter for the single value.
Raises:
datastore_errors.BadPropertyError: if the property name is invalid.
datastore_errors.BadValueError: if the property did not validate correctly
or the value was an empty list.
Other exception types (like OverflowError): if the property value does not
meet type-specific criteria.
"""
datastore_types.ValidateProperty(name, values)
properties = datastore_types.ToPropertyPb(name, values)
if isinstance(properties, list):
filters = [PropertyFilter(op, prop) for prop in properties]
return CompositeFilter(CompositeFilter.AND, filters)
else:
return PropertyFilter(op, properties)
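# Illustrative sketch (editor addition): make_filter() returns a single
# PropertyFilter for a scalar value and an AND CompositeFilter (one
# PropertyFilter per value) when given a list.
def _example_make_filter():
  scalar_filter = make_filter('age', '>=', 18)
  list_filter = make_filter('tag', '=', ['python', 'datastore'])
  return scalar_filter, list_filter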
def _make_key_value_map(entity, property_names):
"""Extracts key values from the given entity.
Args:
entity: The entity_pb2.EntityProto to extract values from.
property_names: The names of the properties from which to extract values.
Returns:
A dict mapping property names to lists of key values.
"""
value_map = dict((six.ensure_text(name), []) for name in property_names)
for prop in entity.property:
prop_name = six.ensure_text(prop.name)
if prop_name in value_map:
value_map[prop_name].append(
datastore_types.PropertyValueToKeyValue(prop.value))
key_prop = six.ensure_text(datastore_types.KEY_SPECIAL_PROPERTY)
if key_prop in value_map:
value_map[key_prop] = [datastore_types.ReferenceToKeyValue(entity.key)]
return value_map
class _PropertyComponent(_BaseComponent):
"""A component that operates on a specific set of properties."""
def _get_prop_names(self):
"""Returns a set of property names used by the filter."""
raise NotImplementedError
class FilterPredicate(_PropertyComponent):
"""An abstract base class for all query filters.
All sub-classes must be immutable as these are often stored without creating a
defensive copy.
"""
def __call__(self, entity):
"""Applies the filter predicate to the given entity.
Args:
entity: the datastore_pb.EntityProto to test.
Returns:
True if the given entity matches the filter, False otherwise.
"""
return self._apply(_make_key_value_map(entity, self._get_prop_names()))
def _apply(self, key_value_map):
"""Apply the given component to the comparable value map.
A filter matches a list of values if at least one value in the list
matches the filter, for example:
'prop: [1, 2]' matches both 'prop = 1' and 'prop = 2' but not 'prop = 3'
Note: the values are actually represented as tuples whose first item
encodes the type; see datastore_types.PropertyValueToKeyValue().
Args:
key_value_map: A dict mapping property names to a list of
comparable values.
Returns:
A boolean indicating if the given map matches the filter.
"""
raise NotImplementedError
def _prune(self, key_value_map):
"""Removes values from the given map that do not match the filter.
When doing a scan in the datastore, only index values that match the filters
are seen. When multiple values that point to the same entity are seen, the
entity only appears where the first value is found. This function removes
all values that don't match the query so that the first value in the map
is the same one the datastore would see first.
Args:
key_value_map: the comparable value map from which to remove
values. Does not need to contain values for all filtered properties.
Returns:
A value that evaluates to False if every value in a single list was
completely removed. This effectively applies the filter but is less
efficient than _apply().
"""
raise NotImplementedError
def _to_pb(self):
"""Internal only function to generate a pb."""
raise NotImplementedError(
'This filter only supports in memory operations (%r)' % self)
def _to_pbs(self):
"""Internal only function to generate a list of pbs."""
return [self._to_pb()]
def _to_pb_v1(self, adapter):
"""Internal only function to generate a v1 pb.
Args:
adapter: A datastore_rpc.AbstractAdapter
"""
raise NotImplementedError(
'This filter only supports in memory operations (%r)' % self)
class _SinglePropertyFilter(FilterPredicate):
"""Base class for a filter that operates on a single property."""
def _get_prop_name(self):
"""Returns the name of the property being filtered."""
raise NotImplementedError
def _apply_to_value(self, value):
"""Apply the filter to the given value.
Args:
value: The comparable value to check.
Returns:
A boolean indicating if the given value matches the filter.
"""
raise NotImplementedError
def _get_prop_names(self):
return set([self._get_prop_name()])
def _apply(self, value_map):
for other_value in value_map[self._get_prop_name()]:
if self._apply_to_value(other_value):
return True
return False
def _prune(self, value_map):
if self._get_prop_name() not in value_map:
return True
values = [value for value in value_map[self._get_prop_name()]
if self._apply_to_value(value)]
value_map[self._get_prop_name()] = values
return bool(values)
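# Illustrative sketch (editor addition): a minimal _SinglePropertyFilter
# subclass that matches entities carrying a given comparable key value (as
# produced by datastore_types.PropertyValueToKeyValue) for one property. It is
# in-memory only because it does not override _to_pb().
class _ExampleHasKeyValueFilter(_SinglePropertyFilter):
  def __init__(self, prop_name, key_value):
    super(_ExampleHasKeyValueFilter, self).__init__()
    self.__prop_name = prop_name
    self.__key_value = key_value
  def _get_prop_name(self):
    return self.__prop_name
  def _apply_to_value(self, value):
    return value == self.__key_value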
class PropertyFilter(_SinglePropertyFilter):
"""An immutable filter predicate that constrains a single property."""
_OPERATORS = {
'<': datastore_pb.Query.Filter.LESS_THAN,
'<=': datastore_pb.Query.Filter.LESS_THAN_OR_EQUAL,
'>': datastore_pb.Query.Filter.GREATER_THAN,
'>=': datastore_pb.Query.Filter.GREATER_THAN_OR_EQUAL,
'=': datastore_pb.Query.Filter.EQUAL,
}
_OPERATORS_INVERSE = dict((value, key)
for key, value in _OPERATORS.items())
_OPERATORS_TO_PYTHON_OPERATOR = {
datastore_pb.Query.Filter.LESS_THAN: '<',
datastore_pb.Query.Filter.LESS_THAN_OR_EQUAL: '<=',
datastore_pb.Query.Filter.GREATER_THAN: '>',
datastore_pb.Query.Filter.GREATER_THAN_OR_EQUAL: '>=',
datastore_pb.Query.Filter.EQUAL: '==',
}
_INEQUALITY_OPERATORS = frozenset(['<', '<=', '>', '>='])
_INEQUALITY_OPERATORS_ENUM = frozenset([
datastore_pb.Query.Filter.LESS_THAN,
datastore_pb.Query.Filter.LESS_THAN_OR_EQUAL,
datastore_pb.Query.Filter.GREATER_THAN,
datastore_pb.Query.Filter.GREATER_THAN_OR_EQUAL,
])
_UPPERBOUND_INEQUALITY_OPERATORS = frozenset(['<', '<='])
def __init__(self, op, value):
"""Constructor.
Args:
op: A string representing the operator to use.
value: A entity_pb2.Property, the property and value to compare against.
Raises:
datastore_errors.BadArgumentError if op has an unsupported value or value
is not an entity_pb2.Property.
"""
if op not in self._OPERATORS:
raise datastore_errors.BadArgumentError('unknown operator: %r' % (op,))
if not isinstance(value, entity_pb2.Property):
raise datastore_errors.BadArgumentError(
'value argument should be entity_pb2.Property (%r)' % (value,))
super(PropertyFilter, self).__init__()
self._filter = datastore_pb.Query.Filter()
self._filter.op = self._OPERATORS[op]
self._filter.property.add().CopyFrom(value)
@property
def op(self):
raw_op = self._filter.op
return self._OPERATORS_INVERSE.get(raw_op, str(raw_op))
@property
def value(self):
return self._filter.property[0]
def __repr__(self):
prop = self.value
name = prop.name
value = datastore_types.FromPropertyPb(prop)
if six.PY2 and isinstance(value, long):
value = int(value)
return '%s(%r, <%r, %r>)' % (self.__class__.__name__, six.ensure_str(
self.op), six.ensure_str(name), value)
def _get_prop_name(self):
return self._filter.property[0].name
def _apply_to_value(self, value):
if not hasattr(self, '_cmp_value'):
if self._filter.op == datastore_pb.Query.Filter.EXISTS:
return True
self._cmp_value = datastore_types.PropertyValueToKeyValue(
self._filter.property[0].value)
self._condition = ('value %s self._cmp_value' %
self._OPERATORS_TO_PYTHON_OPERATOR[self._filter.op])
return eval(self._condition)
def _has_inequality(self):
"""Returns True if the filter predicate contains inequalities filters."""
return self._filter.op in self._INEQUALITY_OPERATORS_ENUM
@classmethod
def _from_pb(cls, filter_pb):
self = cls.__new__(cls)
self._filter = filter_pb
return self
def _to_pb(self):
"""Returns the internal only pb representation."""
return self._filter
def _to_pb_v1(self, adapter):
"""Returns a googledatastore.Filter representation of the filter.
Args:
adapter: A datastore_rpc.AbstractAdapter
"""
filter_pb = googledatastore.Filter()
prop_filter_pb = filter_pb.property_filter
adapter.get_query_converter()._v3_filter_to_v1_property_filter(
self._filter, prop_filter_pb)
return filter_pb
def __getstate__(self):
raise pickle.PicklingError(
'Pickling of datastore_query.PropertyFilter is unsupported.')
def __eq__(self, other):
if self.__class__ is not other.__class__:
if other.__class__ is _PropertyRangeFilter:
return [self._filter] == other._to_pbs()
return NotImplemented
return self._filter == other._filter
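# Illustrative sketch (editor addition): building a PropertyFilter directly
# from an entity_pb2.Property created by datastore_types.ToPropertyPb(); this
# is what make_filter() does for a single scalar value.
def _example_property_filter():
  age_property = datastore_types.ToPropertyPb('age', 21)
  return PropertyFilter('>=', age_property)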
class _PropertyRangeFilter(_SinglePropertyFilter):
"""A filter predicate that represents a range of values.
Since we allow multi-valued properties there is a large difference between
"x > 0 AND x < 1" and "0 < x < 1." An entity with x = [-1, 2] will match the
first but not the second.
Since the datastore only allows a single inequality filter, multiple
inequality filters are merged into a single range filter in the
datastore (unlike equality filters). This class is used by
datastore_query.CompositeFilter to implement the same logic.
"""
_start_key_value = None
_end_key_value = None
@datastore_rpc._positional(1)
def __init__(self, start=None, start_incl=True, end=None, end_incl=True):
"""Constructs a range filter using start and end properties.
Args:
start: A entity_pb2.Property to use as a lower bound or None to indicate
no lower bound.
start_incl: A boolean that indicates if the lower bound is inclusive.
end: A entity_pb2.Property to use as an upper bound or None to indicate
no upper bound.
end_incl: A boolean that indicates if the upper bound is inclusive.
"""
if start is not None and not isinstance(start, entity_pb2.Property):
raise datastore_errors.BadArgumentError(
'start argument should be entity_pb2.Property (%r)' % (start,))
if end is not None and not isinstance(end, entity_pb2.Property):
raise datastore_errors.BadArgumentError(
'end argument should be entity_pb2.Property (%r)' % (end,))
if start and end and start.name != end.name:
raise datastore_errors.BadArgumentError(
'start and end arguments must be on the same property (%s != %s)' %
(start.name, end.name))
if not start and not end:
raise datastore_errors.BadArgumentError(
'Unbounded ranges are not supported.')
super(_PropertyRangeFilter, self).__init__()
self._start = start
self._start_incl = start_incl
self._end = end
self._end_incl = end_incl
@classmethod
def from_property_filter(cls, prop_filter):
op = prop_filter._filter.op
if op == datastore_pb.Query.Filter.GREATER_THAN:
return cls(start=prop_filter._filter.property[0], start_incl=False)
elif op == datastore_pb.Query.Filter.GREATER_THAN_OR_EQUAL:
return cls(start=prop_filter._filter.property[0])
elif op == datastore_pb.Query.Filter.LESS_THAN:
return cls(end=prop_filter._filter.property[0], end_incl=False)
elif op == datastore_pb.Query.Filter.LESS_THAN_OR_EQUAL:
return cls(end=prop_filter._filter.property[0])
else:
raise datastore_errors.BadArgumentError(
'Unsupported operator (%s)' % (op,))
def intersect(self, other):
"""Returns a filter representing the intersection of self and other."""
if isinstance(other, PropertyFilter):
other = self.from_property_filter(other)
elif not isinstance(other, _PropertyRangeFilter):
raise datastore_errors.BadArgumentError(
'other argument should be a _PropertyRangeFilter (%r)' % (other,))
if other._get_prop_name() != self._get_prop_name():
raise datastore_errors.BadArgumentError(
'other argument must be on the same property (%s != %s)' %
(other._get_prop_name(), self._get_prop_name()))
start_source = None
if other._start:
if self._start:
result = cmp_compat.cmp(
self._get_start_key_value(), other._get_start_key_value())
if result == 0:
result = cmp_compat.cmp(other._start_incl, self._start_incl)
if result > 0:
start_source = self
elif result < 0:
start_source = other
else:
start_source = other
elif self._start:
start_source = self
end_source = None
if other._end:
if self._end:
result = cmp_compat.cmp(
self._get_end_key_value(), other._get_end_key_value())
if result == 0:
result = cmp_compat.cmp(self._end_incl, other._end_incl)
if result < 0:
end_source = self
elif result > 0:
end_source = other
else:
end_source = other
elif self._end:
end_source = self
if start_source:
if end_source in (start_source, None):
return start_source
result = _PropertyRangeFilter(start=start_source._start,
start_incl=start_source._start_incl,
end=end_source._end,
end_incl=end_source._end_incl)
result._start_key_value = start_source._start_key_value
result._end_key_value = end_source._end_key_value
return result
else:
return end_source or self
def _get_start_key_value(self):
if self._start_key_value is None:
self._start_key_value = datastore_types.PropertyValueToKeyValue(
self._start.value)
return self._start_key_value
def _get_end_key_value(self):
if self._end_key_value is None:
self._end_key_value = datastore_types.PropertyValueToKeyValue(
self._end.value)
return self._end_key_value
def _apply_to_value(self, value):
"""Apply the filter to the given value.
Args:
value: The comparable value to check.
Returns:
A boolean indicating if the given value matches the filter.
"""
if self._start:
result = cmp_compat.cmp(self._get_start_key_value(), value)
if result > 0 or (result == 0 and not self._start_incl):
return False
if self._end:
result = cmp_compat.cmp(self._get_end_key_value(), value)
if result < 0 or (result == 0 and not self._end_incl):
return False
return True
def _get_prop_name(self):
if self._start:
return self._start.name
if self._end:
return self._end.name
assert False
def _to_pbs(self):
pbs = []
if self._start:
if self._start_incl:
op = datastore_pb.Query.Filter.GREATER_THAN_OR_EQUAL
else:
op = datastore_pb.Query.Filter.GREATER_THAN
pb = datastore_pb.Query.Filter()
pb.op = op
pb.property.add().CopyFrom(self._start)
pbs.append(pb)
if self._end:
if self._end_incl:
op = datastore_pb.Query.Filter.LESS_THAN_OR_EQUAL
else:
op = datastore_pb.Query.Filter.LESS_THAN
pb = datastore_pb.Query.Filter()
pb.op = op
pb.property.add().CopyFrom(self._end)
pbs.append(pb)
return pbs
def _to_pb_v1(self, adapter):
"""Returns a googledatastore.Filter representation of the filter.
Args:
adapter: A datastore_rpc.AbstractAdapter.
"""
filter_pb = googledatastore.Filter()
composite_filter = filter_pb.composite_filter
composite_filter.op = googledatastore.CompositeFilter.AND
if self._start:
if self._start_incl:
op = googledatastore.PropertyFilter.GREATER_THAN_OR_EQUAL
else:
op = googledatastore.PropertyFilter.GREATER_THAN
pb = composite_filter.filters.add().property_filter
pb.op = op
pb.property.name = self._start.name
adapter.get_entity_converter().v3_property_to_v1_value(
self._start, True, pb.value)
if self._end:
if self._end_incl:
op = googledatastore.PropertyFilter.LESS_THAN_OR_EQUAL
else:
op = googledatastore.PropertyFilter.LESS_THAN
pb = composite_filter.filters.add().property_filter
pb.op = op
pb.property.name = self._end.name
adapter.get_entity_converter().v3_property_to_v1_value(
self._end, True, pb.value)
return filter_pb
def __getstate__(self):
raise pickle.PicklingError(
'Pickling of %r is unsupported.' % self)
def __eq__(self, other):
if self.__class__ is not other.__class__:
return NotImplemented
return (self._start == other._start and
self._end == other._end and
(self._start_incl == other._start_incl or self._start is None) and
(self._end_incl == other._end_incl or self._end is None))
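# Illustrative sketch (editor addition): merging two inequality filters on the
# same property into one range, mirroring what CompositeFilter does internally
# for AND queries.
def _example_range_filter():
  lower = PropertyFilter('>=', datastore_types.ToPropertyPb('age', 18))
  upper = PropertyFilter('<', datastore_types.ToPropertyPb('age', 65))
  range_filter = _PropertyRangeFilter.from_property_filter(lower)
  # intersect() accepts a PropertyFilter or another _PropertyRangeFilter and
  # returns the combined [18, 65) range on 'age'.
  return range_filter.intersect(upper)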
class _PropertyExistsFilter(FilterPredicate):
"""A FilterPredicate that matches entities containing specific properties.
Only works as an in-memory filter. Used internally to filter out entities
that don't have all properties in a given Order.
"""
def __init__(self, names):
super(_PropertyExistsFilter, self).__init__()
self._names = frozenset(names)
def _apply(self, value_map):
for name in self._names:
if not value_map.get(name):
return False
return True
def _get_prop_names(self):
return self._names
def _prune(self, _):
raise NotImplementedError
def __getstate__(self):
raise pickle.PicklingError(
'Pickling of %r is unsupported.' % self)
class CorrelationFilter(FilterPredicate):
"""A filter that isolates correlated values and applies a sub-filter on them.
This filter assumes that every property used by the sub-filter should be
grouped before being passed to the sub-filter. The default grouping puts
each value in its own group. Consider:
e = {a: [1, 2], b: [2, 1, 3], c: 4}
A correlation filter with a sub-filter that operates on (a, b) will be tested
against the following 3 sets of values:
{a: 1, b: 2}
{a: 2, b: 1}
{b: 3}
In this case CorrelationFilter('a = 2 AND b = 2') won't match this entity but
CorrelationFilter('a = 2 AND b = 1') will. To apply an uncorrelated filter on
c, the filter must be applied in parallel to the correlation filter. For
example:
CompositeFilter(AND, [CorrelationFilter('a = 2 AND b = 1'), 'c = 3'])
If 'c = 3' was included in the correlation filter, c would be grouped as well.
This would result in the following values:
{a: 1, b: 2, c: 4}
{a: 2, b: 1}
{b: 3}
If any set of correlated values match the sub-filter then the entity matches
the correlation filter.
"""
def __init__(self, subfilter):
"""Constructor.
Args:
subfilter: A FilterPredicate to apply to the correlated values
"""
self._subfilter = subfilter
@property
def subfilter(self):
return self._subfilter
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.subfilter)
def _apply(self, value_map):
base_map = dict((prop, []) for prop in self._get_prop_names())
value_maps = []
for prop in base_map:
grouped = self._group_values(prop, value_map[prop])
while len(value_maps) < len(grouped):
value_maps.append(base_map.copy())
for value, m in six.moves.zip(grouped, value_maps):
m[prop] = value
return self._apply_correlated(value_maps)
def _apply_correlated(self, value_maps):
"""Applies sub-filter to the correlated value maps.
The default implementation matches when any value_map in value_maps
matches the sub-filter.
Args:
value_maps: A list of correlated value_maps.
Returns:
True if any of the correlated value maps matches the sub-filter.
"""
for map in value_maps:
if self._subfilter._apply(map):
return True
return False
def _group_values(self, prop, values):
"""A function that groups the given values.
Override this function to introduce custom grouping logic. The default
implementation assumes each value belongs in its own group.
Args:
prop: The name of the property whose values are being grouped.
values: A list of opaque values.
Returns:
A list of lists of grouped values.
"""
return [[value] for value in values]
def _get_prop_names(self):
return self._subfilter._get_prop_names()
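# Illustrative sketch (editor addition): the docstring example above written
# with real filter objects. The correlated part matches entities where some
# positionally grouped pair of values satisfies a = 2 and b = 1, while c is
# filtered independently. CorrelationFilter has no protobuf form, so the
# resulting filter is usable for in-memory filtering only.
def _example_correlation_filter():
  correlated = CorrelationFilter(
      CompositeFilter(CompositeFilter.AND,
                      [make_filter('a', '=', 2), make_filter('b', '=', 1)]))
  return CompositeFilter(CompositeFilter.AND,
                         [correlated, make_filter('c', '=', 3)])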
class CompositeFilter(FilterPredicate):
"""An immutable filter predicate that combines other predicates.
This class proactively merges sub-filters that are combined using the same
operator. For example:
CompositeFilter(AND, [f1, f2, CompositeFilter(AND, [f3, f4]), f5, f6])
is equivalent to:
CompositeFilter(AND, [f1, f2, f3, f4, f5, f6])
Currently filters can only be combined using an AND operator.
"""
AND = 'and'
_OPERATORS = frozenset([AND])
def __init__(self, op, filters):
"""Constructor.
Args:
op: The operator to use to combine the given filters
filters: A list of one or more filters to combine
Raises:
datastore_errors.BadArgumentError if op is not in CompositeFilter._OPERATORS
or filters is not a non-empty list containing only FilterPredicates.
"""
if op not in self._OPERATORS:
raise datastore_errors.BadArgumentError('unknown operator (%s)' % (op,))
if not filters or not isinstance(filters, (list, tuple)):
raise datastore_errors.BadArgumentError(
'filters argument should be a non-empty list (%r)' % (filters,))
super(CompositeFilter, self).__init__()
self._op = op
flattened = []
for f in filters:
if isinstance(f, CompositeFilter) and f._op == self._op:
flattened.extend(f._filters)
elif isinstance(f, FilterPredicate):
flattened.append(f)
else:
raise datastore_errors.BadArgumentError(
'filters argument must be a list of FilterPredicates, found (%r)' %
(f,))
if op == self.AND:
filters = flattened
flattened = []
ineq_map = {}
for f in filters:
if (isinstance(f, _PropertyRangeFilter) or
(isinstance(f, PropertyFilter) and f._has_inequality())):
name = f._get_prop_name()
index = ineq_map.get(name)
if index is not None:
range_filter = flattened[index]
flattened[index] = range_filter.intersect(f)
else:
if isinstance(f, PropertyFilter):
range_filter = _PropertyRangeFilter.from_property_filter(f)
else:
range_filter = f
ineq_map[name] = len(flattened)
flattened.append(range_filter)
else:
flattened.append(f)
self._filters = tuple(flattened)
@property
def op(self):
return self._op
@property
def filters(self):
return self._filters
def __repr__(self):
op = self.op
if op == self.AND:
op = 'AND'
else:
op = str(op)
return '%s(%s, %r)' % (self.__class__.__name__, op, list(self.filters))
def _get_prop_names(self):
names = set()
for f in self._filters:
names |= f._get_prop_names()
return names
def _apply(self, value_map):
if self._op == self.AND:
for f in self._filters:
if not f._apply(value_map):
return False
return True
raise NotImplementedError
def _prune(self, value_map):
if self._op == self.AND:
matches = collections.defaultdict(set)
for f in self._filters:
props = f._get_prop_names()
local_value_map = dict((k, v) for k, v in value_map.items()
if k in props)
if not f._prune(local_value_map):
return False
for (prop, values) in local_value_map.items():
matches[prop].update(values)
for prop, value_set in matches.items():
value_map[prop] = sorted(value_set)
return True
raise NotImplementedError
def _to_pbs(self):
"""Returns the internal only pb representation."""
pbs = []
for f in self._filters:
pbs.extend(f._to_pbs())
return pbs
def _to_pb_v1(self, adapter):
"""Returns a googledatastore.Filter.
Args:
adapter: A datastore_rpc.AbstractAdapter
"""
if not self._filters:
return None
if len(self._filters) == 1:
return self._filters[0]._to_pb_v1(adapter)
pb = googledatastore.Filter()
comp_pb = pb.composite_filter
if self.op == self.AND:
comp_pb.op = googledatastore.CompositeFilter.AND
else:
raise datastore_errors.BadArgumentError(
'Datastore V4 only supports CompositeFilter with AND operator.')
for f in self._filters:
comp_pb.filters.add().CopyFrom(f._to_pb_v1(adapter))
return pb
def __eq__(self, other):
if self.__class__ is other.__class__:
return super(CompositeFilter, self).__eq__(other)
if len(self._filters) == 1:
result = self._filters[0].__eq__(other)
if result is NotImplemented and hasattr(other, '__eq__'):
return other.__eq__(self._filters[0])
return result
return NotImplemented
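# Illustrative sketch, not part of the original module: CompositeFilter
# flattens nested AND filters at construction time, so for plain
# FilterPredicates (no mergeable inequality filters) both predicates built
# below should expose the same .filters tuple. f1, f2 and f3 stand in for
# FilterPredicate instances built elsewhere.
def _example_composite_filter_flattening(f1, f2, f3):
  nested = CompositeFilter(
      CompositeFilter.AND, [f1, CompositeFilter(CompositeFilter.AND, [f2, f3])])
  flat = CompositeFilter(CompositeFilter.AND, [f1, f2, f3])
  return nested.filters, flat.filters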
class _IgnoreFilter(_SinglePropertyFilter):
"""A filter that removes all entities with the given keys."""
def __init__(self, key_value_set):
super(_IgnoreFilter, self).__init__()
self._keys = key_value_set
def _get_prop_name(self):
return datastore_types.KEY_SPECIAL_PROPERTY
def _apply_to_value(self, value):
return value not in self._keys
class _DedupingFilter(_IgnoreFilter):
"""A filter that removes duplicate keys."""
def __init__(self, key_value_set=None):
super(_DedupingFilter, self).__init__(key_value_set or set())
def _apply_to_value(self, value):
if super(_DedupingFilter, self)._apply_to_value(value):
self._keys.add(value)
return True
return False
class Order(_PropertyComponent):
"""A base class that represents a sort order on a query.
All sub-classes must be immutable as these are often stored without creating a
defensive copy.
This class can be used as either the cmp or key arg in sorted() or
list.sort(). To provide a stable ordering, a trailing ascending order on the
key is always used.
"""
@datastore_rpc._positional(1)
def reversed(self, group_by=None):
"""Constructs an order representing the reverse of the current order.
This function takes into account the effects of orders on properties not in
the group_by clause of a query. For example, consider:
SELECT A, First(B) ... GROUP BY A ORDER BY A, B
Changing the order of B would affect which value is listed in the 'First(B)'
column, which would actually change the results instead of just reversing
them.
Args:
group_by: If specified, only orders on properties in group_by will be
reversed.
Returns:
A new order representing the reverse direction.
"""
raise NotImplementedError
def _key(self, lhs_value_map):
"""Creates a key for the given value map."""
raise NotImplementedError
def _cmp(self, lhs_value_map, rhs_value_map):
"""Compares the given value maps."""
raise NotImplementedError
def _to_pb(self):
"""Internal only function to generate a filter pb."""
raise NotImplementedError
def _to_pb_v1(self, adapter):
"""Internal only function to generate a v1 filter pb.
Args:
adapter: A datastore_rpc.AbstractAdapter
"""
raise NotImplementedError
def key_for_filter(self, filter_predicate):
if filter_predicate:
return lambda x: self.key(x, filter_predicate)
return self.key
def cmp_for_filter(self, filter_predicate):
if filter_predicate:
return lambda x, y: self.cmp(x, y, filter_predicate)
return self.cmp
def key(self, entity, filter_predicate=None):
"""Constructs a "key" value for the given entity based on the current order.
This function can be used as the key argument for list.sort() and sorted().
Args:
entity: The entity_pb2.EntityProto to convert
filter_predicate: A FilterPredicate used to prune values before comparing
entities or None.
Returns:
A key value that identifies the position of the entity when sorted by
the current order.
"""
names = self._get_prop_names()
names.add(datastore_types.KEY_SPECIAL_PROPERTY)
if filter_predicate is not None:
names |= filter_predicate._get_prop_names()
value_map = _make_key_value_map(entity, names)
if filter_predicate is not None:
filter_predicate._prune(value_map)
return (self._key(value_map),
value_map[datastore_types.KEY_SPECIAL_PROPERTY])
def cmp(self, lhs, rhs, filter_predicate=None):
"""Compares the given values taking into account any filters.
This function can be used as the cmp argument for list.sort() and sorted().
This function is slightly more efficient than Order.key when comparing two
entities; however, it is much less efficient when sorting a list of entities.
Args:
lhs: An entity_pb2.EntityProto
rhs: An entity_pb2.EntityProto
filter_predicate: A FilterPredicate used to prune values before comparing
entities or None.
Returns:
An integer <, =, or > 0 representing the operator that goes between lhs
and rhs to create a true statement.
"""
names = self._get_prop_names()
if filter_predicate is not None:
names |= filter_predicate._get_prop_names()
lhs_value_map = _make_key_value_map(lhs, names)
rhs_value_map = _make_key_value_map(rhs, names)
if filter_predicate is not None:
filter_predicate._prune(lhs_value_map)
filter_predicate._prune(rhs_value_map)
result = self._cmp(lhs_value_map, rhs_value_map)
if result:
return result
if not lhs.HasField('key') and not rhs.HasField('key'):
return 0
lhs_key = (lhs_value_map.get(datastore_types.KEY_SPECIAL_PROPERTY) or
datastore_types.ReferenceToKeyValue(lhs.key))
rhs_key = (rhs_value_map.get(datastore_types.KEY_SPECIAL_PROPERTY) or
datastore_types.ReferenceToKeyValue(rhs.key))
return cmp_compat.cmp(lhs_key, rhs_key)
@cmp_compat.total_ordering_from_cmp
class _ReverseOrder(_BaseComponent):
"""Reverses the comparison for the given object."""
def __init__(self, obj):
"""Constructor for _ReverseOrder.
Args:
obj: Any comparable and hashable object.
"""
super(_ReverseOrder, self).__init__()
self._obj = obj
def __hash__(self):
return hash(self._obj)
def __cmp__(self, other):
assert self.__class__ == other.__class__, (
'A datastore_query._ReverseOrder object can only be compared to '
'an object of the same type.')
return -cmp_compat.cmp(self._obj, other._obj)
class PropertyOrder(Order):
"""An immutable class that represents a sort order for a single property."""
ASCENDING = datastore_pb.Query.Order.ASCENDING
DESCENDING = datastore_pb.Query.Order.DESCENDING
_DIRECTIONS = frozenset([ASCENDING, DESCENDING])
def __init__(self, prop, direction=ASCENDING):
"""Constructor.
Args:
prop: the name of the prop by which to sort.
direction: the direction in which to sort the given prop.
Raises:
datastore_errors.BadArgumentError if the prop name or direction is
invalid.
"""
datastore_types.ValidateString(prop,
'prop',
datastore_errors.BadArgumentError)
if not direction in self._DIRECTIONS:
raise datastore_errors.BadArgumentError('unknown direction: %r' %
(direction,))
super(PropertyOrder, self).__init__()
self.__order = datastore_pb.Query.Order()
self.__order.property = six.ensure_binary(prop, 'utf-8')
self.__order.direction = direction
@property
def prop(self):
return self.__order.property
@property
def direction(self):
return self.__order.direction
def __repr__(self):
extra = ''
if self.direction == self.DESCENDING:
extra = ', DESCENDING'
name = repr(six.ensure_str(self.prop))[1:-1]
return '%s(<%s>%s)' % (self.__class__.__name__, name, extra)
@datastore_rpc._positional(1)
def reversed(self, group_by=None):
if group_by and self.__order.property not in group_by:
return self
if self.__order.direction == self.ASCENDING:
return PropertyOrder(
six.ensure_text(self.__order.property), self.DESCENDING)
else:
return PropertyOrder(
six.ensure_text(self.__order.property), self.ASCENDING)
def _get_prop_names(self):
return set([self.__order.property])
def _key(self, lhs_value_map):
lhs_values = lhs_value_map[self.__order.property]
if not lhs_values:
raise datastore_errors.BadArgumentError(
'Missing value for property (%s)' % self.__order.property)
if self.__order.direction == self.ASCENDING:
return min(lhs_values)
else:
return _ReverseOrder(max(lhs_values))
def _cmp(self, lhs_value_map, rhs_value_map):
lhs_values = lhs_value_map[self.__order.property]
rhs_values = rhs_value_map[self.__order.property]
if not lhs_values and not rhs_values:
return 0
if not lhs_values:
raise datastore_errors.BadArgumentError(
'LHS missing value for property (%s)' % self.__order.property)
if not rhs_values:
raise datastore_errors.BadArgumentError(
'RHS missing value for property (%s)' % self.__order.property)
if self.__order.direction == self.ASCENDING:
return cmp_compat.cmp(min(lhs_values), min(rhs_values))
else:
return cmp_compat.cmp(max(rhs_values), max(lhs_values))
@classmethod
def _from_pb(cls, order_pb):
self = cls.__new__(cls)
self.__order = order_pb
return self
def _to_pb(self):
"""Returns the internal only pb representation."""
return self.__order
def _to_pb_v1(self, adapter):
"""Returns a googledatastore.PropertyOrder representation of the order.
Args:
adapter: A datastore_rpc.AbstractAdapter.
"""
v1_order = googledatastore.PropertyOrder()
adapter.get_query_converter().v3_order_to_v1_order(self.__order, v1_order)
return v1_order
def __getstate__(self):
raise pickle.PicklingError(
'Pickling of datastore_query.PropertyOrder is unsupported.')
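# Illustrative sketch, not part of the original module: constructing property
# orders. The property name 'price' is hypothetical.
def _example_property_orders():
  ascending = PropertyOrder('price')  # direction defaults to ASCENDING
  descending = PropertyOrder('price', PropertyOrder.DESCENDING)
  # reversed() flips the direction, so the last value sorts ascending again.
  return ascending, descending, descending.reversed()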
class CompositeOrder(Order):
"""An immutable class that represents a sequence of Orders.
This class proactively flattens sub-orders that are of type CompositeOrder.
For example:
CompositeOrder([O1, CompositeOrder([02, 03]), O4])
is equivalent to:
CompositeOrder([O1, 02, 03, O4])
"""
def __init__(self, orders):
"""Constructor.
Args:
orders: A list of Orders which are applied in order.
"""
if not isinstance(orders, (list, tuple)):
raise datastore_errors.BadArgumentError(
'orders argument should be list or tuple (%r)' % (orders,))
super(CompositeOrder, self).__init__()
flattened = []
for order in orders:
if isinstance(order, CompositeOrder):
flattened.extend(order._orders)
elif isinstance(order, Order):
flattened.append(order)
else:
raise datastore_errors.BadArgumentError(
'orders argument should only contain Order (%r)' % (order,))
self._orders = tuple(flattened)
@property
def orders(self):
return self._orders
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, list(self.orders))
@datastore_rpc._positional(1)
def reversed(self, group_by=None):
return CompositeOrder([order.reversed(group_by=group_by)
for order in self._orders])
def _get_prop_names(self):
names = set()
for order in self._orders:
names |= order._get_prop_names()
return names
def _key(self, lhs_value_map):
result = []
for order in self._orders:
result.append(order._key(lhs_value_map))
return tuple(result)
def _cmp(self, lhs_value_map, rhs_value_map):
for order in self._orders:
result = order._cmp(lhs_value_map, rhs_value_map)
if result != 0:
return result
return 0
def size(self):
"""Returns the number of sub-orders the instance contains."""
return len(self._orders)
def _to_pbs(self):
"""Returns an ordered list of internal only pb representations."""
return [order._to_pb() for order in self._orders]
def _to_pb_v1(self, adapter):
"""Returns an ordered list of googledatastore.PropertyOrder.
Args:
adapter: A datastore_rpc.AbstractAdapter
"""
return [order._to_pb_v1(adapter) for order in self._orders]
def __eq__(self, other):
if self.__class__ is other.__class__:
return super(CompositeOrder, self).__eq__(other)
if len(self._orders) == 1:
result = self._orders[0].__eq__(other)
if result is NotImplemented and hasattr(other, '__eq__'):
return other.__eq__(self._orders[0])
return result
return NotImplemented
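# Illustrative sketch, not part of the original module: composing orders and
# using the result to sort entities in memory, as described in the Order
# docstring. `entities` is assumed to be a list of entity_pb2.EntityProto
# values that contain the (hypothetical) 'category' and 'price' properties.
def _example_sort_with_composite_order(entities):
  order = CompositeOrder([PropertyOrder('category'),
                          PropertyOrder('price', PropertyOrder.DESCENDING)])
  return sorted(entities, key=order.key)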
class FetchOptions(datastore_rpc.Configuration):
"""An immutable class that contains all options for fetching results.
These options apply to any request that pulls results from a query.
This class reserves the right to define configuration options of any name
except those that start with 'user_'. External subclasses should only define
functions or variables with names that start with 'user_'.
Options are set by passing keyword arguments to the constructor corresponding
to the configuration options defined below and in datastore_rpc.Configuration.
This object can be used as the default config for a datastore_rpc.Connection
but in that case some options will be ignored, see option documentation below
for details.
"""
@datastore_rpc.ConfigOption
def produce_cursors(value):
"""If a Cursor should be returned with the fetched results.
Raises:
datastore_errors.BadArgumentError if value is not a bool.
"""
if not isinstance(value, bool):
raise datastore_errors.BadArgumentError(
'produce_cursors argument should be bool (%r)' % (value,))
return value
@datastore_rpc.ConfigOption
def offset(value):
"""The number of results to skip before returning the first result.
Only applies to the first request it is used with and is ignored if present
on datastore_rpc.Connection.config.
Raises:
datastore_errors.BadArgumentError if value is not an integer or is less
than zero.
"""
datastore_types.ValidateInteger(value,
'offset',
datastore_errors.BadArgumentError,
zero_ok=True)
return value
@datastore_rpc.ConfigOption
def batch_size(value):
"""The number of results to attempt to retrieve in a batch.
Raises:
datastore_errors.BadArgumentError if value is not an integer or is not
greater than zero.
"""
datastore_types.ValidateInteger(value,
'batch_size',
datastore_errors.BadArgumentError)
return value
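# Illustrative sketch, not part of the original module: FetchOptions are set
# through keyword arguments matching the ConfigOptions defined above; the
# values here are arbitrary examples.
def _example_fetch_options():
  return FetchOptions(offset=10, batch_size=100, produce_cursors=True)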
class QueryOptions(FetchOptions):
"""An immutable class that contains all options for running a query.
This class contains options that control the execution process (deadline,
batch_size, read_policy, etc.) and what part of the query results are returned
(keys_only, projection, offset, limit, etc.). Options that control the contents
of the query results are specified on the datastore_query.Query directly.
This class reserves the right to define configuration options of any name
except those that start with 'user_'. External subclasses should only define
functions or variables with names that start with 'user_'.
Options are set by passing keyword arguments to the constructor corresponding
to the configuration options defined below and in FetchOptions and
datastore_rpc.Configuration.
This object can be used as the default config for a datastore_rpc.Connection
but in that case some options will be ignored, see below for details.
"""
ORDER_FIRST = datastore_pb.Query.ORDER_FIRST
ANCESTOR_FIRST = datastore_pb.Query.ANCESTOR_FIRST
FILTER_FIRST = datastore_pb.Query.FILTER_FIRST
_HINTS = frozenset([ORDER_FIRST, ANCESTOR_FIRST, FILTER_FIRST])
@datastore_rpc.ConfigOption
def keys_only(value):
"""If the query should only return keys.
Raises:
datastore_errors.BadArgumentError if value is not a bool.
"""
if not isinstance(value, bool):
raise datastore_errors.BadArgumentError(
'keys_only argument should be bool (%r)' % (value,))
return value
@datastore_rpc.ConfigOption
def projection(value):
"""A list or tuple of property names to project.
If None, the entire entity is returned.
Specifying a projection:
- may change the index requirements for the given query;
- will cause a partial entity to be returned;
- will cause only entities that contain those properties to be returned;
A partial entity only contains the property name and value for properties
in the projection (meaning and multiple will not be set). It will also
contain only a single value for any multi-valued property. However, if a
multi-valued property is specified in the order, an inequality property, or
the projected properties, the entity will be returned multiple times, once
for each unique combination of values.
However, projection queries are significantly faster than normal queries.
Raises:
datastore_errors.BadArgumentError if value is empty or not a list or tuple
of strings.
"""
if isinstance(value, list):
value = tuple(value)
elif not isinstance(value, tuple):
raise datastore_errors.BadArgumentError(
'projection argument should be a list or tuple (%r)' % (value,))
if not value:
raise datastore_errors.BadArgumentError(
'projection argument cannot be empty')
for prop in value:
if not isinstance(prop, six.string_types + (six.binary_type,)):
raise datastore_errors.BadArgumentError(
'projection argument should contain only strings (%r)' % (prop,))
return value
@datastore_rpc.ConfigOption
def limit(value):
"""Limit on the number of results to return.
Raises:
datastore_errors.BadArgumentError if value is not an integer or is less
than zero.
"""
datastore_types.ValidateInteger(value,
'limit',
datastore_errors.BadArgumentError,
zero_ok=True)
return value
@datastore_rpc.ConfigOption
def prefetch_size(value):
"""Number of results to attempt to return on the initial request.
Raises:
datastore_errors.BadArgumentError if value is not an integer or is less
than zero.
"""
datastore_types.ValidateInteger(value,
'prefetch_size',
datastore_errors.BadArgumentError,
zero_ok=True)
return value
@datastore_rpc.ConfigOption
def start_cursor(value):
Cursor to use as a start position.
Ignored if present on datastore_rpc.Connection.config.
Raises:
datastore_errors.BadArgumentError if value is not a Cursor.
"""
if not isinstance(value, Cursor):
raise datastore_errors.BadArgumentError(
'start_cursor argument should be datastore_query.Cursor (%r)' %
(value,))
return value
@datastore_rpc.ConfigOption
def end_cursor(value):
"""Cursor to use as an end position.
Ignored if present on datastore_rpc.Connection.config.
Raises:
datastore_errors.BadArgumentError if value is not a Cursor.
"""
if not isinstance(value, Cursor):
raise datastore_errors.BadArgumentError(
'end_cursor argument should be datastore_query.Cursor (%r)' %
(value,))
return value
@datastore_rpc.ConfigOption
def hint(value):
"""Hint on how the datastore should plan the query.
Raises:
datastore_errors.BadArgumentError if value is not a known hint.
"""
if value not in QueryOptions._HINTS:
raise datastore_errors.BadArgumentError('Unknown query hint (%r)' %
(value,))
return value
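# Illustrative sketch, not part of the original module: QueryOptions extend
# FetchOptions with query-specific settings; the values here are arbitrary
# examples.
def _example_query_options():
  return QueryOptions(limit=20, offset=5, keys_only=True, produce_cursors=True)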
class Cursor(_BaseComponent):
"""An immutable class that represents a relative position in a query.
The position denoted by a Cursor is relative to a result in a query even
if the result has been removed from the given query. It usually points to the
position immediately after the last result returned by a batch.
A cursor should only be used on a query with an identical signature to the
one that produced it or on a query with its sort order reversed.
"""
@datastore_rpc._positional(1)
def __init__(self, urlsafe=None, _cursor_bytes=None):
"""Constructor.
A Cursor constructed with no arguments points to the first result of any
query. If such a Cursor is used as an end_cursor no results will ever be
returned.
"""
super(Cursor, self).__init__()
if urlsafe is not None:
if _cursor_bytes is not None:
raise datastore_errors.BadArgumentError(
'Can only specify one of urlsafe and _cursor_bytes')
_cursor_bytes = self._urlsafe_to_bytes(urlsafe)
if _cursor_bytes is not None:
self.__cursor_bytes = _cursor_bytes
else:
self.__cursor_bytes = six.binary_type()
def __repr__(self):
arg = six.ensure_str(self.to_websafe_string())
if arg:
arg = '<%s>' % arg
return '%s(%s)' % (self.__class__.__name__, arg)
def reversed(self):
"""DEPRECATED. It is no longer necessary to call reversed() on cursors.
A cursor returned by a query may also be used in a query whose sort order
has been reversed. This method returns a copy of the original cursor.
"""
return Cursor(_cursor_bytes=self.__cursor_bytes)
def to_bytes(self):
"""Serialize cursor as a byte string."""
return self.__cursor_bytes
@staticmethod
def from_bytes(cursor):
"""Gets a Cursor given its byte string serialized form.
The serialized form of a cursor may change in a non-backwards compatible
way. In this case cursors must be regenerated from a new Query request.
Args:
cursor: A serialized cursor as returned by .to_bytes.
Returns:
A Cursor.
Raises:
datastore_errors.BadValueError if the cursor argument does not represent a
serialized cursor.
"""
return Cursor(_cursor_bytes=cursor)
def urlsafe(self):
"""Serialize cursor as a websafe string.
Returns:
A base64-encoded serialized cursor.
"""
return base64.urlsafe_b64encode(self.to_bytes())
to_websafe_string = urlsafe
@staticmethod
def from_websafe_string(cursor):
"""Gets a Cursor given its websafe serialized form.
The serialized form of a cursor may change in a non-backwards compatible
way. In this case cursors must be regenerated from a new Query request.
Args:
cursor: A serialized cursor as returned by .to_websafe_string.
Returns:
A Cursor.
Raises:
datastore_errors.BadValueError if the cursor argument is not a string
type or does not represent a serialized cursor.
"""
decoded_bytes = Cursor._urlsafe_to_bytes(cursor)
return Cursor.from_bytes(decoded_bytes)
@staticmethod
def _urlsafe_to_bytes(cursor):
if not isinstance(cursor, six.string_types + (six.binary_type,)):
raise datastore_errors.BadValueError(
'cursor argument should be str or unicode (%r)' % (cursor,))
try:
decoded_bytes = base64.urlsafe_b64decode(
six.ensure_binary(cursor, 'ascii'))
except (ValueError, TypeError) as e:
raise datastore_errors.BadValueError(
'Invalid cursor %s. Details: %s' % (cursor, e))
return decoded_bytes
def advance(self, offset, query, conn):
"""Advances a Cursor by the given offset.
Args:
offset: The amount to advance the current query.
query: A Query identical to the one this cursor was created from.
conn: The datastore_rpc.Connection to use.
Returns:
A new cursor that is advanced by offset using the given query.
"""
datastore_types.ValidateInteger(offset,
'offset',
datastore_errors.BadArgumentError)
if not isinstance(query, Query):
raise datastore_errors.BadArgumentError(
'query argument should be datastore_query.Query (%r)' % (query,))
query_options = QueryOptions(
start_cursor=self, offset=offset, limit=0, produce_cursors=True)
return query.run(conn, query_options).next_batch(
Batcher.AT_LEAST_OFFSET).cursor(0)
def __setstate__(self, state):
if '_Cursor__compiled_cursor' in state:
self.__cursor_bytes = state['_Cursor__compiled_cursor'].SerializeToString()
else:
self.__dict__ = state
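# Illustrative sketch, not part of the original module: a Cursor survives a
# round trip through both its byte and websafe string serializations.
# `cursor` is assumed to be a Cursor obtained from a previous query batch.
def _example_cursor_round_trip(cursor):
  restored_from_bytes = Cursor.from_bytes(cursor.to_bytes())
  restored_from_string = Cursor.from_websafe_string(cursor.to_websafe_string())
  return restored_from_bytes, restored_from_string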
class _QueryKeyFilter(_BaseComponent):
"""A class that implements the key filters available on a Query."""
@datastore_rpc._positional(1)
def __init__(self, app=None, namespace=None, kind=None, ancestor=None):
"""Constructs a _QueryKeyFilter.
If app/namespace and ancestor are not defined, the app/namespace set in the
environment is used.
Args:
app: a string representing the required app id or None.
namespace: a string representing the required namespace or None.
kind: a string representing the required kind or None.
ancestor: an entity_pb2.Reference representing the required ancestor or
None.
Raises:
datastore_errors.BadArgumentError if app and ancestor.app do not match or
an unexpected type is passed in for any argument.
"""
if kind is not None:
datastore_types.ValidateString(
kind, 'kind', datastore_errors.BadArgumentError)
if ancestor is not None:
if not isinstance(ancestor, entity_pb2.Reference):
raise datastore_errors.BadArgumentError(
'ancestor argument should be entity_pb2.Reference (%r)' %
(ancestor,))
ancestor_app = six.ensure_binary(ancestor.app)
if app is None:
app = ancestor_app
elif six.ensure_binary(app) != ancestor_app:
raise datastore_errors.BadArgumentError(
'ancestor argument should match app ("%r" != "%r")' %
(ancestor.app, app))
ancestor_namespace = six.ensure_binary(ancestor.name_space)
if namespace is None:
namespace = ancestor_namespace
elif six.ensure_binary(namespace) != ancestor_namespace:
raise datastore_errors.BadArgumentError(
'ancestor argument should match namespace ("%r" != "%r")' %
(six.ensure_binary(namespace), ancestor_namespace))
pb = entity_pb2.Reference()
pb.CopyFrom(ancestor)
ancestor = pb
self.__ancestor = ancestor
self.__path = list(ancestor.path.element)
else:
self.__ancestor = None
self.__path = None
super(_QueryKeyFilter, self).__init__()
self.__app = six.ensure_text(datastore_types.ResolveAppId(app), 'utf-8')
self.__namespace = (
six.ensure_text(datastore_types.ResolveNamespace(namespace), 'utf-8'))
self.__kind = kind
@property
def app(self):
return self.__app
@property
def namespace(self):
return self.__namespace
@property
def kind(self):
return self.__kind
@property
def ancestor(self):
return self.__ancestor
def __call__(self, entity_or_reference):
"""Apply the filter.
Accepts either an entity or a reference to avoid the need to extract keys
from entities when we have a list of entities (which is a common case).
Args:
entity_or_reference: Either an entity_pb2.EntityProto or
entity_pb2.Reference.
"""
if isinstance(entity_or_reference, entity_pb2.Reference):
key = entity_or_reference
elif isinstance(entity_or_reference, entity_pb2.EntityProto):
key = entity_or_reference.key
else:
raise datastore_errors.BadArgumentError(
'entity_or_reference argument must be an entity_pb2.EntityProto ' +
six.ensure_str('or entity_pb2.Reference (%r)' %
(entity_or_reference), 'utf-8'))
return (six.ensure_text(key.app, 'utf-8') == self.__app and
six.ensure_text(key.name_space, 'utf-8') == self.__namespace and
(not self.__kind or key.path.element[-1].type == self.__kind) and
(not self.__path or
key.path.element[0:len(self.__path)] == self.__path))
def _to_pb(self):
"""Returns an internal pb representation."""
pb = datastore_pb.Query()
pb.app = self.__app
datastore_types.SetNamespace(pb, self.__namespace)
if self.__kind is not None:
pb.kind = self.__kind
if self.__ancestor:
ancestor = pb.ancestor
ancestor.CopyFrom(self.__ancestor)
return pb
def _to_pb_v1(self, adapter):
"""Returns a v1 internal proto representation of the query key filter.
Args:
adapter: A datastore_rpc.AbstractAdapter.
Returns:
A tuple (googledatastore.RunQueryRequest, googledatastore.Filter).
The second tuple value is a Filter representing the ancestor portion of the
query. If there is no ancestor constraint, this value will be None.
"""
pb = googledatastore.RunQueryRequest()
partition_id = pb.partition_id
partition_id.project_id = (
adapter.get_entity_converter().app_to_project_id(self.__app))
if self.__namespace:
partition_id.namespace_id = self.__namespace
if self.__kind is not None:
pb.query.kind.add().name = self.__kind
ancestor_filter = None
if self.__ancestor:
ancestor_filter = googledatastore.Filter()
ancestor_prop_filter = ancestor_filter.property_filter
ancestor_prop_filter.op = (
googledatastore.PropertyFilter.HAS_ANCESTOR)
prop_pb = ancestor_prop_filter.property
prop_pb.name = datastore_types.KEY_SPECIAL_PROPERTY
adapter.get_entity_converter().v3_to_v1_key(
self.ancestor,
ancestor_prop_filter.value.key_value)
return pb, ancestor_filter
class _BaseQuery(_BaseComponent):
"""A base class for query implementations."""
def run(self, conn, query_options=None):
"""Runs the query using provided datastore_rpc.Connection.
Args:
conn: The datastore_rpc.Connection to use
query_options: Optional query options to use
Returns:
A Batcher that implicitly fetches query results asynchronously.
Raises:
datastore_errors.BadArgumentError if any of the arguments are invalid.
"""
return Batcher(query_options, self.run_async(conn, query_options))
def run_async(self, conn, query_options=None):
"""Runs the query using the provided datastore_rpc.Connection.
Args:
conn: the datastore_rpc.Connection on which to run the query.
query_options: Optional QueryOptions with which to run the query.
Returns:
An async object that can be used to grab the first Batch. Additional
batches can be retrieved by calling Batch.next_batch/next_batch_async.
Raises:
datastore_errors.BadArgumentError if any of the arguments are invalid.
"""
raise NotImplementedError
def __getstate__(self):
raise pickle.PicklingError(
'Pickling of %r is unsupported.' % self)
class Query(_BaseQuery):
"""An immutable class that represents a query signature.
A query signature consists of a source of entities (specified as app,
namespace and optionally kind and ancestor) as well as a FilterPredicate,
grouping and a desired ordering.
"""
@datastore_rpc._positional(1)
def __init__(self, app=None, namespace=None, kind=None, ancestor=None,
filter_predicate=None, group_by=None, order=None,
read_time_us=None):
"""Constructor.
Args:
app: Optional app to query, derived from the environment if not specified.
namespace: Optional namespace to query, derived from the environment if
not specified.
kind: Optional kind to query.
ancestor: Optional ancestor to query, an entity_pb2.Reference.
filter_predicate: Optional FilterPredicate by which to restrict the query.
group_by: Optional list of properties to group the results by.
order: Optional Order in which to return results.
read_time_us: Optional timestamp to read the storage from. Internal use
only.
Raises:
datastore_errors.BadArgumentError if any argument is invalid.
"""
super(Query, self).__init__()
if filter_predicate is not None and not isinstance(filter_predicate,
FilterPredicate):
raise datastore_errors.BadArgumentError(
'filter_predicate should be datastore_query.FilterPredicate (%r)' %
(filter_predicate,))
if isinstance(order, CompositeOrder):
if order.size() == 0:
order = None
elif isinstance(order, Order):
order = CompositeOrder([order])
elif order is not None:
raise datastore_errors.BadArgumentError(
'order should be Order (%r)' % (order,))
if group_by is not None:
if isinstance(group_by, list):
group_by = tuple(group_by)
elif not isinstance(group_by, tuple):
raise datastore_errors.BadArgumentError(
'group_by argument should be a list or tuple (%r)' % (group_by,))
if not group_by:
raise datastore_errors.BadArgumentError(
'group_by argument cannot be empty')
for prop in group_by:
if not isinstance(prop, six.string_types + (six.binary_type,)):
raise datastore_errors.BadArgumentError(
'group_by argument should contain only strings (%r)' % (prop,))
self._key_filter = _QueryKeyFilter(app=app, namespace=namespace, kind=kind,
ancestor=ancestor)
self._order = order
self._filter_predicate = filter_predicate
self._group_by = group_by
self._read_time_us = read_time_us
@property
def app(self):
return self._key_filter.app
@property
def namespace(self):
return self._key_filter.namespace
@property
def kind(self):
return self._key_filter.kind
@property
def ancestor(self):
return self._key_filter.ancestor
@property
def filter_predicate(self):
return self._filter_predicate
@property
def order(self):
return self._order
@property
def group_by(self):
return self._group_by
@property
def read_time_us(self):
return self._read_time_us
def __repr__(self):
args = []
args.append('app=%r' % six.ensure_str(self.app))
ns = self.namespace
if ns:
args.append('namespace=%r' % six.ensure_str(ns))
kind = self.kind
if kind is not None:
args.append('kind=%r' % six.ensure_str(kind))
ancestor = self.ancestor
if ancestor is not None:
websafe = base64.urlsafe_b64encode(ancestor.SerializeToString())
args.append('ancestor=<%s>' % six.ensure_str(websafe))
filter_predicate = self.filter_predicate
if filter_predicate is not None:
args.append('filter_predicate=%r' % filter_predicate)
order = self.order
if order is not None:
args.append('order=%r' % order)
group_by = self.group_by
if group_by is not None:
args.append('group_by=%r' % (tuple(six.ensure_str(x) for x in group_by),))
read_time_us = self.read_time_us
if read_time_us is not None:
args.append('read_time_us=%r' % (read_time_us,))
return '%s(%s)' % (self.__class__.__name__, ', '.join(args))
def run_async(self, conn, query_options=None):
if not isinstance(conn, datastore_rpc.BaseConnection):
raise datastore_errors.BadArgumentError(
'conn should be a datastore_rpc.BaseConnection (%r)' % (conn,))
if not QueryOptions.is_configuration(query_options):
query_options = QueryOptions(config=query_options)
start_cursor = query_options.start_cursor
if not start_cursor and query_options.produce_cursors:
start_cursor = Cursor()
if conn._api_version == datastore_rpc._CLOUD_DATASTORE_V1:
req = self._to_pb_v1(conn, query_options)
else:
req = self._to_pb(conn, query_options)
return Batch.create_async(self, query_options, conn, req,
start_cursor=start_cursor)
@classmethod
def _from_pb(cls, query_pb):
kind = query_pb.HasField('kind') and query_pb.kind or None
ancestor = query_pb.HasField('ancestor') and query_pb.ancestor or None
filter_predicate = None
if query_pb.filter:
filter_predicate = CompositeFilter(
CompositeFilter.AND,
[PropertyFilter._from_pb(filter_pb) for filter_pb in query_pb.filter])
order = None
if query_pb.order:
order = CompositeOrder(
[PropertyOrder._from_pb(order_pb) for order_pb in query_pb.order])
group_by = None
if query_pb.group_by_property_name:
group_by = tuple(
six.ensure_text(name) for name in query_pb.group_by_property_name)
read_time_us = None
if query_pb.HasField('read_time_us'):
read_time_us = query_pb.read_time_us
return Query(
app=query_pb.app,
namespace=query_pb.name_space,
kind=kind,
ancestor=ancestor,
filter_predicate=filter_predicate,
order=order,
group_by=group_by,
read_time_us=read_time_us)
def _to_pb_v1(self, conn, query_options):
"""Returns a googledatastore.RunQueryRequest."""
v1_req, v1_ancestor_filter = self._key_filter._to_pb_v1(conn.adapter)
v1_query = v1_req.query
if self.filter_predicate:
filter_predicate_pb = self._filter_predicate._to_pb_v1(conn.adapter)
if self.filter_predicate and v1_ancestor_filter:
comp_filter_pb = v1_query.filter.composite_filter
comp_filter_pb.op = googledatastore.CompositeFilter.AND
comp_filter_pb.filters.add().CopyFrom(filter_predicate_pb)
comp_filter_pb.filters.add().CopyFrom(v1_ancestor_filter)
elif self.filter_predicate:
v1_query.filter.CopyFrom(filter_predicate_pb)
elif v1_ancestor_filter:
v1_query.filter.CopyFrom(v1_ancestor_filter)
if self._order:
for order in self._order._to_pb_v1(conn.adapter):
v1_query.order.add().CopyFrom(order)
if QueryOptions.keys_only(query_options, conn.config):
prop_ref_pb = v1_query.projection.add().property
prop_ref_pb.name = datastore_pbs.PROPERTY_NAME_KEY
projection = QueryOptions.projection(query_options, conn.config)
self._validate_projection_and_group_by(projection, self._group_by)
if projection:
for prop in projection:
prop_ref_pb = v1_query.projection.add().property
prop_ref_pb.name = prop
if self._group_by:
for group_by in self._group_by:
v1_query.distinct_on.add().name = group_by
limit = QueryOptions.limit(query_options, conn.config)
if limit is not None:
v1_query.limit.value = limit
count = QueryOptions.batch_size(query_options, conn.config)
if count is None:
count = QueryOptions.prefetch_size(query_options, conn.config)
if count is not None:
pass
if query_options.offset:
v1_query.offset = query_options.offset
if query_options.start_cursor is not None:
v1_query.start_cursor = query_options.start_cursor.to_bytes()
if query_options.end_cursor is not None:
v1_query.end_cursor = query_options.end_cursor.to_bytes()
conn._set_request_read_policy(v1_req, query_options)
conn._set_request_transaction(v1_req)
return v1_req
def _to_pb(self, conn, query_options):
"""Returns the internal only pb representation."""
pb = self._key_filter._to_pb()
if self._filter_predicate:
for f in self._filter_predicate._to_pbs():
pb.filter.add().CopyFrom(f)
if self._order:
for order in self._order._to_pbs():
pb.order.add().CopyFrom(order)
if QueryOptions.keys_only(query_options, conn.config):
pb.keys_only = True
projection = QueryOptions.projection(query_options, conn.config)
self._validate_projection_and_group_by(projection, self._group_by)
if projection:
pb.property_name.extend(projection)
if self._group_by:
pb.group_by_property_name.extend(self._group_by)
if QueryOptions.produce_cursors(query_options, conn.config):
pb.compile = True
limit = QueryOptions.limit(query_options, conn.config)
if limit is not None:
pb.limit = limit
count = QueryOptions.prefetch_size(query_options, conn.config)
if count is None:
count = QueryOptions.batch_size(query_options, conn.config)
if count is not None:
pb.count = count
if query_options.offset:
pb.offset = query_options.offset
if query_options.start_cursor is not None:
try:
pb.compiled_cursor.ParseFromString(
query_options.start_cursor.to_bytes())
except message.DecodeError:
raise datastore_errors.BadValueError('invalid cursor')
if query_options.end_cursor is not None:
try:
pb.end_compiled_cursor.ParseFromString(
query_options.end_cursor.to_bytes())
except message.DecodeError:
raise datastore_errors.BadValueError('invalid cursor')
if ((query_options.hint == QueryOptions.ORDER_FIRST and len(pb.order)) or
(query_options.hint == QueryOptions.ANCESTOR_FIRST and
pb.HasField('ancestor')) or
(query_options.hint == QueryOptions.FILTER_FIRST and pb.filter)):
pb.hint = query_options.hint
if self.read_time_us is not None:
pb.read_time_us = self.read_time_us
conn._set_request_read_policy(pb, query_options)
conn._set_request_transaction(pb)
return pb
def _validate_projection_and_group_by(self, projection, group_by):
"""Validates that a query's projection and group by match.
Args:
projection: A set of string property names in the projection.
group_by: A set of string property names in the group by.
Raises:
datastore_errors.BadRequestError: if the projection and group
by sets are not equal.
"""
if projection:
if group_by:
extra = set(projection) - set(group_by)
if extra:
raise datastore_errors.BadRequestError(
'projection includes properties not in the group_by argument: %s'
% extra)
elif group_by:
raise datastore_errors.BadRequestError(
'cannot specify group_by without a projection')
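# Illustrative sketch, not part of the original module: building and running a
# query. `conn` is assumed to be a datastore_rpc.BaseConnection; the kind and
# property names are hypothetical, and app/namespace are derived from the
# environment because they are omitted.
def _example_run_query(conn):
  query = Query(kind='Article',
                order=PropertyOrder('published', PropertyOrder.DESCENDING))
  # run() returns a Batcher that fetches batches of results asynchronously.
  return query.run(conn, QueryOptions(limit=10, produce_cursors=True))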
def apply_query(query, entities, _key=None):
"""Performs the given query on a set of in-memory results.
This function can perform queries impossible in the datastore (e.g. a query
with multiple inequality filters on different properties) because all
operations are done in memory. For queries that can also be executed on the
datastore, the results produced by this function may not use the same
implicit ordering as the datastore. To ensure compatibility, explicit
ordering must be used (e.g. 'ORDER BY ineq_prop, ..., __key__').
Order by __key__ should always be used when a consistent result is desired
(unless there is a sort order on another globally unique property).
Args:
query: a datastore_query.Query to apply
entities: a list of results, of arbitrary type, on which to apply the query.
_key: a function that takes an element of the result array as an argument
and must return an entity_pb2.EntityProto. If not specified, the
identity function is used (and entities must be a list of
entity_pb2.EntityProto).
Returns:
A subset of entities, filtered and ordered according to the query.
"""
if not isinstance(query, Query):
raise datastore_errors.BadArgumentError(
'query argument must be a datastore_query.Query (%r)' % (query,))
if not isinstance(entities, list):
raise datastore_errors.BadArgumentError(
'entities argument must be a list (%r)' % (entities,))
key = _key or (lambda x: x)
filtered_results = [r for r in entities if query._key_filter(key(r))]
if not query._order:
if query._filter_predicate:
return [r for r in filtered_results if query._filter_predicate(key(r))]
return filtered_results
names = query._order._get_prop_names()
if query._filter_predicate:
names |= query._filter_predicate._get_prop_names()
exists_filter = _PropertyExistsFilter(names)
value_maps = []
for result in filtered_results:
value_map = _make_key_value_map(key(result), names)
if exists_filter._apply(value_map) and (
not query._filter_predicate or
query._filter_predicate._prune(value_map)):
value_map['__result__'] = result
value_maps.append(value_map)
value_maps.sort(key=functools.cmp_to_key(query._order._cmp))
return [value_map['__result__'] for value_map in value_maps]
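# Illustrative sketch, not part of the original module: applying a query to an
# in-memory list. `query` is assumed to be a datastore_query.Query and
# `entities` a list of entity_pb2.EntityProto; a _key callable could be passed
# instead if the list holds wrapper objects.
def _example_apply_query_in_memory(query, entities):
  return apply_query(query, entities)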
class _AugmentedQuery(_BaseQuery):
"""A query that combines a datastore query with in-memory filters/results."""
@datastore_rpc._positional(2)
def __init__(self, query, in_memory_results=None, in_memory_filter=None,
max_filtered_count=None):
"""Constructor for _AugmentedQuery.
Do not call directly. Use the utility functions instead (e.g.
datastore_query.inject_results)
Args:
query: A datastore_query.Query object to augment.
in_memory_results: a list of pre-sorted and filtered results to add to the
stream of datastore results, or None.
in_memory_filter: a set of in-memory filters to apply to the datastore
results or None.
max_filtered_count: the maximum number of datastore entities that will be
filtered out by in_memory_filter if known.
"""
if not isinstance(query, Query):
raise datastore_errors.BadArgumentError(
'query argument should be datastore_query.Query (%r)' % (query,))
if (in_memory_filter is not None and
not isinstance(in_memory_filter, FilterPredicate)):
raise datastore_errors.BadArgumentError(
'in_memory_filter argument should be ' + six.ensure_str(
'datastore_query.FilterPredicate (%r)' %
(in_memory_filter,), 'utf-8'))
if (in_memory_results is not None and
not isinstance(in_memory_results, list)):
raise datastore_errors.BadArgumentError(
'in_memory_results argument should be a list of' +
six.ensure_str('entity_pb2.EntityProto (%r)' %
(in_memory_results,), 'utf-8'))
datastore_types.ValidateInteger(max_filtered_count,
'max_filtered_count',
empty_ok=True,
zero_ok=True)
self._query = query
self._max_filtered_count = max_filtered_count
self._in_memory_filter = in_memory_filter
self._in_memory_results = in_memory_results
@property
def app(self):
return self._query._key_filter.app
@property
def namespace(self):
return self._query._key_filter.namespace
@property
def kind(self):
return self._query._key_filter.kind
@property
def ancestor(self):
return self._query._key_filter.ancestor
@property
def filter_predicate(self):
return self._query._filter_predicate
@property
def order(self):
return self._query._order
@property
def group_by(self):
return self._query._group_by
def run_async(self, conn, query_options=None):
if not isinstance(conn, datastore_rpc.BaseConnection):
raise datastore_errors.BadArgumentError(
'conn should be a datastore_rpc.BaseConnection (%r)' % (conn,))
if not QueryOptions.is_configuration(query_options):
query_options = QueryOptions(config=query_options)
if self._query._order:
changes = {'keys_only': False}
else:
changes = {}
if self._in_memory_filter or self._in_memory_results:
in_memory_offset = query_options.offset
in_memory_limit = query_options.limit
if in_memory_limit is not None:
if self._in_memory_filter is None:
changes['limit'] = in_memory_limit
elif self._max_filtered_count is not None:
changes['limit'] = in_memory_limit + self._max_filtered_count
else:
changes['limit'] = None
if in_memory_offset:
changes['offset'] = None
if changes.get('limit', None) is not None:
changes['limit'] += in_memory_offset
else:
in_memory_offset = None
else:
in_memory_offset = None
in_memory_limit = None
modified_query_options = QueryOptions(config=query_options, **changes)
if conn._api_version == datastore_rpc._CLOUD_DATASTORE_V1:
req = self._query._to_pb_v1(conn, modified_query_options)
else:
req = self._query._to_pb(conn, modified_query_options)
start_cursor = query_options.start_cursor
if not start_cursor and query_options.produce_cursors:
start_cursor = Cursor()
return _AugmentedBatch.create_async(self, modified_query_options, conn, req,
in_memory_offset=in_memory_offset,
in_memory_limit=in_memory_limit,
start_cursor=start_cursor)
@datastore_rpc._positional(1)
def inject_results(query, updated_entities=None, deleted_keys=None):
"""Creates a query object that will inject changes into results.
Args:
query: The datastore_query.Query to augment
updated_entities: A list of entity_pb2.EntityProto's that have been updated
and should take priority over any values returned by query.
deleted_keys: A list of entity_pb2.Reference's for entities that have been
deleted and should be removed from query results.
Returns:
A datastore_query._AugmentedQuery if in-memory filtering is required,
query otherwise.
"""
if not isinstance(query, Query):
raise datastore_errors.BadArgumentError(
'query argument should be datastore_query.Query (%r)' % (query,))
overridden_keys = set()
if deleted_keys is not None:
if not isinstance(deleted_keys, list):
raise datastore_errors.BadArgumentError(
'deleted_keys argument must be a list (%r)' % (deleted_keys,))
deleted_keys = list(six.moves.filter(query._key_filter, deleted_keys))
for key in deleted_keys:
overridden_keys.add(datastore_types.ReferenceToKeyValue(key))
if updated_entities is not None:
if not isinstance(updated_entities, list):
raise datastore_errors.BadArgumentError(
'updated_entities argument must be a list (%r)' % (updated_entities,))
updated_entities = list(
six.moves.filter(query._key_filter, updated_entities))
for entity in updated_entities:
overridden_keys.add(datastore_types.ReferenceToKeyValue(entity.key))
updated_entities = apply_query(query, updated_entities)
else:
updated_entities = []
if not overridden_keys:
return query
return _AugmentedQuery(query,
in_memory_filter=_IgnoreFilter(overridden_keys),
in_memory_results=updated_entities,
max_filtered_count=len(overridden_keys))
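# Illustrative sketch, not part of the original module: overlaying uncommitted
# changes on a query. `query` is assumed to be a datastore_query.Query,
# `updated` a list of entity_pb2.EntityProto and `deleted` a list of
# entity_pb2.Reference; the result is either the original query or an
# _AugmentedQuery that filters and injects results in memory.
def _example_inject_pending_changes(query, updated, deleted):
  return inject_results(query, updated_entities=updated, deleted_keys=deleted)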
class _BatchShared(object):
"""Data shared among the batches of a query."""
def __init__(self, query, query_options, conn,
augmented_query=None, initial_offset=None):
self.__query = query
self.__query_options = query_options
self.__conn = conn
self.__augmented_query = augmented_query
self.__was_first_result_processed = False
if initial_offset is None:
initial_offset = query_options.offset or 0
self.__expected_offset = initial_offset
self.__remaining_limit = query_options.limit
@property
def query(self):
return self.__query
@property
def query_options(self):
return self.__query_options
@property
def conn(self):
return self.__conn
@property
def augmented_query(self):
return self.__augmented_query
@property
def keys_only(self):
return self.__keys_only
@property
def compiled_query(self):
return self.__compiled_query
@property
def expected_offset(self):
return self.__expected_offset
@property
def remaining_limit(self):
return self.__remaining_limit
@property
def index_list(self):
"""Returns the list of indexes used by the query.
Possibly None when the adapter does not implement pb_to_index.
"""
return self.__index_list
def process_batch(self, batch):
if self.conn._api_version == datastore_rpc._CLOUD_DATASTORE_V1:
skipped_results = batch.skipped_results
num_results = len(batch.entity_results)
else:
skipped_results = batch.skipped_results
num_results = len(batch.result)
self.__expected_offset -= skipped_results
if self.__remaining_limit is not None:
self.__remaining_limit -= num_results
if not self.__was_first_result_processed:
self.__was_first_result_processed = True
if self.conn._api_version == datastore_rpc._CLOUD_DATASTORE_V1:
result_type = batch.entity_result_type
self.__keys_only = result_type == googledatastore.EntityResult.KEY_ONLY
self.__compiled_query = None
self.__index_list = None
else:
self.__keys_only = batch.keys_only
if batch.HasField('compiled_query'):
self.__compiled_query = batch.compiled_query
else:
self.__compiled_query = None
try:
self.__index_list = [
self.__conn.adapter.pb_to_index(index_pb)
for index_pb in batch.index
]
except NotImplementedError:
self.__index_list = None
class Batch(object):
"""A batch of results returned by a query.
This class contains a batch of results returned from the datastore and
relevant metadata. This metadata includes:
query: The query that produced this batch
query_options: The QueryOptions used to run the query. This does not
contain any options passed to the .next_batch() call that created the
current batch.
start_cursor, end_cursor: These are the cursors that can be used
with a query to re-fetch this batch. They can also be used to
find all entities before or after the given batch (by using start_cursor as
an end cursor or vice versa). start_cursor can also be advanced to
point to a position within the batch using Cursor.advance().
skipped_results: the number of results skipped because of the offset
given to the request that generated it. This can be set either on
the original Query.run() request or in subsequent .next_batch() calls.
more_results: If this is true there are more results that can be retrieved
either by .next_batch() or Batcher.next().
This class is also able to fetch the next batch of the query using
.next_batch(). As batches of results must be fetched serially, .next_batch()
can only be called once. Additional calls to .next_batch() will return None.
When there are no more batches .next_batch() will return None as well. Note
that batches returned by iterating over Batcher will always return None for
.next_batch() as the Batcher handles fetching the next batch automatically.
A Batch typically represents the result of a single RPC request. The datastore
operates on a "best effort" basis so the batch returned by .next_batch()
or Query.run_async().get_result() may not have satisfied the requested offset
or number of results (specified through FetchOptions.offset and
FetchOptions.batch_size respectively). To satisfy these restrictions
additional batches may be needed (with FetchOptions that specify the remaining
offset or results needed). The Batcher class hides these limitations.
"""
__skipped_cursor = None
__end_cursor = None
@classmethod
@datastore_rpc._positional(5)
def create_async(cls, query, query_options, conn, req,
start_cursor):
batch_shared = _BatchShared(query, query_options, conn)
batch0 = cls(batch_shared, start_cursor=start_cursor)
return batch0._make_query_rpc_call(query_options, req)
@datastore_rpc._positional(2)
def __init__(self, batch_shared, start_cursor=Cursor()):
"""Constructor.
This class is constructed in stages (one when an RPC is sent and another
when an RPC is completed) and should not be constructed directly.
Use Query.run_async().get_result() to create a Batch or Query.run()
to use a batcher.
This constructor does not perform verification.
Args:
batch_shared: Data shared between batches for a single query run.
start_cursor: Optional cursor pointing before this batch.
"""
self._batch_shared = batch_shared
self.__start_cursor = start_cursor
@property
def query_options(self):
"""The QueryOptions used to retrieve the first batch."""
return self._batch_shared.query_options
@property
def query(self):
"""The query the current batch came from."""
return self._batch_shared.query
@property
def results(self):
"""A list of entities in this batch."""
return self.__results
@property
def keys_only(self):
"""Whether the entities in this batch only contain keys."""
return self._batch_shared.keys_only
@property
def index_list(self):
Returns the list of indexes used to perform this batch's query.
Possibly None when the adapter does not implement pb_to_index.
"""
return self._batch_shared.index_list
@property
def start_cursor(self):
"""A cursor that points to the position just before the current batch."""
return self.__start_cursor
@property
def end_cursor(self):
"""A cursor that points to the position just after the current batch."""
return self.__end_cursor
@property
def skipped_results(self):
"""The number of results skipped because of an offset in the request.
An offset is satisfied before any results are returned. The start_cursor
points to the position in the query before the skipped results.
"""
return self._skipped_results
@property
def more_results(self):
"""Whether more results can be retrieved from the query."""
return self.__more_results
def next_batch(self, fetch_options=None):
"""Synchronously get the next batch or None if there are no more batches.
Args:
fetch_options: Optional fetch options to use when fetching the next batch.
Merged with both the fetch options on the original call and the
connection.
Returns:
A new Batch of results or None if either the next batch has already been
fetched or there are no more results.
"""
async_ = self.next_batch_async(fetch_options)
if async_ is None:
return None
return async_.get_result()
def _compiled_query(self):
return self._batch_shared.compiled_query
def cursor(self, index):
"""Gets the cursor that points just after the result at index - 1.
The index is relative to first result in .results. Since start_cursor
points to the position before the first skipped result, the range of
indexes this function supports is limited to
[-skipped_results, len(results)].
For example, using start_cursor=batch.cursor(i) and
end_cursor=batch.cursor(j) will return the results found in
batch.results[i:j]. Note that any result added in the range (i-1, j]
will appear in the new query's results.
Warning: Any index in the range (-skipped_results, 0) may cause
continuation to miss or duplicate results if outside a transaction.
Args:
index: An int, the index relative to the first result before which the
cursor should point.
Returns:
A Cursor that points to a position just after the result index - 1,
which if used as a start_cursor will cause the first result to be
batch.results[index].
"""
if not isinstance(index, six.integer_types):
raise datastore_errors.BadArgumentError(
'index argument should be an integer (%r)' % (index,))
if not -self._skipped_results <= index <= len(self.__results):
raise datastore_errors.BadArgumentError(
'index argument must be in the inclusive range [%d, %d]' %
(-self._skipped_results, len(self.__results)))
if index == -self._skipped_results:
return self.__start_cursor
elif (index == 0 and
self.__skipped_cursor):
return self.__skipped_cursor
elif index > 0 and self.__result_cursors:
return self.__result_cursors[index - 1]
elif index == len(self.__results):
return self.__end_cursor
else:
return self.__start_cursor.advance(index + self._skipped_results,
self._batch_shared.query,
self._batch_shared.conn)
def next_batch_async(self, fetch_options=None):
"""Asynchronously get the next batch or None if there are no more batches.
Args:
fetch_options: Optional fetch options to use when fetching the next batch.
Merged with both the fetch options on the original call and the
connection.
Returns:
An async object that can be used to get the next Batch or None if either
the next batch has already been fetched or there are no more results.
"""
if not self.__datastore_cursor:
return None
fetch_options, next_batch = self._make_next_batch(fetch_options)
if (fetch_options is not None and
not FetchOptions.is_configuration(fetch_options)):
raise datastore_errors.BadArgumentError('Invalid fetch options.')
config = self._batch_shared.query_options.merge(fetch_options)
conn = next_batch._batch_shared.conn
requested_offset = 0
if fetch_options is not None and fetch_options.offset is not None:
requested_offset = fetch_options.offset
if conn._api_version == datastore_rpc._CLOUD_DATASTORE_V1:
if self._batch_shared.expected_offset != requested_offset:
raise datastore_errors.BadArgumentError(
'Cannot request the next batch with a different offset than '
'expected. Expected: %s, Got: %s.'
% (self._batch_shared.expected_offset, requested_offset))
limit = self._batch_shared.remaining_limit
next_options = QueryOptions(offset=self._batch_shared.expected_offset,
limit=limit,
start_cursor=self.__datastore_cursor)
config = config.merge(next_options)
result = next_batch._make_query_rpc_call(
config,
self._batch_shared.query._to_pb_v1(conn, config))
else:
result = next_batch._make_next_rpc_call(config,
self._to_pb(fetch_options))
self.__datastore_cursor = None
return result
def _to_pb(self, fetch_options=None):
req = datastore_pb.NextRequest()
if FetchOptions.produce_cursors(fetch_options,
self._batch_shared.query_options,
self._batch_shared.conn.config):
req.compile = True
count = FetchOptions.batch_size(fetch_options,
self._batch_shared.query_options,
self._batch_shared.conn.config)
if count is not None:
req.count = count
if fetch_options is not None and fetch_options.offset:
req.offset = fetch_options.offset
req.cursor.CopyFrom(self.__datastore_cursor)
return req
def _extend(self, next_batch):
"""Combines the current batch with the next one. Called by batcher."""
self.__datastore_cursor = next_batch.__datastore_cursor
next_batch.__datastore_cursor = None
self.__more_results = next_batch.__more_results
if not self.__results:
self.__skipped_cursor = next_batch.__skipped_cursor
self.__results.extend(next_batch.__results)
self.__result_cursors.extend(next_batch.__result_cursors)
self.__end_cursor = next_batch.__end_cursor
self._skipped_results += next_batch._skipped_results
def _make_query_rpc_call(self, config, req):
"""Makes a RunQuery call that will modify the instance.
Args:
config: The datastore_rpc.Configuration to use for the call.
req: The request to send with the call.
Returns:
A UserRPC object that can be used to fetch the result of the RPC.
"""
_api_version = self._batch_shared.conn._api_version
if _api_version == datastore_rpc._CLOUD_DATASTORE_V1:
return self._batch_shared.conn._make_rpc_call(
config, 'RunQuery', req, googledatastore.RunQueryResponse(),
self.__v1_run_query_response_hook)
return self._batch_shared.conn._make_rpc_call(config, 'RunQuery', req,
datastore_pb.QueryResult(),
self.__query_result_hook)
def _make_next_rpc_call(self, config, req):
"""Makes a Next call that will modify the instance.
Args:
config: The datastore_rpc.Configuration to use for the call.
req: The request to send with the call.
Returns:
A UserRPC object that can be used to fetch the result of the RPC.
"""
return self._batch_shared.conn._make_rpc_call(config, 'Next', req,
datastore_pb.QueryResult(),
self.__query_result_hook)
_need_index_header = 'The suggested index for this query is:'
def __v1_run_query_response_hook(self, rpc):
try:
self._batch_shared.conn.check_rpc_success(rpc)
except datastore_errors.NeedIndexError:
raise
batch = rpc.response.batch
self._batch_shared.process_batch(batch)
if batch.skipped_cursor:
self.__skipped_cursor = Cursor(_cursor_bytes=batch.skipped_cursor)
self.__result_cursors = [Cursor(_cursor_bytes=result.cursor)
for result in batch.entity_results
if result.cursor]
if batch.end_cursor:
self.__end_cursor = Cursor(_cursor_bytes=batch.end_cursor)
self._skipped_results = batch.skipped_results
if batch.more_results == googledatastore.QueryResultBatch.NOT_FINISHED:
self.__more_results = True
self.__datastore_cursor = self.__end_cursor or self.__skipped_cursor
if self.__datastore_cursor == self.__start_cursor:
raise datastore_errors.Timeout(
'The query was not able to make progress.')
else:
self._end()
self.__results = self._process_v1_results(batch.entity_results)
return self
def __query_result_hook(self, rpc):
"""Internal method used as get_result_hook for RunQuery/Next operation."""
try:
self._batch_shared.conn.check_rpc_success(rpc)
except datastore_errors.NeedIndexError as exc:
if isinstance(rpc.request, datastore_pb.Query):
_, kind, ancestor, props = datastore_index.CompositeIndexForQuery(
rpc.request)
props = datastore_index.GetRecommendedIndexProperties(props)
yaml = datastore_index.IndexYamlForQuery(kind, ancestor, props)
xml = datastore_index.IndexXmlForQuery(kind, ancestor, props)
raise datastore_errors.NeedIndexError(
'\n'.join([str(exc), self._need_index_header, yaml]),
original_message=str(exc), header=self._need_index_header,
yaml_index=yaml, xml_index=xml)
raise
query_result = rpc.response
self._batch_shared.process_batch(query_result)
if query_result.HasField('skipped_results_compiled_cursor'):
self.__skipped_cursor = Cursor(
_cursor_bytes=query_result.skipped_results_compiled_cursor
.SerializeToString())
self.__result_cursors = [
Cursor(_cursor_bytes=result.SerializeToString())
for result in query_result.result_compiled_cursor
]
if query_result.HasField('compiled_cursor'):
self.__end_cursor = Cursor(
_cursor_bytes=query_result.compiled_cursor.SerializeToString())
self._skipped_results = query_result.skipped_results
if query_result.more_results:
self.__datastore_cursor = query_result.cursor
self.__more_results = True
else:
self._end()
self.__results = self._process_results(query_result.result)
return self
def _end(self):
"""Changes the internal state so that no more batches can be produced."""
self.__datastore_cursor = None
self.__more_results = False
def _make_next_batch(self, fetch_options):
"""Creates the object to store the next batch.
Args:
fetch_options: The datastore_query.FetchOptions passed in by the user or
None.
Returns:
A tuple containing the fetch options that should be used internally and
the object that should be used to contain the next batch.
"""
return fetch_options, Batch(self._batch_shared,
start_cursor=self.__end_cursor)
def _process_results(self, results):
"""Converts the datastore results into results returned to the user.
Args:
results: A list of entity_pb2.EntityProto's returned by the datastore
Returns:
A list of results that should be returned to the user.
"""
converter = self._batch_shared.conn.adapter.pb_to_query_result
return [converter(result, self._batch_shared.query_options)
for result in results]
def _process_v1_results(self, results):
"""Converts the datastore results into results returned to the user.
Args:
results: A list of googledatastore.EntityResults.
Returns:
A list of results that should be returned to the user.
"""
converter = self._batch_shared.conn.adapter.pb_v1_to_query_result
return [converter(result.entity, self._batch_shared.query_options)
for result in results]
def __getstate__(self):
raise pickle.PicklingError(
'Pickling of datastore_query.Batch is unsupported.')
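# Editor's note: the helper below is an illustrative sketch added to this dump
# and is not part of the original module. It assumes the caller already has a
# datastore_rpc.Connection (`conn`) and a Query built elsewhere, and it only
# uses APIs defined in this file: Query.run_async(), Batch.results and
# Batch.next_batch().
def _example_walk_batches(query, conn, query_options=None):
  """Yields every result of `query` by following Batch.next_batch() manually."""
  batch = query.run_async(conn, query_options).get_result()
  while batch is not None:
    for result in batch.results:
      yield result
    # next_batch() returns None once every batch has been fetched.
    batch = batch.next_batch()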
class _AugmentedBatch(Batch):
"""A batch produced by a datastore_query._AugmentedQuery."""
@classmethod
@datastore_rpc._positional(5)
def create_async(cls, augmented_query, query_options, conn, req,
in_memory_offset, in_memory_limit, start_cursor):
initial_offset = 0 if in_memory_offset is not None else None
batch_shared = _BatchShared(augmented_query._query,
query_options,
conn,
augmented_query,
initial_offset=initial_offset)
batch0 = cls(batch_shared,
in_memory_offset=in_memory_offset,
in_memory_limit=in_memory_limit,
start_cursor=start_cursor)
return batch0._make_query_rpc_call(query_options, req)
@datastore_rpc._positional(2)
def __init__(self, batch_shared,
in_memory_offset=None,
in_memory_limit=None,
next_index=0,
start_cursor=Cursor()):
"""A Constructor for datastore_query._AugmentedBatch.
Constructed by datastore_query._AugmentedQuery. Should not be called
directly.
"""
super(_AugmentedBatch, self).__init__(batch_shared,
start_cursor=start_cursor)
self.__in_memory_offset = in_memory_offset
self.__in_memory_limit = in_memory_limit
self.__next_index = next_index
@property
def query(self):
"""The query the current batch came from."""
return self._batch_shared.augmented_query
def cursor(self, index):
raise NotImplementedError
def _extend(self, next_batch):
super(_AugmentedBatch, self)._extend(next_batch)
self.__in_memory_limit = next_batch.__in_memory_limit
self.__in_memory_offset = next_batch.__in_memory_offset
self.__next_index = next_batch.__next_index
def _process_v1_results(self, results):
"""Process V4 results by converting to V3 and calling _process_results."""
v3_results = []
is_projection = bool(self.query_options.projection)
for v1_result in results:
v3_entity = entity_pb2.EntityProto()
self._batch_shared.conn.adapter.get_entity_converter().v1_to_v3_entity(
v1_result.entity, v3_entity, is_projection)
v3_results.append(v3_entity)
return self._process_results(v3_results)
def _process_results(self, results):
in_memory_filter = self._batch_shared.augmented_query._in_memory_filter
if in_memory_filter:
results = list(filter(in_memory_filter, results))
in_memory_results = self._batch_shared.augmented_query._in_memory_results
if in_memory_results and self.__next_index < len(in_memory_results):
original_query = super(_AugmentedBatch, self).query
if original_query._order:
if results:
next_result = in_memory_results[self.__next_index]
next_key = original_query._order.key(next_result)
i = 0
while i < len(results):
result = results[i]
result_key = original_query._order.key(result)
while next_key <= result_key:
results.insert(i, next_result)
i += 1
self.__next_index += 1
if self.__next_index >= len(in_memory_results):
break
next_result = in_memory_results[self.__next_index]
next_key = original_query._order.key(next_result)
i += 1
elif results or not super(_AugmentedBatch, self).more_results:
results = in_memory_results + results
self.__next_index = len(in_memory_results)
if self.__in_memory_offset:
assert not self._skipped_results
offset = min(self.__in_memory_offset, len(results))
if offset:
self._skipped_results += offset
self.__in_memory_offset -= offset
results = results[offset:]
if self.__in_memory_limit is not None:
results = results[:self.__in_memory_limit]
self.__in_memory_limit -= len(results)
if self.__in_memory_limit <= 0:
self._end()
return super(_AugmentedBatch, self)._process_results(results)
def _make_next_batch(self, fetch_options):
in_memory_offset = FetchOptions.offset(fetch_options)
augmented_query = self._batch_shared.augmented_query
if in_memory_offset and (augmented_query._in_memory_filter or
augmented_query._in_memory_results):
fetch_options = FetchOptions(offset=0)
else:
in_memory_offset = None
return (fetch_options,
_AugmentedBatch(self._batch_shared,
in_memory_offset=in_memory_offset,
in_memory_limit=self.__in_memory_limit,
start_cursor=self.end_cursor,
next_index=self.__next_index))
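# Editor's note: illustrative sketch, not part of the original module. It
# restates the Batch.cursor() docstring as code: re-fetching the slice
# batch.results[i:j] by using cursor(i) and cursor(j) as start and end cursors.
# `conn` is an assumed datastore_rpc.Connection; only QueryOptions, Batch.cursor
# and Query.run_async from this file are used.
def _example_refetch_slice(batch, conn, i, j):
  """Re-runs the batch's query restricted to the results batch.results[i:j]."""
  options = QueryOptions(start_cursor=batch.cursor(i),
                         end_cursor=batch.cursor(j))
  return batch.query.run_async(conn, options).get_result().results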
class Batcher(object):
"""A class that implements the Iterator interface for Batches.
Typically constructed by a call to Query.run().
The class hides the "best effort" nature of the datastore by potentially
making multiple requests to the datastore and merging the resulting batches.
This is accomplished efficiently by prefetching results and mixing both
non-blocking and blocking calls to the datastore as needed.
Iterating through batches is almost always more efficient than pulling all
results at once as RPC latency is hidden by asynchronously prefetching
results.
The batches produced by this class cannot be used to fetch the next batch
(through Batch.next_batch()) because, by the time the current batch is
returned, the request for the next batch has already been sent.
"""
ASYNC_ONLY = None
AT_LEAST_OFFSET = 0
AT_LEAST_ONE = object()
def __init__(self, query_options, first_async_batch):
"""Constructor.
Although this class can be manually constructed, it is preferable to use
Query.run(query_options).
Args:
query_options: The QueryOptions used to create the first batch.
first_async_batch: The first batch produced by
Query.run_async(query_options).
"""
self.__next_batch = first_async_batch
self.__initial_offset = QueryOptions.offset(query_options) or 0
self.__skipped_results = 0
def next(self):
"""Get the next batch. See .next_batch()."""
return self.next_batch(self.AT_LEAST_ONE)
def __next__(self):
return self.next()
def next_batch(self, min_batch_size):
"""Get the next batch.
The batch returned by this function cannot be used to fetch the next batch
(through Batch.next_batch()); that call will always return None. To retrieve
the next batch use .next() or .next_batch(N).
This function may return a batch larger than min_batch_size, but will never
return a smaller one unless there are no more results.
Special values can be used for min_batch_size:
ASYNC_ONLY - Do not perform any synchronous fetches from the datastore
even if this produces a batch with no results.
AT_LEAST_OFFSET - Only pull enough results to satisfy the offset.
AT_LEAST_ONE - Pull batches until at least one result is returned.
Args:
min_batch_size: The minimum number of results to retrieve or one of
(ASYNC_ONLY, AT_LEAST_OFFSET, AT_LEAST_ONE)
Returns:
The next Batch of results.
"""
if min_batch_size in (Batcher.ASYNC_ONLY, Batcher.AT_LEAST_OFFSET,
Batcher.AT_LEAST_ONE):
exact = False
else:
exact = True
datastore_types.ValidateInteger(min_batch_size,
'min_batch_size',
datastore_errors.BadArgumentError)
if not self.__next_batch:
raise StopIteration
batch = self.__next_batch.get_result()
self.__next_batch = None
self.__skipped_results += batch.skipped_results
if min_batch_size is not Batcher.ASYNC_ONLY:
if min_batch_size is Batcher.AT_LEAST_ONE:
min_batch_size = 1
needed_results = min_batch_size - len(batch.results)
while (batch.more_results and
(self.__skipped_results < self.__initial_offset or
needed_results > 0)):
if batch.query_options.batch_size:
batch_size = max(batch.query_options.batch_size, needed_results)
elif exact:
batch_size = needed_results
else:
batch_size = None
self.__next_batch = batch.next_batch_async(FetchOptions(
offset=max(0, self.__initial_offset - self.__skipped_results),
batch_size=batch_size))
next_batch = self.__next_batch.get_result()
self.__next_batch = None
self.__skipped_results += next_batch.skipped_results
needed_results = max(0, needed_results - len(next_batch.results))
batch._extend(next_batch)
self.__next_batch = batch.next_batch_async()
return batch
def __getstate__(self):
raise pickle.PicklingError(
'Pickling of datastore_query.Batcher is unsupported.')
def __iter__(self):
return self
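# Editor's note: illustrative sketch, not part of the original module. A Batcher
# is normally obtained from Query.run(conn, query_options); iterating it yields
# merged Batch objects whose own next_batch() returns None because the Batcher
# has already requested the following batch. `conn` and `query` are assumed to
# be supplied by the caller.
def _example_consume_batcher(query, conn, query_options=None):
  """Collects all results of `query`, letting the Batcher prefetch batches."""
  results = []
  for batch in query.run(conn, query_options):
    results.extend(batch.results)
  return results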
class ResultsIterator(six.Iterator):
"""An iterator over the results from Batches obtained from a Batcher.
ResultsIterator implements Python's iterator protocol, so results can be
accessed with the for-statement:
> it = ResultsIterator(Query(kind='Person').run())
> for person in it:
> print('Hi, %s!' % person['name'])
At any time ResultsIterator.cursor() can be used to grab the Cursor that
points just after the last result returned by the iterator.
"""
__current_batch = None
__current_pos = 0
__last_cursor = None
def __init__(self, batcher):
"""Constructor.
Args:
batcher: A datastore_query.Batcher
"""
if not isinstance(batcher, Batcher):
raise datastore_errors.BadArgumentError(
'batcher argument should be datastore_query.Batcher (%r)' %
(batcher,))
self.__batcher = batcher
def index_list(self):
"""Returns the list of indexes used to perform the query.
Possibly None when the adapter does not implement pb_to_index.
"""
return self._ensure_current_batch().index_list
def cursor(self):
"""Returns a cursor that points just after the last result returned.
If next() throws an exception, this function returns the end_cursor from
the last successful batch or throws the same exception if no batch was
successful.
"""
return (self.__last_cursor or
self._ensure_current_batch().cursor(self.__current_pos))
def _ensure_current_batch(self):
if not self.__current_batch:
self.__current_batch = self.__batcher.next_batch(Batcher.AT_LEAST_OFFSET)
self.__current_pos = 0
return self.__current_batch
def _compiled_query(self):
"""Returns the compiled query associated with the iterator.
Internal only; do not use.
"""
return self._ensure_current_batch()._compiled_query()
def __next__(self):
"""Returns the next query result."""
while (not self.__current_batch or
self.__current_pos >= len(self.__current_batch.results)):
try:
next_batch = self.__batcher.next_batch(Batcher.AT_LEAST_OFFSET)
except:
if self.__current_batch:
self.__last_cursor = self.__current_batch.end_cursor
raise
self.__current_pos = 0
self.__current_batch = next_batch
result = self.__current_batch.results[self.__current_pos]
self.__current_pos += 1
return result
def __iter__(self):
return self
def next(self):
return self.__next__() | en | 0.807157 | #!/usr/bin/env python # # Copyright 2007 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # A thin wrapper around datastore query RPC calls. This provides wrappers around the internal only datastore_pb library and is designed to be the lowest-level API to be used by all Python datastore client libraries for executing queries. It provides a layer of protection so the actual RPC syntax can change without affecting client libraries. Any class, function, field or argument starting with an '_' is for INTERNAL use only and should not be used by developers! A base class for query components. Currently just implements basic == and != functions. Constructs a FilterPredicate from the given name, op and values. Args: name: A non-empty string, the name of the property to filter. op: One of PropertyFilter._OPERATORS.keys(), the operator to use. values: A supported value, the value to compare against. Returns: if values is a list, a CompositeFilter that uses AND to combine all values, otherwise a PropertyFilter for the single value. Raises: datastore_errors.BadPropertyError: if the property name is invalid. datastore_errors.BadValueError: if the property did not validate correctly or the value was an empty list. Other exception types (like OverflowError): if the property value does not meet type-specific criteria. Extracts key values from the given entity. Args: entity: The entity_pb2.EntityProto to extract values from. property_names: The names of the properties from which to extract values. Returns: A dict mapping property names to a lists of key values. A component that operates on a specific set of properties. Returns a set of property names used by the filter. An abstract base class for all query filters. All sub-classes must be immutable as these are often stored without creating a defensive copy. Applies the filter predicate to the given entity. Args: entity: the datastore_pb.EntityProto to test. Returns: True if the given entity matches the filter, False otherwise. Apply the given component to the comparable value map. A filter matches a list of values if at least one value in the list matches the filter, for example: 'prop: [1, 2]' matches both 'prop = 1' and 'prop = 2' but not 'prop = 3' Note: the values are actually represented as tuples whose first item encodes the type; see datastore_types.PropertyValueToKeyValue(). Args: key_value_map: A dict mapping property names to a list of comparable values. Return: A boolean indicating if the given map matches the filter. Removes values from the given map that do not match the filter. When doing a scan in the datastore, only index values that match the filters are seen. When multiple values that point to the same entity are seen, the entity only appears where the first value is found. This function removes all values that don't match the query so that the first value in the map is the same one the datastore would see first. Args: key_value_map: the comparable value map from which to remove values. 
Does not need to contain values for all filtered properties. Returns: A value that evaluates to False if every value in a single list was completely removed. This effectively applies the filter but is less efficient than _apply(). Internal only function to generate a pb. Internal only function to generate a list of pbs. Internal only function to generate a v1 pb. Args: adapter: A datastore_rpc.AbstractAdapter Base class for a filter that operates on a single property. Returns the name of the property being filtered. Apply the filter to the given value. Args: value: The comparable value to check. Returns: A boolean indicating if the given value matches the filter. An immutable filter predicate that constrains a single property. Constructor. Args: op: A string representing the operator to use. value: A entity_pb2.Property, the property and value to compare against. Raises: datastore_errors.BadArgumentError if op has an unsupported value or value is not an entity_pb2.Property. Returns True if the filter predicate contains inequalities filters. Returns the internal only pb representation. Returns a googledatastore.Filter representation of the filter. Args: adapter: A datastore_rpc.AbstractAdapter A filter predicate that represents a range of values. Since we allow multi-valued properties there is a large difference between "x > 0 AND x < 1" and "0 < x < 1." An entity with x = [-1, 2] will match the first but not the second. Since the datastore only allows a single inequality filter, multiple in-equality filters are merged into a single range filter in the datastore (unlike equality filters). This class is used by datastore_query.CompositeFilter to implement the same logic. Constructs a range filter using start and end properties. Args: start: A entity_pb2.Property to use as a lower bound or None to indicate no lower bound. start_incl: A boolean that indicates if the lower bound is inclusive. end: A entity_pb2.Property to use as an upper bound or None to indicate no upper bound. end_incl: A boolean that indicates if the upper bound is inclusive. Returns a filter representing the intersection of self and other. Apply the filter to the given value. Args: value: The comparable value to check. Returns: A boolean indicating if the given value matches the filter. Returns a googledatastore.Filter representation of the filter. Args: adapter: A datastore_rpc.AbstractAdapter. A FilterPredicate that matches entities containing specific properties. Only works as an in-memory filter. Used internally to filter out entities that don't have all properties in a given Order. A filter that isolates correlated values and applies a sub-filter on them. This filter assumes that every property used by the sub-filter should be grouped before being passed to the sub-filter. The default grouping puts each value in its own group. Consider: e = {a: [1, 2], b: [2, 1, 3], c: 4} A correlation filter with a sub-filter that operates on (a, b) will be tested against the following 3 sets of values: {a: 1, b: 2} {a: 2, b: 1} {b: 3} In this case CorrelationFilter('a = 2 AND b = 2') won't match this entity but CorrelationFilter('a = 2 AND b = 1') will. To apply an uncorrelated filter on c, the filter must be applied in parallel to the correlation filter. For example: CompositeFilter(AND, [CorrelationFilter('a = 2 AND b = 1'), 'c = 3']) If 'c = 3' was included in the correlation filter, c would be grouped as well. 
This would result in the following values: {a: 1, b: 2, c: 3} {a: 2, b: 1} {b: 3} If any set of correlated values match the sub-filter then the entity matches the correlation filter. Constructor. Args: subfilter: A FilterPredicate to apply to the correlated values Applies sub-filter to the correlated value maps. The default implementation matches when any value_map in value_maps matches the sub-filter. Args: value_maps: A list of correlated value_maps. Returns: True if any the entity matches the correlation filter. A function that groups the given values. Override this function to introduce custom grouping logic. The default implementation assumes each value belongs in its own group. Args: prop: The name of the property who's values are being grouped. values: A list of opaque values. Returns: A list of lists of grouped values. An immutable filter predicate that combines other predicates. This class proactively merges sub-filters that are combined using the same operator. For example: CompositeFilter(AND, [f1, f2, CompositeFilter(AND, [f3, f4]), f5, f6]) is equivalent to: CompositeFilter(AND, [f1, f2, f3, f4, f5, f6]) Currently filters can only be combined using an AND operator. Constructor. Args: op: The operator to use to combine the given filters filters: A list of one or more filters to combine Raises: datastore_errors.BadArgumentError if op is not in CompsiteFilter.OPERATORS or filters is not a non-empty list containing only FilterPredicates. Returns the internal only pb representation. Returns a googledatastore.Filter. Args: adapter: A datastore_rpc.AbstractAdapter A filter that removes all entities with the given keys. A filter that removes duplicate keys. A base class that represents a sort order on a query. All sub-classes must be immutable as these are often stored without creating a defensive copying. This class can be used as either the cmp or key arg in sorted() or list.sort(). To provide a stable ordering a trailing key ascending order is always used. Constructs an order representing the reverse of the current order. This function takes into account the effects of orders on properties not in the group_by clause of a query. For example, consider: SELECT A, First(B) ... GROUP BY A ORDER BY A, B Changing the order of B would effect which value is listed in the 'First(B)' column which would actually change the results instead of just reversing them. Args: group_by: If specified, only orders on properties in group_by will be reversed. Returns: A new order representing the reverse direction. Creates a key for the given value map. Compares the given value maps. Internal only function to generate a filter pb. Internal only function to generate a v1 filter pb. Args: adapter: A datastore_rpc.AbstractAdapter Constructs a "key" value for the given entity based on the current order. This function can be used as the key argument for list.sort() and sorted(). Args: entity: The entity_pb2.EntityProto to convert filter_predicate: A FilterPredicate used to prune values before comparing entities or None. Returns: A key value that identifies the position of the entity when sorted by the current order. Compares the given values taking into account any filters. This function can be used as the cmp argument for list.sort() and sorted(). This function is slightly more efficient that Order.key when comparing two entities, however it is much less efficient when sorting a list of entities. 
Args: lhs: An entity_pb2.EntityProto rhs: An entity_pb2.EntityProto filter_predicate: A FilterPredicate used to prune values before comparing entities or None. Returns: An integer <, = or > 0 representing the operator that goes in between lhs and rhs that to create a true statement. Reverses the comparison for the given object. Constructor for _ReverseOrder. Args: obj: Any comparable and hashable object. An immutable class that represents a sort order for a single property. Constructor. Args: prop: the name of the prop by which to sort. direction: the direction in which to sort the given prop. Raises: datastore_errors.BadArgumentError if the prop name or direction is invalid. Returns the internal only pb representation. Returns a googledatastore.PropertyOrder representation of the order. Args: adapter: A datastore_rpc.AbstractAdapter. An immutable class that represents a sequence of Orders. This class proactively flattens sub-orders that are of type CompositeOrder. For example: CompositeOrder([O1, CompositeOrder([02, 03]), O4]) is equivalent to: CompositeOrder([O1, 02, 03, O4]) Constructor. Args: orders: A list of Orders which are applied in order. Returns the number of sub-orders the instance contains. Returns an ordered list of internal only pb representations. Returns an ordered list of googledatastore.PropertyOrder. Args: adapter: A datastore_rpc.AbstractAdapter An immutable class that contains all options for fetching results. These options apply to any request that pulls results from a query. This class reserves the right to define configuration options of any name except those that start with 'user_'. External subclasses should only define function or variables with names that start with in 'user_'. Options are set by passing keyword arguments to the constructor corresponding to the configuration options defined below and in datastore_rpc.Configuration. This object can be used as the default config for a datastore_rpc.Connection but in that case some options will be ignored, see option documentation below for details. If a Cursor should be returned with the fetched results. Raises: datastore_errors.BadArgumentError if value is not a bool. The number of results to skip before returning the first result. Only applies to the first request it is used with and is ignored if present on datastore_rpc.Connection.config. Raises: datastore_errors.BadArgumentError if value is not a integer or is less than zero. The number of results to attempt to retrieve in a batch. Raises: datastore_errors.BadArgumentError if value is not a integer or is not greater than zero. An immutable class that contains all options for running a query. This class contains options that control execution process (deadline, batch_size, read_policy, etc) and what part of the query results are returned (keys_only, projection, offset, limit, etc) Options that control the contents of the query results are specified on the datastore_query.Query directly. This class reserves the right to define configuration options of any name except those that start with 'user_'. External subclasses should only define function or variables with names that start with in 'user_'. Options are set by passing keyword arguments to the constructor corresponding to the configuration options defined below and in FetchOptions and datastore_rpc.Configuration. This object can be used as the default config for a datastore_rpc.Connection but in that case some options will be ignored, see below for details. If the query should only return keys. 
Raises: datastore_errors.BadArgumentError if value is not a bool. A list or tuple of property names to project. If None, the entire entity is returned. Specifying a projection: - may change the index requirements for the given query; - will cause a partial entity to be returned; - will cause only entities that contain those properties to be returned; A partial entities only contain the property name and value for properties in the projection (meaning and multiple will not be set). They will also only contain a single value for any multi-valued property. However, if a multi-valued property is specified in the order, an inequality property, or the projected properties, the entity will be returned multiple times. Once for each unique combination of values. However, projection queries are significantly faster than normal queries. Raises: datastore_errors.BadArgumentError if value is empty or not a list or tuple of strings. Limit on the number of results to return. Raises: datastore_errors.BadArgumentError if value is not an integer or is less than zero. Number of results to attempt to return on the initial request. Raises: datastore_errors.BadArgumentError if value is not an integer or is not greater than zero. Cursor to use a start position. Ignored if present on datastore_rpc.Connection.config. Raises: datastore_errors.BadArgumentError if value is not a Cursor. Cursor to use as an end position. Ignored if present on datastore_rpc.Connection.config. Raises: datastore_errors.BadArgumentError if value is not a Cursor. Hint on how the datastore should plan the query. Raises: datastore_errors.BadArgumentError if value is not a known hint. An immutable class that represents a relative position in a query. The position denoted by a Cursor is relative to a result in a query even if the result has been removed from the given query. Usually to position immediately after the last result returned by a batch. A cursor should only be used on a query with an identical signature to the one that produced it or on a query with its sort order reversed. Constructor. A Cursor constructed with no arguments points the first result of any query. If such a Cursor is used as an end_cursor no results will ever be returned. DEPRECATED. It is no longer necessary to call reversed() on cursors. A cursor returned by a query may also be used in a query whose sort order has been reversed. This method returns a copy of the original cursor. Serialize cursor as a byte string. Gets a Cursor given its byte string serialized form. The serialized form of a cursor may change in a non-backwards compatible way. In this case cursors must be regenerated from a new Query request. Args: cursor: A serialized cursor as returned by .to_bytes. Returns: A Cursor. Raises: datastore_errors.BadValueError if the cursor argument does not represent a serialized cursor. Serialize cursor as a websafe string. Returns: A base64-encoded serialized cursor. Gets a Cursor given its websafe serialized form. The serialized form of a cursor may change in a non-backwards compatible way. In this case cursors must be regenerated from a new Query request. Args: cursor: A serialized cursor as returned by .to_websafe_string. Returns: A Cursor. Raises: datastore_errors.BadValueError if the cursor argument is not a string type of does not represent a serialized cursor. Advances a Cursor by the given offset. Args: offset: The amount to advance the current query. query: A Query identical to the one this cursor was created from. conn: The datastore_rpc.Connection to use. 
Returns: A new cursor that is advanced by offset using the given query. A class that implements the key filters available on a Query. Constructs a _QueryKeyFilter. If app/namespace and ancestor are not defined, the app/namespace set in the environment is used. Args: app: a string representing the required app id or None. namespace: a string representing the required namespace or None. kind: a string representing the required kind or None. ancestor: a entity_pb2.Reference representing the required ancestor or None. Raises: datastore_erros.BadArgumentError if app and ancestor.app() do not match or an unexpected type is passed in for any argument. Apply the filter. Accepts either an entity or a reference to avoid the need to extract keys from entities when we have a list of entities (which is a common case). Args: entity_or_reference: Either an entity_pb2.EntityProto or entity_pb2.Reference. Returns an internal pb representation. Returns a v1 internal proto representation of the query key filter. Args: adapter: A datastore_rpc.AbstractAdapter. Returns: A tuple (googledatastore.RunQueryRequest, googledatastore.Filter). The second tuple value is a Filter representing the ancestor portion of the query. If there is no ancestor constraint, this value will be None A base class for query implementations. Runs the query using provided datastore_rpc.Connection. Args: conn: The datastore_rpc.Connection to use query_options: Optional query options to use Returns: A Batcher that implicitly fetches query results asynchronously. Raises: datastore_errors.BadArgumentError if any of the arguments are invalid. Runs the query using the provided datastore_rpc.Connection. Args: conn: the datastore_rpc.Connection on which to run the query. query_options: Optional QueryOptions with which to run the query. Returns: An async object that can be used to grab the first Batch. Additional batches can be retrieved by calling Batch.next_batch/next_batch_async. Raises: datastore_errors.BadArgumentError if any of the arguments are invalid. An immutable class that represents a query signature. A query signature consists of a source of entities (specified as app, namespace and optionally kind and ancestor) as well as a FilterPredicate, grouping and a desired ordering. Constructor. Args: app: Optional app to query, derived from the environment if not specified. namespace: Optional namespace to query, derived from the environment if not specified. kind: Optional kind to query. ancestor: Optional ancestor to query, an entity_pb2.Reference. filter_predicate: Optional FilterPredicate by which to restrict the query. group_by: Optional list of properties to group the results by. order: Optional Order in which to return results. read_time_us: Optional timestamp to read the storage from. Internal use only. Raises: datastore_errors.BadArgumentError if any argument is invalid. Returns a googledatastore.RunQueryRequest. Returns the internal only pb representation. Validates that a query's projection and group by match. Args: projection: A set of string property names in the projection. group_by: A set of string property names in the group by. Raises: datastore_errors.BadRequestError: if the projection and group by sets are not equal. Performs the given query on a set of in-memory results. This function can perform queries impossible in the datastore (e.g a query with multiple inequality filters on different properties) because all operations are done in memory. 
For queries that can also be executed on the the datastore, the results produced by this function may not use the same implicit ordering as the datastore. To ensure compatibility, explicit ordering must be used (e.g. 'ORDER BY ineq_prop, ..., __key__'). Order by __key__ should always be used when a consistent result is desired (unless there is a sort order on another globally unique property). Args: query: a datastore_query.Query to apply entities: a list of results, of arbitrary type, on which to apply the query. _key: a function that takes an element of the result array as an argument and must return an entity_pb2.EntityProto. If not specified, the identity function is used (and entities must be a list of entity_pb2.EntityProto). Returns: A subset of entities, filtered and ordered according to the query. A query that combines a datastore query with in-memory filters/results. Constructor for _AugmentedQuery. Do not call directly. Use the utility functions instead (e.g. datastore_query.inject_results) Args: query: A datastore_query.Query object to augment. in_memory_results: a list of pre- sorted and filtered result to add to the stream of datastore results or None . in_memory_filter: a set of in-memory filters to apply to the datastore results or None. max_filtered_count: the maximum number of datastore entities that will be filtered out by in_memory_filter if known. Creates a query object that will inject changes into results. Args: query: The datastore_query.Query to augment updated_entities: A list of entity_pb2.EntityProto's that have been updated and should take priority over any values returned by query. deleted_keys: A list of entity_pb2.Reference's for entities that have been deleted and should be removed from query results. Returns: A datastore_query.AugmentedQuery if in memory filtering is required, query otherwise. Data shared among the batches of a query. Returns the list of indexes used by the query. Possibly None when the adapter does not implement pb_to_index. A batch of results returned by a query. This class contains a batch of results returned from the datastore and relevant metadata. This metadata includes: query: The query that produced this batch query_options: The QueryOptions used to run the query. This does not contained any options passed to the .next_batch() call that created the current batch. start_cursor, end_cursor: These are the cursors that can be used with a query to re-fetch this batch. They can also be used to find all entities before or after the given batch (by use start_cursor as an end cursor or vice versa). start_cursor can also be advanced to point to a position within the batch using Cursor.advance(). skipped_results: the number of result skipped because of the offset given to the request that generated it. This can be set either on the original Query.run() request or in subsequent .next_batch() calls. more_results: If this is true there are more results that can be retrieved either by .next_batch() or Batcher.next(). This class is also able to fetch the next batch of the query using .next_batch(). As batches of results must be fetched serially, .next_batch() can only be called once. Additional calls to .next_batch() will return None. When there are no more batches .next_batch() will return None as well. Note that batches returned by iterating over Batcher will always return None for .next_batch() as the Bather handles fetching the next batch automatically. A Batch typically represents the result of a single RPC request. 
The datastore operates on a "best effort" basis so the batch returned by .next_batch() or Query.run_async().get_result() may not have satisfied the requested offset or number of results (specified through FetchOptions.offset and FetchOptions.batch_size respectively). To satisfy these restrictions additional batches may be needed (with FetchOptions that specify the remaining offset or results needed). The Batcher class hides these limitations. Constructor. This class is constructed in stages (one when an RPC is sent and another when an rpc is completed) and should not be constructed directly!! Use Query.run_async().get_result() to create a Batch or Query.run() to use a batcher. This constructor does not perform verification. Args: batch_shared: Data shared between batches for a a single query run. start_cursor: Optional cursor pointing before this batch. The QueryOptions used to retrieve the first batch. The query the current batch came from. A list of entities in this batch. Whether the entities in this batch only contain keys. Returns the list of indexes used to peform this batch's query. Possibly None when the adapter does not implement pb_to_index. A cursor that points to the position just before the current batch. A cursor that points to the position just after the current batch. The number of results skipped because of an offset in the request. An offset is satisfied before any results are returned. The start_cursor points to the position in the query before the skipped results. Whether more results can be retrieved from the query. Synchronously get the next batch or None if there are no more batches. Args: fetch_options: Optional fetch options to use when fetching the next batch. Merged with both the fetch options on the original call and the connection. Returns: A new Batch of results or None if either the next batch has already been fetched or there are no more results. Gets the cursor that points just after the result at index - 1. The index is relative to first result in .results. Since start_cursor points to the position before the first skipped result, the range of indexes this function supports is limited to [-skipped_results, len(results)]. For example, using start_cursor=batch.cursor(i) and end_cursor=batch.cursor(j) will return the results found in batch.results[i:j]. Note that any result added in the range (i-1, j] will appear in the new query's results. Warning: Any index in the range (-skipped_results, 0) may cause continuation to miss or duplicate results if outside a transaction. Args: index: An int, the index relative to the first result before which the cursor should point. Returns: A Cursor that points to a position just after the result index - 1, which if used as a start_cursor will cause the first result to be batch.result[index]. Asynchronously get the next batch or None if there are no more batches. Args: fetch_options: Optional fetch options to use when fetching the next batch. Merged with both the fetch options on the original call and the connection. Returns: An async object that can be used to get the next Batch or None if either the next batch has already been fetched or there are no more results. Combines the current batch with the next one. Called by batcher. Makes a RunQuery call that will modify the instance. Args: config: The datastore_rpc.Configuration to use for the call. req: The request to send with the call. Returns: A UserRPC object that can be used to fetch the result of the RPC. Makes a Next call that will modify the instance. 
Args: config: The datastore_rpc.Configuration to use for the call. req: The request to send with the call. Returns: A UserRPC object that can be used to fetch the result of the RPC. Internal method used as get_result_hook for RunQuery/Next operation. Changes the internal state so that no more batches can be produced. Creates the object to store the next batch. Args: fetch_options: The datastore_query.FetchOptions passed in by the user or None. Returns: A tuple containing the fetch options that should be used internally and the object that should be used to contain the next batch. Converts the datastore results into results returned to the user. Args: results: A list of entity_pb2.EntityProto's returned by the datastore Returns: A list of results that should be returned to the user. Converts the datastore results into results returned to the user. Args: results: A list of googledatastore.EntityResults. Returns: A list of results that should be returned to the user. A batch produced by a datastore_query._AugmentedQuery. A Constructor for datastore_query._AugmentedBatch. Constructed by datastore_query._AugmentedQuery. Should not be called directly. The query the current batch came from. Process V4 results by converting to V3 and calling _process_results. A class that implements the Iterator interface for Batches. Typically constructed by a call to Query.run(). The class hides the "best effort" nature of the datastore by potentially making multiple requests to the datastore and merging the resulting batches. This is accomplished efficiently by prefetching results and mixing both non-blocking and blocking calls to the datastore as needed. Iterating through batches is almost always more efficient than pulling all results at once as RPC latency is hidden by asynchronously prefetching results. The batches produce by this class cannot be used to fetch the next batch (through Batch.next_batch()) as before the current batch is returned the request for the next batch has already been sent. Constructor. Although this class can be manually constructed, it is preferable to use Query.run(query_options). Args: query_options: The QueryOptions used to create the first batch. first_async_batch: The first batch produced by Query.run_async(query_options). Get the next batch. See .next_batch(). Get the next batch. The batch returned by this function cannot be used to fetch the next batch (through Batch.next_batch()). Instead this function will always return None. To retrieve the next batch use .next() or .next_batch(N). This function may return a batch larger than min_to_fetch, but will never return smaller unless there are no more results. Special values can be used for min_batch_size: ASYNC_ONLY - Do not perform any synchrounous fetches from the datastore even if the this produces a batch with no results. AT_LEAST_OFFSET - Only pull enough results to satifiy the offset. AT_LEAST_ONE - Pull batches until at least one result is returned. Args: min_batch_size: The minimum number of results to retrieve or one of (ASYNC_ONLY, AT_LEAST_OFFSET, AT_LEAST_ONE) Returns: The next Batch of results. An iterator over the results from Batches obtained from a Batcher. ResultsIterator implements Python's iterator protocol, so results can be accessed with the for-statement: > it = ResultsIterator(Query(kind='Person').run()) > for person in it: > print 'Hi, %s!' % person['name'] At any time ResultsIterator.cursor() can be used to grab the Cursor that points just after the last result returned by the iterator. Constructor. 
Args: batcher: A datastore_query.Batcher Returns the list of indexes used to perform the query. Possibly None when the adapter does not implement pb_to_index. Returns a cursor that points just after the last result returned. If next() throws an exception, this function returns the end_cursor from the last successful batch or throws the same exception if no batch was successful. Returns the compiled query associated with the iterator. Internal only do not use. Returns the next query result. | 1.680886 | 2 |
tests/Metrics/test_recall.py | Neklaustares-tPtwP/torchflare | 1 | 7690 | <reponame>Neklaustares-tPtwP/torchflare
# flake8: noqa
import warnings
import pytest
import torch
from sklearn.exceptions import UndefinedMetricWarning
from sklearn.metrics import recall_score
from torchflare.metrics.recall_meter import Recall
from torchflare.metrics.meters import _BaseInputHandler
torch.manual_seed(42)
def test_binary_inputs():
def _test(num_classes, threshold, multilabel, average):
rc = Recall(num_classes=num_classes, threshold=threshold, multilabel=multilabel, average=average,)
outputs = torch.randn(100, 1)
targets = torch.randint(0, 2, size=(100,))
bs = _BaseInputHandler(num_classes=num_classes, average=average, threshold=0.5, multilabel=multilabel,)
np_outputs, np_targets = bs._compute(outputs=outputs, targets=targets)
rc.accumulate(outputs=outputs, targets=targets)
rec_val = rc.value
assert rc.case_type == "binary"
rec_skm = recall_score(np_targets.numpy(), np_outputs.numpy(), average="binary")
assert rec_skm == pytest.approx(rec_val.item())
rc.reset()
bs = 16
iters = targets.shape[0] // bs + 1
for i in range(iters):
idx = i * bs
rc.accumulate(outputs=outputs[idx : idx + bs], targets=targets[idx : idx + bs])
m_rc = rc.value
assert rec_skm == pytest.approx(m_rc.item())
for _ in range(10):
_test(num_classes=2, threshold=0.5, multilabel=False, average="macro")
_test(num_classes=2, threshold=0.5, multilabel=False, average="micro")
def test_multiclass_inputs():
def _test(num_classes, threshold, multilabel, average):
rc = Recall(num_classes=num_classes, threshold=threshold, multilabel=multilabel, average=average,)
outputs = torch.randn(100, 4)
targets = torch.randint(0, 4, size=(100,))
bs = _BaseInputHandler(num_classes=num_classes, average=average, threshold=0.5, multilabel=multilabel,)
np_outputs, np_targets = bs._compute(outputs=outputs, targets=targets)
rc.accumulate(outputs=outputs, targets=targets)
rec_val = rc.value
assert rc.case_type == "multiclass"
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UndefinedMetricWarning)
rec_skm = recall_score(np_targets.numpy(), np_outputs.numpy(), average=average)
assert rec_skm == pytest.approx(rec_val.item())
rc.reset()
bs = 16
iters = targets.shape[0] // bs + 1
for i in range(iters):
idx = i * bs
rc.accumulate(outputs=outputs[idx : idx + bs], targets=targets[idx : idx + bs])
rec_m = rc.value
assert rec_skm == pytest.approx(rec_m.item())
for _ in range(10):
_test(num_classes=4, threshold=0.5, multilabel=False, average="macro")
_test(num_classes=4, threshold=0.5, multilabel=False, average="micro")
def test_multilabel_inputs():
def _test(num_classes, threshold, multilabel, average):
rc = Recall(num_classes=num_classes, threshold=threshold, multilabel=multilabel, average=average,)
outputs = torch.randn(100, 4)
targets = torch.randint(0, 2, size=(100, 4))
bs = _BaseInputHandler(num_classes=num_classes, average=average, threshold=0.5, multilabel=multilabel,)
np_outputs, np_targets = bs._compute(outputs=outputs, targets=targets)
rc.accumulate(outputs=outputs, targets=targets)
rec_val = rc.value
assert rc.case_type == "multilabel"
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UndefinedMetricWarning)
rec_skm = recall_score(np_targets, np_outputs, average=average)
assert rec_skm == pytest.approx(rec_val.item())
rc.reset()
bs = 16
iters = targets.shape[0] // bs + 1
for i in range(iters):
idx = i * bs
rc.accumulate(
outputs=outputs[idx : idx + bs], targets=targets[idx : idx + bs],
)
m_rc = rc.value
assert rec_skm == pytest.approx(m_rc.item())
for _ in range(10):
_test(num_classes=4, threshold=0.5, multilabel=True, average="macro")
_test(num_classes=4, threshold=0.5, multilabel=True, average="micro")
| # flake8: noqa
import warnings
import pytest
import torch
from sklearn.exceptions import UndefinedMetricWarning
from sklearn.metrics import recall_score
from torchflare.metrics.recall_meter import Recall
from torchflare.metrics.meters import _BaseInputHandler
torch.manual_seed(42)
def test_binary_inputs():
def _test(num_classes, threshold, multilabel, average):
rc = Recall(num_classes=num_classes, threshold=threshold, multilabel=multilabel, average=average,)
outputs = torch.randn(100, 1)
targets = torch.randint(0, 2, size=(100,))
bs = _BaseInputHandler(num_classes=num_classes, average=average, threshold=0.5, multilabel=multilabel,)
np_outputs, np_targets = bs._compute(outputs=outputs, targets=targets)
rc.accumulate(outputs=outputs, targets=targets)
rec_val = rc.value
assert rc.case_type == "binary"
rec_skm = recall_score(np_targets.numpy(), np_outputs.numpy(), average="binary")
assert rec_skm == pytest.approx(rec_val.item())
rc.reset()
bs = 16
iters = targets.shape[0] // bs + 1
for i in range(iters):
idx = i * bs
rc.accumulate(outputs=outputs[idx : idx + bs], targets=targets[idx : idx + bs])
m_rc = rc.value
assert rec_skm == pytest.approx(m_rc.item())
for _ in range(10):
_test(num_classes=2, threshold=0.5, multilabel=False, average="macro")
_test(num_classes=2, threshold=0.5, multilabel=False, average="micro")
def test_multiclass_inputs():
def _test(num_classes, threshold, multilabel, average):
rc = Recall(num_classes=num_classes, threshold=threshold, multilabel=multilabel, average=average,)
outputs = torch.randn(100, 4)
targets = torch.randint(0, 4, size=(100,))
bs = _BaseInputHandler(num_classes=num_classes, average=average, threshold=0.5, multilabel=multilabel,)
np_outputs, np_targets = bs._compute(outputs=outputs, targets=targets)
rc.accumulate(outputs=outputs, targets=targets)
rec_val = rc.value
assert rc.case_type == "multiclass"
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UndefinedMetricWarning)
rec_skm = recall_score(np_targets.numpy(), np_outputs.numpy(), average=average)
assert rec_skm == pytest.approx(rec_val.item())
rc.reset()
bs = 16
iters = targets.shape[0] // bs + 1
for i in range(iters):
idx = i * bs
rc.accumulate(outputs=outputs[idx : idx + bs], targets=targets[idx : idx + bs])
rec_m = rc.value
assert rec_skm == pytest.approx(rec_m.item())
for _ in range(10):
_test(num_classes=4, threshold=0.5, multilabel=False, average="macro")
_test(num_classes=4, threshold=0.5, multilabel=False, average="micro")
def test_multilabel_inputs():
def _test(num_classes, threshold, multilabel, average):
rc = Recall(num_classes=num_classes, threshold=threshold, multilabel=multilabel, average=average,)
outputs = torch.randn(100, 4)
targets = torch.randint(0, 2, size=(100, 4))
bs = _BaseInputHandler(num_classes=num_classes, average=average, threshold=0.5, multilabel=multilabel,)
np_outputs, np_targets = bs._compute(outputs=outputs, targets=targets)
rc.accumulate(outputs=outputs, targets=targets)
rec_val = rc.value
assert rc.case_type == "multilabel"
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UndefinedMetricWarning)
rec_skm = recall_score(np_targets, np_outputs, average=average)
assert rec_skm == pytest.approx(rec_val.item())
rc.reset()
bs = 16
iters = targets.shape[0] // bs + 1
for i in range(iters):
idx = i * bs
rc.accumulate(
outputs=outputs[idx : idx + bs], targets=targets[idx : idx + bs],
)
m_rc = rc.value
assert rec_skm == pytest.approx(m_rc.item())
for _ in range(10):
_test(num_classes=4, threshold=0.5, multilabel=True, average="macro")
_test(num_classes=4, threshold=0.5, multilabel=True, average="micro") | it | 0.238973 | # flake8: noqa | 2.156482 | 2 |
parsing/tests/test_utils.py | davesque/parsing.py | 1 | 7691 | <filename>parsing/tests/test_utils.py
from __future__ import unicode_literals
import unittest
# NOTE: `reduce` is a builtin only on Python 2; importing it from functools
# keeps the heavily-nested flatten test below working on Python 3 as well.
from functools import reduce
from ..utils import compose, flatten, truncate, join, unary, equals
class TestEquals(unittest.TestCase):
def test_it_should_return_a_function_that_compares_against_x(self):
self.assertTrue(equals(234)(234))
self.assertFalse(equals(234)(123))
class TestUnary(unittest.TestCase):
def test_it_should_convert_a_function_into_a_unary_version_of_itself(self):
self.assertEqual(unary(lambda x, y: x + y)([1, 2]), 3)
class TestJoin(unittest.TestCase):
def test_it_should_join_a_sequence_into_a_string(self):
self.assertEqual(join(list('arst')), 'arst')
self.assertEqual(join(map(str, [1, 2, 3, 4])), '1234')
class TestTruncate(unittest.TestCase):
def test_it_should_truncate_a_string(self):
self.assertEqual(truncate('arst'), 'arst')
self.assertEqual(truncate('arstarstar'), 'arstarstar')
self.assertEqual(truncate('arstarstars'), 'arstarstar...')
self.assertEqual(truncate('arstarstarstarstarstarstarstarst'), 'arstarstar...')
class TestCompose(unittest.TestCase):
def test_it_should_compose_the_given_functions(self):
f = compose(
lambda x: x + 1,
lambda x: x * 2,
lambda x: x ** 3,
)
self.assertEqual(f(1), 3)
self.assertEqual(f(2), 17)
self.assertEqual(f(3), 55)
class TestFlatten(unittest.TestCase):
def test_it_should_flatten_an_arbitrarily_nested_list(self):
self.assertEqual(
flatten([1, 2, [3, 4, [5, 6]]]),
[1, 2, 3, 4, 5, 6],
)
heavily_nested = reduce(lambda a, i: (a, i), range(1000))
self.assertEqual(
flatten(heavily_nested),
list(range(1000)),
)
| <filename>parsing/tests/test_utils.py
from __future__ import unicode_literals
import unittest
from ..utils import compose, flatten, truncate, join, unary, equals
class TestEquals(unittest.TestCase):
def test_it_should_return_a_function_that_compares_against_x(self):
self.assertTrue(equals(234)(234))
self.assertFalse(equals(234)(123))
class TestUnary(unittest.TestCase):
def test_it_should_convert_a_function_into_a_unary_version_of_itself(self):
self.assertEqual(unary(lambda x, y: x + y)([1, 2]), 3)
class TestJoin(unittest.TestCase):
def test_it_should_join_a_sequence_into_a_string(self):
self.assertEqual(join(list('arst')), 'arst')
self.assertEqual(join(map(str, [1, 2, 3, 4])), '1234')
class TestTruncate(unittest.TestCase):
def test_it_should_truncate_a_string(self):
self.assertEqual(truncate('arst'), 'arst')
self.assertEqual(truncate('arstarstar'), 'arstarstar')
self.assertEqual(truncate('arstarstars'), 'arstarstar...')
self.assertEqual(truncate('arstarstarstarstarstarstarstarst'), 'arstarstar...')
class TestCompose(unittest.TestCase):
def test_it_should_compose_the_given_functions(self):
f = compose(
lambda x: x + 1,
lambda x: x * 2,
lambda x: x ** 3,
)
self.assertEqual(f(1), 3)
self.assertEqual(f(2), 17)
self.assertEqual(f(3), 55)
class TestFlatten(unittest.TestCase):
def test_it_should_flatten_an_arbitrarily_nested_list(self):
self.assertEqual(
flatten([1, 2, [3, 4, [5, 6]]]),
[1, 2, 3, 4, 5, 6],
)
heavily_nested = reduce(lambda a, i: (a, i), range(1000))
self.assertEqual(
flatten(heavily_nested),
list(range(1000)),
)
| none | 1 | 2.748088 | 3 |
|
Array/Final450/Move_Negative_Nums_To_One_End/relative_order_matters/move_negative_nums_to_one_end--insertion_sort_modified.py | prash-kr-meena/GoogleR | 0 | 7692 | <reponame>prash-kr-meena/GoogleR
from Utils.Array import input_array
# Time : O(n2)
# Space : O(1) Constant space
"""
Ill be having 2 pointers here
one of them will move through the array looking for -ve numbers to operate on
and another will be pointing to the correct location where i can put the -ve elements, after i find them
also this same location will denote the starting of the 1st +ve number in the array,
--> as we will be going to move them forward
Finally when you find a -ve number, store it temporarily
do the swapping, to move all the +ve numbers forward by one step to, make place for the stored -ve number
then finally put that number in its correct position and move the pointer to store future -ve numbers
"""
def rearrange_via_modified_insertion_sort(A):
# walking_index = 0
index_to_place_nums = 0 # for placing -ve nums that i find
for walking_index in range(0, len(A)): # go through the array
if A[walking_index] >= 0: # +ve num, so move on
continue
# -ve num
found_num = A[walking_index] # temporary storage
# move all the +ve numbers, before it forward by one step
ptr = walking_index - 1
while ptr >= index_to_place_nums: # till it reaches the first +ve number
A[ptr + 1] = A[ptr]
ptr -= 1 # go back one step
# reached, now put the -ve found, at its correct position
A[index_to_place_nums] = found_num
index_to_place_nums += 1 # updating, for the index of next -ve number
if __name__ == "__main__":
arr = input_array()
rearrange_via_modified_insertion_sort(arr)
print(arr)
"""
12 11 -13 -5 6 -7 5 -3 -6
-1 2 -3 4 5 6 -7 8 9
2 3 -1 -4 -6 # Reverse
4 3 2 1 0 -1 -2 -3 # Reverse containing 0
"""
| from Utils.Array import input_array
# Time : O(n2)
# Space : O(1) Constant space
"""
Ill be having 2 pointers here
one of them will move through the array looking for -ve numbers to operate on
and another will be pointing to the correct location where i can put the -ve elements, after i find them
also this same location will denote the starting of the 1st +ve number in the array,
--> as we will be going to move them forward
Finally when you find a -ve number, store it temporarily
do the swapping, to move all the +ve numbers forward by one step to, make place for the stored -ve number
then finally put that number in its correct position and move the pointer to store future -ve numbers
"""
def rearrange_via_modified_insertion_sort(A):
# walking_index = 0
index_to_place_nums = 0 # for placing -ve nums that i find
for walking_index in range(0, len(A)): # go through the array
if A[walking_index] >= 0: # +ve num, so move on
continue
# -ve num
found_num = A[walking_index] # temporary storage
# move all the +ve numbers, before it forward by one step
ptr = walking_index - 1
while ptr >= index_to_place_nums: # till it reaches the first +ve number
A[ptr + 1] = A[ptr]
ptr -= 1 # go back one step
# reached, now put the -ve found, at its correct position
A[index_to_place_nums] = found_num
index_to_place_nums += 1 # updating, for the index of next -ve number
if __name__ == "__main__":
arr = input_array()
rearrange_via_modified_insertion_sort(arr)
print(arr)
"""
12 11 -13 -5 6 -7 5 -3 -6
-1 2 -3 4 5 6 -7 8 9
2 3 -1 -4 -6 # Reverse
4 3 2 1 0 -1 -2 -3 # Reverse containing 0
""" | en | 0.826033 | # Time : O(n2) # Space : O(1) Constant space Ill be having 2 pointers here one of them will move through the array looking for -ve numbers to operate on and another will be pointing to the correct location where i can put the -ve elements, after i find them also this same location will denote the starting of the 1st +ve number in the array, --> as we will be going to move them forward Finally when you find a -ve number, store it temporarily do the swapping, to move all the +ve numbers forward by one step to, make place for the stored -ve number then finally put that number in its correct position and move the pointer to store future -ve numbers # walking_index = 0 # for placing -ve nums that i find # go through the array # +ve num, so move on # -ve num # temporary storage # move all the +ve numbers, before it forward by one step # till it reaches the first +ve number # go back one step # reached, now put the -ve found, at its correct position # updating, for the index of next -ve number 12 11 -13 -5 6 -7 5 -3 -6 -1 2 -3 4 5 6 -7 8 9 2 3 -1 -4 -6 # Reverse 4 3 2 1 0 -1 -2 -3 # Reverse containing 0 | 4.131996 | 4 |
python_test/test_epoll/test_epoll.py | zhtsh/test-examples | 0 | 7693 | <reponame>zhtsh/test-examples
# coding=utf8
import socket
import select
from datetime import datetime
from datetime import timedelta
EOL = b'\n\n'
response = b'HTTP/1.0 200 OK\nDate: Mon, 1 Jan 1996 01:01:01 GMT\n'
response += b'Content-Type: text/plain\nContent-Length: 13\n\n'
response += b'Hello, world!\n'
# 创建套接字对象并绑定监听端口
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serversocket.bind(('0.0.0.0', 8080))
serversocket.listen(1)
serversocket.setblocking(0)
# 创建epoll对象,并注册socket对象的 epoll可读事件
epoll = select.epoll()
epoll.register(serversocket.fileno(), select.EPOLLIN)
try:
connections = {}
requests = {}
responses = {}
while True:
# 主循环,epoll的系统调用,一旦有网络IO事件发生,poll调用返回。这是和select系统调用的关键区别
events = epoll.poll(1)
# 通过事件通知获得监听的文件描述符,进而处理
for fileno, event in events:
# 注册监听的socket对象可读,获取连接,并注册连接的可读事件
if fileno == serversocket.fileno():
connection, address = serversocket.accept()
connection.setblocking(0)
epoll.register(connection.fileno(), select.EPOLLIN)
connections[connection.fileno()] = connection
requests[connection.fileno()] = b''
responses[connection.fileno()] = response
elif event & select.EPOLLIN:
# 连接对象可读,处理客户端发生的信息,并注册连接对象可写
try:
requests[fileno] += connections[fileno].recv(1024)
if EOL in requests[fileno]:
epoll.modify(fileno, event | select.EPOLLOUT)
print(requests[fileno])
except Exception as e:
print(e)
epoll.unregister(fileno)
del connections[fileno]
elif event & select.EPOLLOUT:
# 连接对象可写事件发生,发送数据到客户端
try:
byteswritten = connections[fileno].send(responses[fileno])
# responses[fileno] = responses[fileno][byteswritten:]
# if len(responses[fileno]) == 0:
# epoll.modify(fileno, 0)
# connections[fileno].shutdown(socket.SHUT_RDWR)
except Exception as e:
print(e)
# epoll.modify(fileno, 0)
epoll.unregister(fileno)
del connections[fileno]
elif event & select.EPOLLHUP:
epoll.unregister(fileno)
connections[fileno].close()
del connections[fileno]
finally:
epoll.unregister(serversocket.fileno())
epoll.close()
serversocket.close()
| # coding=utf8
import socket
import select
from datetime import datetime
from datetime import timedelta
EOL = b'\n\n'
response = b'HTTP/1.0 200 OK\nDate: Mon, 1 Jan 1996 01:01:01 GMT\n'
response += b'Content-Type: text/plain\nContent-Length: 13\n\n'
response += b'Hello, world!\n'
# 创建套接字对象并绑定监听端口
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serversocket.bind(('0.0.0.0', 8080))
serversocket.listen(1)
serversocket.setblocking(0)
# 创建epoll对象,并注册socket对象的 epoll可读事件
epoll = select.epoll()
epoll.register(serversocket.fileno(), select.EPOLLIN)
try:
connections = {}
requests = {}
responses = {}
while True:
# 主循环,epoll的系统调用,一旦有网络IO事件发生,poll调用返回。这是和select系统调用的关键区别
events = epoll.poll(1)
# 通过事件通知获得监听的文件描述符,进而处理
for fileno, event in events:
# 注册监听的socket对象可读,获取连接,并注册连接的可读事件
if fileno == serversocket.fileno():
connection, address = serversocket.accept()
connection.setblocking(0)
epoll.register(connection.fileno(), select.EPOLLIN)
connections[connection.fileno()] = connection
requests[connection.fileno()] = b''
responses[connection.fileno()] = response
elif event & select.EPOLLIN:
# 连接对象可读,处理客户端发生的信息,并注册连接对象可写
try:
requests[fileno] += connections[fileno].recv(1024)
if EOL in requests[fileno]:
epoll.modify(fileno, event | select.EPOLLOUT)
print(requests[fileno])
except Exception as e:
print(e)
epoll.unregister(fileno)
del connections[fileno]
elif event & select.EPOLLOUT:
# 连接对象可写事件发生,发送数据到客户端
try:
byteswritten = connections[fileno].send(responses[fileno])
# responses[fileno] = responses[fileno][byteswritten:]
# if len(responses[fileno]) == 0:
# epoll.modify(fileno, 0)
# connections[fileno].shutdown(socket.SHUT_RDWR)
except Exception as e:
print(e)
# epoll.modify(fileno, 0)
epoll.unregister(fileno)
del connections[fileno]
elif event & select.EPOLLHUP:
epoll.unregister(fileno)
connections[fileno].close()
del connections[fileno]
finally:
epoll.unregister(serversocket.fileno())
epoll.close()
serversocket.close() | zh | 0.686383 | # coding=utf8 # 创建套接字对象并绑定监听端口 # 创建epoll对象,并注册socket对象的 epoll可读事件 # 主循环,epoll的系统调用,一旦有网络IO事件发生,poll调用返回。这是和select系统调用的关键区别 # 通过事件通知获得监听的文件描述符,进而处理 # 注册监听的socket对象可读,获取连接,并注册连接的可读事件 # 连接对象可读,处理客户端发生的信息,并注册连接对象可写 # 连接对象可写事件发生,发送数据到客户端 # responses[fileno] = responses[fileno][byteswritten:] # if len(responses[fileno]) == 0: # epoll.modify(fileno, 0) # connections[fileno].shutdown(socket.SHUT_RDWR) # epoll.modify(fileno, 0) | 3.002098 | 3 |
20.2-Donut/Donut2.py | Kehvarl/AdventOfCode2019 | 1 | 7694 | <gh_stars>1-10
import collections
from pprint import pprint
example1 = open("input.txt", "r").read()
# grid = [[val for val in line] for line in example1.split("\n")]
grid = example1.split("\n")
length = 0
for line in grid:
length = max(len(line), length)
out = []
for line in grid:
out.append(line[::-1].zfill(length)[::-1])
grid = out
scanned = []
neighbors = [(0, 1), (0, -1), (1, 0), (-1, 0)]
def find_dot(dot_x, dot_y):
for (_dx, _dy) in neighbors:
if 0 <= dot_x + _dx < len(grid[0]) and 0 <= dot_y + _dy < len(grid):
if grid[dot_y + _dy][dot_x + _dx] == ".":
return (dot_x + _dx, dot_y + _dy), (dot_x - _dx, dot_y - _dy) # (dot), (tag)
return False
# Find portals
# For each portal:
# Inner edge: recurse
# Outer edge: return
portals = {}
portal_links = {}
height = len(grid) - 1
width = len(grid[0]) - 1
for y in range(len(grid)):
for x in range(len(grid[0])):
if grid[y][x].isalpha():
portal = find_dot(x, y)
if portal:
dot, (tag_x, tag_y) = portal
dot_x, dot_y = dot
edge = dot_x == 2 or dot_x == width - 2 or dot_y == 2 or dot_y == height - 2
tag = "".join(sorted(grid[y][x] + grid[tag_y][tag_x]))
if not portals.get(tag):
portals[tag] = []
portals[tag].append(((x, y), dot, edge))
gx, gy, sx, sy = (0, 0, 0, 0)
for link in portals:
ends = portals[link]
if len(ends) == 2:
(a, (a_x, a_y), a_edge), (b, (b_x, b_y), b_edge) = ends
portal_links[a] = (b_x, b_y, a_edge, link)
portal_links[b] = (a_x, a_y, b_edge, link)
elif link == "ZZ":
goal, (gx, gy), ge = ends[0]
elif link == "AA":
start, (sx, sy), se = ends[0]
pprint(portals)
print(portal_links)
bfs = collections.deque([((sx, sy), 0, 0)])
seen = {(sx, sy, 0)}
running = True
while running:
pos, level, dist = bfs.popleft()
if pos == (gx, gy) and level == 0:
print(dist)
running = False
break
for neighbor in neighbors:
dx, dy = neighbor
tx, ty = pos
tx, ty = tx + dx, ty + dy
t_level = level
if (tx, ty) in portal_links:
px, py, p_edge, link = portal_links[(tx, ty)]
# print(link, (tx, ty), (px, py), p_edge)
if p_edge and t_level > 0:
t_level -= 1
tx, ty = px, py
elif not p_edge:
t_level += 1
tx, ty = px, py
if (tx, ty, t_level) in seen:
continue
seen.add((tx, ty, t_level))
if grid[ty][tx] == '.':
p = (tx, ty)
s = (p, t_level, dist + 1)
bfs.append(s)
print("complete")
| import collections
from pprint import pprint
example1 = open("input.txt", "r").read()
# grid = [[val for val in line] for line in example1.split("\n")]
grid = example1.split("\n")
length = 0
for line in grid:
length = max(len(line), length)
out = []
for line in grid:
out.append(line[::-1].zfill(length)[::-1])
grid = out
scanned = []
neighbors = [(0, 1), (0, -1), (1, 0), (-1, 0)]
def find_dot(dot_x, dot_y):
for (_dx, _dy) in neighbors:
if 0 <= dot_x + _dx < len(grid[0]) and 0 <= dot_y + _dy < len(grid):
if grid[dot_y + _dy][dot_x + _dx] == ".":
return (dot_x + _dx, dot_y + _dy), (dot_x - _dx, dot_y - _dy) # (dot), (tag)
return False
# Find portals
# For each portal:
# Inner edge: recurse
# Outer edge: return
portals = {}
portal_links = {}
height = len(grid) - 1
width = len(grid[0]) - 1
for y in range(len(grid)):
for x in range(len(grid[0])):
if grid[y][x].isalpha():
portal = find_dot(x, y)
if portal:
dot, (tag_x, tag_y) = portal
dot_x, dot_y = dot
edge = dot_x == 2 or dot_x == width - 2 or dot_y == 2 or dot_y == height - 2
tag = "".join(sorted(grid[y][x] + grid[tag_y][tag_x]))
if not portals.get(tag):
portals[tag] = []
portals[tag].append(((x, y), dot, edge))
gx, gy, sx, sy = (0, 0, 0, 0)
for link in portals:
ends = portals[link]
if len(ends) == 2:
(a, (a_x, a_y), a_edge), (b, (b_x, b_y), b_edge) = ends
portal_links[a] = (b_x, b_y, a_edge, link)
portal_links[b] = (a_x, a_y, b_edge, link)
elif link == "ZZ":
goal, (gx, gy), ge = ends[0]
elif link == "AA":
start, (sx, sy), se = ends[0]
pprint(portals)
print(portal_links)
bfs = collections.deque([((sx, sy), 0, 0)])
seen = {(sx, sy, 0)}
running = True
while running:
pos, level, dist = bfs.popleft()
if pos == (gx, gy) and level == 0:
print(dist)
running = False
break
for neighbor in neighbors:
dx, dy = neighbor
tx, ty = pos
tx, ty = tx + dx, ty + dy
t_level = level
if (tx, ty) in portal_links:
px, py, p_edge, link = portal_links[(tx, ty)]
# print(link, (tx, ty), (px, py), p_edge)
if p_edge and t_level > 0:
t_level -= 1
tx, ty = px, py
elif not p_edge:
t_level += 1
tx, ty = px, py
if (tx, ty, t_level) in seen:
continue
seen.add((tx, ty, t_level))
if grid[ty][tx] == '.':
p = (tx, ty)
s = (p, t_level, dist + 1)
bfs.append(s)
print("complete") | en | 0.536844 | # grid = [[val for val in line] for line in example1.split("\n")] # (dot), (tag) # Find portals # For each portal: # Inner edge: recurse # Outer edge: return # print(link, (tx, ty), (px, py), p_edge) | 3.354449 | 3 |
OR_Client_Library/openrefine_client/tests/test_history.py | idaks/OpenRefine-Provenance-Tools | 0 | 7695 | #!/usr/bin/env python
"""
test_history.py
"""
# Copyright (c) 2011 <NAME>, Real Programmers. All rights reserved.
import unittest
from OR_Client_Library.openrefine_client.google.refine.history import *
class HistoryTest(unittest.TestCase):
def test_init(self):
response = {
u"code": "ok",
u"historyEntry": {
u"id": 1303851435223,
u"description": "Split 4 cells",
u"time": "2011-04-26T16:45:08Z"
}
}
he = response['historyEntry']
entry = HistoryEntry(he['id'], he['time'], he['description'])
self.assertEqual(entry.id, 1303851435223)
self.assertEqual(entry.description, 'Split 4 cells')
self.assertEqual(entry.time, '2011-04-26T16:45:08Z')
if __name__ == '__main__':
unittest.main()
| #!/usr/bin/env python
"""
test_history.py
"""
# Copyright (c) 2011 <NAME>, Real Programmers. All rights reserved.
import unittest
from OR_Client_Library.openrefine_client.google.refine.history import *
class HistoryTest(unittest.TestCase):
def test_init(self):
response = {
u"code": "ok",
u"historyEntry": {
u"id": 1303851435223,
u"description": "Split 4 cells",
u"time": "2011-04-26T16:45:08Z"
}
}
he = response['historyEntry']
entry = HistoryEntry(he['id'], he['time'], he['description'])
self.assertEqual(entry.id, 1303851435223)
self.assertEqual(entry.description, 'Split 4 cells')
self.assertEqual(entry.time, '2011-04-26T16:45:08Z')
if __name__ == '__main__':
unittest.main()
| en | 0.593522 | #!/usr/bin/env python test_history.py # Copyright (c) 2011 <NAME>, Real Programmers. All rights reserved. | 2.67815 | 3 |
tests/batch/test_get_batch.py | Remmeauth/remme-core-cli | 0 | 7696 | """
Provide tests for command line interface's get batch command.
"""
import json
import pytest
from click.testing import CliRunner
from cli.constants import (
DEV_BRANCH_NODE_IP_ADDRESS_FOR_TESTING,
FAILED_EXIT_FROM_COMMAND_CODE,
PASSED_EXIT_FROM_COMMAND_CODE,
)
from cli.entrypoint import cli
from cli.utils import dict_to_pretty_json
BATCH_IDENTIFIER_PRESENTED_ON_THE_TEST_NODE = '<KEY>' \
'<KEY>'
def test_get_batch():
"""
Case: get a batch by identifier.
Expect: batch is returned.
"""
runner = CliRunner()
result = runner.invoke(cli, [
'batch',
'get',
'--id',
BATCH_IDENTIFIER_PRESENTED_ON_THE_TEST_NODE,
'--node-url',
DEV_BRANCH_NODE_IP_ADDRESS_FOR_TESTING,
])
assert PASSED_EXIT_FROM_COMMAND_CODE == result.exit_code
assert isinstance(json.loads(result.output), dict)
def test_get_batch_with_invalid_id():
"""
Case: get a batch by its invalid identifier.
Expect: the following identifier is invalid error message.
"""
invalid_batch_id = 'abcefg'
runner = CliRunner()
result = runner.invoke(cli, [
'batch',
'get',
'--id',
invalid_batch_id,
'--node-url',
DEV_BRANCH_NODE_IP_ADDRESS_FOR_TESTING,
])
expected_error_message = {
'errors': {
'id': [
f'The following identifier `{invalid_batch_id}` is invalid.',
],
},
}
assert FAILED_EXIT_FROM_COMMAND_CODE == result.exit_code
assert dict_to_pretty_json(expected_error_message) in result.output
def test_get_batch_without_node_url(mocker):
"""
Case: get a batch by its identifier without passing node URL.
Expect: batch is returned from a node on localhost.
"""
batch_id = '6f200995e766da7218ec2a3d0aeabbe1151128063cdf4e954cd08390a879b28e' \
'085a06f8708d2e6bb34f6501e8ddc981f0353627c1d4f90c80a656a8090c8751'
expected_result = {
"data": {
"header": {
"signer_public_key": "<KEY>",
"transaction_ids": [
"5a84ff8747e16d15a988a8b13134d24981a6b516bb41042e6ea95c47f6c9429c"
"1c6fdf787ca2ea7fb8725b2bc2d0cd6aa3836aadfe85354deb714e048d41b4d7",
],
},
"header_signature": "57692f2bcc9be7fe2b59c052d5938eb92bd7be8a36487c1c7efc2c5758bf108e"
"232892987e898071e5ea13b4cbe283e96ac45d8f63cd9065522df7b85b050977",
"transactions": [
{
"header": {
"batcher_public_key": "<KEY>",
"family_name": "sawtooth_settings",
"family_version": "1.0",
"inputs": [
"<KEY>",
"000000a87cb5eafdcca6a8cde0fb0dec1400c5ab274474a6aa82c12840f169a04216b7",
],
"outputs": [
"<KEY>",
],
"signer_public_key": "<KEY>",
},
"header_signature": "5a84ff8747e16d15a988a8b13134d24981a6b516bb41042e6ea95c47f6c9429c"
"1c6fdf787ca2ea7fb8725b2bc2d0cd6aa3836aadfe85354deb714e048d41b4d7",
"payload": "CAESgAEKJnNhd3Rvb3RoLnNldHRpbmdzLnZvdGUuYyaXplZF9rZXlzEkIwM2Q0MjVkMmQxN2I2NGUzZWY4Zm"
"VlMDI4MDg5YTU2N2ZiYjA1YmQ1NTZmOThjMGI2ZmIJjNMGVhNjJiOGYaEjB4ZDU0NzJhOTY1NWJkYTNmNg==",
},
],
},
}
mock_get_batch_by_id = mocker.patch('cli.batch.service.loop.run_until_complete')
mock_get_batch_by_id.return_value = expected_result
runner = CliRunner()
result = runner.invoke(cli, [
'batch',
'get',
'--id',
batch_id,
])
assert PASSED_EXIT_FROM_COMMAND_CODE == result.exit_code
assert expected_result.get('data') == json.loads(result.output).get('result')
def test_get_batch_with_invalid_node_url():
"""
Case: get a batch by its identifier by passing an invalid node URL.
Expect: the following node URL is invalid error message.
"""
invalid_node_url = 'my-node-url.com'
runner = CliRunner()
result = runner.invoke(cli, [
'batch',
'get',
'--id',
BATCH_IDENTIFIER_PRESENTED_ON_THE_TEST_NODE,
'--node-url',
invalid_node_url,
])
expected_error_message = {
'errors': f'Please check if your node running at http://{invalid_node_url}:8080.',
}
assert FAILED_EXIT_FROM_COMMAND_CODE == result.exit_code
assert dict_to_pretty_json(expected_error_message) in result.output
@pytest.mark.parametrize('node_url_with_protocol', ['http://masternode.com', 'https://masternode.com'])
def test_get_batch_node_url_with_protocol(node_url_with_protocol):
"""
Case: get a batch by its identifier by passing node URL with an explicit protocol.
Expect: the following node URL contains a protocol error message.
"""
runner = CliRunner()
result = runner.invoke(cli, [
'batch',
'get',
'--id',
BATCH_IDENTIFIER_PRESENTED_ON_THE_TEST_NODE,
'--node-url',
node_url_with_protocol,
])
expected_error = {
'errors': {
'node_url': [
f'Pass the following node URL `{node_url_with_protocol}` without protocol (http, https, etc.).',
],
},
}
assert FAILED_EXIT_FROM_COMMAND_CODE == result.exit_code
assert dict_to_pretty_json(expected_error) in result.output
| """
Provide tests for command line interface's get batch command.
"""
import json
import pytest
from click.testing import CliRunner
from cli.constants import (
DEV_BRANCH_NODE_IP_ADDRESS_FOR_TESTING,
FAILED_EXIT_FROM_COMMAND_CODE,
PASSED_EXIT_FROM_COMMAND_CODE,
)
from cli.entrypoint import cli
from cli.utils import dict_to_pretty_json
BATCH_IDENTIFIER_PRESENTED_ON_THE_TEST_NODE = '<KEY>' \
'<KEY>'
def test_get_batch():
"""
Case: get a batch by identifier.
Expect: batch is returned.
"""
runner = CliRunner()
result = runner.invoke(cli, [
'batch',
'get',
'--id',
BATCH_IDENTIFIER_PRESENTED_ON_THE_TEST_NODE,
'--node-url',
DEV_BRANCH_NODE_IP_ADDRESS_FOR_TESTING,
])
assert PASSED_EXIT_FROM_COMMAND_CODE == result.exit_code
assert isinstance(json.loads(result.output), dict)
def test_get_batch_with_invalid_id():
"""
Case: get a batch by its invalid identifier.
Expect: the following identifier is invalid error message.
"""
invalid_batch_id = 'abcefg'
runner = CliRunner()
result = runner.invoke(cli, [
'batch',
'get',
'--id',
invalid_batch_id,
'--node-url',
DEV_BRANCH_NODE_IP_ADDRESS_FOR_TESTING,
])
expected_error_message = {
'errors': {
'id': [
f'The following identifier `{invalid_batch_id}` is invalid.',
],
},
}
assert FAILED_EXIT_FROM_COMMAND_CODE == result.exit_code
assert dict_to_pretty_json(expected_error_message) in result.output
def test_get_batch_without_node_url(mocker):
"""
Case: get a batch by its identifier without passing node URL.
Expect: batch is returned from a node on localhost.
"""
batch_id = '6f200995e766da7218ec2a3d0aeabbe1151128063cdf4e954cd08390a879b28e' \
'085a06f8708d2e6bb34f6501e8ddc981f0353627c1d4f90c80a656a8090c8751'
expected_result = {
"data": {
"header": {
"signer_public_key": "<KEY>",
"transaction_ids": [
"5a84ff8747e16d15a988a8b13134d24981a6b516bb41042e6ea95c47f6c9429c"
"1c6fdf787ca2ea7fb8725b2bc2d0cd6aa3836aadfe85354deb714e048d41b4d7",
],
},
"header_signature": "57692f2bcc9be7fe2b59c052d5938eb92bd7be8a36487c1c7efc2c5758bf108e"
"232892987e898071e5ea13b4cbe283e96ac45d8f63cd9065522df7b85b050977",
"transactions": [
{
"header": {
"batcher_public_key": "<KEY>",
"family_name": "sawtooth_settings",
"family_version": "1.0",
"inputs": [
"<KEY>",
"000000a87cb5eafdcca6a8cde0fb0dec1400c5ab274474a6aa82c12840f169a04216b7",
],
"outputs": [
"<KEY>",
],
"signer_public_key": "<KEY>",
},
"header_signature": "5a84ff8747e16d15a988a8b13134d24981a6b516bb41042e6ea95c47f6c9429c"
"1c6fdf787ca2ea7fb8725b2bc2d0cd6aa3836aadfe85354deb714e048d41b4d7",
"payload": "CAESgAEKJnNhd3Rvb3RoLnNldHRpbmdzLnZvdGUuYyaXplZF9rZXlzEkIwM2Q0MjVkMmQxN2I2NGUzZWY4Zm"
"VlMDI4MDg5YTU2N2ZiYjA1YmQ1NTZmOThjMGI2ZmIJjNMGVhNjJiOGYaEjB4ZDU0NzJhOTY1NWJkYTNmNg==",
},
],
},
}
mock_get_batch_by_id = mocker.patch('cli.batch.service.loop.run_until_complete')
mock_get_batch_by_id.return_value = expected_result
runner = CliRunner()
result = runner.invoke(cli, [
'batch',
'get',
'--id',
batch_id,
])
assert PASSED_EXIT_FROM_COMMAND_CODE == result.exit_code
assert expected_result.get('data') == json.loads(result.output).get('result')
def test_get_batch_with_invalid_node_url():
"""
Case: get a batch by its identifier by passing an invalid node URL.
Expect: the following node URL is invalid error message.
"""
invalid_node_url = 'my-node-url.com'
runner = CliRunner()
result = runner.invoke(cli, [
'batch',
'get',
'--id',
BATCH_IDENTIFIER_PRESENTED_ON_THE_TEST_NODE,
'--node-url',
invalid_node_url,
])
expected_error_message = {
'errors': f'Please check if your node running at http://{invalid_node_url}:8080.',
}
assert FAILED_EXIT_FROM_COMMAND_CODE == result.exit_code
assert dict_to_pretty_json(expected_error_message) in result.output
@pytest.mark.parametrize('node_url_with_protocol', ['http://masternode.com', 'https://masternode.com'])
def test_get_batch_node_url_with_protocol(node_url_with_protocol):
"""
Case: get a batch by its identifier by passing node URL with an explicit protocol.
Expect: the following node URL contains a protocol error message.
"""
runner = CliRunner()
result = runner.invoke(cli, [
'batch',
'get',
'--id',
BATCH_IDENTIFIER_PRESENTED_ON_THE_TEST_NODE,
'--node-url',
node_url_with_protocol,
])
expected_error = {
'errors': {
'node_url': [
f'Pass the following node URL `{node_url_with_protocol}` without protocol (http, https, etc.).',
],
},
}
assert FAILED_EXIT_FROM_COMMAND_CODE == result.exit_code
assert dict_to_pretty_json(expected_error) in result.output
| en | 0.874945 | Provide tests for command line interface's get batch command. Case: get a batch by identifier. Expect: batch is returned. Case: get a batch by its invalid identifier. Expect: the following identifier is invalid error message. Case: get a batch by its identifier without passing node URL. Expect: batch is returned from a node on localhost. Case: get a batch by its identifier by passing an invalid node URL. Expect: the following node URL is invalid error message. Case: get a batch by its identifier by passing node URL with an explicit protocol. Expect: the following node URL contains a protocol error message. | 2.508089 | 3 |
experiments/scripts/preprocess_dataset.py | pbielak/graph-barlow-twins | 9 | 7697 | import sys
from gssl.datasets import load_dataset
from gssl.inductive.datasets import load_ppi
from gssl.utils import seed
def main():
seed()
# Read dataset name
dataset_name = sys.argv[1]
# Load dataset
if dataset_name == "PPI":
load_ppi()
else:
load_dataset(name=dataset_name)
if __name__ == "__main__":
main()
| import sys
from gssl.datasets import load_dataset
from gssl.inductive.datasets import load_ppi
from gssl.utils import seed
def main():
seed()
# Read dataset name
dataset_name = sys.argv[1]
# Load dataset
if dataset_name == "PPI":
load_ppi()
else:
load_dataset(name=dataset_name)
if __name__ == "__main__":
main()
| en | 0.208773 | # Read dataset name # Load dataset | 1.845412 | 2 |
agro_site/orders/migrations/0001_initial.py | LukoninDmitryPy/agro_site-2 | 0 | 7698 | <reponame>LukoninDmitryPy/agro_site-2<filename>agro_site/orders/migrations/0001_initial.py
# Generated by Django 2.2.16 on 2022-04-12 13:28
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.expressions
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('sales_backend', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Chat',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(choices=[('D', 'Dialog'), ('C', 'Chat')], default='D', max_length=1, verbose_name='Тип')),
('members', models.ManyToManyField(to=settings.AUTH_USER_MODEL, verbose_name='Участник')),
],
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('address', models.CharField(max_length=250)),
('postal_code', models.CharField(max_length=20)),
('city', models.CharField(max_length=100)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('paid', models.BooleanField(default=False)),
('status_order', models.CharField(choices=[('В обработке', 'В обработке'), ('Заказ собран', 'Заказ собран'), ('Заказ отправлен', 'Заказ отправлен')], default='В обработке', max_length=20)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'Заказы',
'ordering': ('-created',),
},
),
migrations.CreateModel(
name='OrderItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('price', models.DecimalField(decimal_places=2, max_digits=10)),
('quantity', models.PositiveIntegerField(default=1)),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='orders.Order')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='order_items', to='sales_backend.Product')),
('seller', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='seller', to=settings.AUTH_USER_MODEL)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='order_users', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('message', models.TextField(verbose_name='Сообщение')),
('pub_date', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Дата сообщения')),
('is_readed', models.BooleanField(default=False, verbose_name='Прочитано')),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Пользователь')),
('chat', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='orders.Chat', verbose_name='Чат')),
],
options={
'ordering': ['pub_date'],
},
),
migrations.AddConstraint(
model_name='orderitem',
constraint=models.CheckConstraint(check=models.Q(_negated=True, user=django.db.models.expressions.F('seller')), name='dont_buy_yourself'),
),
]
| # Generated by Django 2.2.16 on 2022-04-12 13:28
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.expressions
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('sales_backend', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Chat',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(choices=[('D', 'Dialog'), ('C', 'Chat')], default='D', max_length=1, verbose_name='Тип')),
('members', models.ManyToManyField(to=settings.AUTH_USER_MODEL, verbose_name='Участник')),
],
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('address', models.CharField(max_length=250)),
('postal_code', models.CharField(max_length=20)),
('city', models.CharField(max_length=100)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('paid', models.BooleanField(default=False)),
('status_order', models.CharField(choices=[('В обработке', 'В обработке'), ('Заказ собран', 'Заказ собран'), ('Заказ отправлен', 'Заказ отправлен')], default='В обработке', max_length=20)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'Заказы',
'ordering': ('-created',),
},
),
migrations.CreateModel(
name='OrderItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('price', models.DecimalField(decimal_places=2, max_digits=10)),
('quantity', models.PositiveIntegerField(default=1)),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='orders.Order')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='order_items', to='sales_backend.Product')),
('seller', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='seller', to=settings.AUTH_USER_MODEL)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='order_users', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('message', models.TextField(verbose_name='Сообщение')),
('pub_date', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Дата сообщения')),
('is_readed', models.BooleanField(default=False, verbose_name='Прочитано')),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Пользователь')),
('chat', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='orders.Chat', verbose_name='Чат')),
],
options={
'ordering': ['pub_date'],
},
),
migrations.AddConstraint(
model_name='orderitem',
constraint=models.CheckConstraint(check=models.Q(_negated=True, user=django.db.models.expressions.F('seller')), name='dont_buy_yourself'),
),
] | en | 0.800953 | # Generated by Django 2.2.16 on 2022-04-12 13:28 | 1.659172 | 2 |
app/forms.py | FakeYou/flask-microblog | 0 | 7699 | from flask.ext.wtf import Form
from wtforms import StringField, BooleanField, PasswordField
from wtforms.validators import InputRequired, Email, EqualTo, Length
class LoginForm(Form):
nickname = StringField('nickname', validators=[InputRequired()])
password = PasswordField('password', validators=[InputRequired()])
remember_me = BooleanField('remember_me', default=False)
class RegisterForm(Form):
nickname = StringField('nickname', validators=[InputRequired()])
email = StringField('email', validators=[InputRequired(), Email()])
password = PasswordField('password', validators=[InputRequired(),
EqualTo('confirm', message='Password must match')])
confirm = PasswordField('<PASSWORD>')
class NewPostForm(Form):
body = StringField('body', validators=[InputRequired(), Length(max=140)]) | from flask.ext.wtf import Form
from wtforms import StringField, BooleanField, PasswordField
from wtforms.validators import InputRequired, Email, EqualTo, Length
class LoginForm(Form):
nickname = StringField('nickname', validators=[InputRequired()])
password = PasswordField('password', validators=[InputRequired()])
remember_me = BooleanField('remember_me', default=False)
class RegisterForm(Form):
nickname = StringField('nickname', validators=[InputRequired()])
email = StringField('email', validators=[InputRequired(), Email()])
password = PasswordField('password', validators=[InputRequired(),
EqualTo('confirm', message='Password must match')])
confirm = PasswordField('<PASSWORD>')
class NewPostForm(Form):
body = StringField('body', validators=[InputRequired(), Length(max=140)]) | none | 1 | 2.819196 | 3 |