hexsha | size | ext | lang | max_stars_repo_path | max_stars_repo_name | max_stars_repo_head_hexsha | max_stars_repo_licenses | max_stars_count | max_stars_repo_stars_event_min_datetime | max_stars_repo_stars_event_max_datetime | max_issues_repo_path | max_issues_repo_name | max_issues_repo_head_hexsha | max_issues_repo_licenses | max_issues_count | max_issues_repo_issues_event_min_datetime | max_issues_repo_issues_event_max_datetime | max_forks_repo_path | max_forks_repo_name | max_forks_repo_head_hexsha | max_forks_repo_licenses | max_forks_count | max_forks_repo_forks_event_min_datetime | max_forks_repo_forks_event_max_datetime | content | avg_line_length | max_line_length | alphanum_fraction | count_classes | score_classes | count_generators | score_generators | count_decorators | score_decorators | count_async_functions | score_async_functions | count_documentation | score_documentation |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7b24aa6646e92566319ce68092ddf4db0af43da1 | 2,600 | py | Python | make.py | loicseguin/astronomie | b489d615adb136991ff3fc82ca06c4f6791ca8c6 | [
"BSD-2-Clause"
]
| null | null | null | make.py | loicseguin/astronomie | b489d615adb136991ff3fc82ca06c4f6791ca8c6 | [
"BSD-2-Clause"
]
| 7 | 2020-01-19T21:27:07.000Z | 2020-01-19T21:28:09.000Z | make.py | loicseguin/astronomie | b489d615adb136991ff3fc82ca06c4f6791ca8c6 | [
"BSD-2-Clause"
]
| null | null | null | """Construit le site Explorer et comprendre l'Univers, incluant les diapositives
et le livre. Le logiciel Pandoc est utilisé pour obtenir des présentations
dans différents formats.
On peut construire tous les fichiers html avec la commande
$ python make.py
"""
import subprocess
import os
import sys
# Dossiers de présentation
DIAPOS_DIRS = [os.path.join('diapos', d) for d in os.listdir('diapos')
if d != 'reveal.js']
def run(call_str):
"""Exécute la chaîne de caractère sur la ligne de commande."""
try:
subprocess.check_call(call_str.split())
print("complet!")
except subprocess.CalledProcessError as e:
print(call_str, end='... ')
print("erreur, la compilation a échoué")
def revealjs(in_fname, out_fname):
"""Crée une présentation avec la librairie javascript Reveal.js."""
call_str = "pandoc -t revealjs " \
"-V revealjs-url=../reveal.js -s " \
"--slide-level=1 " \
"--mathjax {} -o {}".format(in_fname, out_fname)
run(call_str)
def diapos():
"""Construits les fichiers HTML des diapositives."""
cwd = os.getcwd()
for folder in DIAPOS_DIRS:
try:
os.chdir(folder)
except (FileNotFoundError, NotADirectoryError):
os.chdir(cwd)
continue
# Déterminer le nom du fichier source.
for fname in os.listdir():
if fname.endswith(".md"):
break
else:
os.chdir(cwd)
continue
in_fname = fname
out_fname = "{}.html".format(os.path.splitext(os.path.basename(fname))[0])
print("{}: ".format(folder), end='')
revealjs(in_fname, out_fname)
os.chdir(cwd)
def livre():
"""Construit les fichiers HTML du livre."""
for fname in os.listdir('livre'):
if not fname.endswith('.md'):
continue
in_fname = os.path.join('livre', fname)
out_fname = os.path.join(
'livre',
'{}.html'.format(os.path.splitext(os.path.basename(fname))[0]))
call_str = 'pandoc -s -c ../www/style.css --mathjax ' \
'--template www/book-template.html ' \
'--include-after-body www/sidebar.html ' \
'--include-after-body www/footer.html ' \
'{} -o {}'.format(in_fname, out_fname)
print("{}: ".format(in_fname), end='')
run(call_str)
if __name__ == '__main__':
if len(sys.argv) != 1:
print("usage: python make.py\n")
exit()
diapos()
livre()
| 30.232558 | 82 | 0.576154 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,004 | 0.38438 |
7b25b9ec772098b6a401939f74bc6b08ca37a58b | 280 | py | Python | geosnap/tests/get_data.py | WawNun/geosnap | 9838498b89d42c94fef73ee2983dd385dab17345 | [
"BSD-3-Clause"
]
| 148 | 2019-04-19T00:16:59.000Z | 2022-03-24T06:35:47.000Z | geosnap/tests/get_data.py | WawNun/geosnap | 9838498b89d42c94fef73ee2983dd385dab17345 | [
"BSD-3-Clause"
]
| 178 | 2019-04-15T21:54:36.000Z | 2022-03-31T03:08:29.000Z | geosnap/tests/get_data.py | WawNun/geosnap | 9838498b89d42c94fef73ee2983dd385dab17345 | [
"BSD-3-Clause"
]
| 25 | 2019-04-19T21:27:56.000Z | 2022-03-28T21:03:31.000Z | import os
from pathlib import PurePath
try:
from geosnap import io
except:
pass
path = os.getcwd()
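# Cache the LTDB and NCDB source data into geosnap's local data store; this is
# skipped silently when the sample archives are not present in the working directory.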
try:
io.store_ltdb(sample=PurePath(path, 'ltdb_sample.zip'), fullcount=PurePath(path, 'ltdb_full.zip'))
io.store_ncdb(PurePath(path, "ncdb.csv"))
except:
pass | 18.666667 | 102 | 0.707143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 42 | 0.15 |
7b26132c0d8b78762b805dd6438fa5d2c8d060b1 | 13,370 | py | Python | plotting/utils.py | plai-group/amortized-rejection-sampling | 1e85253ae1e6ef1c939e1c488e55f9d95ee48355 | [
"MIT"
]
| null | null | null | plotting/utils.py | plai-group/amortized-rejection-sampling | 1e85253ae1e6ef1c939e1c488e55f9d95ee48355 | [
"MIT"
]
| null | null | null | plotting/utils.py | plai-group/amortized-rejection-sampling | 1e85253ae1e6ef1c939e1c488e55f9d95ee48355 | [
"MIT"
]
| null | null | null | import numpy as np
import torch
from tqdm import tqdm
import matplotlib as mpl
# https://gist.github.com/thriveth/8560036
color_cycle = ['#377eb8', '#ff7f00', '#4daf4a',
'#f781bf', '#a65628', '#984ea3',
'#999999', '#e41a1c', '#dede00']
labels_dict = {"ic": "IC",
"prior": "Prior",
"ars-1": r"$\mathrm{ARS}_{M=1}$",
"ars-2": r"$\mathrm{ARS}_{M=2}$",
"ars-5": r"$\mathrm{ARS}_{M=5}$",
"ars-10": r"$\mathrm{ARS}_{M=10}$",
"ars-20": r"$\mathrm{ARS}_{M=20}$",
"ars-50": r"$\mathrm{ARS}_{M=50}$",
"biased": "Biased",
"gt": "Groundtruth",
"is": "IS",
"collapsed": "Collapsed"}
color_dict = {'gt': color_cycle[0],
'prior': color_cycle[5],
'ic': color_cycle[2],
'biased': color_cycle[3],
'ars-1': color_cycle[4],
'ars-2': color_cycle[1],
'ars-5': color_cycle[7],
'ars-10': color_cycle[6],
'ars-100': color_cycle[8],
'ars-50': color_cycle[8],
'is': color_cycle[8],
'ars-20': "C1",
"collapsed": color_cycle[7]}
########################################
## matplotlib style and configs ##
########################################
def setup_matplotlib():
import seaborn as sns
# mpl.use('Agg')
# plt.style.use('classic')
# sns.set(font_scale=1.5)
sns.set_style('white')
sns.color_palette('colorblind')
nice_fonts = {
# Use LaTeX to write all text
"text.usetex": True,
'text.latex.preamble': r'\usepackage{amsfonts}',
"font.family": "serif",
# Use 10pt font in plots, to match 10pt font in document
"axes.labelsize": 10,
"font.size": 10,
# Make the legend/label fonts a little smaller
"legend.fontsize": 8,
"xtick.labelsize": 7,
"ytick.labelsize": 7,
}
mpl.rcParams.update(nice_fonts)
def set_size(width, fraction=1, subplots=(1, 1)):
# https://jwalton.info/Embed-Publication-Matplotlib-Latex/
""" Set aesthetic figure dimensions to avoid scaling in latex.
Parameters
----------
width: float
Width in pts
fraction: float
Fraction of the width which you wish the figure to occupy
subplots: array-like, optional
The number of rows and columns of subplots.
Returns
-------
fig_dim: tuple
Dimensions of figure in inches
"""
if width == 'thesis':
width_pt = 426.79135
elif width == 'beamer':
width_pt = 307.28987
elif width == 'pnas':
width_pt = 246.09686
elif width == 'aistats22':
width_pt = 487.8225
else:
width_pt = width
# Width of figure
fig_width_pt = width_pt * fraction
# Convert from pt to inches
inches_per_pt = 1 / 72.27
# Golden ratio to set aesthetic figure height
golden_ratio = (5**.5 - 1) / 2
# Figure width in inches
fig_width_in = fig_width_pt * inches_per_pt
# Figure height in inches
fig_height_in = fig_width_in * golden_ratio * (subplots[0] / subplots[1])
return (fig_width_in, fig_height_in)
class OOMFormatter(mpl.ticker.ScalarFormatter):
"""OrderOfMagnitude formatter
Source:
https://stackoverflow.com/questions/42656139/set-scientific-notation-with-fixed-exponent-and-significant-digits-for-multiple
"""
def __init__(self, order=0, fformat="%1.1f", *args, **kwargs):
self.oom = order
self.fformat = fformat
mpl.ticker.ScalarFormatter.__init__(self,*args, **kwargs)
def _set_order_of_magnitude(self):
super()._set_order_of_magnitude()
self.orderOfMagnitude = self.oom
def add_center_aligned_legend(fig, handles, ncol, **kwargs):
nlines = len(handles)
leg1 = fig.legend(handles=handles[:nlines//ncol*ncol], ncol=ncol, **kwargs)
if nlines % ncol != 0:
fig.add_artist(leg1)
leg2 = fig.legend(handles=handles[nlines//ncol*ncol:], ncol=nlines-nlines//ncol*ncol)
leg2.remove()
leg1._legend_box._children.append(leg2._legend_handle_box)
leg1._legend_box.stale = True
########################################
## Loading from disk ##
########################################
def load_log_weights(log_weights_root, iw_mode):
"""Loads the log_weights from the disk. It assumes a file structure of <log_weights_root>/<iw_mode>/*.npy
    of multiple npy files. This function loads all the weights into a single numpy array, concatenating all npy files.
    Finally, it caches the result in a file stored at <log_weights_root>/<iw_mode>.npy
    In subsequent calls, it reuses the cached file.
Args:
log_weights_root (str or pathlib.Path)
iw_mode (str)
Returns:
np.ndarray: log importance weights
"""
agg_weights_file = log_weights_root / f"{iw_mode}.npy"
agg_weights_dir = log_weights_root / iw_mode
assert agg_weights_dir.exists() or agg_weights_file.exists()
if not agg_weights_file.exists():
log_weights = np.concatenate(
[np.load(weight_file) for weight_file in agg_weights_dir.glob("*.npy")])
np.save(agg_weights_file, log_weights)
else:
log_weights = np.load(agg_weights_file)
print(f"{log_weights_root} / {iw_mode} has {len(log_weights):,} traces")
return log_weights
########################################
## Estimators and metrics ##
########################################
def _compute_estimator_helper(log_weights, dx, estimator_func, **kwargs):
"""A helper function for computing the plotting data. It generates the
x-values and y-values of the plot. x-values is an increasing sequence of
integers, with incremens of dx and ending with N. y-values is a TxK tensor
where T is the number of trials and K is the size of x-values. The j-th
column of y-values is the estimator applied to the log_weights up to the
corresponding x-value.
Args:
log_weights (torch.FloatTensor of shape TxN): All the log importance weights
of a particular experiment.
        dx (int): difference between consecutive points at which the estimator is evaluated.
estimator_func (function): the estimator function that operates on a tensor
of shape Txn where n <= N.
**kwargs: optional additional arguments to the estimator function
"""
(T, N) = log_weights.shape
xvals = _get_xvals(end=N, dx=dx)
yvals_all = [estimator_func(log_weights[:, :x], **kwargs) for x in xvals]
yvals_all = torch.stack(yvals_all, dim=1)
return xvals, yvals_all
def _get_xvals(end, dx):
"""Returns a integer numpy array of x-values incrementing by "dx"
and ending with "end".
Args:
end (int)
dx (int)
"""
arange = np.arange(0, end-1+dx, dx, dtype=int)
xvals = arange[1:]
return xvals
def _log_evidence_func(arr):
"""Returns an estimate of the log evidence from a set of log importance wegiths
in arr. arr has shape TxN where T is the number of trials and N is the number
of samples for estimation.
Args:
arr (torch.FloatTensor of shape TxN): log importance weights
Returns:
        A tensor of shape (T,) representing the estimates for each set of samples.
"""
T, N = arr.shape
log_evidence = torch.logsumexp(arr, dim=1) - np.log(N)
return log_evidence
def _ess_func(arr):
"""Effective sample size (ESS)"""
a = torch.logsumexp(arr, dim=1) * 2
b = torch.logsumexp(2 * arr, dim=1)
return torch.exp(a - b)
def _ess_inf_func(arr):
"""ESS-infinity (Q_n)"""
a = torch.max(arr, dim=1)[0]
b = torch.logsumexp(arr, dim=1)
return torch.exp(a - b)
def get_evidence_estimate(log_weights, dx):
return _compute_estimator_helper(log_weights, estimator_func=lambda x: _log_evidence_func(x).exp(), dx=dx)
def get_log_evidence_estimate(log_weights, dx):
return _compute_estimator_helper(log_weights, estimator_func=_log_evidence_func, dx=dx)
def get_ess(log_weights, dx):
return _compute_estimator_helper(log_weights, estimator_func=_ess_func, dx=dx)
def get_ness(log_weights, dx):
"""Normalized ESS (ESS / N)"""
xvals, yvals = get_ess(log_weights, dx=dx)
return xvals, yvals / xvals
def get_qn(log_weights, dx):
return _compute_estimator_helper(log_weights, estimator_func=_ess_inf_func, dx=dx)
########################################
## Plotting functions ##
########################################
def _lineplot_helper(*, name, func, ax, log_weights_dict, iw_mode_list, dx, bias=None, **kwargs):
"""A helper function for making the line functions of the paper.
Args:
name (string): Metric name. Used for logging only.
func (function): The metric computation function. Should be a function that takes in log_weights and dx
and returns x-values and y-values. Any additional arguments in kwargs will be passed to this function.
ax (matplotlib.axes): A matrplotlib ax object in which the plot should be drawn.
        log_weights_dict (dict): A dictionary of the form {iw_mode: log_importance_weights as a TxN tensor}
iw_mode_list (list): An ordered list of iw modes specifying the order of drawing the lines.
        dx (int): The distance between consecutive x-values.
bias (float, optional): If not None, shifts all the line's y-values according to it. Defaults to None.
"""
for iw_mode in tqdm(iw_mode_list, desc=name):
if iw_mode not in log_weights_dict:
print(f"Skipping {iw_mode}.")
continue
log_weights = torch.tensor(log_weights_dict[iw_mode])
label = labels_dict[iw_mode]
color = color_dict[iw_mode]
xs, ys_all = func(log_weights, dx=dx)
means = ys_all.mean(dim=0)
stds = ys_all.std(dim=0)
if bias is not None:
means -= bias
ax.plot(xs, means, color=color, label=label)
ax.fill_between(xs, means - stds, means + stds, color=color, alpha=0.2)
print(f"> ({name}) {iw_mode, means[-1].item(), stds[-1].item()}")
def plot_evidence(**kwargs):
_lineplot_helper(name="Evidence plot", func=get_evidence_estimate, **kwargs)
def plot_log_evidence(**kwargs):
_lineplot_helper(name="Evidence plot", func=get_log_evidence_estimate, **kwargs)
def plot_ness(**kwargs):
_lineplot_helper(name="NESS plot", func=get_ness, **kwargs)
def plot_qn(**kwargs):
_lineplot_helper(name="Qn plot", func=get_qn, **kwargs)
def plot_convergence(ax, log_weights_dict, dx, iw_mode_list,
qn_threshold, n_splits=10):
plot_labels = []
plot_x = []
for iw_mode in tqdm(iw_mode_list, desc="Convergence plot"):
if iw_mode not in log_weights_dict:
print(f"Skipping {iw_mode}.")
continue
log_weights = torch.tensor(log_weights_dict[iw_mode])
label = labels_dict[iw_mode]
xs, qns_all = get_qn(log_weights, dx=dx)
assert qns_all.shape[0] % n_splits == 0, f"The number of trials ({qns_all.shape[0]}) should be divisible by {n_splits}"
qns_all = qns_all.reshape(n_splits, qns_all.shape[0] // n_splits, -1)
qn_means = qns_all.mean(dim=0)
print(f"> (Convergence plot) {iw_mode, qn_means.mean(dim=0)[-1].item()} out of {log_weights.shape[-1]} samples")
converged = (qn_means < qn_threshold).cpu().numpy()
plot_labels.append(label)
if not converged.any(axis=-1).all(): # Some of them are not converged ever
plot_x.append([])
else:
plot_x.append(converged.argmax(axis=-1) * dx)
ax.boxplot(plot_x, labels=plot_labels, showmeans=True, meanline=True)
def plot_convergence_2(ax, log_weights_dict, dx, iw_mode_list, qn_threshold):
# Source: https://stackoverflow.com/questions/33328774/box-plot-with-min-max-average-and-standard-deviation/33330997
plot_labels = []
plot_x = []
for iw_mode in tqdm(iw_mode_list, desc="Convergence plot"):
if iw_mode not in log_weights_dict:
print(f"Skipping {iw_mode}.")
continue
log_weights = torch.tensor(log_weights_dict[iw_mode])
label = labels_dict[iw_mode]
xs, qns_all = get_qn(log_weights, dx=dx)
assert qns_all.shape[0] % 10 == 0
qns_all = qns_all.reshape(10, qns_all.shape[0] // 10, -1)
qn_means = qns_all.mean(dim=0)
converged = (qn_means < qn_threshold).cpu().numpy()
plot_labels.append(label)
if not converged.any(axis=-1).all(): # Some of them are not converged ever
plot_x.append([])
else:
plot_x.append(converged.argmax(axis=-1) * dx)
xvals = [i for i in range(len(plot_x)) if plot_x[i] != []]
x = np.stack([x for x in plot_x if x != []])
mins = x.min(axis=1)
maxes = x.max(axis=1)
means = x.mean(axis=1)
std = x.std(axis=1)
# create stacked errorbars:
ax.errorbar(xvals, means, std, fmt='ok', lw=3)
ax.errorbar(xvals, means, [means - mins, maxes - means],
fmt='.k', ecolor='gray', lw=1)
ax.set_xticks(np.arange(len(plot_x)))
ax.set_xticklabels(plot_labels) | 35.558511 | 128 | 0.618624 | 546 | 0.040838 | 0 | 0 | 0 | 0 | 0 | 0 | 5,657 | 0.423111 |
7b28352f856a9eaa1fa2b24d293fcd81d28eb11c | 4,750 | py | Python | dfa/visualize.py | garyzhao/FRGAN | 8aeb064fc93b45d3d8e074c5253b4f7a287582f4 | [
"Apache-2.0"
]
| 39 | 2018-07-28T04:37:48.000Z | 2022-01-20T18:34:37.000Z | dfa/visualize.py | garyzhao/FRGAN | 8aeb064fc93b45d3d8e074c5253b4f7a287582f4 | [
"Apache-2.0"
]
| 2 | 2018-08-27T08:19:22.000Z | 2019-08-16T09:15:34.000Z | dfa/visualize.py | garyzhao/FRGAN | 8aeb064fc93b45d3d8e074c5253b4f7a287582f4 | [
"Apache-2.0"
]
| 8 | 2018-07-31T09:33:49.000Z | 2020-12-06T10:16:53.000Z | from __future__ import division
from __future__ import print_function
import numpy as np
import cv2
import matplotlib.pyplot as plt
from .face import compute_bbox_size
end_list = np.array([17, 22, 27, 42, 48, 31, 36, 68], dtype=np.int32) - 1
def plot_kpt(image, kpt):
''' Draw 68 key points
Args:
image: the input image
kpt: (68, 3).
'''
image = image.copy()
kpt = np.round(kpt).astype(np.int32)
for i in range(kpt.shape[0]):
st = kpt[i, :2]
image = cv2.circle(image, (st[0], st[1]), 1, (0, 0, 255), 2)
if i in end_list:
continue
ed = kpt[i + 1, :2]
image = cv2.line(image, (st[0], st[1]), (ed[0], ed[1]), (255, 255, 255), 1)
return image
def build_camera_box(rear_size=90):
point_3d = []
rear_depth = 0
point_3d.append((-rear_size, -rear_size, rear_depth))
point_3d.append((-rear_size, rear_size, rear_depth))
point_3d.append((rear_size, rear_size, rear_depth))
point_3d.append((rear_size, -rear_size, rear_depth))
point_3d.append((-rear_size, -rear_size, rear_depth))
front_size = int(4 / 3 * rear_size)
front_depth = int(4 / 3 * rear_size)
point_3d.append((-front_size, -front_size, front_depth))
point_3d.append((-front_size, front_size, front_depth))
point_3d.append((front_size, front_size, front_depth))
point_3d.append((front_size, -front_size, front_depth))
point_3d.append((-front_size, -front_size, front_depth))
point_3d = np.array(point_3d, dtype=np.float).reshape(-1, 3)
return point_3d
def plot_pose_box(image, Ps, pts68s, color=(40, 255, 0), line_width=2):
''' Draw a 3D box as annotation of pose. Ref:https://github.com/yinguobing/head-pose-estimation/blob/master/pose_estimator.py
Args:
image: the input image
P: (3, 4). Affine Camera Matrix.
kpt: (2, 68) or (3, 68)
'''
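    # Each 3D box corner (in homogeneous coordinates) is projected with the
    # affine camera matrix P, then re-centred on the mean of the first 27
    # landmarks so the box sits over the face region.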
image = image.copy()
if not isinstance(pts68s, list):
pts68s = [pts68s]
if not isinstance(Ps, list):
Ps = [Ps]
for i in range(len(pts68s)):
pts68 = pts68s[i]
llength = compute_bbox_size(pts68)
point_3d = build_camera_box(llength)
P = Ps[i]
# Map to 2d image points
point_3d_homo = np.hstack((point_3d, np.ones([point_3d.shape[0], 1]))) # n x 4
point_2d = point_3d_homo.dot(P.T)[:, :2]
point_2d[:, 1] = - point_2d[:, 1]
point_2d[:, :2] = point_2d[:, :2] - np.mean(point_2d[:4, :2], 0) + np.mean(pts68[:2, :27], 1)
point_2d = np.int32(point_2d.reshape(-1, 2))
# Draw all the lines
cv2.polylines(image, [point_2d], True, color, line_width, cv2.LINE_AA)
cv2.line(image, tuple(point_2d[1]), tuple(
point_2d[6]), color, line_width, cv2.LINE_AA)
cv2.line(image, tuple(point_2d[2]), tuple(
point_2d[7]), color, line_width, cv2.LINE_AA)
cv2.line(image, tuple(point_2d[3]), tuple(
point_2d[8]), color, line_width, cv2.LINE_AA)
return image
def draw_landmarks(img, pts, style='fancy', wfp=None, show_flg=False, **kwargs):
"""Draw landmarks using matplotlib"""
# height, width = img.shape[:2]
# plt.figure(figsize=(12, height / width * 12))
plt.imshow(img[:, :, ::-1])
plt.subplots_adjust(left=0, right=1, top=1, bottom=0)
plt.axis('off')
if not type(pts) in [tuple, list]:
pts = [pts]
for i in range(len(pts)):
if style == 'simple':
plt.plot(pts[i][0, :], pts[i][1, :], 'o', markersize=4, color='g')
elif style == 'fancy':
alpha = 0.8
markersize = 4
lw = 1.5
color = kwargs.get('color', 'w')
markeredgecolor = kwargs.get('markeredgecolor', 'black')
nums = [0, 17, 22, 27, 31, 36, 42, 48, 60, 68]
# close eyes and mouths
plot_close = lambda i1, i2: plt.plot([pts[i][0, i1], pts[i][0, i2]], [pts[i][1, i1], pts[i][1, i2]],
color=color, lw=lw, alpha=alpha - 0.1)
plot_close(41, 36)
plot_close(47, 42)
plot_close(59, 48)
plot_close(67, 60)
for ind in range(len(nums) - 1):
l, r = nums[ind], nums[ind + 1]
plt.plot(pts[i][0, l:r], pts[i][1, l:r], color=color, lw=lw, alpha=alpha - 0.1)
plt.plot(pts[i][0, l:r], pts[i][1, l:r], marker='o', linestyle='None', markersize=markersize,
color=color,
markeredgecolor=markeredgecolor, alpha=alpha)
if wfp is not None:
plt.savefig(wfp, dpi=200)
print('Save visualization result to {}'.format(wfp))
if show_flg:
plt.show()
| 35.714286 | 129 | 0.573895 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 638 | 0.134316 |
7b2c39567282edd435ce6c7b2d8bdb6da59671bf | 439 | py | Python | bin/curvature.py | AgeYY/prednet | 90668d98b88e29bbaa68a7709e4fcb3664c110e8 | [
"MIT"
]
| null | null | null | bin/curvature.py | AgeYY/prednet | 90668d98b88e29bbaa68a7709e4fcb3664c110e8 | [
"MIT"
]
| null | null | null | bin/curvature.py | AgeYY/prednet | 90668d98b88e29bbaa68a7709e4fcb3664c110e8 | [
"MIT"
]
| null | null | null | # calculate the curverture
import numpy as np
import matplotlib.pyplot as plt
from predusion.tools import curvature
radius = 2
n_point = 10
circle_curve = [[radius * np.sin(t), radius * np.cos(t)] for t in np.linspace(0, 2 * np.pi, n_point, endpoint=False)]
circle_curve = np.array(circle_curve)
#plt.figure()
#plt.scatter(circle_curve[:, 0], circle_curve[:, 1])
#plt.show()
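# Sanity check: an ideal circle of radius r has constant curvature 1/r, so with
# radius = 2 the values printed below should be close to 0.5 (up to the
# discretization error of sampling only n_point = 10 points), assuming
# predusion.tools.curvature returns the per-point curvatures and their mean.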
ct, ct_mean = curvature(circle_curve)
print(ct, ct_mean)
| 20.904762 | 117 | 0.724374 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 102 | 0.232346 |
7b2c3dcb95bb9538fdb4cb9f25daeb1cf42bc3eb | 875 | py | Python | cocos/tests/test_numerics/test_statistics/test_mean.py | michaelnowotny/cocos | 3c34940d7d9eb8592a97788a5df84b8d472f2928 | [
"MIT"
]
| 101 | 2019-03-30T05:23:01.000Z | 2021-11-27T09:09:40.000Z | cocos/tests/test_numerics/test_statistics/test_mean.py | michaelnowotny/cocos | 3c34940d7d9eb8592a97788a5df84b8d472f2928 | [
"MIT"
]
| 3 | 2019-04-17T06:04:12.000Z | 2020-12-14T17:36:01.000Z | cocos/tests/test_numerics/test_statistics/test_mean.py | michaelnowotny/cocos | 3c34940d7d9eb8592a97788a5df84b8d472f2928 | [
"MIT"
]
| 5 | 2020-02-07T14:29:50.000Z | 2020-12-09T17:54:07.000Z | import cocos.device
import cocos.numerics as cn
import numpy as np
import pytest
test_data = [np.array([[1, 2, 3], [4, 5, 6], [7, 8, 20]],
dtype=np.int32),
np.array([[0.2, 1.0, 0.5], [0.4, 0.5, 0.6], [0.7, 0.2, 0.25]],
dtype=np.float32),
np.array([[0.5, 2.3, 3.1], [4, 5.5, 6], [7 - 9j, 8 + 1j, 2 + 10j]],
dtype=np.complex64)]
@pytest.mark.parametrize("A", test_data)
def test_mean(A):
cocos.device.init()
A_arch = cn.array(A)
# # using numpy
# mean_numpy = np.mean(A)
#
# # using Archimedes
# mean_arch = cn.mean(A_arch)
# conduct tests
# tests mean
assert np.allclose(np.mean(A), cn.mean(A_arch))
assert np.allclose(np.mean(A, axis=0), cn.mean(A_arch, axis=0))
assert np.allclose(np.mean(A, axis=1), cn.mean(A_arch, axis=1))
| 26.515152 | 80 | 0.537143 | 0 | 0 | 0 | 0 | 451 | 0.515429 | 0 | 0 | 120 | 0.137143 |
7b2f67783a54c7281fccbf52bb33f6fc8f65fc62 | 482 | py | Python | tests/individual_samples/long_doc.py | MiWeiss/docstr_coverage | 502ab0174ea261383f497af2476317d4cc199665 | [
"MIT"
]
| 50 | 2019-01-25T16:53:39.000Z | 2022-03-17T22:02:06.000Z | tests/individual_samples/long_doc.py | HunterMcGushion/docstr_coverage | 502ab0174ea261383f497af2476317d4cc199665 | [
"MIT"
]
| 66 | 2019-01-25T11:45:43.000Z | 2022-03-30T11:55:47.000Z | tests/individual_samples/long_doc.py | MiWeiss/docstr_coverage | 502ab0174ea261383f497af2476317d4cc199665 | [
"MIT"
]
| 23 | 2019-01-28T08:37:42.000Z | 2021-06-16T12:35:27.000Z | """
this is a very long docstring
this is a very long docstring
this is a very long docstring
this is a very long docstring
this is a very long docstring
this is a very long docstring
this is a very long docstring
this is a very long docstring
this is a very long docstring
"""
class A:
"""This is the first class in the alphabeth."""
# docstr-coverage:excused `test ignore after long docstrings`
def ignored(self):
pass
def missing(self):
pass
| 20.083333 | 65 | 0.707469 | 200 | 0.414938 | 0 | 0 | 0 | 0 | 0 | 0 | 386 | 0.80083 |
7b2fdc657bc9709a4e827c864106583a0abe59bc | 461 | py | Python | Lib/site-packages/elasticsearch_django/signals.py | Nibraz15/FullTextSearch | 79d03a9b5c0fc94219ad9a70fe57818496844660 | [
"bzip2-1.0.6"
]
| null | null | null | Lib/site-packages/elasticsearch_django/signals.py | Nibraz15/FullTextSearch | 79d03a9b5c0fc94219ad9a70fe57818496844660 | [
"bzip2-1.0.6"
]
| null | null | null | Lib/site-packages/elasticsearch_django/signals.py | Nibraz15/FullTextSearch | 79d03a9b5c0fc94219ad9a70fe57818496844660 | [
"bzip2-1.0.6"
]
| null | null | null | import django.dispatch
# signal fired just before calling model.index_search_document
pre_index = django.dispatch.Signal(providing_args=["instance", "index"])
# signal fired just before calling model.update_search_document
pre_update = django.dispatch.Signal(
providing_args=["instance", "index", "update_fields"]
)
# signal fired just before calling model.delete_search_document
pre_delete = django.dispatch.Signal(providing_args=["instance", "index"])
| 35.461538 | 73 | 0.796095 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 254 | 0.550976 |
7b30e1e10fc484e48de9eae99bc4b49a95428432 | 528 | py | Python | adverse/signals.py | michael-xander/communique-webapp | 85b450d7f6d0313c5e5ef53a262a850b7e93c3d6 | [
"MIT"
]
| null | null | null | adverse/signals.py | michael-xander/communique-webapp | 85b450d7f6d0313c5e5ef53a262a850b7e93c3d6 | [
"MIT"
]
| null | null | null | adverse/signals.py | michael-xander/communique-webapp | 85b450d7f6d0313c5e5ef53a262a850b7e93c3d6 | [
"MIT"
]
| null | null | null | from django.db.models.signals import post_save
from django.dispatch import receiver
from communique.utils.utils_signals import generate_notifications
from user.models import NotificationRegistration
from .models import AdverseEvent
@receiver(post_save, sender=AdverseEvent)
def post_adverse_event_save_callback(sender, **kwargs):
"""
Creates notifications informing all registered users that an adverse event has been created/updated
"""
generate_notifications(NotificationRegistration.ADVERSE_EVENTS, kwargs) | 37.714286 | 103 | 0.829545 | 0 | 0 | 0 | 0 | 293 | 0.554924 | 0 | 0 | 115 | 0.217803 |
7b32ae7712bef36c9a2b8c71ee2035133eed9f7e | 1,117 | py | Python | hoomd/test-py/test_run_callback.py | PetersResearchGroup/PCND | 584768cc683a6df0152ead69b567d05b781aab2b | [
"BSD-3-Clause"
]
| 2 | 2020-03-30T14:38:50.000Z | 2020-06-02T05:53:41.000Z | hoomd/test-py/test_run_callback.py | PetersResearchGroup/PCND | 584768cc683a6df0152ead69b567d05b781aab2b | [
"BSD-3-Clause"
]
| null | null | null | hoomd/test-py/test_run_callback.py | PetersResearchGroup/PCND | 584768cc683a6df0152ead69b567d05b781aab2b | [
"BSD-3-Clause"
]
| 1 | 2020-05-20T07:00:08.000Z | 2020-05-20T07:00:08.000Z | # -*- coding: iso-8859-1 -*-
# Maintainer: joaander
import hoomd
hoomd.context.initialize()
import unittest
class analyze_callback_tests(unittest.TestCase):
def setUp(self):
sysdef = hoomd.init.create_lattice(unitcell=hoomd.lattice.sq(a=2.0),
n=[1,2]);
self.a = -1;
def test_simple(self):
def cb(step):
self.a = step;
self.a = -1;
hoomd.run(10, callback=cb);
self.assertEqual(self.a, 10);
def test_period(self):
def cb(step):
self.a = step;
self.a = -1;
hoomd.run(10, callback=cb, callback_period=7);
self.assertEqual(self.a, 7);
def test_cancel(self):
def cb(step):
self.a = step;
if step == 3:
return -1;
else:
return 0;
self.a = -1;
hoomd.run(10, callback=cb, callback_period=1);
self.assertEqual(self.a, 3);
def tearDown(self):
hoomd.context.initialize();
if __name__ == '__main__':
unittest.main(argv = ['test.py', '-v'])
| 23.270833 | 76 | 0.521038 | 934 | 0.836168 | 0 | 0 | 0 | 0 | 0 | 0 | 73 | 0.065354 |
7b332b95f4298d84e9d671c6d88abc96e79fcae6 | 7,145 | py | Python | cheshire3/parser.py | cheshire3/cheshire3 | 306348831ec110229c78a7c5f0f2026a0f394d2c | [
"Python-2.0",
"Unlicense"
]
| 3 | 2015-08-02T09:03:28.000Z | 2017-12-06T09:26:14.000Z | cheshire3/parser.py | cheshire3/cheshire3 | 306348831ec110229c78a7c5f0f2026a0f394d2c | [
"Python-2.0",
"Unlicense"
]
| 5 | 2015-08-17T01:16:35.000Z | 2015-09-16T21:51:27.000Z | cheshire3/parser.py | cheshire3/cheshire3 | 306348831ec110229c78a7c5f0f2026a0f394d2c | [
"Python-2.0",
"Unlicense"
]
| 6 | 2015-05-17T15:32:20.000Z | 2020-04-22T08:43:16.000Z |
import cStringIO
import StringIO
import pickle
from xml.sax import make_parser, ErrorHandler, SAXParseException
from xml.sax import InputSource as SaxInput
from xml.dom.minidom import parseString as domParseString
from xml.parsers.expat import ExpatError
from lxml import etree
from cheshire3.baseObjects import Parser
from cheshire3.record import (
SaxRecord,
SaxContentHandler,
DomRecord,
MinidomRecord,
MarcRecord
)
from cheshire3.record import LxmlRecord
from cheshire3.utils import nonTextToken
from exceptions import XMLSyntaxError
class BaseParser(Parser):
def _copyData(self, doc, rec):
# Utility function to update data on record from document
rec.id = doc.id
rec.filename = doc.filename
rec.tagName = doc.tagName
rec.processHistory = doc.processHistory
rec.processHistory.append(self.id)
if doc.documentStore:
rec.parent = ('document', doc.documentStore, doc.id)
elif doc.parent:
rec.parent = doc.parent
class MinidomParser(BaseParser):
"""Use default Python Minidom implementation to parse document."""
def process_document(self, session, doc):
xml = doc.get_raw(session)
try:
dom = domParseString(xml)
except ExpatError as e:
raise XMLSyntaxError(e.message)
rec = MinidomRecord(dom, xml)
self._copyData(doc, rec)
return rec
class SaxParser(BaseParser):
"""Default SAX based parser. Creates SaxRecord."""
_possibleSettings = {
'namespaces': {
'docs': "Enable namespace processing in SAX"
},
'stripWhitespace': {
'docs': "Strip additional whitespace when processing."
},
'attrHash': {
'docs': "Tag/Attribute combinations to include in hash."
}
}
def __init__(self, session, config, parent):
Parser.__init__(self, session, config, parent)
self.parser = make_parser()
self.errorHandler = ErrorHandler()
self.parser.setErrorHandler(self.errorHandler)
self.inputSource = SaxInput()
ch = SaxContentHandler()
self.contentHandler = ch
self.parser.setContentHandler(ch)
self.keepError = 1
if (self.get_setting(session, 'namespaces')):
self.parser.setFeature('http://xml.org/sax/features/namespaces',
1)
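        # The attrHash setting is a space-separated list of tag@attribute
        # pairs; each pair is split and grouped into {tag: [attribute, ...]}
        # so those attribute values are included in the element hash.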
p = self.get_setting(session, 'attrHash')
if (p):
l = p.split()
for i in l:
(a, b) = i.split("@")
try:
ch.hashAttributesNames[a].append(b)
except:
ch.hashAttributesNames[a] = [b]
if self.get_setting(session, 'stripWhitespace'):
ch.stripWS = 1
def process_document(self, session, doc):
xml = doc.get_raw(session)
if type(xml) == unicode:
# SAX parser cannot deal with unicode
xml = xml.encode('utf-8')
self.inputSource.setByteStream(cStringIO.StringIO(xml))
ch = self.contentHandler
ch.reinit()
try:
self.parser.parse(self.inputSource)
except SAXParseException as e:
# Splat. Reset self and reraise
if self.keepError:
# Work out path
path = []
for l in ch.pathLines:
line = ch.currentText[l]
elemName = line[2:line.index('{') - 1]
path.append("%s[@SAXID='%s']" % (elemName, l))
self.errorPath = '/'.join(path)
else:
ch.reinit()
raise XMLSyntaxError(str(e))
rec = SaxRecord(ch.currentText, xml, wordCount=ch.recordWordCount)
rec.elementHash = ch.elementHash
rec.byteCount = len(xml)
self._copyData(doc, rec)
ch.reinit()
return rec
class StoredSaxParser(BaseParser):
def process_document(self, session, doc):
data = doc.get_raw(session)
data = unicode(data, 'utf-8')
sax = data.split(nonTextToken)
if sax[-1][0] == "9":
line = sax.pop()
elemHash = pickle.loads(str(line[2:]))
else:
elemHash = {}
rec = SaxRecord(sax)
rec.elementHash = elemHash
return rec
class LxmlParser(BaseParser):
""" lxml based Parser. Creates LxmlRecords """
_possibleSettings = {
'validateDTD': {
'docs': ("Validate to DTD while parsing (if a DTD was "
"referenced by the Document.)"),
'type': int,
'options': "0|1"
},
'allowNetwork': {
'docs': ("Allow network access to look up external documents "
"(DTDs etc.)"),
'type': int,
'options': "0|1"
}
}
def __init__(self, session, config, parent):
BaseParser.__init__(self, session, config, parent)
dtdVal = bool(self.get_setting(session, 'validateDTD', 0))
noNetwork = not self.get_setting(session, 'allowNetwork', 0)
self.parser = etree.XMLParser(dtd_validation=dtdVal,
no_network=noNetwork)
def process_document(self, session, doc):
# Input must be string or stream
data = doc.get_raw(session)
try:
try:
et = etree.parse(StringIO.StringIO(data), self.parser)
except AssertionError:
data = data.decode('utf8')
et = etree.parse(StringIO.StringIO(data), self.parser)
except etree.XMLSyntaxError as e:
raise XMLSyntaxError(e.message)
rec = LxmlRecord(et)
rec.byteCount = len(data)
self._copyData(doc, rec)
return rec
class LxmlSchemaParser(Parser):
pass
class LxmlRelaxNGParser(Parser):
pass
class LxmlHtmlParser(BaseParser):
"""lxml based parser for HTML documents."""
def __init__(self, session, config, parent):
BaseParser.__init__(self, session, config, parent)
self.parser = etree.HTMLParser()
def process_document(self, session, doc):
data = doc.get_raw(session)
et = etree.parse(StringIO.StringIO(data), self.parser)
rec = LxmlRecord(et)
rec.byteCount = len(data)
self._copyData(doc, rec)
return rec
class PassThroughParser(BaseParser):
"""Take a Document that already contains parsed data and return a Record.
Copy the data from a document (eg list of sax events or a dom tree) into
an appropriate record object.
"""
def process_document(self, session, doc):
# Simply copy data into a record of appropriate type
data = doc.get_raw(session)
if isinstance(data, list):
rec = SaxRecord(data)
else:
rec = DomRecord(data)
self._copyData(doc, rec)
return rec
class MarcParser(BaseParser):
"""Creates MarcRecords which fake the Record API for Marc."""
def process_document(self, session, doc):
return MarcRecord(doc)
| 31.065217 | 77 | 0.588383 | 6,568 | 0.919244 | 0 | 0 | 0 | 0 | 0 | 0 | 1,258 | 0.176067 |
9e26ff289e7c1f363b136e3f4b93da4585664e71 | 6,275 | py | Python | scripts/checkpT_curv.py | masamuch/hepqpr-qallse | 0b39f8531c6f3c758b94c31f4633f75dcfeb67ad | [
"Apache-2.0"
]
| null | null | null | scripts/checkpT_curv.py | masamuch/hepqpr-qallse | 0b39f8531c6f3c758b94c31f4633f75dcfeb67ad | [
"Apache-2.0"
]
| null | null | null | scripts/checkpT_curv.py | masamuch/hepqpr-qallse | 0b39f8531c6f3c758b94c31f4633f75dcfeb67ad | [
"Apache-2.0"
]
| null | null | null | from hepqpr.qallse import *
from hepqpr.qallse.plotting import *
from hepqpr.qallse.cli.func import time_this
import time
import pickle
# import the method
from hepqpr.qallse.dsmaker import create_dataset
modelName = "D0"
#modelName = "Mp"
#modelName = "Doublet"
maxTry=1
# 5e-3 : 167 MeV
# 8e-4 : 1.04 GeV
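# The two mappings above suggest the curvature cut scales inversely with pT
# (a smaller maximum curvature keeps only higher-pT track candidates); the
# exact conversion depends on the detector's magnetic field and units.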
varDensity = []
for ptThr_w in [0.15, 0.20, 0.30, 0.4, 0.50, 0.6, 0.75, 0.9, 1.0, 1.2]:
for ptThr_r in [3e-4, 3.5e-4, 4e-4, 4.5e-4, 5e-4, 6e-4, 7e-4, 8e-4, 9e-4, 1e-3, 1.2e-3, 1.5e-3, 1.7e-3, 2e-3, 2.5e-3, 3e-3, 4e-3, 5e-3]:
varDensity.append((modelName, ptThr_w, ptThr_r, maxTry))
#varDensity = [
# (modelName, 0.20, 5e-3, maxTry),
# (modelName, 1.00, 5e-3, maxTry),
#
#]
picklename = ".tmp.checkpT_curv.pickle"
try:
with open(picklename,'rb') as f:
results = pickle.load(f)
except:
print ("No pickle files.")
results = {}
for v in varDensity:
nTry = v[3]
for iTry in range(nTry):
k = (v[0], v[1], v[2], iTry)
print (k)
ModelName = k[0]
ptThr_w = k[1]
ptThr_r = k[2]
Density = 0.05
if k in results:
continue
results[k] = {}
results[k]["density"] = Density
results[k]["ptThr_w"] = ptThr_w
results[k]["ptThr_r"] = ptThr_r
results[k]["ModelName"] = ModelName
# dataset creation options
ds_options = dict(
# output directory: output_path+prefix
output_path='/tmp',
#prefix='ds_'+k,
#prefix=prefix,
# size
density = Density,
#phi_bounds = (0.15, 1.05),
# important: no pt cut
high_pt_cut = ptThr_w,
)
prefix = f'ez-{Density}'
if ds_options["high_pt_cut"] > 0:
prefix += f'_hpt-{ds_options["high_pt_cut"]}'
else:
prefix += '_baby'
prefix += f'_{iTry}'
prefix += f'_noPhiCut'
ds_options["prefix"] = prefix
# generate the dataset
import os
path = os.path.join(ds_options['output_path'], prefix, "event000001000")
if os.path.exists(path + "-hits.csv"):
import json
with open(path + "-meta.json") as f:
meta = json.load(f)
with open(path+"-metaHits.pickle", 'rb') as f:
time_info= pickle.load(f)
else:
with time_this() as time_info:
meta, path = create_dataset(**ds_options)
with open(os.path.join(path+"-metaHits.pickle"), 'wb') as f:
pickle.dump(time_info, f)
results[k]['TReadingHits'] = time_info[1]
results[k]['meta']=meta
from hepqpr.qallse.seeding import generate_doublets, SeedingConfig
# generate the doublets: the important part is the config_cls !
if os.path.exists(path + "-doublets.csv"):
doublets = pd.read_csv(path + "-doublets.csv", index_col=0)
            with open(path+"-metaDoublets.pickle", 'rb') as f:
                time_info = pickle.load(f)
            results[k]['TInitialDoubletBuilding'] = time_info[1]
else:
with time_this() as time_info:
doublets = generate_doublets(hits_path=path+'-hits.csv', config_cls=SeedingConfig)
doublets.to_csv(path+'-doublets.csv')
with open(os.path.join(path+"-metaDoublets.pickle"), 'wb') as f:
pickle.dump(time_info, f)
results[k]['TInitialDoubletBuilding'] = time_info[1]
print('number of doublets = ', len(doublets))
results[k]['Ndoublets'] = len(doublets)
from hepqpr.qallse.qallse import Config
config = Config()
config.tplet_max_curv = ptThr_r
dw = DataWrapper.from_path(path + '-hits.csv')
if modelName == "D0":
from hepqpr.qallse.qallse_d0 import D0Config
new_config = merge_dicts(D0Config().as_dict(), config.as_dict())
model = QallseD0(dw, **new_config)
elif modelName == "Mp":
from hepqpr.qallse.qallse_mp import MpConfig
new_config = merge_dicts(MpConfig().as_dict(), config.as_dict())
model = QallseMp(dw, **new_config)
elif modelName == "Nominal":
from hepqpr.qallse.qallse import Config1GeV
new_config = merge_dicts(Config1GeV().as_dict(), config.as_dict())
model = Qallse1GeV(dw, **new_config)
elif modelName == "Doublet":
from hepqpr.qallse.qallse_doublet import DoubletConfig
new_config = merge_dicts(DoubletConfig().as_dict(), config.as_dict())
model = QallseDoublet(dw, **new_config)
p, r, ms = model.dataw.compute_score(doublets)
results[k]['precision_initDoublet'] = p
results[k]['recall_initDoublet'] = r
results[k]['missing_initDoublet'] = len(ms)
# generate the qubo as usual
with time_this() as time_info:
model.build_model(doublets)
print(f'Time of model building = {time_info[1]:.2f}s.')
results[k]['TModelBuilding'] = time_info[1]
with time_this() as time_info:
Q = model.to_qubo()
print(f'Time of qubo building = {time_info[1]:.2f}s.')
results[k]['TQuboBuilding'] = time_info[1]
results[k]['QuboSize'] = len(Q)
from hepqpr.qallse.cli.func import *
with time_this() as time_info:
response = solve_neal(Q)
print(f'Time of neal = {time_info[1]:.2f}s.')
results[k]['TNeal'] = time_info[1]
final_doublets, final_tracks = process_response(response)
en0 = 0 if Q is None else dw.compute_energy(Q)
en = response.record.energy[0]
results[k]['obsEnergy'] = en
results[k]['idealEnergy'] = en0
occs = response.record.num_occurrences
results[k]['bestOcc'] = occs[0]
results[k]['OccSum'] = occs.sum()
p, r, ms = dw.compute_score(final_doublets)
results[k]['precision'] = p
results[k]['recall'] = r
results[k]['missing'] = len(ms)
trackml_score = dw.compute_trackml_score(final_tracks)
results[k]['trackmlScore'] = trackml_score
with open(picklename, 'wb') as f:
pickle.dump(results, f)
#print(results)
| 35.055866 | 140 | 0.577211 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,335 | 0.212749 |
9e27be8d3067835dcbda95c1548885176ae1ebf3 | 440 | py | Python | ifconfigparser/__init__.py | KnightWhoSayNi/ifconfig-parser | 4921ac9d6be6244b062d082c164f5a5e69522478 | [
"MIT"
]
| 17 | 2018-10-06T15:19:27.000Z | 2022-02-25T05:05:22.000Z | ifconfigparser/__init__.py | KnightWhoSayNi/ifconfig-parser | 4921ac9d6be6244b062d082c164f5a5e69522478 | [
"MIT"
]
| 3 | 2019-11-22T23:40:58.000Z | 2019-12-06T02:26:59.000Z | ifconfigparser/__init__.py | KnightWhoSayNi/ifconfig-parser | 4921ac9d6be6244b062d082c164f5a5e69522478 | [
"MIT"
]
| 2 | 2019-05-10T15:36:46.000Z | 2020-11-18T11:56:33.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# ======================================================
#
# File name: __init__.py
# Author: [email protected]
# Date created: 30.06.2018 17:00
# Python Version: 3.7
#
# ======================================================
from .ifconfig_parser import IfconfigParser
__author__ = "KnightWhoSayNi"
__email__ = '[email protected]'
__version__ = '0.0.5'
| 25.882353 | 56 | 0.522727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 342 | 0.777273 |
9e287d153cff7385984c9cc16aca63539ed882d4 | 3,382 | py | Python | api/views/movies.py | iamvukasin/filminds | 54c9d7175f3a06f411cc750a694758bd683af1ee | [
"MIT"
]
| 2 | 2019-06-15T01:40:04.000Z | 2019-12-19T05:11:17.000Z | api/views/movies.py | iamvukasin/filminds | 54c9d7175f3a06f411cc750a694758bd683af1ee | [
"MIT"
]
| 1 | 2021-03-09T05:22:51.000Z | 2021-03-09T05:22:51.000Z | api/views/movies.py | iamvukasin/filminds | 54c9d7175f3a06f411cc750a694758bd683af1ee | [
"MIT"
]
| 2 | 2019-06-24T19:24:25.000Z | 2020-05-29T13:57:35.000Z | from abc import ABC, abstractmethod
import tmdbsimple as tmdb
from django.contrib.auth.decorators import login_required
from django.http import Http404
from django.utils.decorators import method_decorator
from rest_framework.response import Response
from rest_framework.views import APIView
from api.serializers import MovieSerializer
from app.models import Movie, SearchedMovie, User, CollectedMovie
MAX_NUM_CASTS = 4
class AddCollectedMovie(ABC, APIView):
"""
Adds the given movie to the user's favorites or watch list based
on list_type property.
"""
@method_decorator(login_required)
def get(self, request, pk):
user = User.get_user(request.user)
movie = Movie.get_or_create(pk)
if movie is None:
raise Http404
try:
collected_item = CollectedMovie.objects.filter(user=user, movie=movie).get()
collected_item.type = self.list_type
except CollectedMovie.DoesNotExist:
collected_item = CollectedMovie(
user=user,
movie=movie,
type=self.list_type
)
collected_item.save()
# success status
return Response('')
@property
@abstractmethod
def list_type(self):
pass
class MovieAddToFavorites(AddCollectedMovie):
"""
Adds the given movie to the user's favorites list.
"""
list_type = CollectedMovie.TYPE_WISH
class MovieAddToWatched(AddCollectedMovie):
"""
Adds the given movie to the user's watch list.
"""
list_type = CollectedMovie.TYPE_WATCH
class RemoveCollectedMovie(APIView):
"""
    Removes the given movie from the user's favorites or watch list.
"""
@method_decorator(login_required)
def get(self, request, pk):
user = User.get_user(request.user)
movie = Movie.get_or_create(pk)
if movie is None:
raise Http404
CollectedMovie.objects.filter(user=user, movie=movie).delete()
# success status
return Response('')
class MovieInfo(APIView):
"""
Returns movie information from the database (data defined in Movie
model + cast information), if the movie has been already added. If
not, gets the information from TMDB, saves to the database and
then returns it.
"""
def get(self, request, pk):
movie = Movie.get_or_create(pk)
if movie is None:
raise Http404
# insert movie into searched movies table
if request.user.is_authenticated:
SearchedMovie.increment_search_count(User.get_user(request.user), movie)
serializer = MovieSerializer(movie)
data = serializer.data
# get actors from TMDB
movie_credits = tmdb.Movies(pk).credits()
data['cast'] = []
for cast in movie_credits['cast'][:MAX_NUM_CASTS]:
cast_data = {k: v for k, v in cast.items() if k in {'character', 'name', 'profile_path'}}
# set default profile photo if no photo is received
# from TMDB
if cast_data['profile_path'] is None:
cast_data['profile_path'] = ''
else:
cast_data['profile_path'] = f'https://image.tmdb.org/t/p/w276_and_h350_face{cast_data["profile_path"]}'
data['cast'].append(cast_data)
return Response(data)
| 27.274194 | 119 | 0.646363 | 2,945 | 0.870787 | 0 | 0 | 1,027 | 0.303666 | 0 | 0 | 883 | 0.261088 |
9e29911c2cf893692ea46e7dbded4b692a9e33a0 | 3,853 | py | Python | apps/lk/views.py | DaniilGorokhov/CaloryHelper | 6bf5ddce85479508b6498c3e4b2e0f4e5dd01b51 | [
"MIT"
]
| null | null | null | apps/lk/views.py | DaniilGorokhov/CaloryHelper | 6bf5ddce85479508b6498c3e4b2e0f4e5dd01b51 | [
"MIT"
]
| null | null | null | apps/lk/views.py | DaniilGorokhov/CaloryHelper | 6bf5ddce85479508b6498c3e4b2e0f4e5dd01b51 | [
"MIT"
]
| 1 | 2021-02-15T17:40:23.000Z | 2021-02-15T17:40:23.000Z | from django.shortcuts import render
from django.http import Http404, HttpResponseRedirect
from django.urls import reverse
from apps.index.models import User, UserHistory
from sova_avia.settings import MEDIA_ROOT
from imageai.Prediction import ImagePrediction
import json
from .models import Article
from .forms import ArticleForm
def index(request, user_login):
try:
user = User.objects.get(login=user_login)
except:
raise Http404
return render(request, 'lk/index.html', {'user_instance': user, 'user_login': user_login})
def view_history(request, user_login):
# try:
# history = UserHistory.objects.get(userId = user_login)
# except:
# raise Http404
user_id = User.objects.get(login=user_login).id
return render(request, 'lk/history.html', {'history': UserHistory.objects.all().filter(userId = user_id),
'user_login': user_login})
def settings(request, user_login):
try:
user = User.objects.get(login=user_login)
except:
raise Http404
return render(request, 'lk/settings.html', {'user_instance': user, 'user_login': user_login})
def wait(request, user_login):
if request.POST['password0u'] == request.POST['password1u']:
        user = User.objects.get(login=user_login)
        user.password = request.POST['password0u']
        user.save()
return HttpResponseRedirect(reverse('lk:index', args=(user_login,)))
else:
return render(request, 'lk/settings.html', {'user_instance': User.objects.get(login=user_login),
'user_login': user_login})
def newPhoto(request, user_login):
if request.method == 'POST':
form = ArticleForm(request.POST, request.FILES)
if form.is_valid():
form.save()
file_name = request.FILES['file_obj']
result = process_image(file_name)
return render(request, 'lk/newPhoto.html', {'form': form, 'user_login': user_login, 'foodVariants': result})
# return render(request, 'lk/newPhoto.html', {'form': request.POST, 'user_login': user_login})
else:
form = ArticleForm()
return render(request, 'lk/newPhoto.html', {'form': form, 'user_login': user_login})
# return render(request, 'lk/newPhoto.html', {'user_login': user_login})
# return render(request, 'lk/newPhoto.html', {'user_login':user_login, 'foodVariants':
# [{'foodName': 'котлетка', 'foodDescription': "мамина"}]})
def process_image(file_name):
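    # Run ImageAI's pretrained ResNet50 classifier on the uploaded photo and
    # attach a calorie description from foods.json to each of the top-10
    # predicted labels, falling back to a placeholder value when unknown.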
execution_path = "../../media/media/"
with open(MEDIA_ROOT + '/media/' + 'foods.json') as f:
foods = json.load(f)
prediction = ImagePrediction()
prediction.setModelTypeAsResNet()
prediction.setModelPath(MEDIA_ROOT + "/media/resnet50_weights_tf_dim_ordering_tf_kernels.h5")
prediction.loadModel()
result = []
predictions, probabilities = prediction.predictImage(MEDIA_ROOT + '/media/' + str(file_name), result_count=10)
for eachPrediction, eachProbability in zip(predictions, probabilities):
tmp = dict()
eachPrediction = eachPrediction.replace('_', ' ')
tmp['foodName'] = eachPrediction
tmp['foodDescription'] = eachProbability
calorieAmount = "124 cal"
flag = False
for food in foods:
if food['foodName'] == eachPrediction:
calorieAmount = food['foodDescription']
flag = True
break
if flag:
tmp['foodDescription'] = calorieAmount
result.append(tmp)
return result
def chooseFood(request, user_login, foodName, foodDescription):
UserHistory.objects.create(userId=User.objects.get(login=user_login), foodName=foodName, foodDescription=foodDescription)
return HttpResponseRedirect(reverse('lk:index', args=(user_login,)))
| 35.675926 | 125 | 0.659227 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 938 | 0.242565 |
9e2adc78300cf5e3761e489b41942048bb77f39e | 544 | py | Python | que-shi-de-shu-zi-lcof.py | tsonglew/leetcode-solution | abce0c36def55a8d3bf86fca531246a29920e771 | [
"Unlicense"
]
| null | null | null | que-shi-de-shu-zi-lcof.py | tsonglew/leetcode-solution | abce0c36def55a8d3bf86fca531246a29920e771 | [
"Unlicense"
]
| null | null | null | que-shi-de-shu-zi-lcof.py | tsonglew/leetcode-solution | abce0c36def55a8d3bf86fca531246a29920e771 | [
"Unlicense"
]
| null | null | null | class Solution:
def missingNumber(self, nums) -> int:
if nums[0] != 0:
return 0
if nums[-1] != len(nums):
return len(nums)
return self.f(nums)
def f(self, nums):
print(nums)
if len(nums) <= 3:
for i in range(1, len(nums)):
if nums[i] != nums[i-1] + 1:
return nums[i-1] + 1
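        # Binary search over the sorted array: if nums[mid] still equals
        # nums[0] + mid, no value is missing in the left half, so recurse on
        # the right half; otherwise the gap lies in the left half.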
mid = len(nums) // 2
if nums[mid] != nums[0] + mid:
return self.f(nums[:mid+1])
return self.f(nums[mid:])
| 28.631579 | 44 | 0.439338 | 543 | 0.998162 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
9e2d53249be23d06d560e65260043ec473bab942 | 1,159 | py | Python | setup.py | CZ-NIC/deckard | 35ed3c59b27c52fc2e3a187679251353f5efe6c0 | [
"BSD-2-Clause"
]
| 30 | 2016-08-06T20:56:17.000Z | 2021-12-13T07:56:23.000Z | setup.py | CZ-NIC/deckard | 35ed3c59b27c52fc2e3a187679251353f5efe6c0 | [
"BSD-2-Clause"
]
| 6 | 2016-05-31T10:48:51.000Z | 2018-07-03T09:05:12.000Z | setup.py | CZ-NIC/deckard | 35ed3c59b27c52fc2e3a187679251353f5efe6c0 | [
"BSD-2-Clause"
]
| 10 | 2016-04-03T13:55:19.000Z | 2020-11-28T01:23:49.000Z | #!/usr/bin/env python3
from distutils.core import setup
version = '3.0'
setup(
name='deckard',
version=version,
description='DNS toolkit',
long_description=(
"Deckard is a DNS software testing based on library pydnstest."
"It supports parsing and running Unbound-like test scenarios,"
"and setting up a mock DNS server. It's based on dnspython."),
author='CZ.NIC',
author_email='[email protected]',
license='BSD',
url='https://gitlab.labs.nic.cz/knot/deckard',
packages=['pydnstest'],
python_requires='>=3.5',
install_requires=[
'dnspython>=1.15',
'jinja2',
'PyYAML',
'python-augeas'
],
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 3 :: Only',
'Operating System :: POSIX :: Linux',
'Topic :: Internet :: Name Service (DNS)',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Testing',
]
)
| 31.324324 | 71 | 0.609146 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 734 | 0.633305 |
9e2f62d9ca279a2304c666233677d5d0d663e572 | 1,894 | py | Python | tests/testing_utils.py | alguerre/TrackEditorWeb | e92cb8554e804af8620298ca75567e6ce653b15e | [
"MIT"
]
| 1 | 2021-09-06T14:56:27.000Z | 2021-09-06T14:56:27.000Z | tests/testing_utils.py | qjx666/TrackEditorWeb | e92cb8554e804af8620298ca75567e6ce653b15e | [
"MIT"
]
| 79 | 2021-07-06T13:37:09.000Z | 2021-10-21T11:09:10.000Z | tests/testing_utils.py | qjx666/TrackEditorWeb | e92cb8554e804af8620298ca75567e6ce653b15e | [
"MIT"
]
| 1 | 2022-01-30T05:44:25.000Z | 2022-01-30T05:44:25.000Z | import os
from urllib.parse import urljoin
from selenium import webdriver
from TrackApp.models import User, Track
from libs import track
def login(driver: webdriver,
live_server_url: str,
username: str,
password: str):
driver.get(urljoin(live_server_url, 'login'))
driver.find_element_by_id('input_txt_username').send_keys(username)
driver.find_element_by_id('input_txt_password').send_keys(password)
driver.find_element_by_id('input_btn_login').click()
def create_user(username: str = 'default_user',
password: str = 'default_password_1234',
email: str = '[email protected]'):
if not User.objects.filter(username=username):
user = User.objects.create(username=username,
email=email,
password='!')
user.set_password(password)
user.save()
else:
user = User.objects.get(username=username)
return user
def get_downloads_dir():
return os.path.join(os.path.expanduser('~'), 'Downloads')
def get_webdriver(headless: bool = True):
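    # Headless Chrome is configured to download files straight into the user's
    # Downloads folder (no save dialog), so tests can inspect exported files.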
options = webdriver.ChromeOptions()
options.headless = headless
downloads_dir = get_downloads_dir()
preferences = \
{'download.default_directory': downloads_dir,
'safebrowsing.enabled': 'false'}
options.add_experimental_option('prefs', preferences)
driver = webdriver.Chrome(chrome_options=options)
return driver
def compare_tracks(reference_file: str, checked_file: str):
track_ref = track.Track().add_gpx(reference_file)
track_check = track.Track().add_gpx(checked_file)
return track_ref == track_check
def record_tracks(user: User, n: int, title='title'):
for i in range(n):
Track(user=user,
track=track.Track().to_json(),
title=f'{title}_{i}').save()
| 30.548387 | 71 | 0.661563 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 229 | 0.120908 |
9e30175d2516252b61b551241d3a7d897279d318 | 1,563 | py | Python | SimulEval/simuleval/agents/agent.py | ashkanalinejad/Supervised-Simultaneous-MT | d09397ed86bbf4133d5d9b906030a8881ee4c13f | [
"MIT"
]
| 2 | 2022-01-11T19:27:11.000Z | 2022-01-12T11:06:53.000Z | SimulEval/simuleval/agents/agent.py | sfu-natlang/Supervised-Simultaneous-MT | 12c3a53887c985ae24199ecef2f7b2335fe214c6 | [
"MIT"
]
| 1 | 2022-02-12T03:02:52.000Z | 2022-02-12T04:27:10.000Z | SimulEval/simuleval/agents/agent.py | sfu-natlang/Supervised-Simultaneous-MT | 12c3a53887c985ae24199ecef2f7b2335fe214c6 | [
"MIT"
]
| 1 | 2022-02-27T14:22:36.000Z | 2022-02-27T14:22:36.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from simuleval.states import TextStates, SpeechStates
class Agent(object):
data_type = None
def __init__(self, args):
assert self.data_type is not None
def states_type(self, args):
if self.data_type == "text":
return TextStates
elif self.data_type == "speech":
return SpeechStates
else:
raise NotImplementedError
def segment_to_units(self, segment, states):
return [segment]
def units_to_segment(self, unit_queue, states):
return unit_queue.pop()
def update_states_read(self, states):
pass
def update_states_write(self, states):
pass
def build_states(self, args, client, sentence_id):
        # Initialize states here, for example to add customized entries to states.
        # This function will be called at the beginning of every new sentence.
states = self.states_type(args)(args, client, sentence_id, self)
self.initialize_states(states)
return states
def initialize_states(self, states):
pass
@staticmethod
def add_args(parser):
# Add additional command line arguments here
pass
def policy(self, states):
# Make decision here
        raise NotImplementedError
def predict(self, states):
# predict token here
        raise NotImplementedError
| 26.948276 | 76 | 0.662828 | 1,309 | 0.837492 | 0 | 0 | 105 | 0.067179 | 0 | 0 | 420 | 0.268714 |
9e301c912b42abb46c781523b9340a9c6ccd01d4 | 13,317 | py | Python | source/mre-plugin-samples/Plugins/DetectShotsByRekognitionVideo/DetectShotsByRekognitionVideo.py | aws-samples/aws-media-replay-engine-samples | d9b479f3c7da87c8b6d2a265334a6d3aae58d885 | [
"MIT-0"
]
| 4 | 2022-02-03T17:23:19.000Z | 2022-03-16T13:13:09.000Z | source/mre-plugin-samples/Plugins/DetectShotsByRekognitionVideo/DetectShotsByRekognitionVideo.py | aws-samples/aws-media-replay-engine-samples | d9b479f3c7da87c8b6d2a265334a6d3aae58d885 | [
"MIT-0"
]
| 1 | 2022-02-22T01:25:57.000Z | 2022-03-10T21:27:31.000Z | source/mre-plugin-samples/Plugins/DetectShotsByRekognitionVideo/DetectShotsByRekognitionVideo.py | aws-samples/aws-media-replay-engine-samples | d9b479f3c7da87c8b6d2a265334a6d3aae58d885 | [
"MIT-0"
]
| 1 | 2022-02-16T02:23:43.000Z | 2022-02-16T02:23:43.000Z | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import boto3
import json
import sys
import time
import ffmpeg
from MediaReplayEnginePluginHelper import OutputHelper
from MediaReplayEnginePluginHelper import Status
from MediaReplayEnginePluginHelper import DataPlane
s3_client = boto3.client('s3')
class VideoDetect:
jobId = ''
rek = boto3.client('rekognition')
sqs = boto3.client('sqs')
sns = boto3.client('sns')
roleArn = ''
bucket = ''
video = ''
startJobId = ''
sqsQueueUrl = ''
snsTopicArn = ''
processType = ''
def __init__(self, role, bucket, video):
self.roleArn = role
self.bucket = bucket
self.video = video
def GetSQSMessageSuccess(self):
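        # Poll the SQS queue until the completion notification for this Rekognition job
        # arrives; return True only if the matching job reports SUCCEEDED.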
jobFound = False
succeeded = False
dotLine=0
while jobFound == False:
sqsResponse = self.sqs.receive_message(QueueUrl=self.sqsQueueUrl, MessageAttributeNames=['ALL'],
MaxNumberOfMessages=10)
###print(sqsResponse)
if sqsResponse:
if 'Messages' not in sqsResponse:
if dotLine<100:
print('.', end='')
dotLine=dotLine+1
else:
print()
dotLine=0
####kyle
print('TIMEOUT')
break
sys.stdout.flush()
time.sleep(5)
continue
for message in sqsResponse['Messages']:
notification = json.loads(message['Body'])
rekMessage = json.loads(notification['Message'])
print(rekMessage['JobId'])
print(rekMessage['Status'])
if rekMessage['JobId'] == self.startJobId:
print('Matching Job Found:' + rekMessage['JobId'])
jobFound = True
if (rekMessage['Status']=='SUCCEEDED'):
succeeded=True
self.sqs.delete_message(QueueUrl=self.sqsQueueUrl,
ReceiptHandle=message['ReceiptHandle'])
else:
print("Job didn't match:" +
str(rekMessage['JobId']) + ' : ' + self.startJobId)
# Delete the unknown message. Consider sending to dead letter queue
self.sqs.delete_message(QueueUrl=self.sqsQueueUrl,
ReceiptHandle=message['ReceiptHandle'])
return succeeded
def CreateTopicandQueue(self):
millis = str(int(round(time.time() * 1000)))
#Create SNS topic
snsTopicName="AmazonRekognitionExample" + millis
topicResponse=self.sns.create_topic(Name=snsTopicName)
self.snsTopicArn = topicResponse['TopicArn']
print('SNS created',snsTopicName)
#create SQS queue
sqsQueueName="AmazonRekognitionQueue" + millis
self.sqs.create_queue(QueueName=sqsQueueName)
self.sqsQueueUrl = self.sqs.get_queue_url(QueueName=sqsQueueName)['QueueUrl']
attribs = self.sqs.get_queue_attributes(QueueUrl=self.sqsQueueUrl,
AttributeNames=['QueueArn'])['Attributes']
sqsQueueArn = attribs['QueueArn']
print('SQS created',sqsQueueName)
# Subscribe SQS queue to SNS topic
self.sns.subscribe(
TopicArn=self.snsTopicArn,
Protocol='sqs',
Endpoint=sqsQueueArn)
#Authorize SNS to write SQS queue
policy = """{{
"Version":"2012-10-17",
"Statement":[
{{
"Sid":"MyPolicy",
"Effect":"Allow",
"Principal" : {{"AWS" : "*"}},
"Action":"SQS:SendMessage",
"Resource": "{}",
"Condition":{{
"ArnEquals":{{
"aws:SourceArn": "{}"
}}
}}
}}
]
}}""".format(sqsQueueArn, self.snsTopicArn)
response = self.sqs.set_queue_attributes(
QueueUrl = self.sqsQueueUrl,
Attributes = {
'Policy' : policy
})
def DeleteTopicandQueue(self):
self.sqs.delete_queue(QueueUrl=self.sqsQueueUrl)
self.sns.delete_topic(TopicArn=self.snsTopicArn)
def StartSegmentDetection(self, use_sns=False):
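        # Start an asynchronous Rekognition segment-detection job for TECHNICAL_CUE and
        # SHOT segments, optionally publishing the completion notification to SNS.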
min_Technical_Cue_Confidence = 80.0
min_Shot_Confidence = 60.0
max_pixel_threshold = 0.1
min_coverage_percentage = 60
if use_sns:
response = self.rek.start_segment_detection(
Video={"S3Object": {"Bucket": self.bucket, "Name": self.video}},
NotificationChannel={
"RoleArn": self.roleArn,
"SNSTopicArn": self.snsTopicArn,
},
SegmentTypes=["TECHNICAL_CUE", "SHOT"],
Filters={
"TechnicalCueFilter": {
"MinSegmentConfidence": min_Technical_Cue_Confidence,
# "BlackFrame": {
# "MaxPixelThreshold": max_pixel_threshold,
# "MinCoveragePercentage": min_coverage_percentage,
# },
},
"ShotFilter": {"MinSegmentConfidence": min_Shot_Confidence},
}
)
else:
response = self.rek.start_segment_detection(
Video={"S3Object": {"Bucket": self.bucket, "Name": self.video}},
SegmentTypes=["TECHNICAL_CUE", "SHOT"],
Filters={
"TechnicalCueFilter": {
"MinSegmentConfidence": min_Technical_Cue_Confidence,
# "BlackFrame": {
# "MaxPixelThreshold": max_pixel_threshold,
# "MinCoveragePercentage": min_coverage_percentage,
# },
},
"ShotFilter": {"MinSegmentConfidence": min_Shot_Confidence},
}
)
self.startJobId = response["JobId"]
print(f"Start Job Id: {self.startJobId}")
def GetSegmentDetectionResults(self, chunk_start):
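        # Page through the segment-detection results and convert each SHOT segment's
        # SMPTE timecodes into start/duration values in seconds, offset by chunk_start.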
maxResults = 10
paginationToken = ""
finished = False
firstTime = True
outlist = []
while finished == False:
response = self.rek.get_segment_detection(
JobId=self.startJobId, MaxResults=maxResults, NextToken=paginationToken
)
#print(response)
if response['JobStatus'] == 'IN_PROGRESS':
print('waiting 10s')
time.sleep(10)
continue
if firstTime == True:
print(f"Status\n------\n{response['JobStatus']}")
print("\nRequested Types\n---------------")
for selectedSegmentType in response['SelectedSegmentTypes']:
print(f"\tType: {selectedSegmentType['Type']}")
print(f"\t\tModel Version: {selectedSegmentType['ModelVersion']}")
print()
print("\nAudio metadata\n--------------")
for audioMetadata in response['AudioMetadata']:
print(f"\tCodec: {audioMetadata['Codec']}")
print(f"\tDuration: {audioMetadata['DurationMillis']}")
print(f"\tNumber of Channels: {audioMetadata['NumberOfChannels']}")
print(f"\tSample rate: {audioMetadata['SampleRate']}")
print()
print("\nVideo metadata\n--------------")
for videoMetadata in response["VideoMetadata"]:
print(videoMetadata)
print(f"\tCodec: {videoMetadata['Codec']}")
#print(f"\tColor Range: {videoMetadata['ColorRange']}")
print(f"\tDuration: {videoMetadata['DurationMillis']}")
print(f"\tFormat: {videoMetadata['Format']}")
print(f"\tFrame rate: {videoMetadata['FrameRate']}")
print("\nSegments\n--------")
firstTime = False
for segment in response['Segments']:
if segment["Type"] == "TECHNICAL_CUE":
print("Technical Cue")
print(f"\tConfidence: {segment['TechnicalCueSegment']['Confidence']}")
print(f"\tType: {segment['TechnicalCueSegment']['Type']}")
if segment["Type"] == "SHOT":
print("Shot")
print(f"\tConfidence: {segment['ShotSegment']['Confidence']}")
print(f"\tIndex: " + str(segment["ShotSegment"]["Index"]))
outputSeg = {}
outputSeg['Label'] = 'SHOT'
outputSeg['beg'] = segment['StartTimecodeSMPTE']
outputSeg['end'] = segment['EndTimecodeSMPTE']
outputSeg['duration'] = segment['DurationSMPTE']
outlist.append(outputSeg)
print(f"\tDuration (milliseconds): {segment['DurationMillis']}")
print(f"\tStart Timestamp (milliseconds): {segment['StartTimestampMillis']}")
print(f"\tEnd Timestamp (milliseconds): {segment['EndTimestampMillis']}")
print(f"\tStart timecode: {segment['StartTimecodeSMPTE']}")
print(f"\tEnd timecode: {segment['EndTimecodeSMPTE']}")
print(f"\tDuration timecode: {segment['DurationSMPTE']}")
print(f"\tStart frame number {segment['StartFrameNumber']}")
print(f"\tEnd frame number: {segment['EndFrameNumber']}")
print(f"\tDuration frames: {segment['DurationFrames']}")
print()
if "NextToken" in response:
paginationToken = response["NextToken"]
else:
finished = True
times_sec = []
begs_sec = []
results = []
for out in outlist:
time_str = out['duration']
hh,mm,ss,ms = map(int,time_str.replace(';',':').split(':'))
time_sec = float("{:.2f}".format(ms/60 + ss + 60*(mm + 60*hh)))
print(time_str,time_sec)
times_sec.append(time_sec)
beg_str = out['beg']
hh,mm,ss,ms = map(int,beg_str.replace(';',':').split(':'))
beg_sec = float("{:.2f}".format(ms/60 + ss + 60*(mm + 60*hh))) + chunk_start
print(beg_str,beg_sec)
begs_sec.append(beg_sec)
results.append({'Label':'SHOT','Start':beg_sec,'Duration':time_sec})
return results
def lambda_handler(event, context):
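    # MRE plugin entry point: convert the HLS chunk to MP4, upload it to S3, run
    # Rekognition shot detection on it and persist the detected segments as plugin results.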
results = []
mre_dataplane = DataPlane(event)
# 'event' is the input event payload passed to Lambda
mre_outputhelper = OutputHelper(event)
# Replace following with the ARN of the AmazonRekognitionServiceRole
roleArn = 'arn:aws:iam::ACCOUNTNUMBER:role/AmazonRekognitionServiceRole'
bucket = event['Input']['Media']["S3Bucket"]
video = event['Input']['Media']["S3Key"] #"***.ts"
chunk_start = event['Input']['Metadata']['HLSSegment']['StartTime']
try:
# Download the HLS video segment from S3
media_path = mre_dataplane.download_media()
mp4_path = '/tmp/mre_chunk.mp4'
try:
stream = ffmpeg.input(media_path)
out, err = (
ffmpeg.output(stream,mp4_path)
.run(capture_stdout=True, capture_stderr=True,overwrite_output=True)
)
except ffmpeg.Error as err:
print(err.stderr)
raise
try:
video_mp4 = video[:-2]+'mp4'
response = s3_client.upload_file(mp4_path, bucket, video_mp4)
except ClientError as e:
logging.error(e)
return False
print(f'{media_path} converted to {mp4_path} and uploaded to {video_mp4}')
analyzer=VideoDetect(roleArn, bucket,video_mp4)
analyzer.StartSegmentDetection()
results = analyzer.GetSegmentDetectionResults(chunk_start)
print(f'results:{results}')
# Add the results of the plugin to the payload (required if the plugin status is "complete"; Optional if the plugin has any errors)
mre_outputhelper.add_results_to_output(results)
# Persist plugin results for later use
mre_dataplane.save_plugin_results(results)
# Update the processing status of the plugin (required)
mre_outputhelper.update_plugin_status(Status.PLUGIN_COMPLETE)
# Returns expected payload built by MRE helper library
return mre_outputhelper.get_output_object()
except Exception as e:
print(e)
# Update the processing status of the plugin (required)
mre_outputhelper.update_plugin_status(Status.PLUGIN_ERROR)
# Re-raise the exception to MRE processing where it will be handled
raise
| 39.283186 | 139 | 0.535181 | 10,644 | 0.799279 | 0 | 0 | 0 | 0 | 0 | 0 | 4,101 | 0.307952 |
9e316afea9883b374b2578dfd94ecad511320c5f | 1,567 | py | Python | chempy/kinetics/tests/test_integrated.py | matecsaj/chempy | 2c93f185e4547739331193c06d77282206621517 | [
"BSD-2-Clause"
]
| null | null | null | chempy/kinetics/tests/test_integrated.py | matecsaj/chempy | 2c93f185e4547739331193c06d77282206621517 | [
"BSD-2-Clause"
]
| null | null | null | chempy/kinetics/tests/test_integrated.py | matecsaj/chempy | 2c93f185e4547739331193c06d77282206621517 | [
"BSD-2-Clause"
]
| null | null | null | from __future__ import division
from chempy.util.testing import requires
from ..integrated import pseudo_irrev, pseudo_rev, binary_irrev, binary_rev
import pytest
try:
import sympy
except ImportError:
sympy = None
else:
one = sympy.S(1)
t, kf, kb, prod, major, minor = sympy.symbols(
't kf kb prod major minor', negative=False, nonnegative=True, real=True)
subsd = {t: one*2, kf: one*3, kb: one*7, major: one*11,
minor: one*13, prod: one*0}
@requires('sympy')
def test_pseudo_irrev():
f = pseudo_irrev(t, kf, prod, major, minor, backend=sympy)
dfdt = f.diff(t)
num_dfdt = dfdt.subs(subsd)
assert (num_dfdt - (
major*kf*(minor - f)
).subs(subsd)).simplify() == 0
@requires('sympy')
def test_pseudo_rev():
f = pseudo_rev(t, kf, kb, prod, major, minor, backend=sympy)
dfdt = f.diff(t)
num_dfdt = dfdt.subs(subsd)
assert (num_dfdt - (major*kf*(minor - f) - kb*f).subs(subsd)).simplify() == 0
@pytest.mark.slow
@requires('sympy')
def test_binary_irrev():
f = binary_irrev(t, kf, prod, major, minor, backend=sympy)
dfdt = f.diff(t)
num_dfdt = dfdt.subs(subsd)
assert (num_dfdt - (kf*(minor - f)*(major - f)).subs(subsd)).simplify() == 0
@pytest.mark.slow
@requires('sympy')
def test_binary_rev():
f = binary_rev(t, kf, kb, prod, major, minor, backend=sympy)
dfdt = f.diff(t)
num_dfdt = dfdt.subs(subsd)
ans = kf*(minor - f)*(major - f) - kb*f
# symbolic susbsitution fails:
assert abs(float(num_dfdt) - float(ans.subs(subsd))) < 2e-14
| 27.017241 | 81 | 0.640715 | 0 | 0 | 0 | 0 | 1,068 | 0.681557 | 0 | 0 | 84 | 0.053606 |
9e3410f7e06e468d0eb7d1e58add77993b4f9819 | 1,362 | py | Python | emulateHttp2/processTestByBrowser.py | mixianghang/newhttp2 | 0843301ad79d11bc43f5d70dbcf934aaf072f6a3 | [
"MIT"
]
| null | null | null | emulateHttp2/processTestByBrowser.py | mixianghang/newhttp2 | 0843301ad79d11bc43f5d70dbcf934aaf072f6a3 | [
"MIT"
]
| null | null | null | emulateHttp2/processTestByBrowser.py | mixianghang/newhttp2 | 0843301ad79d11bc43f5d70dbcf934aaf072f6a3 | [
"MIT"
]
| null | null | null | #!/usr/bin/python
import sys
import os
import shutil
def main():
if len(sys.argv) < 3:
print "Usage sourceDir resultDir"
sys.exit(1)
sourceDir = sys.argv[1]
resultDir = sys.argv[2]
if not os.path.exists(sourceDir):
print "{0} doesn't exist".format(sourceDir)
sys.exit(1)
if os.path.exists(resultDir):
    shutil.rmtree(resultDir + "_bak", ignore_errors=True)  # tolerate a missing previous backup
shutil.move(resultDir, resultDir + "_bak")
print "{0} exists, rename it".format(resultDir)
os.makedirs(resultDir)
for sourceFileName in os.listdir(sourceDir):
sourceFilePath = os.path.join(sourceDir, sourceFileName)
resultFilePath = os.path.join(resultDir, sourceFileName)
resultFd = open(resultFilePath, "w")
resultFd.write("sizeWhenCancel in KB\tpacketNumWhenCancel\tsizeOfAll in KB\tpacketNumOfAll\trecvedAfterCancel in KB\n")
with open(sourceFilePath, "r") as fd:
for line in fd:
line = line[:-1]
lineArray = line.split("\t")
if len(lineArray) < 4:
print "parse line {0} failed for file {1}".format(line, sourceFilePath)
sys.exit(1)
sizeWhenCancel = int(lineArray[0])
sizeOfAll = int(lineArray[2])
lineArray.append(sizeOfAll - sizeWhenCancel)
resultFd.write("\t".join(str(item) for item in lineArray) + "\n")
resultFd.close()
if __name__ == "__main__":
main()
| 32.428571 | 123 | 0.660793 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 265 | 0.194567 |
9e36180ad2d9abb3875f4262a27e459d07a15a75 | 1,097 | py | Python | setup.py | osism/netbox-plugin-osism | 8cba95bd6bed167c5a05d464d95246c9d4c98a6a | [
"Apache-2.0"
]
| null | null | null | setup.py | osism/netbox-plugin-osism | 8cba95bd6bed167c5a05d464d95246c9d4c98a6a | [
"Apache-2.0"
]
| null | null | null | setup.py | osism/netbox-plugin-osism | 8cba95bd6bed167c5a05d464d95246c9d4c98a6a | [
"Apache-2.0"
]
| null | null | null | from setuptools import setup
setup(
name='netbox_plugin_osism',
version='0.0.1',
description='NetBox Plugin OSISM',
long_description='Netbox Plugin OSISM',
url='https://github.com/osism/netbox-plugin-osism',
download_url='https://github.com/osism/netbox-plugin-osism',
author='OSISM GmbH',
author_email='[email protected]',
maintainer='OSISM GmbH',
maintainer_email='[email protected]',
install_requires=[],
packages=['netbox_plugin_osism'],
package_data={
'netbox_plugin_osism':
['templates/netbox_plugin_osism/*.html']
},
include_package_data=True,
zip_safe=False,
platforms=['Any'],
keywords=['netbox', 'netbox-plugin'],
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: Apache Software License',
'Framework :: Django',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Intended Audience :: Developers',
'Environment :: Console',
],
)
| 31.342857 | 64 | 0.631723 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 605 | 0.551504 |
9e36f2c784f6f44bd775bdedd2272a8be3601516 | 525 | py | Python | src/response.py | vcokltfre/snowflake.vcokltf.re | 5b8324a4fbc2e512dbc263d4ed65edb89d72a549 | [
"MIT"
]
| 1 | 2021-03-23T15:13:04.000Z | 2021-03-23T15:13:04.000Z | src/response.py | vcokltfre/snowflake.vcokltf.re | 5b8324a4fbc2e512dbc263d4ed65edb89d72a549 | [
"MIT"
]
| null | null | null | src/response.py | vcokltfre/snowflake.vcokltf.re | 5b8324a4fbc2e512dbc263d4ed65edb89d72a549 | [
"MIT"
]
| null | null | null | from starlette.responses import HTMLResponse
class ResponseBuilder:
def __init__(self):
self.items = []
def addtag(self, name: str, value: str):
self.items.append((name, value))
def build(self):
og_tags = ""
for item in self.items:
og_tags += f"\n<meta property=\"og:{item[0]}\" content=\"{item[1]}\">"
return HTMLResponse(f"""
<html>
<head>
{og_tags}
</head>
</html>
""")
| 23.863636 | 82 | 0.485714 | 478 | 0.910476 | 0 | 0 | 0 | 0 | 0 | 0 | 193 | 0.367619 |
9e377bb8273400c9545a16768897adf2638f5e45 | 63 | py | Python | rx/__init__.py | yutiansut/RxPY | c3bbba77f9ebd7706c949141725e220096deabd4 | [
"ECL-2.0",
"Apache-2.0"
]
| 1 | 2018-11-16T09:07:13.000Z | 2018-11-16T09:07:13.000Z | rx/__init__.py | yutiansut/RxPY | c3bbba77f9ebd7706c949141725e220096deabd4 | [
"ECL-2.0",
"Apache-2.0"
]
| null | null | null | rx/__init__.py | yutiansut/RxPY | c3bbba77f9ebd7706c949141725e220096deabd4 | [
"ECL-2.0",
"Apache-2.0"
]
| 1 | 2020-05-08T08:23:08.000Z | 2020-05-08T08:23:08.000Z | from .core import Observer, Observable, AnonymousObserver as _
| 31.5 | 62 | 0.825397 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
9e379e1fd1991982e0f968b5ef6aafe42d277ba1 | 47 | py | Python | news_api/settings/Vespa_config.py | rdoume/News_API | 9c555fdc5e5b717b98bcfec27364b9612b9c4aa1 | [
"MIT"
]
| 9 | 2019-07-19T13:19:55.000Z | 2021-07-08T16:25:30.000Z | news_api/settings/Vespa_config.py | rdoume/News_API | 9c555fdc5e5b717b98bcfec27364b9612b9c4aa1 | [
"MIT"
]
| null | null | null | news_api/settings/Vespa_config.py | rdoume/News_API | 9c555fdc5e5b717b98bcfec27364b9612b9c4aa1 | [
"MIT"
]
| 1 | 2021-05-12T01:50:04.000Z | 2021-05-12T01:50:04.000Z | VESPA_IP = "172.16.100.65"
VESPA_PORT = "8080"
| 15.666667 | 26 | 0.680851 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 21 | 0.446809 |
9e39c8fbaaf037c97de86567d3d6ad2bfa09867d | 642 | py | Python | test/walk.py | manxueitp/cozmo-test | a91b1a4020544cb622bd67385f317931c095d2e8 | [
"MIT"
]
| null | null | null | test/walk.py | manxueitp/cozmo-test | a91b1a4020544cb622bd67385f317931c095d2e8 | [
"MIT"
]
| null | null | null | test/walk.py | manxueitp/cozmo-test | a91b1a4020544cb622bd67385f317931c095d2e8 | [
"MIT"
]
| null | null | null | import cozmo
from cozmo.util import distance_mm, speed_mmps,degrees
def cozmo_program(robot: cozmo.robot.Robot):
robot.drive_straight(distance_mm(150),speed_mmps(100)).wait_for_completed()
robot.turn_in_place(degrees(90)).wait_for_completed()
robot.drive_straight(distance_mm(150),speed_mmps(100)).wait_for_completed()
robot.turn_in_place(degrees(90)).wait_for_completed()
robot.drive_straight(distance_mm(150),speed_mmps(100)).wait_for_completed()
robot.turn_in_place(degrees(90)).wait_for_completed()
robot.drive_straight(distance_mm(150),speed_mmps(100)).wait_for_completed()
cozmo.run_program(cozmo_program)
| 45.857143 | 79 | 0.800623 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
9e3a0239409f0db941b17e1b31a07a8a3ed673cb | 694 | py | Python | lectures/extensions/hyperbolic_discounting/replication_code/.mywaflib/waflib/Tools/clang.py | loikein/ekw-lectures | a2f5436f10515ab26eab323fca8c37c91bdc5dcd | [
"MIT"
]
| 4 | 2019-11-15T15:21:27.000Z | 2020-07-08T15:04:30.000Z | lectures/extensions/hyperbolic_discounting/replication_code/.mywaflib/waflib/Tools/clang.py | loikein/ekw-lectures | a2f5436f10515ab26eab323fca8c37c91bdc5dcd | [
"MIT"
]
| 9 | 2019-11-18T15:54:36.000Z | 2020-07-14T13:56:53.000Z | lectures/extensions/hyperbolic_discounting/replication_code/.mywaflib/waflib/Tools/clang.py | loikein/ekw-lectures | a2f5436f10515ab26eab323fca8c37c91bdc5dcd | [
"MIT"
]
| 3 | 2021-01-25T15:41:30.000Z | 2021-09-21T08:51:36.000Z | #!/usr/bin/env python
# Krzysztof Kosiński 2014
"""
Detect the Clang C compiler
"""
from waflib.Configure import conf
from waflib.Tools import ar
from waflib.Tools import ccroot
from waflib.Tools import gcc
@conf
def find_clang(conf):
"""
Finds the program clang and executes it to ensure it really is clang
"""
cc = conf.find_program("clang", var="CC")
conf.get_cc_version(cc, clang=True)
conf.env.CC_NAME = "clang"
def configure(conf):
conf.find_clang()
conf.find_program(["llvm-ar", "ar"], var="AR")
conf.find_ar()
conf.gcc_common_flags()
conf.gcc_modifier_platform()
conf.cc_load_tools()
conf.cc_add_flags()
conf.link_add_flags()
| 22.387097 | 72 | 0.693084 | 0 | 0 | 0 | 0 | 233 | 0.335252 | 0 | 0 | 201 | 0.289209 |
9e3b5a48a7befde960b0ddd0c42b6f209d9a2b77 | 457 | py | Python | test_lambda_function.py | gavinbull/loyalty_anagram | a91d23083d8c040916733751932fb47d00592890 | [
"MIT"
]
| null | null | null | test_lambda_function.py | gavinbull/loyalty_anagram | a91d23083d8c040916733751932fb47d00592890 | [
"MIT"
]
| null | null | null | test_lambda_function.py | gavinbull/loyalty_anagram | a91d23083d8c040916733751932fb47d00592890 | [
"MIT"
]
| null | null | null | import unittest
from lambda_function import gather_anagrams
class TestSum(unittest.TestCase):
def test_list_int(self):
"""
        Basic unit test verifying the anagrams returned for 'iceman'
"""
test_word = "iceman"
get_result = gather_anagrams(test_word)
expected = ['anemic', 'cinema', 'iceman']
self.assertEqual(get_result, expected)
if __name__ == '__main__':
unittest.main()
| 28.5625 | 86 | 0.654267 | 347 | 0.7593 | 0 | 0 | 0 | 0 | 0 | 0 | 144 | 0.315098 |
9e3d9a4ab5c166e9fe2b7e4de49e51e3488a6de5 | 577 | py | Python | euler62.py | dchourasia/euler-solutions | e20cbf016a9ea601fcce928d9690930c9a498837 | [
"Apache-2.0"
]
| null | null | null | euler62.py | dchourasia/euler-solutions | e20cbf016a9ea601fcce928d9690930c9a498837 | [
"Apache-2.0"
]
| null | null | null | euler62.py | dchourasia/euler-solutions | e20cbf016a9ea601fcce928d9690930c9a498837 | [
"Apache-2.0"
]
| null | null | null | '''
Find the smallest cube for which exactly five permutations of its digits are cube.
'''
import math, itertools
print(math.pow(8, 1/3).is_integer())
tried = {}
for i in range(1000, 1200):
cb = int(math.pow(i, 3))
#print(cb)
#print(math.pow(int(cb), 1/3))
roots = 1
tried[i] = [str(cb)]
for x in itertools.permutations(str(cb)):
x = ''.join(x)
if x not in tried[i]:
#print('x =', x)
y = round(math.pow(int(x), 1/3))
#print(y**3, x)
if y**3 == int(x):
roots += 1
tried[i].append(x)
print(roots, i, y, x)
if roots == 5:
print(cb)
break
| 21.37037 | 82 | 0.587522 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 163 | 0.282496 |
9e3eca14631d828c95eda787a3d066e5994ecfdb | 3,010 | py | Python | examples/reeds_problem.py | bwhewe-13/ants | 6923cfc1603e0cd90c2ae90fa0fed6dd86edc0b2 | [
"MIT"
]
| null | null | null | examples/reeds_problem.py | bwhewe-13/ants | 6923cfc1603e0cd90c2ae90fa0fed6dd86edc0b2 | [
"MIT"
]
| null | null | null | examples/reeds_problem.py | bwhewe-13/ants | 6923cfc1603e0cd90c2ae90fa0fed6dd86edc0b2 | [
"MIT"
]
| null | null | null | from ants.medium import MediumX
from ants.materials import Materials
from ants.mapper import Mapper
from ants.multi_group import source_iteration
import numpy as np
import matplotlib.pyplot as plt
def reeds(cells):
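    # Build the 1-D Reed's benchmark problem: piecewise-constant total/scattering cross
    # sections and external source over a width-16 slab discretized into `cells` cells.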
width = 16.
delta_x = width/cells
group = 1
boundaries = [slice(0,int(2/delta_x)),slice(int(2/delta_x),int(3/delta_x)),
slice(int(3/delta_x),int(5/delta_x)),slice(int(5/delta_x),int(6/delta_x)),
slice(int(6/delta_x),int(10/delta_x)),slice(int(10/delta_x),int(11/delta_x)),
slice(int(11/delta_x),int(13/delta_x)),slice(int(13/delta_x),int(14/delta_x)),
slice(int(14/delta_x),int(16/delta_x))]
total_xs = np.zeros((cells,group),dtype='float64')
total_vals = [10,10,0,5,50,5,0,10,10]
# total_vals = [1,1,0,5,50,5,0,1,1]
scatter_xs = np.zeros((cells,group,group),dtype='float64')
scatter_vals = [9.9,9.9,0,0,0,0,0,9.9,9.9]
# scatter_vals = [0.9,0.9,0,0,0,0,0,0.9,0.9]
source = np.zeros((cells,group),dtype='float64')
source_vals = [0,1,0,0,50,0,0,1,0]
for ii in range(len(boundaries)):
total_xs[boundaries[ii]] = total_vals[ii]
scatter_xs[boundaries[ii]] = np.diag(np.repeat(scatter_vals[ii],group))
source[boundaries[ii]] = source_vals[ii]
# scatter_xs = np.ones((cells,group,group),dtype='float64') * 0.1
return total_xs, scatter_xs, source
groups = 1
cells_x = 1000
medium_width = 16.
cell_width_x = medium_width / cells_x
angles = 16
xbounds = np.array([1, 0])
materials = ['reed-vacuum', 'reed-strong-source', \
'reed-scatter','reed-absorber']
problem_01 = Materials(materials, 1, None)
medium = MediumX(cells_x, cell_width_x, angles, xbounds)
medium.add_external_source("reed")
map_obj = Mapper.load_map('reed_problem2.mpr')
if cells_x != map_obj.cells_x:
map_obj.adjust_widths(cells_x)
reversed_key = {v: k for k, v in map_obj.map_key.items()}
total = []
scatter = []
fission = []
for position in range(len(map_obj.map_key)):
map_material = reversed_key[position]
total.append(problem_01.data[map_material][0])
scatter.append(problem_01.data[map_material][1])
fission.append(problem_01.data[map_material][2])
total = np.array(total)
scatter = np.array(scatter)
fission = np.array(fission)
print(map_obj.map_key.keys())
print(problem_01.data.keys())
mu_x = medium.mu_x
weight = medium.weight
print(mu_x)
print(weight)
medium_map = map_obj.map_x.astype(int)
phi = source_iteration(groups, mu_x / cell_width_x, weight, total, scatter, \
fission, medium.ex_source, medium_map, xbounds, \
cell_width_x)
print(medium.ex_source.shape)
fig, ax = plt.subplots()
solution = np.load('reed_solution.npy')
print(len(solution))
print(np.allclose(solution, phi[:,0],atol=1e-12))
ax.plot(np.linspace(0, 16, len(solution)), solution, label='solution', c='k', ls='--')
ax.plot(np.linspace(0, medium_width, cells_x), phi[:,0], label='New', c='r', alpha=0.6)
ax.legend(loc=0)
plt.show() | 29.80198 | 87 | 0.679734 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 302 | 0.100332 |
9e40a4a7ae6fa13448f345e341c1c32845116799 | 29,411 | py | Python | exp_runner.py | BoifZ/NeuS | a2900fa5c0b2a9d54b9cb5b364440ee7eecfb525 | [
"MIT"
]
| null | null | null | exp_runner.py | BoifZ/NeuS | a2900fa5c0b2a9d54b9cb5b364440ee7eecfb525 | [
"MIT"
]
| null | null | null | exp_runner.py | BoifZ/NeuS | a2900fa5c0b2a9d54b9cb5b364440ee7eecfb525 | [
"MIT"
]
| null | null | null | import os
import time
import logging
import argparse
import numpy as np
import cv2 as cv
import trimesh
import torch
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from shutil import copyfile
from icecream import ic
from tqdm import tqdm
from pyhocon import ConfigFactory
from models.dataset import Dataset, load_K_Rt_from_P
from models.fields import RenderingNetwork, SDFNetwork, SingleVarianceNetwork, NeRF
from models.renderer import NeuSRenderer
from models.poses import LearnPose, LearnIntrin, RaysGenerator
# from models.depth import SiLogLoss
class Runner:
def __init__(self, conf_path, mode='train', case='CASE_NAME', is_continue=False):
self.device = torch.device('cuda')
# Configuration
self.conf_path = conf_path
f = open(self.conf_path)
conf_text = f.read()
conf_text = conf_text.replace('CASE_NAME', case)
f.close()
self.conf = ConfigFactory.parse_string(conf_text)
self.conf['dataset.data_dir'] = self.conf['dataset.data_dir'].replace('CASE_NAME', case)
self.base_exp_dir = self.conf['general.base_exp_dir']
os.makedirs(self.base_exp_dir, exist_ok=True)
self.dataset = Dataset(self.conf['dataset'])
self.iter_step = 0
self.poses_iter_step = 0
# Training parameters
self.end_iter = self.conf.get_int('train.end_iter')
self.save_freq = self.conf.get_int('train.save_freq')
self.report_freq = self.conf.get_int('train.report_freq')
self.val_freq = self.conf.get_int('train.val_freq')
self.val_mesh_freq = self.conf.get_int('train.val_mesh_freq')
self.batch_size = self.conf.get_int('train.batch_size')
self.validate_resolution_level = self.conf.get_int('train.validate_resolution_level')
self.learning_rate = self.conf.get_float('train.learning_rate')
self.learning_rate_alpha = self.conf.get_float('train.learning_rate_alpha')
self.use_white_bkgd = self.conf.get_bool('train.use_white_bkgd')
self.warm_up_end = self.conf.get_int('train.warm_up_end', default=0.0)
self.anneal_end = self.conf.get_int('train.anneal_end', default=0.0)
self.extract_depth = self.conf.get_bool('train.extract_depth')
self.learnable = self.conf.get_bool('train.focal_learnable')
if self.learnable:
self.focal_lr = self.conf.get_float('train.focal_lr')
self.pose_lr = self.conf.get_float('train.pose_lr')
self.focal_lr_gamma = self.conf.get_float('train.focal_lr_gamma')
self.pose_lr_gamma = self.conf.get_float('train.pose_lr_gamma')
self.step_size = self.conf.get_int('train.step_size')
self.start_refine_pose_iter = self.conf.get_int('train.start_refine_pose_iter')
self.start_refine_focal_iter = self.conf.get_int('train.start_refine_focal_iter')
# learn focal parameter
self.intrin_net = LearnIntrin(self.dataset.H, self.dataset.W, **self.conf['model.focal'], init_focal=self.dataset.focal).to(self.device)
# learn pose for each image
self.pose_param_net = LearnPose(self.dataset.n_images, **self.conf['model.pose'], init_c2w=self.dataset.pose_all).to(self.device)
self.optimizer_focal = torch.optim.Adam(self.intrin_net.parameters(), lr=self.focal_lr)
self.optimizer_pose = torch.optim.Adam(self.pose_param_net.parameters(), lr=self.pose_lr)
            self.scheduler_focal = torch.optim.lr_scheduler.MultiStepLR(self.optimizer_focal, milestones=range(self.warm_up_end, self.end_iter, self.step_size),
gamma=self.focal_lr_gamma)
self.scheduler_pose = torch.optim.lr_scheduler.MultiStepLR(self.optimizer_pose, milestones=range(self.warm_up_end, self.end_iter, self.step_size),
gamma=self.pose_lr_gamma)
else:
self.intrin_net = self.dataset.intrinsics_all
self.pose_param_net = self.dataset.pose_all
self.rays_generator = RaysGenerator(self.dataset.images_lis, self.dataset.masks_lis, self.dataset.depth_lis, self.pose_param_net, self.intrin_net, learnable=self.learnable)
# Weights
self.igr_weight = self.conf.get_float('train.igr_weight')
self.mask_weight = self.conf.get_float('train.mask_weight')
self.is_continue = is_continue
self.mode = mode
self.model_list = []
self.writer = None
# Networks
params_to_train = []
self.nerf_outside = NeRF(**self.conf['model.nerf']).to(self.device)
self.sdf_network = SDFNetwork(**self.conf['model.sdf_network']).to(self.device)
self.deviation_network = SingleVarianceNetwork(**self.conf['model.variance_network']).to(self.device)
self.color_network = RenderingNetwork(**self.conf['model.rendering_network']).to(self.device)
params_to_train += list(self.nerf_outside.parameters())
params_to_train += list(self.sdf_network.parameters())
params_to_train += list(self.deviation_network.parameters())
params_to_train += list(self.color_network.parameters())
if self.extract_depth:
# add depth_feats+
self.depth_weight = self.conf.get_float('train.depth_weight')
self.depth_network = RenderingNetwork(**self.conf['model.depth_extract_network']).to(self.device)
# self.d_loss = SiLogLoss()
params_to_train += list(self.depth_network.parameters())
else:
self.depth_network = None
self.renderer = NeuSRenderer(self.nerf_outside,
self.sdf_network,
self.deviation_network,
self.color_network,
self.depth_network,
**self.conf['model.neus_renderer'])
self.optimizer = torch.optim.Adam(params_to_train, lr=self.learning_rate)
# Load checkpoint
latest_model_name = None
if is_continue:
model_list_raw = os.listdir(os.path.join(self.base_exp_dir, 'checkpoints'))
model_list = []
for model_name in model_list_raw:
if model_name[-3:] == 'pth' and int(model_name[5:-4]) <= self.end_iter:
model_list.append(model_name)
model_list.sort()
latest_model_name = model_list[-1]
if latest_model_name is not None:
logging.info('Find checkpoint: {}'.format(latest_model_name))
self.load_checkpoint(latest_model_name)
# Backup codes and configs for debug
if self.mode[:5] == 'train':
self.file_backup()
def train(self):
self.writer = SummaryWriter(log_dir=os.path.join(self.base_exp_dir, 'logs'))
self.update_learning_rate()
res_step = self.end_iter - self.iter_step
image_perm = self.get_image_perm()
if self.learnable:
if self.poses_iter_step >= self.start_refine_pose_iter:
self.pose_param_net.train()
else:
self.pose_param_net.eval()
if self.poses_iter_step >= self.start_refine_focal_iter:
self.intrin_net.train()
else:
self.intrin_net.eval()
for iter_i in tqdm(range(res_step)):
if self.learnable:
if self.poses_iter_step >= self.start_refine_pose_iter:
self.pose_param_net.train()
if self.poses_iter_step >= self.start_refine_focal_iter:
self.intrin_net.train()
img_idx = image_perm[self.iter_step % len(image_perm)]
# data = self.dataset.gen_random_rays_at(image_perm[self.iter_step % len(image_perm)], self.batch_size)
data = self.rays_generator.gen_random_rays_at(img_idx, self.batch_size)
rays_o, rays_d, true_rgb, mask, gt_feats = data[:, :3], data[:, 3: 6], data[:, 6: 9], data[:, 9: 10], data[:, 10:]
near, far = self.dataset.near_far_from_sphere(rays_o, rays_d)
background_rgb = None
if self.use_white_bkgd:
background_rgb = torch.ones([1, 3])
if self.mask_weight > 0.0:
mask = (mask > 0.5).float()
else:
mask = torch.ones_like(mask)
mask_sum = mask.sum() + 1e-5
render_out = self.renderer.render(rays_o, rays_d, near, far,
background_rgb=background_rgb,
cos_anneal_ratio=self.get_cos_anneal_ratio())
depth_feats = render_out['render_feats']
color_fine = render_out['color_fine']
s_val = render_out['s_val']
cdf_fine = render_out['cdf_fine']
gradient_error = render_out['gradient_error']
weight_max = render_out['weight_max']
weight_sum = render_out['weight_sum']
# Loss
color_error = (color_fine - true_rgb) * mask
color_fine_loss = F.l1_loss(color_error, torch.zeros_like(color_error), reduction='sum') / mask_sum
psnr = 20.0 * torch.log10(1.0 / (((color_fine - true_rgb)**2 * mask).sum() / (mask_sum * 3.0)).sqrt())
eikonal_loss = gradient_error
mask_loss = F.binary_cross_entropy(weight_sum.clip(1e-3, 1.0 - 1e-3), mask)
loss = color_fine_loss +\
eikonal_loss * self.igr_weight +\
mask_loss * self.mask_weight
if self.extract_depth:
# print(gt_feats.shape)
# depth_loss = self.d_loss(torch.sigmoid(depth_feats), gt_feats)
# depth_fine_loss = F.l1_loss(depth_loss, torch.zeros_like(depth_loss), reduction='sum') / mask_sum
# loss += depth_loss
# self.writer.add_scalar('Loss/depth_loss', depth_loss, self.iter_step)
depth_feat_error = (depth_feats - gt_feats) * mask
depth_fine_loss = F.l1_loss(depth_feat_error, torch.zeros_like(depth_feat_error), reduction='sum') / mask_sum
psnr_dfeat = 20.0 * torch.log10(1.0 / (((depth_feats - gt_feats)**2 * mask).sum() / (mask_sum * 3.0)).sqrt())
loss += depth_fine_loss * self.depth_weight
self.writer.add_scalar('Loss/depth_loss', depth_fine_loss, self.iter_step)
self.writer.add_scalar('Statistics/psnr_dfeat', psnr_dfeat, self.iter_step)
# print(depth_loss)
# print(loss)
self.optimizer.zero_grad()
if self.learnable:
self.optimizer_focal.zero_grad()
self.optimizer_pose.zero_grad()
loss.backward()
self.optimizer.step()
if self.learnable:
self.optimizer_focal.step()
self.optimizer_pose.step()
self.iter_step += 1
self.poses_iter_step += 1
self.writer.add_scalar('Loss/loss', loss, self.iter_step)
self.writer.add_scalar('Loss/color_loss', color_fine_loss, self.iter_step)
self.writer.add_scalar('Loss/eikonal_loss', eikonal_loss, self.iter_step)
self.writer.add_scalar('Statistics/s_val', s_val.mean(), self.iter_step)
self.writer.add_scalar('Statistics/cdf', (cdf_fine[:, :1] * mask).sum() / mask_sum, self.iter_step)
self.writer.add_scalar('Statistics/weight_max', (weight_max * mask).sum() / mask_sum, self.iter_step)
self.writer.add_scalar('Statistics/psnr', psnr, self.iter_step)
if self.iter_step % self.report_freq == 0:
print(self.base_exp_dir)
print('iter:{:8>d} loss = {} lr={}'.format(self.iter_step, loss, self.optimizer.param_groups[0]['lr']))
if self.iter_step % self.save_freq == 0:
self.save_checkpoint()
# pose_history_milestone = list(range(0, 100, 5)) + list(range(100, 1000, 100)) + list(range(1000, 10000, 1000))
# if self.poses_iter_step in pose_history_milestone:
# self.save_pnf_checkpoint()
if self.iter_step % self.val_freq == 0:
self.validate_image()
if self.iter_step % self.val_mesh_freq == 0:
res = 128
if self.iter_step % 10000==0:
res = 256
self.validate_mesh(resolution=res)
self.update_learning_rate()
if self.iter_step % len(image_perm) == 0:
image_perm = self.get_image_perm()
def get_image_perm(self):
return torch.randperm(self.dataset.n_images)
def get_cos_anneal_ratio(self):
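        # Anneal factor that grows linearly from 0 to 1 over the first `anneal_end`
        # iterations (always 1.0 if anneal_end == 0); passed to the renderer as cos_anneal_ratio.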
if self.anneal_end == 0.0:
return 1.0
else:
return np.min([1.0, self.iter_step / self.anneal_end])
def update_learning_rate(self):
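        # Linear warm-up for the first `warm_up_end` iterations, then cosine decay of the
        # learning rate from `learning_rate` down to `learning_rate * learning_rate_alpha`.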
if self.iter_step < self.warm_up_end:
learning_factor = self.iter_step / self.warm_up_end
else:
alpha = self.learning_rate_alpha
progress = (self.iter_step - self.warm_up_end) / (self.end_iter - self.warm_up_end)
learning_factor = (np.cos(np.pi * progress) + 1.0) * 0.5 * (1 - alpha) + alpha
for g in self.optimizer.param_groups:
g['lr'] = self.learning_rate * learning_factor
if self.learnable:
self.scheduler_focal.step()
self.scheduler_pose.step()
def file_backup(self):
dir_lis = self.conf['general.recording']
os.makedirs(os.path.join(self.base_exp_dir, 'recording'), exist_ok=True)
for dir_name in dir_lis:
cur_dir = os.path.join(self.base_exp_dir, 'recording', dir_name)
os.makedirs(cur_dir, exist_ok=True)
files = os.listdir(dir_name)
for f_name in files:
if f_name[-3:] == '.py':
copyfile(os.path.join(dir_name, f_name), os.path.join(cur_dir, f_name))
copyfile(self.conf_path, os.path.join(self.base_exp_dir, 'recording', 'config.conf'))
def load_checkpoint(self, checkpoint_name):
checkpoint = torch.load(os.path.join(self.base_exp_dir, 'checkpoints', checkpoint_name), map_location=self.device)
self.nerf_outside.load_state_dict(checkpoint['nerf'])
self.sdf_network.load_state_dict(checkpoint['sdf_network_fine'])
self.deviation_network.load_state_dict(checkpoint['variance_network_fine'])
self.color_network.load_state_dict(checkpoint['color_network_fine'])
self.optimizer.load_state_dict(checkpoint['optimizer'])
self.iter_step = checkpoint['iter_step']
if self.learnable:
self.load_pnf_checkpoint(checkpoint_name.replace('ckpt', 'pnf'))
logging.info('End')
def save_checkpoint(self):
checkpoint = {
'nerf': self.nerf_outside.state_dict(),
'sdf_network_fine': self.sdf_network.state_dict(),
'variance_network_fine': self.deviation_network.state_dict(),
'color_network_fine': self.color_network.state_dict(),
'depth_network_fine': self.depth_network.state_dict(),
'optimizer': self.optimizer.state_dict(),
'iter_step': self.iter_step,
}
os.makedirs(os.path.join(self.base_exp_dir, 'checkpoints'), exist_ok=True)
torch.save(checkpoint, os.path.join(self.base_exp_dir, 'checkpoints', 'ckpt_{:0>6d}.pth'.format(self.iter_step)))
if self.learnable:
self.save_pnf_checkpoint()
def load_pnf_checkpoint(self, checkpoint_name):
checkpoint = torch.load(os.path.join(self.base_exp_dir, 'pnf_checkpoints', checkpoint_name), map_location=self.device)
self.intrin_net.load_state_dict(checkpoint['intrin_net'])
self.pose_param_net.load_state_dict(checkpoint['pose_param_net'])
self.optimizer_focal.load_state_dict(checkpoint['optimizer_focal'])
self.optimizer_pose.load_state_dict(checkpoint['optimizer_pose'])
self.poses_iter_step = checkpoint['poses_iter_step']
def save_pnf_checkpoint(self):
pnf_checkpoint = {
'intrin_net': self.intrin_net.state_dict(),
'pose_param_net': self.pose_param_net.state_dict(),
'optimizer_focal': self.optimizer_focal.state_dict(),
'optimizer_pose': self.optimizer_pose.state_dict(),
'poses_iter_step': self.poses_iter_step,
}
os.makedirs(os.path.join(self.base_exp_dir, 'pnf_checkpoints'), exist_ok=True)
torch.save(pnf_checkpoint, os.path.join(self.base_exp_dir, 'pnf_checkpoints', 'pnf_{:0>6d}.pth'.format(self.iter_step)))
def store_current_pose(self):
        self.pose_param_net.eval()
        num_cams = self.pose_param_net.module.num_cams if isinstance(self.pose_param_net, torch.nn.DataParallel) else self.pose_param_net.num_cams
        c2w_list = []
        for i in range(num_cams):
            c2w = self.pose_param_net(i)  # (4, 4)
c2w_list.append(c2w)
c2w_list = torch.stack(c2w_list) # (N, 4, 4)
c2w_list = c2w_list.detach().cpu().numpy()
np.save(os.path.join(self.base_exp_dir, 'cam_poses', 'pose_{:0>6d}.npy'.format(self.iter_step)), c2w_list)
return
def validate_image(self, idx=-1, resolution_level=-1):
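        # Render one validation view (color and accumulated normals) for a single camera,
        # processing the rays in batches, and write it side by side with the ground-truth image.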
if idx < 0:
idx = np.random.randint(self.dataset.n_images)
print('Validate: iter: {}, camera: {}'.format(self.iter_step, idx))
if resolution_level < 0:
resolution_level = self.validate_resolution_level
# rays_o, rays_d = self.dataset.gen_rays_at(idx, resolution_level=resolution_level)
rays_o, rays_d = self.rays_generator.gen_rays_at(idx, resolution_level=resolution_level)
H, W, _ = rays_o.shape
rays_o = rays_o.reshape(-1, 3).split(self.batch_size)
rays_d = rays_d.reshape(-1, 3).split(self.batch_size)
out_rgb_fine = []
out_normal_fine = []
for rays_o_batch, rays_d_batch in zip(rays_o, rays_d):
near, far = self.dataset.near_far_from_sphere(rays_o_batch, rays_d_batch)
background_rgb = torch.ones([1, 3]) if self.use_white_bkgd else None
render_out = self.renderer.render(rays_o_batch,
rays_d_batch,
near,
far,
cos_anneal_ratio=self.get_cos_anneal_ratio(),
background_rgb=background_rgb)
def feasible(key): return (key in render_out) and (render_out[key] is not None)
if feasible('color_fine'):
out_rgb_fine.append(render_out['color_fine'].detach().cpu().numpy())
if feasible('gradients') and feasible('weights'):
n_samples = self.renderer.n_samples + self.renderer.n_importance
normals = render_out['gradients'] * render_out['weights'][:, :n_samples, None]
if feasible('inside_sphere'):
normals = normals * render_out['inside_sphere'][..., None]
normals = normals.sum(dim=1).detach().cpu().numpy()
out_normal_fine.append(normals)
del render_out
img_fine = None
if len(out_rgb_fine) > 0:
img_fine = (np.concatenate(out_rgb_fine, axis=0).reshape([H, W, 3, -1]) * 256).clip(0, 255)
normal_img = None
if len(out_normal_fine) > 0:
normal_img = np.concatenate(out_normal_fine, axis=0)
rot = np.linalg.inv(self.dataset.pose_all[idx, :3, :3].detach().cpu().numpy())
normal_img = (np.matmul(rot[None, :, :], normal_img[:, :, None])
.reshape([H, W, 3, -1]) * 128 + 128).clip(0, 255)
os.makedirs(os.path.join(self.base_exp_dir, 'validations_fine'), exist_ok=True)
os.makedirs(os.path.join(self.base_exp_dir, 'normals'), exist_ok=True)
for i in range(img_fine.shape[-1]):
if len(out_rgb_fine) > 0:
cv.imwrite(os.path.join(self.base_exp_dir,
'validations_fine',
'{:0>8d}_{}_{}.png'.format(self.iter_step, i, idx)),
np.concatenate([img_fine[..., i],
self.rays_generator.image_at(idx, resolution_level=resolution_level)]))
# self.dataset.image_at(idx, resolution_level=resolution_level)]))
if len(out_normal_fine) > 0:
cv.imwrite(os.path.join(self.base_exp_dir,
'normals',
'{:0>8d}_{}_{}.png'.format(self.iter_step, i, idx)),
normal_img[..., i])
def render_novel_image(self, idx_0, idx_1, ratio, resolution_level):
"""
Interpolate view between two cameras.
"""
# rays_o, rays_d = self.dataset.gen_rays_between(idx_0, idx_1, ratio, resolution_level=resolution_level)
rays_o, rays_d = self.rays_generator.gen_rays_between(idx_0, idx_1, ratio, resolution_level=resolution_level)
H, W, _ = rays_o.shape
rays_o = rays_o.reshape(-1, 3).split(self.batch_size)
rays_d = rays_d.reshape(-1, 3).split(self.batch_size)
out_rgb_fine = []
for rays_o_batch, rays_d_batch in zip(rays_o, rays_d):
near, far = self.dataset.near_far_from_sphere(rays_o_batch, rays_d_batch)
background_rgb = torch.ones([1, 3]) if self.use_white_bkgd else None
render_out = self.renderer.render(rays_o_batch,
rays_d_batch,
near,
far,
cos_anneal_ratio=self.get_cos_anneal_ratio(),
background_rgb=background_rgb)
out_rgb_fine.append(render_out['color_fine'].detach().cpu().numpy())
del render_out
img_fine = (np.concatenate(out_rgb_fine, axis=0).reshape([H, W, 3]) * 256).clip(0, 255).astype(np.uint8)
return img_fine
def get_gt_poses(self, cameras_sphere, cam_num, color=None, length=0.5):
from vis_cam_traj import draw_camera_frustum_geometry
if color is None:
color = np.random.rand(1, 3)
camera_dict = np.load(cameras_sphere)
intrinsics_all = []
pose_all = []
for idx in range(cam_num):
scale_mat = camera_dict['scale_mat_%d' % idx].astype(np.float32)
world_mat = camera_dict['world_mat_%d' % idx].astype(np.float32)
P = world_mat @ scale_mat
P = P[:3, :4]
intrinsics, pose = load_K_Rt_from_P(None, P)
intrinsics_all.append(intrinsics.astype(np.float32))
pose_all.append(pose.astype(np.float32))
c2w_gt = np.array(pose_all)
fx_gt = intrinsics_all[0][0, 0]
gt_color = np.array([color], dtype=np.float32)
gt_color = np.tile(gt_color, (cam_num, 1))
gt_est_list = draw_camera_frustum_geometry(c2w_gt, self.dataset.H, self.dataset.W,
fx_gt, fx_gt,
length, gt_color)
return gt_est_list
def show_cam_pose(self, iter_show=-1, random_color=True):
import open3d as o3d
from vis_cam_traj import draw_camera_frustum_geometry
cam_num = 33
# cam_num = self.dataset.n_images
'''Get focal'''
fxfy = self.intrin_net(0).cpu().detach().numpy()[0][0]
print('learned cam intrinsics:')
print('fxfy', fxfy)
'''Get all poses in (N, 4, 4)'''
c2ws_est = torch.stack([self.pose_param_net(i) for i in range(cam_num)]) # (N, 4, 4)
'''Frustum properties'''
frustum_length = 0.5
random_color = random_color
all_color = np.random.rand(3, 3)
if random_color:
frustum_color = np.random.rand(cam_num, 3)
else:
# frustum_color = np.array([[249, 65, 68]], dtype=np.float32) / 255
frustum_color = np.array([all_color[0]], dtype=np.float32)
frustum_color = np.tile(frustum_color, (cam_num, 1))
'''Get frustums'''
frustum_est_list = draw_camera_frustum_geometry(c2ws_est.cpu().detach().cpu().numpy(), self.dataset.H, self.dataset.W,
fxfy, fxfy,
frustum_length, frustum_color)
# init poses
c2w_init = self.dataset.pose_all
fx_init = self.dataset.focal.cpu().detach()
init_color = np.array([all_color[1]], dtype=np.float32)
init_color = np.tile(init_color, (cam_num, 1))
init_est_list = draw_camera_frustum_geometry(c2w_init.cpu().detach().cpu().numpy(), self.dataset.H, self.dataset.W,
fx_init, fx_init,
frustum_length, init_color)
# gt poses
gt_est_list = self.get_gt_poses(os.path.join('./exp/teeth_noise', 'cameras_sphere.npz'), cam_num, color=all_color[2], length=frustum_length)
geometry_to_draw = []
geometry_to_draw.append(frustum_est_list)
geometry_to_draw.append(init_est_list)
geometry_to_draw.append(gt_est_list)
# mesh
mesh = o3d.io.read_triangle_mesh(os.path.join(self.base_exp_dir, 'meshes', '{:0>8d}.ply'.format(iter_show)))
mesh.compute_vertex_normals()
geometry_to_draw.append(mesh)
o3d.visualization.draw_geometries(geometry_to_draw)
def validate_mesh(self, world_space=False, resolution=256, threshold=0.0):
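        # Extract a triangle mesh from the learned SDF inside the dataset's object bounding
        # box at the given level-set threshold and export it as a .ply file.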
bound_min = torch.tensor(self.dataset.object_bbox_min, dtype=torch.float32)
bound_max = torch.tensor(self.dataset.object_bbox_max, dtype=torch.float32)
vertices, triangles =\
self.renderer.extract_geometry(bound_min, bound_max, resolution=resolution, threshold=threshold)
os.makedirs(os.path.join(self.base_exp_dir, 'meshes'), exist_ok=True)
if world_space:
vertices = vertices * self.dataset.scale_mats_np[0][0, 0] + self.dataset.scale_mats_np[0][:3, 3][None]
mesh = trimesh.Trimesh(vertices, triangles)
mesh.export(os.path.join(self.base_exp_dir, 'meshes', '{:0>8d}.ply'.format(self.iter_step)))
logging.info('End')
def interpolate_view(self, img_idx_0, img_idx_1):
images = []
n_frames = 60
for i in range(n_frames):
print(i)
images.append(self.render_novel_image(img_idx_0,
img_idx_1,
np.sin(((i / n_frames) - 0.5) * np.pi) * 0.5 + 0.5,
resolution_level=4))
for i in range(n_frames):
images.append(images[n_frames - i - 1])
fourcc = cv.VideoWriter_fourcc(*'mp4v')
video_dir = os.path.join(self.base_exp_dir, 'render')
os.makedirs(video_dir, exist_ok=True)
h, w, _ = images[0].shape
writer = cv.VideoWriter(os.path.join(video_dir,
'{:0>8d}_{}_{}.mp4'.format(self.iter_step, img_idx_0, img_idx_1)),
fourcc, 30, (w, h))
for image in images:
writer.write(image)
writer.release()
if __name__ == '__main__':
print('Hello Wooden')
torch.set_default_tensor_type('torch.cuda.FloatTensor')
FORMAT = "[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s"
logging.basicConfig(level=logging.DEBUG, format=FORMAT)
parser = argparse.ArgumentParser()
parser.add_argument('--conf', type=str, default='./confs/base.conf')
parser.add_argument('--mode', type=str, default='train')
parser.add_argument('--mcube_threshold', type=float, default=0.0)
parser.add_argument('--is_continue', default=False, action="store_true")
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--case', type=str, default='')
args = parser.parse_args()
torch.cuda.set_device(args.gpu)
runner = Runner(args.conf, args.mode, args.case, args.is_continue)
if args.mode == 'train':
runner.train()
elif args.mode == 'validate_mesh':
runner.validate_mesh(world_space=True, resolution=512, threshold=args.mcube_threshold)
elif args.mode.startswith('interpolate'): # Interpolate views given two image indices
_, img_idx_0, img_idx_1 = args.mode.split('_')
img_idx_0 = int(img_idx_0)
img_idx_1 = int(img_idx_1)
runner.interpolate_view(img_idx_0, img_idx_1)
elif args.mode.startswith('showcam'):
_, iter_show = args.mode.split('_')
runner.load_pnf_checkpoint(('pnf_{:0>6d}.pth').format(int(iter_show)))
runner.show_cam_pose(int(iter_show))
| 47.590615 | 180 | 0.605352 | 27,335 | 0.929414 | 0 | 0 | 0 | 0 | 0 | 0 | 3,860 | 0.131243 |
9e44b7345e9261d66e37f31753ad1afb6577bc5f | 2,007 | py | Python | code/video-analiz/python/camshift.py | BASARIRR/computer-vision-guide | 0a11726fb2be0cad63738ab45fd4edc4515441d2 | [
"MIT"
]
| 230 | 2019-01-17T01:00:53.000Z | 2022-03-31T18:00:09.000Z | code/video-analiz/python/camshift.py | sturlu/goruntu-isleme-kilavuzu | e9377ace3823ca5f2d06ca78a11884256539134d | [
"MIT"
]
| 8 | 2019-05-03T07:44:50.000Z | 2022-02-10T00:14:38.000Z | code/video-analiz/python/camshift.py | sturlu/goruntu-isleme-kilavuzu | e9377ace3823ca5f2d06ca78a11884256539134d | [
"MIT"
]
| 71 | 2019-01-17T12:11:06.000Z | 2022-03-03T22:02:46.000Z | #Python v3, OpenCV v3.4.2
import numpy as np
import cv2
videoCapture = cv2.VideoCapture("video.mp4")
ret,camera_input = videoCapture.read()
rows, cols = camera_input.shape[:2]
'''
A region on the video is selected for Mean Shift.
These coordinates define the rectangle whose weighted average will be computed. '''
# You can observe how the results change by adjusting the w and h sizes
w = 10
h = 15
col = int((cols - w) / 2)
row = int((rows - h) / 2)
shiftWindow = (col, row, w, h)
'''
Now build a mask region to balance the brightness and the color distribution of the image,
and compute a normalized histogram over this region
'''
roi = camera_input[row:row + h, col:col + w]
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_roi, np.array((0., 60.,32.)), np.array((180.,255.,255.)))
histogram = cv2.calcHist([hsv_roi],[0],mask,[180],[0,180])
cv2.normalize(histogram,histogram,0,255,cv2.NORM_MINMAX)
'''
This parameter / termination criterion determines how many times the algorithm
repeats its internal shift/computation step.
'''
term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )
while True:
    # Read one frame from the video
    ret, camera_input = videoCapture.read()
    if not ret:
        # Stop once the video has no frames left
        break
    '''
    Within the video we first take a histogram in the HSV color space, apply histogram
    back projection, and locate the segments of the region we want over the whole image.
    '''
    hsv = cv2.cvtColor(camera_input, cv2.COLOR_BGR2HSV)
    dst = cv2.calcBackProject([hsv],[0],histogram,[0,180],1)
    # CamShift (continuous mean shift) is applied again for every new position
    ret, shiftWindow = cv2.CamShift(dst, shiftWindow, term_crit)
    # Draw the detected region on the image
    pts = cv2.boxPoints(ret)
    pts = np.int0(pts)
    result_image = cv2.polylines(camera_input,[pts],True, 255,2)
    cv2.imshow('CamShift (Continuous Mean Shift) Algorithm', result_image)
    k = cv2.waitKey(60) & 0xff
    if k == 27:
        # Exit when ESC is pressed
        break
videoCapture.release()
cv2.destroyAllWindows() | 32.901639 | 125 | 0.727454 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 964 | 0.463239 |
9e459ba91afb3134b739b9c40e6c311ac98e5335 | 346 | py | Python | DTT_files/dtt.py | stecik/Directory_to_text | f93c76f820ff7dc39e213779115861e53ed6a266 | [
"MIT"
]
| null | null | null | DTT_files/dtt.py | stecik/Directory_to_text | f93c76f820ff7dc39e213779115861e53ed6a266 | [
"MIT"
]
| null | null | null | DTT_files/dtt.py | stecik/Directory_to_text | f93c76f820ff7dc39e213779115861e53ed6a266 | [
"MIT"
]
| null | null | null | from dtt_class import DTT
from parser import args
if __name__ == "__main__":
dtt = DTT()
# Creates a list of files and subdirectories
try:
l = dtt.dir_to_list(args.directory, args)
# Creates a .txt file with the list
dtt.list_to_txt(args.output_file, l)
except Exception as e:
print(f"Error: {e}") | 28.833333 | 49 | 0.644509 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 102 | 0.294798 |
9e45b73d08315aaa5770ad5f620934e0e80ebd70 | 1,675 | py | Python | src/models/head.py | takedarts/DenseResNet | d5f9c143ed3c484436a2a5bac366c3795e5d47ec | [
"MIT"
]
| null | null | null | src/models/head.py | takedarts/DenseResNet | d5f9c143ed3c484436a2a5bac366c3795e5d47ec | [
"MIT"
]
| null | null | null | src/models/head.py | takedarts/DenseResNet | d5f9c143ed3c484436a2a5bac366c3795e5d47ec | [
"MIT"
]
| null | null | null | import torch.nn as nn
import collections
class BasicHead(nn.Sequential):
def __init__(self, in_channels, out_channels, **kwargs):
super().__init__()
class PreActHead(nn.Sequential):
def __init__(self, in_channels, out_channels, normalization, activation, **kwargs):
super().__init__(collections.OrderedDict(m for m in [
('norm', normalization(in_channels)),
('act', activation(inplace=True)),
] if m[1] is not None))
class MobileNetV2Head(nn.Sequential):
def __init__(self, in_channels, out_channels, normalization, activation, **kwargs):
super().__init__(collections.OrderedDict(m for m in [
('conv', nn.Conv2d(
in_channels, out_channels, kernel_size=1, padding=0, stride=1, bias=False)),
('norm', normalization(out_channels)),
('act', activation(inplace=True)),
] if m[1] is not None))
class MobileNetV3Head(nn.Sequential):
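    # MobileNetV3-style head: a 1x1 conv to 0.75x the output width, global average
    # pooling, then a final 1x1 conv (with bias) expanding to `out_channels`.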
def __init__(self, in_channels, out_channels, normalization, activation, **kwargs):
channels = round(out_channels * 0.75)
super().__init__(collections.OrderedDict(m for m in [
('conv1', nn.Conv2d(
in_channels, channels, kernel_size=1, padding=0, stride=1, bias=False)),
('norm1', normalization(channels)),
('act1', activation(inplace=True)),
('pool', nn.AdaptiveAvgPool2d(1)),
('conv2', nn.Conv2d(
channels, out_channels, kernel_size=1, padding=0, stride=1, bias=True)),
('act2', activation(inplace=True)),
] if m[1] is not None))
| 36.413043 | 93 | 0.605373 | 1,608 | 0.96 | 0 | 0 | 0 | 0 | 0 | 0 | 67 | 0.04 |
9e4644db01b6aad4460e509e0a9d08dada56a727 | 42 | py | Python | errorpro/exceptions.py | benti/Error-Pypagation | 108feddc58a705da82fe6fdce658b419b589b533 | [
"BSD-3-Clause"
]
| null | null | null | errorpro/exceptions.py | benti/Error-Pypagation | 108feddc58a705da82fe6fdce658b419b589b533 | [
"BSD-3-Clause"
]
| null | null | null | errorpro/exceptions.py | benti/Error-Pypagation | 108feddc58a705da82fe6fdce658b419b589b533 | [
"BSD-3-Clause"
]
| null | null | null | class DimensionError(Exception):
pass
| 14 | 32 | 0.761905 | 41 | 0.97619 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
9e47088047a050a5c1880fb84b394c06ebc4af2c | 968 | py | Python | application.py | nicolas-van/startup_asgard_app | acbb706256214f6758de9db92ff2988cee62c8ff | [
"MIT"
]
| null | null | null | application.py | nicolas-van/startup_asgard_app | acbb706256214f6758de9db92ff2988cee62c8ff | [
"MIT"
]
| null | null | null | application.py | nicolas-van/startup_asgard_app | acbb706256214f6758de9db92ff2988cee62c8ff | [
"MIT"
]
| null | null | null |
from __future__ import unicode_literals, print_function, absolute_import
import flask
import os
import os.path
import json
import sjoh.flask
import logging
import asgard
app = asgard.Asgard(__name__, flask_parameters={"static_folder": None})
# load configuration about files and folders
folder = os.path.dirname(__file__)
fc = os.path.join(folder, "filesconfig.json")
with open(fc, "rb") as file_:
fc_content = file_.read().decode("utf8")
files_config = json.loads(fc_content)
# register static folders
for s in files_config["static_folders"]:
def gen_fct(folder):
def static_route(path):
return flask.send_from_directory(folder, path)
return static_route
route = "/" + s + "/<path:path>"
app.web_app.add_url_rule(route, "static:"+s, gen_fct(s))
@app.web_app.route("/")
def main():
return flask.render_template("index.html", files_config=files_config)
@app.web_app.json("/hello")
def hello():
return "Hello"
| 25.473684 | 73 | 0.722107 | 0 | 0 | 0 | 0 | 168 | 0.173554 | 0 | 0 | 184 | 0.190083 |
9e470dc0299f2bc08dbfaf73e95ab549a126fe53 | 414 | py | Python | build/lib/tests/visualizer_test.py | eltoto1219/vltk | e84c0efe9062eb864604d96345f71483816340aa | [
"Apache-2.0"
]
| null | null | null | build/lib/tests/visualizer_test.py | eltoto1219/vltk | e84c0efe9062eb864604d96345f71483816340aa | [
"Apache-2.0"
]
| null | null | null | build/lib/tests/visualizer_test.py | eltoto1219/vltk | e84c0efe9062eb864604d96345f71483816340aa | [
"Apache-2.0"
]
| null | null | null | import io
import os
import unittest
import numpy as np
from PIL import Image
from vltk import SingleImageViz
PATH = os.path.dirname(os.path.realpath(__file__))
URL = "https://raw.githubusercontent.com/airsplay/py-bottom-up-attention/master/demo/data/images/input.jpg"
class TestVisaulizer(unittest.TestCase):
url = URL
def test_viz(self):
viz = SingleImageViz(self.url)
viz.show()
| 18 | 107 | 0.731884 | 138 | 0.333333 | 0 | 0 | 0 | 0 | 0 | 0 | 101 | 0.243961 |
9e473c9d126543858d93cd7cc38a1863415d85a8 | 3,419 | py | Python | siam_tracker/models/train_wrappers/pairwise_wrapper.py | microsoft/PySiamTracking | a82dabeaa42a7816dbd8e823da7b7e92ebb622ce | [
"MIT"
]
| 28 | 2020-03-18T04:41:21.000Z | 2022-02-24T16:44:01.000Z | siam_tracker/models/train_wrappers/pairwise_wrapper.py | HengFan2010/PySiamTracking | a82dabeaa42a7816dbd8e823da7b7e92ebb622ce | [
"MIT"
]
| 1 | 2020-04-05T15:23:22.000Z | 2020-04-07T16:23:12.000Z | siam_tracker/models/train_wrappers/pairwise_wrapper.py | HengFan2010/PySiamTracking | a82dabeaa42a7816dbd8e823da7b7e92ebb622ce | [
"MIT"
]
| 11 | 2020-03-19T00:30:06.000Z | 2021-11-10T08:22:35.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import torch
from collections import OrderedDict
from ..builder import build_tracker, TRAIN_WRAPPERS
from ...datasets import TrainPairDataset, build_dataloader
from ...runner import Runner
from ...utils.parallel import MMDataParallel
from ...utils import load_checkpoint
@TRAIN_WRAPPERS.register_module
class PairwiseWrapper(object):
def __init__(self,
train_cfg,
model_cfg,
work_dir,
log_level,
resume_from=None,
gpus=1):
""" Training a tracker by image pairs. This is the most common strategy to train a
siamese-network-based tracker. Generally, two images are randomly sampled from the
dataset, one for template image (z_img) and another for search region (x_img). The
tracker model needs to locate the target object in search region.
"""
self.model_cfg = model_cfg
self.train_cfg = train_cfg
# Step 1, build the tracker model.
model = build_tracker(model_cfg, is_training=True, train_cfg=train_cfg, test_cfg=None)
if resume_from is not None:
load_checkpoint(model, resume_from)
model = MMDataParallel(model, device_ids=list(range(gpus))).cuda()
# Step 2, build image-pair datasets
train_dataset = TrainPairDataset(train_cfg.train_data)
self.data_loaders = build_dataloader(train_dataset,
train_cfg.samples_per_gpu,
train_cfg.workers_per_gpu,
num_gpus=gpus)
# Step 3, build a training runner
# build runner
self.runner = Runner(model, self.batch_processor, train_cfg.optimizer, work_dir, log_level)
self.runner.register_training_hooks(train_cfg.lr_config, train_cfg.optimizer_config,
train_cfg.checkpoint_config, train_cfg.log_config)
if 'status_config' in train_cfg and train_cfg['status_config'] is not None:
self.runner.register_status_hook(train_cfg['status_config'])
def run(self):
self.runner.run(self.data_loaders,
self.train_cfg.workflow,
self.train_cfg.total_epochs)
@staticmethod
def batch_processor(model, data, train_mode):
losses = model(**data)
loss, log_vars = PairwiseWrapper.parse_losses(losses)
outputs = dict(
loss=loss, log_vars=log_vars, num_samples=len(data['z_imgs'].data))
return outputs
@staticmethod
def parse_losses(losses):
log_vars = OrderedDict()
for loss_name, loss_value in losses.items():
if isinstance(loss_value, torch.Tensor):
log_vars[loss_name] = loss_value.mean()
elif isinstance(loss_value, list):
log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
else:
raise TypeError(
'{} is not a tensor or list of tensors'.format(loss_name))
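        # the returned total loss sums every entry whose key contains 'loss';
        # the remaining entries are kept in log_vars for logging only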
loss = sum(_value for _key, _value in log_vars.items() if 'loss' in _key)
log_vars['loss'] = loss
for name in log_vars:
log_vars[name] = log_vars[name].item()
return loss, log_vars
| 38.852273 | 99 | 0.623867 | 3,017 | 0.882422 | 0 | 0 | 3,049 | 0.891781 | 0 | 0 | 662 | 0.193624 |
9e477dd3df7f5df09267317cd3bfe78b579ab14e | 212 | py | Python | coaster/views/__init__.py | AferriDaniel/coaster | 3ffbc9d33c981284593445299aaee0c3cc0cdb0b | [
"BSD-2-Clause"
]
| 48 | 2015-01-15T08:57:24.000Z | 2022-01-26T04:04:34.000Z | coaster/views/__init__.py | AferriDaniel/coaster | 3ffbc9d33c981284593445299aaee0c3cc0cdb0b | [
"BSD-2-Clause"
]
| 169 | 2015-01-16T13:17:38.000Z | 2021-05-31T13:23:23.000Z | coaster/views/__init__.py | AferriDaniel/coaster | 3ffbc9d33c981284593445299aaee0c3cc0cdb0b | [
"BSD-2-Clause"
]
| 17 | 2015-02-15T07:39:04.000Z | 2021-10-05T11:20:22.000Z | """
View helpers
============
Coaster provides classes, functions and decorators for common scenarios in view
handlers.
"""
# flake8: noqa
from .classview import *
from .decorators import *
from .misc import *
| 16.307692 | 79 | 0.707547 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 138 | 0.650943 |
9e481ccd75d0d45dc38668e3abc95311f9633891 | 1,429 | py | Python | socialdistribution/profiles/migrations/0009_auto_20200308_0539.py | um4r12/CMPUT404-project-socialdistribution | 54778371d1f6537370562de4ba4e4efe3288f95d | [
"Apache-2.0"
]
| null | null | null | socialdistribution/profiles/migrations/0009_auto_20200308_0539.py | um4r12/CMPUT404-project-socialdistribution | 54778371d1f6537370562de4ba4e4efe3288f95d | [
"Apache-2.0"
]
| null | null | null | socialdistribution/profiles/migrations/0009_auto_20200308_0539.py | um4r12/CMPUT404-project-socialdistribution | 54778371d1f6537370562de4ba4e4efe3288f95d | [
"Apache-2.0"
]
| null | null | null | # Generated by Django 2.1.5 on 2020-03-08 05:39
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('profiles', '0008_auto_20200308_0535'),
]
operations = [
migrations.CreateModel(
name='AuthorFriendRequest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('request_accepted', models.BooleanField(default=False)),
('author', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='AuthorFriendRequest_author', to='profiles.Author')),
('friend', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='AuthorFriendRequest_friend', to='profiles.Author')),
],
),
migrations.AlterField(
model_name='authorfriend',
name='author',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='AuthorFriend_author', to='profiles.Author'),
),
migrations.AlterField(
model_name='authorfriend',
name='friend',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='AuthorFriend_friend', to='profiles.Author'),
),
]
| 42.029412 | 167 | 0.650805 | 1,303 | 0.911826 | 0 | 0 | 0 | 0 | 0 | 0 | 355 | 0.248425 |
9e491ac31491040fbc01015d8b5c1a03d71d8961 | 377 | py | Python | src/edeposit/amqp/rest/structures/__init__.py | edeposit/edeposit.rest | ecb1c00f7c156e1ed2000a0b68a3e4da506e7992 | [
"MIT"
]
| 1 | 2015-12-10T13:30:22.000Z | 2015-12-10T13:30:22.000Z | src/edeposit/amqp/rest/structures/__init__.py | edeposit/edeposit.rest | ecb1c00f7c156e1ed2000a0b68a3e4da506e7992 | [
"MIT"
]
| 33 | 2015-10-06T16:02:13.000Z | 2015-12-10T15:00:04.000Z | src/edeposit/amqp/rest/structures/__init__.py | edeposit/edeposit.rest | ecb1c00f7c156e1ed2000a0b68a3e4da506e7992 | [
"MIT"
]
| null | null | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
from incomming import CacheTick
from incomming import SaveLogin
from incomming import RemoveLogin
from incomming import StatusUpdate
from outgoing import UploadRequest
from outgoing import AfterDBCleanupRequest
| 26.928571 | 79 | 0.649867 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 159 | 0.421751 |
9e4940a9f3cc370e790b4e7a714aac9bb4e6baa7 | 9,446 | py | Python | accelbyte_py_sdk/api/platform/wrappers/_anonymization.py | AccelByte/accelbyte-python-sdk | dcd311fad111c59da828278975340fb92e0f26f7 | [
"MIT"
]
| null | null | null | accelbyte_py_sdk/api/platform/wrappers/_anonymization.py | AccelByte/accelbyte-python-sdk | dcd311fad111c59da828278975340fb92e0f26f7 | [
"MIT"
]
| 1 | 2021-10-13T03:46:58.000Z | 2021-10-13T03:46:58.000Z | accelbyte_py_sdk/api/platform/wrappers/_anonymization.py | AccelByte/accelbyte-python-sdk | dcd311fad111c59da828278975340fb92e0f26f7 | [
"MIT"
]
| null | null | null | # Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: justice_py_sdk_codegen/__main__.py
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from typing import Any, Dict, List, Optional, Tuple, Union
from ....core import HeaderStr
from ....core import get_namespace as get_services_namespace
from ....core import run_request
from ....core import run_request_async
from ....core import same_doc_as
from ..operations.anonymization import AnonymizeCampaign
from ..operations.anonymization import AnonymizeEntitlement
from ..operations.anonymization import AnonymizeFulfillment
from ..operations.anonymization import AnonymizeIntegration
from ..operations.anonymization import AnonymizeOrder
from ..operations.anonymization import AnonymizePayment
from ..operations.anonymization import AnonymizeSubscription
from ..operations.anonymization import AnonymizeWallet
@same_doc_as(AnonymizeCampaign)
def anonymize_campaign(user_id: str, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
if namespace is None:
namespace, error = get_services_namespace()
if error:
return None, error
request = AnonymizeCampaign.create(
user_id=user_id,
namespace=namespace,
)
return run_request(request, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(AnonymizeCampaign)
async def anonymize_campaign_async(user_id: str, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
if namespace is None:
namespace, error = get_services_namespace()
if error:
return None, error
request = AnonymizeCampaign.create(
user_id=user_id,
namespace=namespace,
)
return await run_request_async(request, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(AnonymizeEntitlement)
def anonymize_entitlement(user_id: str, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
if namespace is None:
namespace, error = get_services_namespace()
if error:
return None, error
request = AnonymizeEntitlement.create(
user_id=user_id,
namespace=namespace,
)
return run_request(request, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(AnonymizeEntitlement)
async def anonymize_entitlement_async(user_id: str, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
if namespace is None:
namespace, error = get_services_namespace()
if error:
return None, error
request = AnonymizeEntitlement.create(
user_id=user_id,
namespace=namespace,
)
return await run_request_async(request, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(AnonymizeFulfillment)
def anonymize_fulfillment(user_id: str, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
if namespace is None:
namespace, error = get_services_namespace()
if error:
return None, error
request = AnonymizeFulfillment.create(
user_id=user_id,
namespace=namespace,
)
return run_request(request, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(AnonymizeFulfillment)
async def anonymize_fulfillment_async(user_id: str, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
if namespace is None:
namespace, error = get_services_namespace()
if error:
return None, error
request = AnonymizeFulfillment.create(
user_id=user_id,
namespace=namespace,
)
return await run_request_async(request, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(AnonymizeIntegration)
def anonymize_integration(user_id: str, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
if namespace is None:
namespace, error = get_services_namespace()
if error:
return None, error
request = AnonymizeIntegration.create(
user_id=user_id,
namespace=namespace,
)
return run_request(request, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(AnonymizeIntegration)
async def anonymize_integration_async(user_id: str, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
if namespace is None:
namespace, error = get_services_namespace()
if error:
return None, error
request = AnonymizeIntegration.create(
user_id=user_id,
namespace=namespace,
)
return await run_request_async(request, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(AnonymizeOrder)
def anonymize_order(user_id: str, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
if namespace is None:
namespace, error = get_services_namespace()
if error:
return None, error
request = AnonymizeOrder.create(
user_id=user_id,
namespace=namespace,
)
return run_request(request, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(AnonymizeOrder)
async def anonymize_order_async(user_id: str, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
if namespace is None:
namespace, error = get_services_namespace()
if error:
return None, error
request = AnonymizeOrder.create(
user_id=user_id,
namespace=namespace,
)
return await run_request_async(request, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(AnonymizePayment)
def anonymize_payment(user_id: str, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
if namespace is None:
namespace, error = get_services_namespace()
if error:
return None, error
request = AnonymizePayment.create(
user_id=user_id,
namespace=namespace,
)
return run_request(request, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(AnonymizePayment)
async def anonymize_payment_async(user_id: str, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
if namespace is None:
namespace, error = get_services_namespace()
if error:
return None, error
request = AnonymizePayment.create(
user_id=user_id,
namespace=namespace,
)
return await run_request_async(request, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(AnonymizeSubscription)
def anonymize_subscription(user_id: str, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
if namespace is None:
namespace, error = get_services_namespace()
if error:
return None, error
request = AnonymizeSubscription.create(
user_id=user_id,
namespace=namespace,
)
return run_request(request, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(AnonymizeSubscription)
async def anonymize_subscription_async(user_id: str, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
if namespace is None:
namespace, error = get_services_namespace()
if error:
return None, error
request = AnonymizeSubscription.create(
user_id=user_id,
namespace=namespace,
)
return await run_request_async(request, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(AnonymizeWallet)
def anonymize_wallet(user_id: str, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
if namespace is None:
namespace, error = get_services_namespace()
if error:
return None, error
request = AnonymizeWallet.create(
user_id=user_id,
namespace=namespace,
)
return run_request(request, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(AnonymizeWallet)
async def anonymize_wallet_async(user_id: str, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
if namespace is None:
namespace, error = get_services_namespace()
if error:
return None, error
request = AnonymizeWallet.create(
user_id=user_id,
namespace=namespace,
)
return await run_request_async(request, additional_headers=x_additional_headers, **kwargs)
| 37.935743 | 151 | 0.72401 | 0 | 0 | 0 | 0 | 7,866 | 0.832733 | 3,766 | 0.398687 | 787 | 0.083316 |
9e49cf2dc6f50772b3945f19de0ff48e7f6c2734 | 358 | py | Python | backend/api/serializers.py | vingle1/RestaurantProject | 5106a7662f26324ef50eebcfcba673960dff1734 | [
"MIT"
]
| null | null | null | backend/api/serializers.py | vingle1/RestaurantProject | 5106a7662f26324ef50eebcfcba673960dff1734 | [
"MIT"
]
| 1 | 2017-12-10T18:12:38.000Z | 2017-12-10T18:12:38.000Z | backend/api/serializers.py | vingle1/RestaurantProject | 5106a7662f26324ef50eebcfcba673960dff1734 | [
"MIT"
]
| 2 | 2017-10-31T20:48:04.000Z | 2017-11-30T04:05:36.000Z |
from django.contrib.auth.models import User, Group
from rest_framework import serializers
from rest_framework_json_api.relations import *
# load django and webapp models
#from django.contrib.auth.models import *
from api.models import *
class FmenuSerializer(serializers.ModelSerializer):
class Meta:
model = Fmenu
fields = '__all__'
| 22.375 | 51 | 0.765363 | 116 | 0.324022 | 0 | 0 | 0 | 0 | 0 | 0 | 80 | 0.223464 |
9e4d5fb0fa81e143693d4b850e79279a83dcb058 | 622 | py | Python | preprocessed_data/RGHS/Code/S_model.py | SaiKrishna1207/Underwater-Image-Segmentation | 78def27e577b10e6722c02807bdcfeb7ba53d760 | [
"MIT"
]
| null | null | null | preprocessed_data/RGHS/Code/S_model.py | SaiKrishna1207/Underwater-Image-Segmentation | 78def27e577b10e6722c02807bdcfeb7ba53d760 | [
"MIT"
]
| null | null | null | preprocessed_data/RGHS/Code/S_model.py | SaiKrishna1207/Underwater-Image-Segmentation | 78def27e577b10e6722c02807bdcfeb7ba53d760 | [
"MIT"
]
| null | null | null | import numpy as np
import pylab as pl
x = [] # Make an array of x values
y = [] # Make an array of y values for each x value
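# the curve plotted below is y = x * 2**(1 - |x|/128), an S-shaped remapping
# of the signed 8-bit input range [-128, 127)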
for i in range(-128,127):
x.append(i)
for j in range(-128,127):
temp = j *(2**(1 - abs((j/128))))
y.append(temp)
# print('y',y)
# pl.xlim(-128, 127)# set axis limits
# pl.ylim(-128, 127)
pl.axis([-128, 127,-128, 127])
pl.title('S-model Curve Function ',fontsize=20)# give plot a title
pl.xlabel('Input Value',fontsize=20)# make axis labels
pl.ylabel('Output Value',fontsize=20)
pl.plot(x, y,color='red') # use pylab to plot x and y
pl.show() # show the plot on the screen | 23.037037 | 66 | 0.639871 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 292 | 0.469453 |
9e4db1ef4c553d26b23cdf167ecc2ec7e965d780 | 36,578 | py | Python | tools/Blender Stuff/Plugins/Gothic_MaT_Blender/1.3/Gothic_MaT_Blender_1_3.py | PhoenixTales/gothic-devk | 48193bef8fd37626f8909853bfc5ad4b7126f176 | [
"FSFAP"
]
| 3 | 2021-04-13T07:12:30.000Z | 2021-06-18T17:26:10.000Z | tools/Blender Stuff/Plugins/Gothic_MaT_Blender/1.3/Gothic_MaT_Blender_1_3.py | PhoenixTales/gothic-devk | 48193bef8fd37626f8909853bfc5ad4b7126f176 | [
"FSFAP"
]
| null | null | null | tools/Blender Stuff/Plugins/Gothic_MaT_Blender/1.3/Gothic_MaT_Blender_1_3.py | PhoenixTales/gothic-devk | 48193bef8fd37626f8909853bfc5ad4b7126f176 | [
"FSFAP"
]
| 2 | 2021-03-23T19:45:39.000Z | 2021-04-17T17:21:48.000Z | bl_info = {
"name": "Gothic Materials and Textures Blender",
"description": "Makes life easier for Gothic material export",
"author": "Diego",
"version": (1, 3, 0),
"blender": (2, 78, 0),
"location": "3D View > Tools",
"warning": "", # used for warning icon and text in addons panel
"wiki_url": "",
"tracker_url": "",
"category": "Development"
}
import bpy
# if Blender's bundled Python is not used, these packages might not be installed
try:
from mathutils import Color
except ImportError:
raise ImportError('Package mathutils needed, but not installed')
try:
import numpy
except ImportError:
raise ImportError('Package numpy needed, but not installed')
try:
import os.path
except ImportError:
raise ImportError('Package os needed, but not installed')
try:
import colorsys
except ImportError:
raise ImportError('Package colorsys needed, but not installed')
from bpy.props import (StringProperty,
BoolProperty,
IntProperty,
FloatProperty,
EnumProperty,
PointerProperty,
)
from bpy.types import (Panel,
Operator,
PropertyGroup,
)
# ------------------------------------------------------------------------
# store properties in the active scene
# ------------------------------------------------------------------------
class GothicMaterialSettings(PropertyGroup):
apply_to_selected_only = BoolProperty(
name="Only Selected Objects",
description="Affect only selected objects rather than all (unhidden) objects in the scene",
default = True
)
keep_existing_materials = BoolProperty(
name="Keep Existing Slots",
description="Keep existing material slots if their texture does not occur and only add new on top",
default = True
)
set_transparency = BoolProperty(
name="Transparency",
description="Alpha channel will affect transparency in textured view",
default = True
)
keep_portals = BoolProperty(
name="Keep Portals",
description="Do not overwrite Portal or Ghostoccluder materials",
default = True
)
matching_name = BoolProperty(
name="Use Matching Names",
description="If exists, use Gothic material with same name as UV-image, even if multiple Gothic materials use this image",
default = True
)
isolate_all_layers = BoolProperty(
name="Isolate in all Layers",
description="Isolate objects in all layers",
default = True
)
pixel_samples = IntProperty(
name = "Pixels",
description="Number of pixels taken for material color, becomes very slow for high numbers",
default = 50,
min = 1,
max = 1000
)
saturation = FloatProperty(
name = "Saturation",
        description="Makes material colors more or less saturated, 1.0 for unchanged",
default = 1.,
min = 0.,
max = 2.
)
value = FloatProperty(
        name = "Brightness",
        description="Changes material color brightness",
default = 1.,
min = 0.,
max = 2.
)
searched_material = StringProperty(
name="Material to Search",
description="",
default="unknown",
maxlen=1024,
)
ambiguous_materials = EnumProperty(
name="What Material Name for ambiguous Textures?",
description="What material name for ambiguous textures?",
items=[ ('first', "First Appearance", ""),
('last', "Last Appearance", ""),
('generic', "Generic: ambiguous1, ...", ""),
]
)
case = EnumProperty(
name="Case for Images and Textures",
description="Case-sensitivity for images and textures",
items=[ ('keep', "Keep File Case", ""),
('upper', "UPPER", ""),
('lower', "lower", ""),
]
)
matlib_filepath = StringProperty(
name="",
description="Filepath to MatLib.ini",
default="Filepath to MatLib.ini",
maxlen=1024,
subtype='FILE_PATH')
# ------------------------------------------------------------------------
# operators
# ------------------------------------------------------------------------
# hides all objects that do not have the material specified in the "searched_material" property
# optional: isolate in all layers
class GothicIsolateObjects(bpy.types.Operator):
"""Isolate all objects that use this material. Alt+H to reveal""" # blender will use this as a tooltip for menu items and buttons.
bl_idname = "object.gothic_isolate_objects" # unique identifier for buttons and menu items to reference.
bl_label = "Gothic: Isolate Objects" # display name in the interface.
bl_options = {'REGISTER'} # enable undo for the operator.
def execute(self, context): # execute() is called by blender when running the operator.
scene = context.scene
searchfor = scene.gothic_tools.searched_material
isolate_all_layers = scene.gothic_tools.isolate_all_layers
if searchfor == '':
self.report({'WARNING'}, 'No Material Specified')
return {'CANCELLED'}
matindex = bpy.data.materials.find(searchfor)
if matindex == -1:
self.report({'WARNING'}, 'Material not found')
return {'CANCELLED'}
else:
mat = bpy.data.materials[matindex]
objects_found = []
# two steps
# first: check if any objects are found
for object in bpy.data.objects:
# if this layer is not supposed to be affected skip
if not isolate_all_layers:
if not object.layers[scene.active_layer]:
continue
# if found, add to the list of found objects
for slot in object.material_slots:
try:
if slot.material == mat:
objects_found.append(object)
break
except AttributeError:
pass
# second: if so, hide + deselect all others and reveal + select themselves (in case they were hidden before)
if objects_found:
for object in bpy.data.objects:
if object in objects_found:
object.hide = False
object.select = True
else:
object.hide = True
object.select = False
self.report({'INFO'}, str(len(objects_found)) + ' objects found')
else:
self.report({'INFO'}, 'No objects found')
return {'FINISHED'} # this lets blender know the operator finished successfully.
# changes the names of all used images to their filename
# if multiple images use the same file, only one is kept
# the others will be replaced by this one
class GothicCleanImages(bpy.types.Operator):
"""Rename and replace images not named as their filename""" # blender will use this as a tooltip for menu items and buttons.
bl_idname = "context.gothic_clean_images" # unique identifier for buttons and menu items to reference.
bl_label = "Gothic: Clean Images and Textures" # display name in the interface.
bl_options = {'REGISTER'} # enable undo for the operator.
def execute(self, context): # execute() is called by blender when running the operator.
scene = context.scene
case = scene.gothic_tools.case
replaced_counter = 0
renamed_counter = 0
        # rename all images to their filename
for image in bpy.data.images:
if image.users:
filename = os.path.basename(image.filepath)
correct_index = bpy.data.images.find(filename)
if correct_index == -1:
image.name = filename
renamed_counter += 1
else:
correct_image = bpy.data.images[correct_index]
if image != correct_image:
print(image.name + ' remapped to ' + correct_image.name)
image.user_remap(correct_image)
replaced_counter +=1
# optional change to lower or upper case
for image in bpy.data.images:
if image.users:
if case.lower() == 'upper':
image.name = image.name.upper()
elif case.lower() == 'lower':
image.name = image.name.lower()
self.report({'INFO'}, str(replaced_counter) + ' unlinked, ' + str(renamed_counter) + ' renamed (except for case)')
return {'FINISHED'} # this lets blender know the operator finished successfully.
# Removes suffixes like ".001" and renames textures to image filename
# replaces materials with same name except suffixes
# keeps only one texture per image file, replaces others by this one
class GothicCleanMaterials(bpy.types.Operator):
    """Remove suffixes such as .001 from materials. Note: if an object has \"mat\" and \"mat.001\", the slots will not be merged""" # blender will use this as a tooltip for menu items and buttons.
bl_idname = "context.gothic_clean_materials" # unique identifier for buttons and menu items to reference.
bl_label = "Gothic: Clean Materials" # display name in the interface.
bl_options = {'REGISTER'} # enable undo for the operator.
def execute(self, context): # execute() is called by blender when running the operator.
replaced_counter = 0
renamed_counter = 0
# remove suffixes and replace materials that would be named the same
for mat in bpy.data.materials:
if mat.users and len(mat.name)>4:
if mat.name[-4]=='.':
try:
int(mat.name[-3:])
targetname = mat.name[0:-4]
index_of_existing = bpy.data.materials.find(targetname)
if index_of_existing == -1:
mat.name = targetname
renamed_counter +=1
else:
mat.user_remap(bpy.data.materials[index_of_existing])
replaced_counter += 1
except ValueError:
continue
# change texture name to image file name
for tex in bpy.data.textures:
if tex.users:
try:
# may exist already, don't overwrite name yet
texname = os.path.basename(tex.image.filepath)
except AttributeError:
print(tex.name + ' has no image')
continue
found_tex_index = bpy.data.textures.find(texname)
if found_tex_index == -1:
tex.name = texname
else:
tex.user_remap(bpy.data.textures[found_tex_index])
self.report({'INFO'}, str(replaced_counter) + ' unlinked, ' + str(renamed_counter) + ' renamed')
return {'FINISHED'}
# takes a sample of pixels and calculates average color for every material with image
class GothicCalcColors(bpy.types.Operator):
"""Calculate all material colors by texture""" # blender will use this as a tooltip for menu items and buttons.
bl_idname = "context.gothic_calc_colors" # unique identifier for buttons and menu items to reference.
bl_label = "Gothic: Calculate Material Colors" # display name in the interface.
bl_options = {'REGISTER'} # enable undo for the operator.
def execute(self, context):
scene = context.scene
set_transparency = scene.gothic_tools.set_transparency
pixel_samples = scene.gothic_tools.pixel_samples
value = context.scene.gothic_tools.value
saturation = context.scene.gothic_tools.saturation
colors_calculated = 0
too_bright = False
for material in bpy.data.materials:
print('Calc color for ' + material.name)
try:
if len(material.texture_slots[0].texture.image.pixels):
image = material.texture_slots[0].texture.image
else:
continue
except AttributeError:
continue
averagecolor = numpy.array([0.,0.,0.])
# "pixels" has the structure [pixel1_red, pixel1_green, pixel1_blue, pixel1_alpha, pixel2_red, ...]
samples = pixel_samples
n = int(len(image.pixels)/4)
# take no more samples than pixels exist
if samples > n:
samples = n
pixels = image.pixels
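            # image.pixels is a flat RGBA float list, so each pixel occupies 4 entries;
            # the loop below picks 'samples' evenly spaced pixels across the image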
for i in range(samples):
pos = int(i/samples*n)*4
averagecolor += image.pixels[pos:pos+3]
averagecolor = averagecolor / samples
if True in numpy.isnan(averagecolor):
averagecolor[:] = [0,0,0]
# adjust saturation and brightness (value)
adjustedcolor = Color(averagecolor)
hsv = list(colorsys.rgb_to_hsv(*adjustedcolor))
hsv[1] += saturation - 1
hsv[2] += value - 1
new_rgb = colorsys.hsv_to_rgb(*hsv)
# Colors may be out of range in some cases, norm to [0,1]
if any(c>1 for c in new_rgb):
max_rbg = max(new_rgb)
new_rgb = (new_rgb[0]/max_rbg,
new_rgb[1]/max_rbg,
new_rgb[2]/max_rbg)
too_bright = True
material.diffuse_color = Color(new_rgb)
material.diffuse_intensity = 1.0
colors_calculated += 1
if set_transparency:
material.use_transparency = True
self.report({'INFO'}, str(colors_calculated) + ' colors updated')
if too_bright:
self.report({'INFO'}, str(colors_calculated) + ' colors updated (clamped)')
return {'FINISHED'}
# replaces all UV textures by the image that the material of this face has
class GothicAssignImages(bpy.types.Operator):
"""Apply UV-Images that correspond to the assigned materials""" # blender will use this as a tooltip for menu items and buttons.
bl_idname = "context.gothic_assign_images" # unique identifier for buttons and menu items to reference.
bl_label = "Gothic: Assign Images by Materials" # display name in the interface.
bl_options = {'REGISTER'} # enable undo for the operator.
def execute(self, context): # execute() is called by blender when running the operator.
scene = context.scene
apply_to_selected_only = scene.gothic_tools.apply_to_selected_only
if apply_to_selected_only:
objects_tobechanged = context.selected_objects
if not objects_tobechanged:
self.report({'WARNING'}, 'No objects selected')
else:
objects_tobechanged = bpy.data.objects
for object in objects_tobechanged:
if not object.type == 'MESH':
continue
bpy.context.scene.objects.active = object
bpy.ops.object.mode_set(mode = 'OBJECT')
mesh = object.data
if not mesh.uv_textures:
uv = mesh.uv_textures.new('UvMap')
            # collect all materials and their image
            # map material index to image beforehand into the list image_by_material_index
image_by_material_index = [None]*len(object.material_slots)
for matindex,matslot in enumerate(object.material_slots):
                # if texture or texture image doesn't exist, leave the entry as None
try:
image_by_material_index[matindex] = matslot.material.texture_slots[0].texture.image
except AttributeError:
pass
# assign image to face
uv = object.data.uv_textures[0]
for index,face in enumerate(mesh.polygons):
uv.data[index].image = image_by_material_index[face.material_index]
self.report({'INFO'}, 'UV-Images assigned to ' +str(len(objects_tobechanged)) + ' objects')
return {'FINISHED'}
# replaces materials by those that belong to the assigned UV textures
# at every call matlib.ini is parsed and for every image a matching material is searched for
# depending on how often this texture is used by a material, the used material name is
# never: texture name without file extension
# once: take name from materialfilter
# more: ambiguous, depending on settings
# optionally faces with portal materials are not overwritten
# note that this will create a material for all used images in the file if they don't exist. this is done because
# it would be more troublesome to first filter out the actually needed materials
class GothicAssignMaterials(bpy.types.Operator):
"""Apply Materials that Correspond to the Unwrapped UV-Images""" # blender will use this as a tooltip for menu items and buttons.
bl_idname = "context.gothic_assign_materials" # unique identifier for buttons and menu items to reference.
bl_label = "Gothic: Assign Materials by UV-Images" # display name in the interface.
bl_options = {'REGISTER'} # enable undo for the operator.
def execute(self, context): # execute() is called by blender when running the operator.
scene = context.scene
apply_to_selected_only = scene.gothic_tools.apply_to_selected_only
matlib_filepath = scene.gothic_tools.matlib_filepath
ambiguous_materials = scene.gothic_tools.ambiguous_materials
matching_name = scene.gothic_tools.matching_name
apply_to_selected_only = scene.gothic_tools.apply_to_selected_only
keep_portals = scene.gothic_tools.keep_portals
# if no objects are selected and "only selected objects", cancel
if apply_to_selected_only:
objects_tobechanged = context.selected_objects
if not objects_tobechanged:
self.report({'WARNING'}, 'No objects selected')
return {'FINISHED'}
# if no valid matlib.ini specified, cancel
matlib_dirpath = os.path.dirname(matlib_filepath)
if not os.path.isfile(matlib_filepath):
self.report({'ERROR'}, 'Invalid MatLib.ini filepath')
return {'CANCELLED'}
# for every used image create or find a matching texture
# use existing textures with correct name if available
# map image to texture into dict "texture_by_image"
used_images = []
texture_by_image = {}
for image in bpy.data.images:
if image.users:
used_images.append(image)
found_matching_texindex = bpy.data.textures.find(image.name)
if found_matching_texindex == -1:
newtex = bpy.data.textures.new(image.name,'IMAGE')
newtex.image = image
texture_by_image[image] = newtex
else:
texture_by_image[image] = bpy.data.textures[found_matching_texindex]
""" gothic materials """
# parse matlib
# create one list of materials, one of corresponing textures and one for colors
# same index for matching material/texture/color
gmaterial_names = []
gtexture_names = []
gmaterial_colors = []
# append found items to given input variables
def add_materials_from_pml(file, materials, textures, colors):
if not os.path.isfile(file):
self.report({'WARNING'}, 'PML not found: ' + file)
return
file=open(file,'r')
for line in file:
if not line.find('% zCMaterial') == -1:
materials.append("")
textures.append("")
colors.append("")
elif not line.find('name=string:') == -1:
materials[-1] = line[line.find('name=string:')+12:-1].upper()
elif not line.find('texture=string:') == -1:
textures[-1] = line[line.find('texture=string:')+15:-1].upper()
elif not line.find('color=color:') == -1:
colors[-1] = line[line.find('color=color:')+12:-1].split(' ')[:-1]
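        # every MatLib.ini line containing '=#' names a material library; the part
        # before '=#' plus '.pml' is resolved next to MatLib.ini and parsed with
        # add_materials_from_pml above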
matlib = open(matlib_filepath,'r')
for line in matlib:
if '=#' in line:
add_materials_from_pml(os.path.join(matlib_dirpath, line[0:line.find('=#')]+'.pml'),gmaterial_names,gtexture_names, gmaterial_colors)
# find materials that appear more than once
# start from the end so that with duplicate materials
# the lower index entry will be removed
seenmaterials = set()
duplicates = []
for x in enumerate(list(reversed(gmaterial_names))):
if x[1] in seenmaterials:
duplicates.append(len(gmaterial_names)-1-x[0])
else:
seenmaterials.add(x[1])
# remove duplicate gothic materials from both lists
for duplicate in duplicates:
gmaterial_names.pop(duplicate)
gtexture_names.pop(duplicate)
# find gothic textures that are used by more than one material
ambiguoustex_names = list(set([texname for texname in gtexture_names if gtexture_names.count(texname)>1]))
ambiguoustex_defaultmat = {}
for ambigtexname in ambiguoustex_names:
# take first or last entry
for index in range(len(gmaterial_names)):
                if gtexture_names[index] == ambigtexname:
ambiguoustex_defaultmat[ambigtexname.lower()] = gmaterial_names[index]
# if first entry is taken: skip remaining
# else defaultmat is overwritten every time
if ambiguous_materials=='first':
break
# if a material with same name exists and option checked, overwrite
if matching_name:
if ambigtexname in gmaterial_names:
ambiguoustex_defaultmat[ambigtexname.lower()] = ambigtexname
# else if a material with same name except extension exists, take it as default
elif ambigtexname[0:-4] in gmaterial_names:
ambiguoustex_defaultmat[ambigtexname.lower()] = ambigtexname[0:-4]
""" blender materials """
# for every blender texture: what should be the material name
# if no corresponding gtex: same name as in gothic
# if one correspoding gtex: use the existing material name
# if ambiguous: first, last or generic ('ambiguous1'...), additionally matching name if available
# save the determined material name in var "bmat_name_by_image" mapped by image
bmat_name_by_image = {}
bmat_color_by_image = {}
index_of_ambiguous = 1
for image in used_images:
gmat_exists = False
# gtex_index is used to find the gmat, because they have same indices
for gtex_index, gtex_name in enumerate(gtexture_names):
if gtex_name.lower() == image.name.lower():
bmat_color_by_image[image] = Color([int(x)/255 for x in gmaterial_colors[gtex_index]])
if not gtex_name in ambiguoustex_names:
bmat_name_by_image[image] = gmaterial_names[gtex_index]
else:
if ambiguous_materials=='generic':
bmat_name_by_image[image] = 'ambiguous'+str(index_of_ambiguous)
index_of_ambiguous += 1
else:
bmat_name_by_image[image] = ambiguoustex_defaultmat[image.name.lower()]
gmat_exists = True
break;
if not gmat_exists:
# take filename without extension and default color
bmat_name_by_image[image] = os.path.basename(image.name).upper()[0:-4]
bmat_color_by_image[image] = Color([0.8, 0.8, 0.8])
# collect the materials that belong to any existing used image
        # (not only those images that appear in the selected objects, because it's simpler this way)
# use existing materials with correct name if available
# first create global 'unknown' material for faces without image
# even if no unknown exist, zero users will still be a useful indicator
if bpy.data.materials.find('unknown')==-1:
matunknown = bpy.data.materials.new('unknown')
matunknown.diffuse_color = Color([1,0,1]) # pink
else:
matunknown = bpy.data.materials[bpy.data.materials.find('unknown')]
material_by_image = {}
material_by_image[None] = matunknown
for image,bmat_name in bmat_name_by_image.items():
found_existing_bmat = False
for scannedmaterial in bpy.data.materials:
if scannedmaterial.name == bmat_name:
targetmat = scannedmaterial
found_existing_bmat = True
break;
if not found_existing_bmat:
targetmat = bpy.data.materials.new(bmat_name)
targetmat.diffuse_color = bmat_color_by_image[image]
material_by_image[image] = targetmat
# determine texture for this material
corresponding_texture = texture_by_image[image]
for slot in targetmat.texture_slots:
if slot:
break
else:
targetmat.texture_slots.add()
targetmat.texture_slots[0].texture = corresponding_texture
# iterate over all polygons and look up the matching material
# for every used image in the file the matching material is mapped inside var "material_by_image"
if apply_to_selected_only:
objects_tobechanged = context.selected_objects
else:
objects_tobechanged = bpy.data.objects
for object in objects_tobechanged:
if not object.type == 'MESH':
continue
if object.hide:
continue
bpy.context.scene.objects.active = object
bpy.ops.object.mode_set(mode = 'OBJECT')
mesh = object.data
# keep_mat_with_index stores material slot numbers which will not be overwritten by UV (portals)
keep_mat_with_index = []
# slot_is_used contains any material index that will not be deleted after reassigning the slots
slot_is_used = [False]*len(object.material_slots)
try:
if keep_portals:
for matindex, matslot in enumerate(object.material_slots):
n = matslot.material.name.lower()
if n == 'ghostoccluder' or \
n[0:2] == 'p:' or \
n[0:3] == 'pi:' or \
n[0:3] == 'pn:':
keep_mat_with_index.append(matindex)
slot_is_used[matindex] = True
except AttributeError:
pass
if not mesh.uv_textures:
# in this case only unknown material except portals will be assigned
uv = mesh.uv_textures.new('UVMap')
else:
uv = mesh.uv_textures[0]
# for every polygon look up which material matches its UV image
for index,face in enumerate(mesh.polygons):
image = mesh.uv_textures[0].data[index].image
                # don't assign anything if not supposed to, because it's a portal
if face.material_index in keep_mat_with_index:
continue;
# if no image, take 'unknown' mat
if not image:
mat = matunknown
else:
# for every image a material should be mapped in material_by_image
if image in material_by_image:
mat = material_by_image[image]
else:
# something went wrong, most likely image users not updated correctly
raise ValueError('No mapped material found for '+image.name + '. Most likely the images are not updated internally. Try restarting Blender')
mat = matunknown
# check if object has this material already
for slotindex,slot in enumerate(object.material_slots):
if slot.material == mat:
face.material_index = slotindex
slot_is_used[slotindex] = True
break;
# if not, add a slot at bottom (new slot will be last)
else:
bpy.ops.object.material_slot_add()
object.active_material = mat
object.material_slots[object.active_material_index].link = 'DATA'
face.material_index = object.active_material_index
slot_is_used.append(True)
# delete unused slots from bottom to top
for slot_reversed, used in enumerate(reversed(slot_is_used)):
if not used:
slot = len(slot_is_used) - slot_reversed - 1
object.active_material_index = slot
bpy.ops.object.material_slot_remove()
self.report({'INFO'}, 'Materials assigned to ' +str(len(objects_tobechanged)) + ' objects')
return {'FINISHED'} # this lets blender know the operator finished successfully.
# ------------------------------------------------------------------------
# gothic tools in objectmode
# ------------------------------------------------------------------------
class VIEW3D_PT_gothic_clean_duplicates_panel(Panel):
bl_idname = "OBJECT_PT_gothic_clean_duplicates_panel"
bl_label = "Clean Duplicates"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
bl_category = "Gothic Materials"
bl_context = "objectmode"
def draw(self, context):
layout = self.layout
scene = context.scene
gothic_tools = scene.gothic_tools
layout.operator('context.gothic_clean_images', text = 'Clean Images', icon = 'IMAGE_DATA')
layout.operator('context.gothic_clean_materials', text = 'Clean Materials', icon = 'MATERIAL')
layout.label(text="Case:")
layout.prop(gothic_tools, "case", text="")
layout.separator()
layout.separator()
class VIEW3D_PT_gothic_assign_materials_panel(Panel):
bl_idname = "VIEW3D_PT_gothic_assign_materials_panel"
bl_label = "UVs to Materials"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
bl_category = "Gothic Materials"
bl_context = "objectmode"
def draw(self, context):
layout = self.layout
scene = context.scene
gothic_tools = scene.gothic_tools
layout.operator('context.gothic_assign_materials', text = 'Assign Materials', icon = 'FACESEL')
layout.separator()
layout.prop(gothic_tools, "matlib_filepath", text="")
layout.prop(gothic_tools, "apply_to_selected_only")
layout.prop(gothic_tools, "keep_portals")
layout.separator()
layout.label(text="Ambiguous Textures:")
layout.prop(gothic_tools, "matching_name")
layout.label(text="or else")
layout.prop(gothic_tools, "ambiguous_materials", text="")
layout.separator()
layout.separator()
class VIEW3D_PT_gothic_assign_images_panel(Panel):
bl_idname = "VIEW3D_PT_gothic_assign_images_panel"
bl_label = "Materials to UVs"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
bl_category = "Gothic Materials"
bl_context = "objectmode"
def draw(self, context):
layout = self.layout
scene = context.scene
gothic_tools = scene.gothic_tools
layout.operator('context.gothic_assign_images', text = 'Assign Images', icon = 'FACESEL_HLT')
layout.prop(gothic_tools, "apply_to_selected_only")
layout.separator()
layout.separator()
class VIEW3D_PT_gothic_material_colors_panel(Panel):
bl_idname = "VIEW3D_PT_gothic_material_colors_panel"
bl_label = "Material Colors"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
bl_category = "Gothic Materials"
bl_context = "objectmode"
def draw(self, context):
layout = self.layout
scene = context.scene
gothic_tools = context.scene.gothic_tools
layout.operator('context.gothic_calc_colors', text = 'Calc Colors (slow)', icon = 'COLOR')
row = layout.row()
row.prop(gothic_tools, "set_transparency")
row.prop(gothic_tools, "pixel_samples")
layout.prop(gothic_tools, "saturation")
layout.prop(gothic_tools, "value")
layout.separator()
layout.separator()
class VIEW3D_PT_gothic_search_material_panel(Panel):
bl_idname = "VIEW3D_PT_gothic_search_material_panel"
bl_label = "Search Material"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
bl_category = "Gothic Materials"
bl_context = "objectmode"
def draw(self, context):
layout = self.layout
scene = context.scene
gothic_tools = scene.gothic_tools
layout.operator('object.gothic_isolate_objects', text = 'Isolate Objects', icon = 'VIEWZOOM')
layout.prop(gothic_tools, "searched_material", text="with Mat")
layout.prop(gothic_tools, "isolate_all_layers")
layout.separator()
layout.separator()
# ------------------------------------------------------------------------
# register and unregister
# ------------------------------------------------------------------------
def register():
bpy.utils.register_module(__name__)
bpy.types.Scene.gothic_tools = PointerProperty(type=GothicMaterialSettings)
def unregister():
bpy.utils.unregister_module(__name__)
del bpy.types.Scene.gothic_tools
if __name__ == "__main__":
register() | 43.963942 | 194 | 0.561458 | 32,762 | 0.895675 | 0 | 0 | 0 | 0 | 0 | 0 | 11,955 | 0.326836 |
9e4e27c4f397f2c0b09121050df5d040566af2dd | 7,881 | py | Python | v1/GCRCatalogs/MB2GalaxyCatalog.py | adam-broussard/descqa | d9681bd393553c31882ec7e28e6c1c7b6e482dd3 | [
"BSD-3-Clause"
]
| 4 | 2017-11-14T03:33:57.000Z | 2021-06-05T16:35:40.000Z | v1/GCRCatalogs/MB2GalaxyCatalog.py | adam-broussard/descqa | d9681bd393553c31882ec7e28e6c1c7b6e482dd3 | [
"BSD-3-Clause"
]
| 136 | 2017-11-06T16:02:58.000Z | 2021-11-11T18:20:23.000Z | v1/GCRCatalogs/MB2GalaxyCatalog.py | adam-broussard/descqa | d9681bd393553c31882ec7e28e6c1c7b6e482dd3 | [
"BSD-3-Clause"
]
| 31 | 2017-11-06T19:55:35.000Z | 2020-12-15T13:53:53.000Z | # Massive Black 2 galaxy catalog class
import numpy as np
from astropy.table import Table
import astropy.units as u
import astropy.cosmology
from .GalaxyCatalogInterface import GalaxyCatalog
class MB2GalaxyCatalog(GalaxyCatalog):
"""
Massive Black 2 galaxy catalog class.
"""
def __init__(self, **kwargs):
fn = kwargs.get('fn')
self.type_ext = 'MB2'
self.filters = {
'zlo': True,
'zhi': True
}
self.h = 0.702
self.cosmology = astropy.cosmology.FlatLambdaCDM(H0=self.h*100.0, Om0 = 0.275)
self.quantities = {
'halo_id': self._get_stored_property,
'parent_halo_id': self._get_stored_property,
'redshift': self._get_stored_property,
'positionX': self._get_derived_property, # Position returned in Mpc, stored in kpc/h
'positionY': self._get_derived_property,
'positionZ': self._get_derived_property,
'velocityX': self._get_stored_property, # Velocity returned in km/sec
'velocityY': self._get_stored_property, # Velocity returned in km/sec
'velocityZ': self._get_stored_property, # Velocity returned in km/sec
'mass': self._get_derived_property, # Masses returned in Msun but stored in 1e10 Msun/h
'stellar_mass': self._get_derived_property,
'gas_mass': self._get_stored_property,
'sfr': self._get_stored_property,
'SDSS_u:observed:': self._get_derived_property,
'SDSS_g:observed:': self._get_derived_property,
'SDSS_r:observed:': self._get_derived_property,
'SDSS_i:observed:': self._get_derived_property,
'SDSS_z:observed:': self._get_derived_property,
'SDSS_u:rest:': self._get_derived_property,
'SDSS_g:rest:': self._get_derived_property,
'SDSS_r:rest:': self._get_derived_property,
'SDSS_i:rest:': self._get_derived_property,
'SDSS_z:rest:': self._get_derived_property,
}
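        # each entry below maps a derived quantity to a tuple of
        # (stored column names, scalar arguments, function), which is unpacked
        # by _get_derived_property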
self.derived = {
'mass': (('mass',), (1.e10 / self.h,), self._multiply),
'stellar_mass': (('stellar_mass',), (1.e10 / self.h,), self._multiply),
'positionX': (('x',), (1.e-3 / self.h,), self._multiply), # Position stored in kpc/h
'positionY': (('y',), (1.e-3 / self.h,), self._multiply),
'positionZ': (('z',), (1.e-3 / self.h,), self._multiply),
'SDSS_u:rest:': (('SDSS_u:rest:',), (), self._luminosity_to_magnitude),
'SDSS_g:rest:': (('SDSS_g:rest:',), (), self._luminosity_to_magnitude),
'SDSS_r:rest:': (('SDSS_r:rest:',), (), self._luminosity_to_magnitude),
'SDSS_i:rest:': (('SDSS_i:rest:',), (), self._luminosity_to_magnitude),
'SDSS_z:rest:': (('SDSS_z:rest:',), (), self._luminosity_to_magnitude),
'SDSS_u:observed:': (('SDSS_u:rest:', 'redshift'), (), self._add_distance_modulus),
'SDSS_g:observed:': (('SDSS_g:rest:', 'redshift'), (), self._add_distance_modulus),
'SDSS_r:observed:': (('SDSS_r:rest:', 'redshift'), (), self._add_distance_modulus),
'SDSS_i:observed:': (('SDSS_i:rest:', 'redshift'), (), self._add_distance_modulus),
'SDSS_z:observed:': (('SDSS_z:rest:', 'redshift'), (), self._add_distance_modulus),
}
self.Ngals = 0
self.sky_area = 4.*np.pi*u.sr # all sky by default
self.lightcone = False
self.box_size = 100.0 / self.h
self.SDSS_kcorrection_z = 0.1
return GalaxyCatalog.__init__(self, fn)
def load(self, fn):
"""
Given a catalog path, attempt to read the catalog and set up its
internal data structures.
"""
self.catalog = Table.read(fn, path='data')
self.Ngals = len(self.catalog)
self.redshift = self.catalog['redshift'][0]
return self
def _construct_mask(self, filters):
"""
Given a dictionary of filter constraints, construct a mask array
for use in filtering the catalog.
"""
if type(filters) is not dict:
raise TypeError("construct_mask: filters must be given as dict")
mask = np.ones(self.Ngals, dtype=bool)
mask &= (np.isfinite(self.catalog['x'])) # filter out NaN positions from catalog
mask &= (np.isfinite(self.catalog['y']))
mask &= (np.isfinite(self.catalog['z']))
for filter_name in filters.keys():
if filter_name == 'zlo':
mask &= (filters[filter_name] < self.catalog['redshift'])
elif filter_name == 'zhi':
mask &= (filters[filter_name] > self.catalog['redshift'])
return mask
def _get_stored_property(self, quantity, filters):
"""
Return the requested property of galaxies in the catalog as a NumPy
array. This is for properties that are explicitly stored in the
catalog.
"""
filter_mask = self._construct_mask(filters)
if not filter_mask.any():
return np.array([])
return self.catalog[quantity][np.where(filter_mask)].data
def _get_derived_property(self, quantity, filters):
"""
Return a derived halo property. These properties aren't stored
in the catalog but can be computed from properties that are via
a simple function call.
"""
filter_mask = self._construct_mask(filters)
if not filter_mask.any():
return np.array([])
arrays_required, scalars, func = self.derived[quantity]
return func([self.catalog[name][np.where(filter_mask)].data for name in arrays_required], scalars)
# Functions for computing derived values
def _translate(self, propList):
"""
Translation routine -- a passthrough that accomplishes mapping of
derived quantity names to stored quantity names via the derived
property function mechanism.
"""
return propList
def _multiply(self, array_tuple, scalar_tuple):
"""
Multiplication routine -- derived quantity is equal to a stored
quantity times some factor. Additional args for the derived quantity
routines are passed in as a tuple, so extract the factor first.
"""
return array_tuple[0] * scalar_tuple[0]
def _add_distance_modulus(self, array_tuple, scalar_tuple):
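        # observed magnitude = rest-frame absolute magnitude plus the distance modulus
        # at the galaxy redshift (array_tuple[1]); no K-correction is applied here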
return self._luminosity_to_magnitude(array_tuple,scalar_tuple) + self.cosmology.distmod(array_tuple[1]).value
def _luminosity_to_magnitude(self,array_tuple,scalar_tuple):
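        # converts a band luminosity to an absolute AB magnitude: the flux is evaluated
        # on a sphere of radius 10 pc (3.08567758e19 cm, hence the 1e38 * 3.08567758**2)
        # and -48.6 is the AB zero point; the 1e28 factor presumably rescales the stored
        # value to erg/s/Hz (assumption, not stated in this file)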
bandlum = array_tuple[0]*1.0e28
bandflux = bandlum/(4*(np.pi)*(1.0e38)*(3.08567758**2))
return -2.5*(np.log10(bandflux)) - 48.6
| 52.192053 | 134 | 0.53242 | 7,687 | 0.975384 | 0 | 0 | 0 | 0 | 0 | 0 | 2,302 | 0.292095 |
9e4e87db0add45d330be3d156367bbd52e0ded32 | 714 | py | Python | skylernet/views.py | skylermishkin/skylernet | d715c69348c050d976ba7931127a576565b67ff1 | [
"MIT"
]
| null | null | null | skylernet/views.py | skylermishkin/skylernet | d715c69348c050d976ba7931127a576565b67ff1 | [
"MIT"
]
| null | null | null | skylernet/views.py | skylermishkin/skylernet | d715c69348c050d976ba7931127a576565b67ff1 | [
"MIT"
]
| null | null | null | from django.shortcuts import get_object_or_404, render
from django.contrib.staticfiles.templatetags.staticfiles import static
def index(request):
return render(request, 'skylernet/landing.html')
def connect(request):
context = {'online_media': [{"name": 'LinkedIn',
'href': 'https://www.linkedin.com/in/skyler-mishkin-62446b158',
'src': static('skylernet/LinkedIn.svg')},
{'name': 'GitHub',
'href': 'https://github.com/skylermishkin',
'src': static('skylernet/GitHub.png')}]}
return render(request, 'skylernet/connect.html', context)
| 42 | 96 | 0.564426 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 248 | 0.347339 |
9e4e8b052d2746faabafff4026914e35d26807a7 | 532 | py | Python | src/objects/qubit.py | KaroliShp/Quantumformatics | 4166448706c06a1a45abd106da8152b4f4c40a25 | [
"MIT"
]
| 2 | 2019-10-28T20:26:14.000Z | 2019-10-29T08:28:45.000Z | src/objects/qubit.py | KaroliShp/Quantumformatics | 4166448706c06a1a45abd106da8152b4f4c40a25 | [
"MIT"
]
| 3 | 2019-10-28T09:19:27.000Z | 2019-10-28T13:42:08.000Z | src/objects/qubit.py | KaroliShp/Quantumformatics | 4166448706c06a1a45abd106da8152b4f4c40a25 | [
"MIT"
]
| null | null | null | from src.dirac_notation.bra import Bra
from src.dirac_notation.ket import Ket
from src.dirac_notation.matrix import Matrix
from src.dirac_notation import functions as dirac
from src.dirac_notation import constants as const
from src.objects.quantum_system import QuantumSystem, SystemType
class Qubit(QuantumSystem):
"""
Special case of a qudit in 2D Hilbert space, basic unit
Composition pattern: Leaf
"""
def __init__(self, state: Ket):
super().__init__(state)
assert state.vector_space == 2 | 29.555556 | 64 | 0.755639 | 241 | 0.453008 | 0 | 0 | 0 | 0 | 0 | 0 | 101 | 0.18985 |
9e4edf8dd4337b4a83cb6c425f974138a731fbae | 9,926 | py | Python | cuddlefish/apiparser.py | mozilla/FlightDeck | 61d66783252ac1318c990e342877a26c64f59062 | [
"BSD-3-Clause"
]
| 6 | 2015-04-24T03:10:44.000Z | 2020-12-27T19:46:33.000Z | cuddlefish/apiparser.py | fox2mike/FlightDeck | 3a2fc78c13dd968041b349c4f9343e6c8b22dd25 | [
"BSD-3-Clause"
]
| null | null | null | cuddlefish/apiparser.py | fox2mike/FlightDeck | 3a2fc78c13dd968041b349c4f9343e6c8b22dd25 | [
"BSD-3-Clause"
]
| 5 | 2015-09-18T19:58:31.000Z | 2020-01-28T05:46:55.000Z | import sys, re, textwrap
class ParseError(Exception):
# args[1] is the line number that caused the problem
def __init__(self, why, lineno):
self.why = why
self.lineno = lineno
def __str__(self):
return ("ParseError: the JS API docs were unparseable on line %d: %s" %
(self.lineno, self.why))
class Accumulator:
def __init__(self, holder, firstline):
self.holder = holder
self.firstline = firstline
self.otherlines = []
def addline(self, line):
self.otherlines.append(line)
def finish(self):
# take a list of strings like:
# "initial stuff" (this is in firstline)
# " more stuff" (this is in lines[0])
# " yet more stuff"
# " indented block"
# " indented block"
# " nonindented stuff" (lines[-1])
#
# calculate the indentation level by looking at all but the first
# line, and removing the whitespace they all have in common. Then
# join the results with newlines and return a single string.
pieces = []
if self.firstline:
pieces.append(self.firstline)
if self.otherlines:
pieces.append(textwrap.dedent("\n".join(self.otherlines)))
self.holder["description"] = "\n".join(pieces)
class APIParser:
def parse(self, lines, lineno):
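        # 'lines' holds one complete <api ...> ... </api> block, opening and closing
        # tags included; 'lineno' is the line number of the opening tag in the source
        # and is only used for error messages and the stored line_number fields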
api = {"line_number": lineno}
titleLine = lines.pop(0)
if "name" not in titleLine:
raise ParseError("Opening <api> tag must have a name attribute.",
lineno)
m = re.search("name=['\"]{0,1}([-\w\.]*?)['\"]", titleLine)
if not m:
raise ParseError("No value for name attribute found in "
"opening <api> tag.", lineno)
lineno += 1
api["name"] = m.group(1)
finalLine = lines.pop()
if not "</api>" in finalLine:
raise ParseError("Closing </api> not found.", lineno+len(lines))
props = []
currentPropHolder = None
params = []
tag, info, firstline = self._parseTypeLine(lines[0], lineno)
api["type"] = tag
if tag == 'property':
if not 'type' in info:
raise ParseError("No type found for @property.", lineno)
api['property_type'] = info['type']
# info is ignored
currentAccumulator = Accumulator(api, firstline)
for line in lines[1:]:
lineno += 1 # note that we count from lines[1:]
if not line.lstrip().startswith("@"):
currentAccumulator.addline(line)
continue
# we're starting a new section
currentAccumulator.finish()
tag, info, firstline = self._parseTypeLine(line, lineno)
if tag == "prop":
if "type" not in info:
raise ParseError("@prop lines must include {type}: '%s'" %
line, lineno)
if "name" not in info:
raise ParseError("@prop lines must provide a name: '%s'" %
line, lineno)
props.append(info) # build up props[]
currentAccumulator = Accumulator(info, firstline)
continue
# close off the @prop list
if props and currentPropHolder:
currentPropHolder["props"] = props
props = []
if tag == "returns":
api["returns"] = info
# the Accumulator will add ["description"] when done
currentAccumulator = Accumulator(info, firstline)
# @prop tags get attached to api["returns"]
currentPropHolder = info
continue
if tag == "param":
if info.get("required", False) and "default" in info:
raise ParseError("required parameters should not have defaults: '%s'"
% line, lineno)
params.append(info)
currentAccumulator = Accumulator(info, firstline)
# @prop tags get attached to this param
currentPropHolder = info
continue
raise ParseError("unknown '@' section header %s in '%s'" %
(tag, line), lineno)
currentAccumulator.finish()
if props and currentPropHolder:
currentPropHolder["props"] = props
if params:
api["params"] = params
return api
def _parseTypeLine(self, line, lineno):
# handle these things:
# @method
# @returns description
# @returns {string} description
# @param NAME {type} description
# @param NAME
# @prop NAME {type} description
# @prop NAME
info = {"line_number": lineno}
pieces = line.split()
if not pieces:
raise ParseError("line is too short: '%s'" % line, lineno)
if not pieces[0].startswith("@"):
raise ParseError("type line should start with @: '%s'" % line,
lineno)
tag = pieces[0][1:]
skip = 1
expect_name = tag in ("param", "prop")
if len(pieces) == 1:
description = ""
else:
if pieces[1].startswith("{"):
# NAME is missing, pieces[1] is TYPE
pass
else:
if expect_name:
info["required"] = not pieces[1].startswith("[")
name = pieces[1].strip("[ ]")
if "=" in name:
name, info["default"] = name.split("=")
info["name"] = name
skip += 1
if len(pieces) > skip and pieces[skip].startswith("{"):
info["type"] = pieces[skip].strip("{ }")
skip += 1
# we've got the metadata, now extract the description
pieces = line.split(None, skip)
if len(pieces) > skip:
description = pieces[skip]
else:
description = ""
return tag, info, description
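# Worked example for _parseTypeLine (hypothetical input, for illustration only): the
# line "@param count {int} how many items" returns tag == "param",
# info == {"line_number": <n>, "required": True, "name": "count", "type": "int"}
# and the description "how many items".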
def parse_hunks(text):
# return a list of tuples. Each is one of:
# ("raw", string) : non-API blocks
# ("api-json", dict) : API blocks
processed = 0 # we've handled all bytes up-to-but-not-including this offset
line_number = 1
for m in re.finditer("<api[\w\W]*?</api>", text, re.M):
start = m.start()
if start > processed+1:
hunk = text[processed:start]
yield ("markdown", hunk)
processed = start
line_number += hunk.count("\n")
api_text = m.group(0)
api_lines = api_text.splitlines()
d = APIParser().parse(api_lines, line_number)
yield ("api-json", d)
processed = m.end()
line_number += api_text.count("\n")
if processed < len(text):
yield ("markdown", text[processed:])
class TestRenderer:
# render docs for test purposes
def getm(self, d, key):
return d.get(key, "<MISSING>")
def join_lines(self, text):
return " ".join([line.strip() for line in text.split("\n")])
def render_prop(self, p):
s = "props[%s]: " % self.getm(p, "name")
pieces = []
for k in ("type", "description", "required", "default"):
if k in p:
pieces.append("%s=%s" % (k, self.join_lines(str(p[k]))))
return s + ", ".join(pieces)
def render_param(self, p):
pieces = []
for k in ("name", "type", "description", "required", "default"):
if k in p:
pieces.append("%s=%s" % (k, self.join_lines(str(p[k]))))
yield ", ".join(pieces)
for prop in p.get("props", []):
yield " " + self.render_prop(prop)
def format_api(self, api):
yield "name= %s" % self.getm(api, "name")
yield "type= %s" % self.getm(api, "type")
yield "description= %s" % self.getm(api, "description")
params = api.get("params", [])
if params:
yield "parameters:"
for p in params:
for pline in self.render_param(p):
yield " " + pline
r = api.get("returns", None)
if r:
yield "returns:"
if "type" in r:
yield " type= %s" % r["type"]
if "description" in r:
yield " description= %s" % self.join_lines(r["description"])
props = r.get("props", [])
for p in props:
yield " " + self.render_prop(p)
def render_docs(self, docs_json, outf=sys.stdout):
for (t,data) in docs_json:
if t == "api-json":
#import pprint
#for line in str(pprint.pformat(data)).split("\n"):
# outf.write("JSN: " + line + "\n")
for line in self.format_api(data):
outf.write("API: " + line + "\n")
else:
for line in str(data).split("\n"):
outf.write("MD :" + line + "\n")
def hunks_to_dict(docs_json):
exports = {}
for (t,data) in docs_json:
if t != "api-json":
continue
if data["name"]:
exports[data["name"]] = data
return exports
if __name__ == "__main__":
json = False
if sys.argv[1] == "--json":
json = True
del sys.argv[1]
docs_text = open(sys.argv[1]).read()
docs_parsed = list(parse_hunks(docs_text))
if json:
import simplejson
print simplejson.dumps(docs_parsed, indent=2)
else:
TestRenderer().render_docs(docs_parsed)
| 35.833935 | 89 | 0.503728 | 8,482 | 0.854523 | 1,948 | 0.196252 | 0 | 0 | 0 | 0 | 2,569 | 0.258815 |
9e4f2abe49eca6572412ecb2672b250ab2b29afd | 1,217 | py | Python | specs/core.py | farleykr/acrylamid | c6c0f60b594d2920f6387ba82b552093d7c5fe1b | [
"BSD-2-Clause-FreeBSD"
]
| 61 | 2015-01-15T23:23:11.000Z | 2022-03-24T16:39:31.000Z | specs/core.py | farleykr/acrylamid | c6c0f60b594d2920f6387ba82b552093d7c5fe1b | [
"BSD-2-Clause-FreeBSD"
]
| 28 | 2015-01-26T22:32:24.000Z | 2022-01-13T01:11:56.000Z | specs/core.py | farleykr/acrylamid | c6c0f60b594d2920f6387ba82b552093d7c5fe1b | [
"BSD-2-Clause-FreeBSD"
]
| 25 | 2015-01-22T19:26:29.000Z | 2021-06-30T21:53:06.000Z | # -*- coding: utf-8 -*-
import attest
from acrylamid.core import cache
class Cache(attest.TestBase):
def __context__(self):
with attest.tempdir() as path:
self.path = path
cache.init(self.path)
yield
@attest.test
def persistence(self):
cache.init(self.path)
cache.set('foo', 'bar', "Hello World!")
cache.set('foo', 'baz', "spam")
assert cache.get('foo', 'bar') == "Hello World!"
assert cache.get('foo', 'baz') == "spam"
cache.shutdown()
cache.init(self.path)
assert cache.get('foo', 'bar') == "Hello World!"
assert cache.get('foo', 'baz') == "spam"
@attest.test
def remove(self):
cache.init(self.path)
cache.set('foo', 'bar', 'baz')
cache.remove('foo')
cache.remove('invalid')
assert cache.get('foo', 'bar') == None
assert cache.get('invalid', 'bla') == None
@attest.test
def clear(self):
cache.init(self.path)
cache.set('foo', 'bar', 'baz')
cache.set('spam', 'bar', 'baz')
cache.clear()
assert cache.get('foo', 'bar') == None
assert cache.get('spam', 'bar') == None
| 23.862745 | 56 | 0.532457 | 1,142 | 0.938373 | 139 | 0.114215 | 950 | 0.780608 | 0 | 0 | 248 | 0.20378 |
9e51608d7b0aa9e6ba5eb1fb96ffd50952b54f6c | 1,235 | py | Python | python/animate_sub_plots_sharc.py | FinMacDov/PhD_codes | 44e781c270fa9822a8137ef271f35c6e945c5828 | [
"MIT"
]
| null | null | null | python/animate_sub_plots_sharc.py | FinMacDov/PhD_codes | 44e781c270fa9822a8137ef271f35c6e945c5828 | [
"MIT"
]
| null | null | null | python/animate_sub_plots_sharc.py | FinMacDov/PhD_codes | 44e781c270fa9822a8137ef271f35c6e945c5828 | [
"MIT"
]
| null | null | null | from subplot_animation import subplot_animation
import sys
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import os
import numpy as np
import glob
sys.path.append("/home/smp16fm/forked_amrvac/amrvac/tools/python")
from amrvac_pytools.datfiles.reading import amrvac_reader
from amrvac_pytools.vtkfiles import read, amrplot
program_name = sys.argv[0]
path2files = sys.argv[1:]
# Switches
refiner = '__'
fps = 3
start_frame = 0
in_extension = 'png'
out_extension = 'avi'
# set time to look over
time_start = 0
time_end = None
text_x_pos = 0.85
text_y_pos = 0.01
save_dir = '/shared/mhd_jet1/User/smp16fm/j/2D/results'
# make dirs
#path2files = "/shared/mhd_jet1/User/smp16fm/sj/2D/P300/B100/A20/"
# path2files = "../test/"
# dummy_name = 'solar_jet_con_'
dummy_name = ''
#read.load_vtkfile(0, file='/shared/mhd_jet1/User/smp16fm/sj/2D/P300/B100/A20/jet_t300_B100A_20_', type='vtu')
print(path2files[0])
test = subplot_animation(path2files[0], save_dir=save_dir, dummy_name='',
refiner=None, text_x_pos=0.85, text_y_pos=0.01,
time_start=0, time_end=time_end, start_frame=0, fps=fps,
in_extension='png', out_extension='avi')
| 27.444444 | 110 | 0.715789 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 408 | 0.330364 |
9e554dd387e1b98981fc98073b0b6ac0775be949 | 812 | py | Python | swcf/controllers/index.py | pratiwilestari/simpleWebContactForm | 56369daadb8130bb72c19ae8ee10ad590804c84d | [
"MIT"
]
| null | null | null | swcf/controllers/index.py | pratiwilestari/simpleWebContactForm | 56369daadb8130bb72c19ae8ee10ad590804c84d | [
"MIT"
]
| null | null | null | swcf/controllers/index.py | pratiwilestari/simpleWebContactForm | 56369daadb8130bb72c19ae8ee10ad590804c84d | [
"MIT"
]
| null | null | null | from flask.helpers import flash
from flask.wrappers import Request
from swcf import app
from flask import render_template, redirect, request, url_for
from swcf.dao.indexDAO import *
@app.route("/", methods=['GET'])
def index():
return render_template("layout.html")
@app.route("/sendPost", methods=['POST'])
def sendPost():
print('masuk sini')
name = request.form['name']
email = request.form['email']
issue = request.form['issue']
content = request.form['fillContent']
print(name, email, issue, content)
    hInsert = insertPost(name, email, issue, content)
print(hInsert)
if hInsert['flag'] == 'T':
flash("Proses insert berhasil", 'success')
else :
flash("Tidak dapat melakukan proses insert", 'error')
return render_template("layout.html") | 31.230769 | 61 | 0.674877 | 0 | 0 | 0 | 0 | 627 | 0.772167 | 0 | 0 | 191 | 0.235222 |
9e55fcc920876b41b0c966a7f0b020aafcb8f66f | 87 | py | Python | examples/testlib2/box/methods_a.py | uibcdf/pyunitwizard | 54cdce7369e1f2a3771a1f05a4a6ba1d7610a5e7 | [
"MIT"
]
| 2 | 2021-07-01T14:33:58.000Z | 2022-03-19T19:19:09.000Z | examples/testlib2/box/methods_a.py | uibcdf/pyunitwizard | 54cdce7369e1f2a3771a1f05a4a6ba1d7610a5e7 | [
"MIT"
]
| 15 | 2021-02-11T18:54:16.000Z | 2022-03-18T17:38:03.000Z | examples/testlib2/box/methods_a.py | uibcdf/pyunitwizard | 54cdce7369e1f2a3771a1f05a4a6ba1d7610a5e7 | [
"MIT"
]
| 2 | 2021-06-17T18:56:02.000Z | 2022-03-08T05:02:17.000Z | from testlib2 import _puw
def get_default_form():
return _puw.get_default_form()
| 14.5 | 34 | 0.770115 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
9e5734bc9428d420f659a156adfa25e7ae27b0df | 4,668 | py | Python | tests/broker/test_show_machine.py | ned21/aquilon | 6562ea0f224cda33b72a6f7664f48d65f96bd41a | [
"Apache-2.0"
]
| 7 | 2015-07-31T05:57:30.000Z | 2021-09-07T15:18:56.000Z | tests/broker/test_show_machine.py | ned21/aquilon | 6562ea0f224cda33b72a6f7664f48d65f96bd41a | [
"Apache-2.0"
]
| 115 | 2015-03-03T13:11:46.000Z | 2021-09-20T12:42:24.000Z | tests/broker/test_show_machine.py | ned21/aquilon | 6562ea0f224cda33b72a6f7664f48d65f96bd41a | [
"Apache-2.0"
]
| 13 | 2015-03-03T11:17:59.000Z | 2021-09-09T09:16:41.000Z | #!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the show machine command."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestShowMachine(TestBrokerCommand):
def testverifymachineall(self):
command = ["show", "machine", "--all"]
out = self.commandtest(command)
self.matchoutput(out, "ut3c5n10", command)
self.matchoutput(out, "ut3c1n3", command)
self.matchoutput(out, "ut3c1n4", command)
self.matchoutput(out, "ut3s01p1", command)
self.matchoutput(out, "ut8s02p1", command)
self.matchoutput(out, "ut9s03p1", command)
self.matchoutput(out, "ut10s04p1", command)
self.matchoutput(out, "ut11s01p1", command)
self.matchoutput(out, "f5test", command)
def testverifymachineallproto(self):
command = ["show", "machine", "--all", "--format", "proto"]
machines = self.protobuftest(command)
machine_names = set(msg.name for msg in machines)
for machine in ("ut3c5n10", "ut3c1n3", "ut3c1n4", "ut3s01p1",
"ut8s02p1", "ut9s03p1", "ut10s04p1", "ut11s01p1",
"f5test"):
self.assertIn(machine, machine_names)
def testverifyut3c1n3interfacescsv(self):
command = "show machine --machine ut3c1n3 --format csv"
out = self.commandtest(command.split(" "))
net = self.net["unknown0"]
self.matchoutput(out,
"ut3c1n3,ut3,ut,ibm,hs21-8853,KPDZ406,eth0,%s,%s" %
(net.usable[2].mac, net.usable[2]), command)
self.matchoutput(out,
"ut3c1n3,ut3,ut,ibm,hs21-8853,KPDZ406,eth1,%s,%s" %
(net.usable[3].mac, net.usable[3]), command)
self.matchoutput(out,
"ut3c1n3,ut3,ut,ibm,hs21-8853,KPDZ406,bmc,%s,%s" %
(net.usable[4].mac, net.usable[4]), command)
def testrejectfqdn(self):
command = "show machine --machine unittest00.one-nyp.ms.com"
out = self.badrequesttest(command.split(" "))
self.matchoutput(out, "Illegal hardware label", command)
def testshowproto(self):
command = ["show_machine", "--machine", "ut3c1n3", "--format", "proto"]
machine = self.protobuftest(command, expect=1)[0]
self.assertEqual(machine.name, "ut3c1n3")
self.assertEqual(machine.host, "unittest00.one-nyp.ms.com")
self.assertEqual(machine.location.name, "ut3")
self.assertEqual(machine.model.name, "hs21-8853")
self.assertEqual(machine.model.vendor, "ibm")
self.assertEqual(machine.model.model_type, "blade")
self.assertEqual(machine.cpu, "e5-2660")
self.assertEqual(machine.cpu_count, 2)
self.assertEqual(machine.memory, 8192)
self.assertEqual(machine.serial_no, "KPDZ406")
self.assertEqual(len(machine.interfaces), 3)
self.assertEqual(len(machine.disks), 2)
self.assertEqual(machine.disks[0].device_name, "c0d0")
self.assertEqual(machine.disks[0].disk_type, "cciss")
self.assertEqual(machine.disks[0].capacity, 34)
self.assertEqual(machine.disks[0].address, "")
self.assertEqual(machine.disks[0].bus_address, "pci:0000:01:00.0")
self.assertEqual(machine.disks[0].wwn,
"600508b112233445566778899aabbccd")
self.assertEqual(machine.disks[1].device_name, "sda")
self.assertEqual(machine.disks[1].disk_type, "scsi")
self.assertEqual(machine.disks[1].capacity, 68)
self.assertEqual(machine.disks[1].address, "")
self.assertEqual(machine.disks[1].bus_address, "")
self.assertEqual(machine.disks[1].wwn, "")
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestShowMachine)
unittest.TextTestRunner(verbosity=2).run(suite)
| 45.320388 | 79 | 0.646744 | 3,585 | 0.767995 | 0 | 0 | 0 | 0 | 0 | 0 | 1,533 | 0.328406 |
9e5983beaa6b6cc08ac0ba87d128a18495efcf64 | 117 | py | Python | config-template.py | johanjordaan/silver-giggle | 5304a96b6aa1c4c5eb1f9069212423810aa89818 | [
"MIT"
]
| 1 | 2021-12-04T05:11:26.000Z | 2021-12-04T05:11:26.000Z | config-template.py | johanjordaan/silver-giggle | 5304a96b6aa1c4c5eb1f9069212423810aa89818 | [
"MIT"
]
| null | null | null | config-template.py | johanjordaan/silver-giggle | 5304a96b6aa1c4c5eb1f9069212423810aa89818 | [
"MIT"
]
| null | null | null | host="mysql-general.cyqv8he15vrg.ap-southeast-2.rds.amazonaws.com"
user="admin"
password=""
database="silver_giggle"
| 23.4 | 66 | 0.794872 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 85 | 0.726496 |
9e5cfbb9bf026d80e086f27d5037c72987aa2b73 | 447 | py | Python | secret/forms.py | MinisterioPublicoRJ/apilabcontas | c01d5c0f1e6705eb8470ba7ba5078c0c172a9570 | [
"MIT"
]
| 2 | 2019-06-10T18:34:15.000Z | 2020-04-29T14:23:34.000Z | secret/forms.py | MinisterioPublicoRJ/datalakecadg | c01d5c0f1e6705eb8470ba7ba5078c0c172a9570 | [
"MIT"
]
| 5 | 2020-01-09T15:59:16.000Z | 2021-06-10T21:06:13.000Z | secret/forms.py | MinisterioPublicoRJ/datalakecadg | c01d5c0f1e6705eb8470ba7ba5078c0c172a9570 | [
"MIT"
]
| null | null | null | from django import forms
from django.core.exceptions import ValidationError
from secret.models import Secret
class SecretForm(forms.ModelForm):
class Meta:
model = Secret
fields = ['username', 'email']
def clean_username(self):
username = self.cleaned_data['username']
if Secret.objects.filter(username=username).exists():
raise ValidationError("Usuário já existe!")
return username
| 26.294118 | 61 | 0.686801 | 336 | 0.74833 | 0 | 0 | 0 | 0 | 0 | 0 | 49 | 0.109131 |
9e5d616453b278b53324517816e3de2bbc018cf8 | 125 | py | Python | secreto.py | PeedrinZangw/sadness-musicbot-01 | c0dab41baba5ab43d840e440cfdc6ec78ac2d823 | [
"MIT"
]
| null | null | null | secreto.py | PeedrinZangw/sadness-musicbot-01 | c0dab41baba5ab43d840e440cfdc6ec78ac2d823 | [
"MIT"
]
| null | null | null | secreto.py | PeedrinZangw/sadness-musicbot-01 | c0dab41baba5ab43d840e440cfdc6ec78ac2d823 | [
"MIT"
]
| null | null | null | def seu_token():
return "NDUyMzQxOTA3MDEyNzE0NTI2.DgCa4Q.qhpEIZAUh3sLzZAqbdduRqjUwl8"
#Replace xxxxxx with your own token!! | 41.666667 | 72 | 0.824 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 96 | 0.768 |
9e5de8187f51a01a92395201a4a1d4ef624e2064 | 4,209 | py | Python | real_estate_analysis/models/xgb_model/xgboost_model.py | enyquist/Real_Estate_Analysis | 47bbcfbc9bece20ae2aa0fce84dfca700ec6842f | [
"MIT"
]
| null | null | null | real_estate_analysis/models/xgb_model/xgboost_model.py | enyquist/Real_Estate_Analysis | 47bbcfbc9bece20ae2aa0fce84dfca700ec6842f | [
"MIT"
]
| null | null | null | real_estate_analysis/models/xgb_model/xgboost_model.py | enyquist/Real_Estate_Analysis | 47bbcfbc9bece20ae2aa0fce84dfca700ec6842f | [
"MIT"
]
| null | null | null | import xgboost as xgb
import datetime
import real_estate_analysis.models.functions as func
import real_estate_analysis.models.xgb_model.utils as XGB_utils
import real_estate_analysis.Model.utils as model_utils
def main():
####################################################################################################################
# Config Log File
####################################################################################################################
logger = func.create_logger(e_handler_name='../logs/xgboost_error_log.log',
t_handler_name='../logs/xgboost_training_log.log')
####################################################################################################################
# Data
####################################################################################################################
X_train, y_train, X_test, y_test = func.retrieve_and_prepare_data()
# Format as DMatrices
dtrain = xgb.DMatrix(X_train, label=y_train)
dtest = xgb.DMatrix(X_test, label=y_test)
####################################################################################################################
# Bayesian Optimization
####################################################################################################################
dict_params = {
'max_depth': (3, 10),
'min_child_weight': (10e-6, 8),
'eta': (10e-6, 0.2),
'subsample': (0.5, 1),
'colsample_bytree': (0.5, 1),
'gamma': (0, 8),
'lambda_': (0.5, 10),
'alpha': (5, 10)
}
logger.info('Starting Bayesian Optimization')
optimizer = XGB_utils.optimize_xgb(dtrain=dtrain, pbounds=dict_params, n_iter=10, init_points=3)
logger.info('Bayesian Optimization Complete')
# Extract best params
best_params = optimizer.max['params']
best_params['max_depth'] = int(best_params['max_depth'])
best_params['lambda'] = best_params['lambda_']
best_params.pop('lambda_')
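    # The optimiser exposes the L2 regularisation term as 'lambda_' (presumably because
    # 'lambda' is a reserved word in Python); the two lines above rename it back to the
    # 'lambda' key that xgb.train() expects.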
# Set up best params for GPU learning
best_params['objective'] = 'reg:squarederror'
best_params['eval_metric'] = 'rmse'
best_params['tree_method'] = 'gpu_hist'
best_params['max_bin'] = 64
best_params['predictor'] = 'gpu_predictor'
best_params['gpu_id'] = 0
####################################################################################################################
# Train Model with Optimized Params
####################################################################################################################
NUM_BOOST_ROUND = 999
logger.info('Starting Model Training')
# Train model with those params Model to search for best boosting rounds
model = xgb.train(
params=best_params,
dtrain=dtrain,
num_boost_round=NUM_BOOST_ROUND,
evals=[(dtest, 'Test')],
early_stopping_rounds=10
)
best_params['n_estimators'] = model.best_iteration + 1
optimized_model = xgb.XGBRegressor(**best_params)
optimized_model.fit(X_train, y_train)
logger.info('Model Training Complete')
####################################################################################################################
# Validation
####################################################################################################################
dict_scores = func.score_my_model(my_model=optimized_model, x_train=X_train, y_train=y_train,
x_test=X_test, y_test=y_test)
logger.info('Results from XGBoost Search:')
logger.info(f'Best params: {best_params}')
func.log_scores(dict_scores)
####################################################################################################################
# Evaluate and Save
####################################################################################################################
today = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
fname = f'xgboost_{today}.joblib'
model_utils.validate_model(optimized_model, dict_scores, fname)
if __name__ == '__main__':
main()
| 37.918919 | 120 | 0.43502 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,223 | 0.528154 |
9e5e1d23daee791eaea271ade55225f743349e3f | 1,067 | py | Python | tests/utils.py | 1116574/vulcan-api | 3cf64e78ba3e68299c94d629c3ffe4f7e8c94aed | [
"MIT"
]
| null | null | null | tests/utils.py | 1116574/vulcan-api | 3cf64e78ba3e68299c94d629c3ffe4f7e8c94aed | [
"MIT"
]
| null | null | null | tests/utils.py | 1116574/vulcan-api | 3cf64e78ba3e68299c94d629c3ffe4f7e8c94aed | [
"MIT"
]
| null | null | null | from datetime import date
from os import environ
PARAMS_LESSON_PLAN = [
(
date(2018, 9, 4),
[
{"IdPrzedmiot": 173, "IdPracownik": 99},
{"IdPrzedmiot": 123, "IdPracownik": 101},
{"IdPrzedmiot": 172, "IdPracownik": 92},
{"IdPrzedmiot": 189, "IdPracownik": 91},
{"IdPrzedmiot": 119, "IdPracownik": 100},
{"IdPrzedmiot": 175, "IdPracownik": 97},
{"IdPrzedmiot": 118, "IdPracownik": 89},
],
)
]
PARAMS_TESTS = [
(date(2018, 10, 5), [{"Id": 661, "IdPrzedmiot": 177, "IdPracownik": 87}]),
(
date(2018, 10, 23),
[
{"Id": 798, "IdPrzedmiot": 173, "IdPracownik": 99},
{"Id": 838, "IdPrzedmiot": 172, "IdPracownik": 92},
],
),
]
PARAMS_HOMEWORKS = [
(
date(2018, 10, 23),
[
{"Id": 305, "IdPracownik": 100, "IdPrzedmiot": 119},
{"Id": 306, "IdPracownik": 100, "IdPrzedmiot": 119},
],
)
]
def load_variable(name):
return environ.get(name)
| 24.813953 | 78 | 0.492034 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 332 | 0.311153 |
9e5eaad811b723cd9fbdf58606b08cc92c36666b | 886 | py | Python | setup.py | utahta/pyvbcode | 5708f5563016578576a48cf7374470c4e5c11825 | [
"MIT"
]
| 3 | 2018-10-14T12:38:49.000Z | 2021-06-05T08:13:42.000Z | setup.py | utahta/pyvbcode | 5708f5563016578576a48cf7374470c4e5c11825 | [
"MIT"
]
| 1 | 2017-07-02T15:27:45.000Z | 2017-10-28T20:52:54.000Z | setup.py | utahta/pyvbcode | 5708f5563016578576a48cf7374470c4e5c11825 | [
"MIT"
]
| 5 | 2016-12-26T08:06:24.000Z | 2020-02-22T17:20:16.000Z | # vim:fileencoding=utf8
from distutils.core import setup
import os
README = os.path.join(os.path.dirname(__file__),'PKG-INFO')
long_description = open(README).read() + "\n"
setup(name="vbcode",
version='0.2.0',
py_modules=['vbcode'],
description="Variable byte codes",
author="utahta",
author_email = "[email protected]",
long_description=long_description,
classifiers=["Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: Python Software Foundation License",
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
"Natural Language :: Japanese"
],
url="https://github.com/utahta/pyvbcode",
license="MIT"
)
| 36.916667 | 83 | 0.595937 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 418 | 0.471783 |
9e5f5a16f32d2c7ad12cdebabca7ff18c984b6b6 | 1,221 | py | Python | cogs/testing_cog.py | Critteros/DzwoneczekBOT | 4f6100cf26f430521247f494620c9a2ceda1f362 | [
"Apache-2.0"
]
| null | null | null | cogs/testing_cog.py | Critteros/DzwoneczekBOT | 4f6100cf26f430521247f494620c9a2ceda1f362 | [
"Apache-2.0"
]
| null | null | null | cogs/testing_cog.py | Critteros/DzwoneczekBOT | 4f6100cf26f430521247f494620c9a2ceda1f362 | [
"Apache-2.0"
]
| null | null | null | """
Extension designed to test bot functionality, just for testing
"""
# Library includes
from discord.ext import commands
# App includes
from app.client import BotClient
class TestCog(commands.Cog):
"""
Class cog for the testing_cog cog extension
"""
def __init__(self, client: BotClient):
self.client: BotClient = client
self.log = client.log
@commands.command(help='test', brief='Testing command')
async def echo(self, ctx: commands.Context, *args):
"""
        Testing function designed to print context to logging output
Args:
ctx (commands.Context): Context of invocation
"""
log = self.log
log.debug('Executing echo command')
log.debug(f'Context is: {ctx.__dict__}')
log.debug(f'Context type is {type(ctx)}')
log.debug(f'Context message: {ctx.args}')
        log.debug(f'data is: /{args}/\n data type is {type(args)}')
await ctx.message.reply("Hi <:python:815369954224373760>")
def setup(client):
"""
Setup function for testing_cog extension
Args:
client (app.client.BotClient): Client that connects to discord API
"""
client.add_cog(TestCog(client))
| 24.42 | 74 | 0.63964 | 840 | 0.687961 | 0 | 0 | 628 | 0.514333 | 568 | 0.465192 | 674 | 0.552007 |
9e5f866f7cec9044c5ffc4636fdb2a689ffe67a2 | 3,221 | py | Python | src/pdfDownloader.py | dna33/covid19-pdfocr | 66f11fc7eb3d4f0146d04344a112578bc3149a02 | [
"MIT"
]
| 1 | 2021-08-16T22:21:30.000Z | 2021-08-16T22:21:30.000Z | src/pdfDownloader.py | dna33/covid19-pdfocr | 66f11fc7eb3d4f0146d04344a112578bc3149a02 | [
"MIT"
]
| null | null | null | src/pdfDownloader.py | dna33/covid19-pdfocr | 66f11fc7eb3d4f0146d04344a112578bc3149a02 | [
"MIT"
]
| null | null | null | import urllib3
from bs4 import BeautifulSoup
import shutil
import re
import os
def obtenerReporteDiario(reporte_url, path):
req = urllib3.PoolManager()
res = req.request('GET', reporte_url)
soup = BeautifulSoup(res.data, features="html.parser")
pdfs = []
for link_soup in soup.find_all('a'):
link = str(link_soup.get('href'))
regex_pdf = re.compile(r"(reporte_covid19)[\w\-]*\.pdf", re.IGNORECASE)
pdf_match = re.search(regex_pdf, link)
if pdf_match:
pdf_file = f'{path}{os.path.basename(link)}'
if not os.path.isfile(pdf_file):
with req.request('GET', link, preload_content=False) as res, open(pdf_file, 'wb') as pfopen:
shutil.copyfileobj(res, pfopen)
pdfs.append(os.path.basename(link))
else:
print(pdf_file + ' already downloaded ')
return pdfs
def obtenerInformeEpidemiologico(reporte_url, path):
req = urllib3.PoolManager()
res = req.request('GET', reporte_url)
soup = BeautifulSoup(res.data, features="html.parser")
pdfs = []
for link_soup in soup.find_all('a'):
link = str(link_soup.get('href'))
#regex_pdf = re.compile(r"(informe|reporte)[\w\-]*\.pdf", re.IGNORECASE)
regex_pdf = re.compile(r"(epi|ep_)[\w\-]*\.pdf", re.IGNORECASE)
pdf_match = re.search(regex_pdf, link)
if pdf_match:
pdf_file = f'{path}{os.path.basename(link)}'
if not os.path.isfile(pdf_file):
print('Downloading ' + pdf_file)
with req.request('GET', link, preload_content=False) as res, open(pdf_file, 'wb') as pfopen:
shutil.copyfileobj(res, pfopen)
pdfs.append(os.path.basename(link))
else:
print(pdf_file + ' already downloaded ')
return pdfs
def obtenerSituacionCOVID19(reporte_url, path):
req = urllib3.PoolManager()
res = req.request('GET', reporte_url)
soup = BeautifulSoup(res.data, features="html.parser")
pdfs = []
for link_soup in soup.find_all('a'):
link = str(link_soup.get('href'))
regex_pdf = re.compile(r"(informe|reporte)[\w\-]*\.pdf", re.IGNORECASE)
pdf_match = re.search(regex_pdf, link)
if pdf_match:
pdf_file = f'{path}{os.path.basename(link)}'
if not os.path.isfile(pdf_file):
print('Downloading ' + pdf_file)
with req.request('GET', link, preload_content=False) as res, open(pdf_file, 'wb') as pfopen:
shutil.copyfileobj(res, pfopen)
pdfs.append(os.path.basename(link))
else:
print(pdf_file + ' already downloaded ')
return pdfs
if __name__ == '__main__':
#https://www.minsal.cl/nuevo-coronavirus-2019-ncov/informe-epidemiologico-covid-19/
obtenerInformeEpidemiologico('https://www.gob.cl/coronavirus/cifrasoficiales/', '../input/InformeEpidemiologico/')
obtenerReporteDiario('https://www.gob.cl/coronavirus/cifrasoficiales/', '../input/ReporteDiario/')
obtenerSituacionCOVID19('http://epi.minsal.cl/informes-covid-19/', '../input/InformeSituacionCOVID19/')
| 39.765432 | 118 | 0.616889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 786 | 0.244024 |
9e5ff0af4ee8d2c0f56518f7dfc6f17b87b1d4b4 | 44,126 | py | Python | setup.py | amahoro12/anne | 9b68c71c491bde4f57c2cbbf78a377239a9026d8 | [
"MIT"
]
| null | null | null | setup.py | amahoro12/anne | 9b68c71c491bde4f57c2cbbf78a377239a9026d8 | [
"MIT"
]
| null | null | null | setup.py | amahoro12/anne | 9b68c71c491bde4f57c2cbbf78a377239a9026d8 | [
"MIT"
]
| null | null | null | ## This script sets up classes for the 4 bus and 2 bus environments
import pandapower as pp
import pandapower.networks as nw
import pandapower.plotting as plot
import enlopy as el
import numpy as np
import pandas as pd
import pickle
import copy
import math
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import pandapower.control as ct
import statistics as stat
from FACTScontrol import SeriesFACTS, ShuntFACTS
pd.options.display.float_format = '{:.4g}'.format
### This 4-bus class is not complete as of handover to ABB PG and Magnus Tarle.
# The 2-bus class further below is however complete.
class powerGrid_ieee4:
def __init__(self, numberOfTimeStepsPerState=4):
print('in init. Here we lay down the grid structure and load some random state values based on IEEE dataset');
with open('Data/JanLoadEvery5mins.pkl', 'rb') as pickle_file:
self.loadProfile = pickle.load(pickle_file)
with open('Data/generatorValuesEvery5mins.pkl', 'rb') as pickle_file:
self.powerProfile = pickle.load(pickle_file)
with open('Data/trainIndices.pkl', 'rb') as pickle_file:
self.trainIndices = pickle.load(pickle_file)
with open('Data/testIndices.pkl', 'rb') as pickle_file:
self.testIndices = pickle.load(pickle_file)
self.k_old=0;
self.q_old=0;
self.actionSpace = {'v_ref_pu': [i*5 / 100 for i in range(16, 25)], 'lp_ref': [i * 5 for i in range(0, 31)]}
## Basic ieee 4bus system
self.net = pp.networks.case4gs();
####Shunt FACTS device (bus 1)
# MV bus
bus_SVC = pp.create_bus(self.net, name='MV SVCtrafo bus', vn_kv=69, type='n', geodata=(-2, 2.5), zone=2,
max_vm_pu=1.1,
min_vm_pu=0.9)
# Trafo
trafoSVC = pp.create_transformer_from_parameters(self.net, hv_bus=1, lv_bus=4, in_service=True,
name='trafoSVC', sn_mva=110, vn_hv_kv=230, vn_lv_kv=69,
vk_percent=12, vkr_percent=0.26, pfe_kw=55, i0_percent=0.06,
shift_degree=0, tap_side='hv', tap_neutral=0, tap_min=-9,
tap_max=9,
tap_step_percent=1.5, tap_step_degree=0,
tap_phase_shifter=False)
# Tap changer usually not used on this trafo in real life implementation
#trafo_control = ct.DiscreteTapControl(net=self.net, tid=0, vm_lower_pu=0.95, vm_upper_pu=1.05)
# Breaker between grid HV bus and trafo HV bus to connect buses
sw_SVC = pp.create_switch(self.net, bus=1, element=0, et='t', type='CB', closed=False)
# Shunt device connected with MV bus
shuntDev = pp.create_shunt(self.net, bus_SVC, 0, in_service=True, name='Shunt Device', step=1)
##Series device (at line 3, in middle between bus 2 and 3)
# Add intermediate buses for bypass and series compensation impedance
bus_SC1 = pp.create_bus(self.net, name='SC bus 1', vn_kv=230, type='n', geodata=(-1, 3.1), zone=2,
max_vm_pu=1.1, min_vm_pu=0.9)
bus_SC2 = pp.create_bus(self.net, name='SC bus 2', vn_kv=230, type='n', geodata=(-1, 3.0), zone=2,
max_vm_pu=1.1, min_vm_pu=0.9)
sw_SC_bypass = pp.create_switch(self.net, bus=5, element=6, et='b', type='CB', closed=True)
imp_SC = pp.create_impedance(self.net, from_bus=5, to_bus=6, rft_pu=0.01272, xft_pu=-0.0636,
rtf_pu=0.01272, xtf_pu=-0.0636, sn_mva=250, in_service=True)
        # Adjust original Line 3 to connect to new buses instead.
self.net.line.at[3, ['length_km', 'to_bus', 'name']] = [0.5, 5, 'line1_SC']
lineSC2 = pp.create_line_from_parameters(self.net, name='line2_SC',
c_nf_per_km=self.net.line.at[3, 'c_nf_per_km'],
df=self.net.line.at[3, 'df'], from_bus=6,
g_us_per_km=self.net.line.at[3, 'g_us_per_km'],
in_service=self.net.line.at[3, 'in_service'], length_km=0.5,
max_i_ka=self.net.line.at[3, 'max_i_ka'],
max_loading_percent=self.net.line.at[3, 'max_loading_percent'],
parallel=self.net.line.at[3, 'parallel'],
r_ohm_per_km=self.net.line.at[3, 'r_ohm_per_km'],
std_type=self.net.line.at[3, 'std_type'], to_bus=3,
type=self.net.line.at[3, 'type'],
x_ohm_per_km=self.net.line.at[3, 'x_ohm_per_km']);
# Change PV generator to static generator
self.net.gen.drop(index=[0], inplace=True) # Drop PV generator
pp.create_sgen(self.net, 3, p_mw=318, q_mvar=181.4, name='static generator', scaling=1)
# Randomize starting index in load/gen profiles
self.numberOfTimeStepsPerState=numberOfTimeStepsPerState;
self.stateIndex = np.random.randint(len(self.loadProfile)-self.numberOfTimeStepsPerState, size=1)[0];
#self.stateIndex=0
self.scaleLoadAndPowerValue(self.stateIndex);
try:
pp.runpp(self.net, run_control=False)
print('Environment has been successfully initialized');
except:
            print('Some error occurred while creating environment');
raise Exception('cannot proceed at these settings. Please fix the environment settings');
# Power flow calculation, runControl = True gives shunt device trafo tap changer iterative control activated
def runEnv(self, runControl):
try:
pp.runpp(self.net, run_control=runControl);
#print('Environment has been successfully initialized');
except:
print('Some error occurred while creating environment');
raise Exception('cannot proceed at these settings. Please fix the environment settings');
    ## Retrieve voltage and line loading percent as measurements of current state
def getCurrentState(self):
bus_index_shunt = 1
line_index = 1;
return (self.net.res_bus.vm_pu[bus_index_shunt], self.net.res_line.loading_percent[line_index]);
## Retrieve measurements for multiple buses, including load angle for DQN as well
def getCurrentStateForDQN(self):
return [self.net.res_bus.vm_pu[1:-3], self.net.res_line.loading_percent[0:], self.net.res_bus.va_degree[1:-3]];
    ## UPDATE NEEDED:
def takeAction(self, lp_ref, v_ref_pu):
#q_old = 0
bus_index_shunt = 1
line_index=3;
impedenceBackup = self.net.impedance.loc[0, 'xtf_pu'];
shuntBackup = self.net.shunt.q_mvar
self.net.switch.at[1, 'closed'] = False
self.net.switch.at[0, 'closed'] = True
        ##shunt compensation
q_comp = self.Shunt_q_comp(v_ref_pu, bus_index_shunt, self.q_old);
self.q_old = q_comp;
self.net.shunt.q_mvar = q_comp;
##series compensation
k_x_comp_pu = self.K_x_comp_pu(lp_ref, 1, self.k_old);
self.k_old = k_x_comp_pu;
x_line_pu = self.X_pu(line_index)
self.net.impedance.loc[0, ['xft_pu', 'xtf_pu']] = x_line_pu * k_x_comp_pu
networkFailure = False
self.stateIndex += 1;
if self.stateIndex < len(self.powerProfile):
self.scaleLoadAndPowerValue(self.stateIndex);
try:
pp.runpp(self.net, run_control=True);
reward = self.calculateReward(self.net.res_bus.vm_pu, self.net.res_line.loading_percent);
except:
print('Unstable environment settings');
networkFailure = True;
reward = -1000;
return (self.net.res_bus.vm_pu[bus_index_shunt], self.net.res_line.loading_percent[line_index]), reward, self.stateIndex == len(self.powerProfile) or networkFailure;
##Function to calculate line reactance in pu
def X_pu(self, line_index):
s_base = 100e6
v_base = 230e3
x_base = pow(v_base, 2) / s_base
x_line_ohm = self.net.line.x_ohm_per_km[line_index]
        x_line_pu = x_line_ohm / x_base # Can take one since this line is divided into
# 2 identical lines with length 0.5 km
return x_line_pu
def reset(self):
print('reset the current environment for next episode');
oldIndex = self.stateIndex;
self.stateIndex = np.random.randint(len(self.loadProfile)-1, size=1)[0];
self.net.switch.at[0, 'closed'] = False
self.net.switch.at[1, 'closed'] = True
self.k_old = 0;
self.q_old = 0;
self.scaleLoadAndPowerValue(self.stateIndex);
try:
pp.runpp(self.net, run_control=False);
print('Environment has been successfully initialized');
except:
print('Some error occurred while resetting the environment');
raise Exception('cannot proceed at these settings. Please fix the environment settings');
# Calculate immediate reward with loadangle as optional
def calculateReward(self, voltages, loadingPercent, loadAngles=10):
try:
rew = 0;
            for i in range(1, len(voltages)-2): # Don't need to include bus 0 as it is the slack with constant voltage and angle
                # -2 because we don't want to include the buses created for FACTS device implementation (3 of them)
if voltages[i] > 1.25 or voltages[i] < 0.8:
rew -= 50;
elif voltages[i] > 1.1 or voltages[i] < 0.9:
rew -= 25;
elif voltages[i] > 1.05 or voltages[i] < 0.95:
rew -= 10;
elif voltages[i] > 1.025 or voltages[i] < 0.975:
rew += 10;
else:
rew += 20;
rew = rew
loadingPercentInstability = np.std(loadingPercent) * len(loadingPercent);
rew -= loadingPercentInstability
# Check load angle
for i in range(1, len(loadAngles)-2):
if abs(loadAngles[i]) >= 30:
rew -= 200
except:
print('exception in calculate reward')
print(voltages);
print(loadingPercent)
return 0;
return rew
## Simple plot of one-line diagram
def plotGridFlow(self):
print('plotting powerflow for the current state')
plot.simple_plot(self.net)
## Scale load and generation from load and generation profiles
## Update Needed (Nominal Values)
def scaleLoadAndPowerValue(self,index):
scalingFactorLoad = self.loadProfile[index] / (sum(self.loadProfile)/len(self.loadProfile));
scalingFactorPower = self.powerProfile[index] / max(self.powerProfile);
# Scaling all loads and the static generator
self.net.load.p_mw = self.net.load.p_mw * scalingFactorLoad;
self.net.load.q_mvar = self.net.load.q_mvar * scalingFactorLoad;
self.net.sgen.p_mw = self.net.sgen.p_mw * scalingFactorPower;
self.net.sgen.q_mvar = self.net.sgen.q_mvar * scalingFactorPower;
## UPDATE NEEDED:
##Function for transition from reference power to reactance of series device
def K_x_comp_pu(self, loading_perc_ref, line_index, k_old):
##NEW VERSION TEST:
c = 15 # Coefficient for transition
k_x_comp_max_ind = 0.4
k_x_comp_max_cap = -k_x_comp_max_ind
loading_perc_meas = self.net.res_line.loading_percent[line_index]
k_delta = (c * k_x_comp_max_ind * (
loading_perc_meas - loading_perc_ref) / 100) - k_old # 100 To get percentage in pu
k_x_comp = k_delta + k_old
# Bypassing series device if impedance close to 0
if abs(k_x_comp) < 0.0001: # Helping with convergence
self.net.switch.closed[1] = True # ACTUAL network, not a copy
# Make sure output within rating of device
if k_x_comp > k_x_comp_max_ind:
k_x_comp = k_x_comp_max_ind
if k_x_comp < k_x_comp_max_cap:
k_x_comp = k_x_comp_max_cap
return k_x_comp
## UPDATE NEEDED:
## Function for transition from reference parameter to reactive power output of shunt device
def Shunt_q_comp(self, v_ref_pu, bus_index, q_old):
v_bus_pu = self.net.res_bus.vm_pu[bus_index]
k = 25 # Coefficient for transition, tuned to hit 1 pu with nominal IEEE
q_rated = 100 # Mvar
q_min = -q_rated
q_max = q_rated
q_delta = k * q_rated * (
v_bus_pu - v_ref_pu) - q_old # q_old might come in handy later with RL if able to take actions without
# independent change in environment
q_comp = q_delta + q_old
if q_comp > q_max:
q_comp = q_max
if q_comp < q_min:
q_comp = q_min
# print(q_comp)
return q_comp
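    # Hedged illustration of Shunt_q_comp (assumed values, not from the original file):
    # with v_bus_pu = 1.02, v_ref_pu = 1.00 and q_old = 0, q_delta = 25 * 100 * 0.02 = 50,
    # so the shunt is asked for +50 Mvar (consumption in pandapower's sign convention),
    # which pulls the bus voltage back down towards the reference; the output is clamped
    # to +/-100 Mvar.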
#The class for the 2-bus test network used in the Master Thesis by Joakim Oldeen & Vishnu Sharma.
#The class also includes several methods used by different RL algorithms such as taking action, calculating reward, receiving states and more
class powerGrid_ieee2:
def __init__(self,method):
#print('in init. Here we lay down the grid structure and load some random state values based on IEEE dataset');
self.method=method;
if self.method in ('dqn','ddqn','td3'):
self.errorState=[-2, -1000, -90];
self.numberOfTimeStepsPerState=3
else:
self.errorState=[-2,-1000];
self.numberOfTimeStepsPerState=1
with open('Data/JanLoadEvery5mins.pkl', 'rb') as pickle_file:
self.loadProfile = pickle.load(pickle_file)
with open('Data/generatorValuesEvery5mins.pkl', 'rb') as pickle_file:
self.powerProfile = pickle.load(pickle_file)
with open('Data/trainIndices.pkl', 'rb') as pickle_file:
self.trainIndices = pickle.load(pickle_file)
with open('Data/testIndices.pkl', 'rb') as pickle_file:
self.testIndices = pickle.load(pickle_file)
self.testIndices = [860,860,860]
self.actionSpace = {'v_ref_pu': [i*5 / 100 for i in range(18, 23)], 'lp_ref': [i * 15 for i in range(0, 11)]}
#self.deepActionSpace = {'v_ref_pu': [i/ 100 for i in range(90, 111)], 'lp_ref': [i * 5 for i in range(0, 31)]}
self.deepActionSpace = {'v_ref_pu': [i*2/100 for i in range(45, 56)], 'lp_ref': [i * 10 for i in range(0, 16)]}
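        # Discretisation of the deep action space: v_ref_pu spans 0.90-1.10 pu in 0.02 pu
        # steps (11 values) and lp_ref spans 0-150 % in 10 % steps (16 values), giving
        # 11 x 16 = 176 discrete (v_ref, lp_ref) combinations.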
self.k_old = 0;
self.q_old = 0;
## Basic ieee 4bus system to copy parts from
net_temp = pp.networks.case4gs();
# COPY PARAMETERS FROM TEMP NETWORK TO USE IN 2 BUS RADIAL SYSTEM.
# BUSES
b0_in_service = net_temp.bus.in_service[0]
b0_max_vm_pu = net_temp.bus.max_vm_pu[0]
b0_min_vm_pu = net_temp.bus.min_vm_pu[0]
b0_name = net_temp.bus.name[0]
b0_type = net_temp.bus.type[0]
b0_vn_kv = net_temp.bus.vn_kv[0]
b0_zone = net_temp.bus.zone[0]
b0_geodata = (3, 2)
b1_in_service = net_temp.bus.in_service[1]
b1_max_vm_pu = net_temp.bus.max_vm_pu[1]
b1_min_vm_pu = net_temp.bus.min_vm_pu[1]
b1_name = net_temp.bus.name[1]
b1_type = net_temp.bus.type[1]
b1_vn_kv = net_temp.bus.vn_kv[1]
b1_zone = net_temp.bus.zone[1]
b1_geodata = (4, 2)
# BUS ELEMENTS
load_bus = net_temp.load.bus[1]
load_in_service = net_temp.load.in_service[1]
load_p_mw = net_temp.load.p_mw[1]
load_q_mvar = net_temp.load.q_mvar[1]
load_scaling = net_temp.load.scaling[1]
extGrid_bus = net_temp.ext_grid.bus[0]
extGrid_in_service = net_temp.ext_grid.in_service[0]
extGrid_va_degree = net_temp.ext_grid.va_degree[0]
extGrid_vm_pu = net_temp.ext_grid.vm_pu[0]
extGrid_max_p_mw = net_temp.ext_grid.max_p_mw[0]
extGrid_min_p_mw = net_temp.ext_grid.min_p_mw[0]
extGrid_max_q_mvar = net_temp.ext_grid.max_q_mvar[0]
extGrid_min_q_mvar = net_temp.ext_grid.min_q_mvar[0]
# LINES
line0_scaling = 1
line0_c_nf_per_km = net_temp.line.c_nf_per_km[0]
line0_df = net_temp.line.df[0]
line0_from_bus = net_temp.line.from_bus[0]
line0_g_us_per_km = net_temp.line.g_us_per_km[0]
line0_in_service = net_temp.line.in_service[0]
line0_length_km = net_temp.line.length_km[0]
line0_max_i_ka = net_temp.line.max_i_ka[0]
line0_max_loading_percent = net_temp.line.max_loading_percent[0]
line0_parallel = net_temp.line.parallel[0]
line0_r_ohm_per_km = net_temp.line.r_ohm_per_km[0] * line0_scaling
line0_to_bus = net_temp.line.to_bus[0]
line0_type = net_temp.line.type[0]
line0_x_ohm_per_km = net_temp.line.x_ohm_per_km[0] * line0_scaling
line1_scaling = 1.2
line1_c_nf_per_km = line0_c_nf_per_km
line1_df = line0_df
line1_from_bus = line0_from_bus
line1_g_us_per_km = line0_g_us_per_km
line1_in_service = line0_in_service
line1_length_km = line0_length_km
line1_max_i_ka = line0_max_i_ka
line1_max_loading_percent = line0_max_loading_percent
line1_parallel = line0_parallel
line1_r_ohm_per_km = line0_r_ohm_per_km
line1_to_bus = line0_to_bus
line1_type = line0_type
line1_x_ohm_per_km = line0_x_ohm_per_km * line1_scaling # Assume that the lines are identical except for line reactance
## creating 2 bus system using nominal values from 4 bus system
self.net = pp.create_empty_network()
# Create buses
b0 = pp.create_bus(self.net, in_service=b0_in_service, max_vm_pu=b0_max_vm_pu, min_vm_pu=b0_min_vm_pu,
name=b0_name, type=b0_type, vn_kv=b0_vn_kv, zone=b0_zone, geodata=b0_geodata)
b1 = pp.create_bus(self.net, in_service=b1_in_service, max_vm_pu=b1_max_vm_pu, min_vm_pu=b1_min_vm_pu,
name=b1_name, type=b1_type, vn_kv=b1_vn_kv, zone=b1_zone, geodata=b1_geodata)
# Create bus elements
load = pp.create_load(self.net, bus=load_bus, in_service=load_in_service,
p_mw=load_p_mw, q_mvar=load_q_mvar, scaling=load_scaling)
extGrid = pp.create_ext_grid(self.net, bus=extGrid_bus, in_service=extGrid_in_service,
va_degree=extGrid_va_degree,
vm_pu=extGrid_vm_pu, max_p_mw=extGrid_max_p_mw, min_p_mw=extGrid_min_p_mw,
max_q_mvar=extGrid_max_q_mvar, min_q_mvar=extGrid_min_q_mvar)
# Create lines
l0 = pp.create_line_from_parameters(self.net, c_nf_per_km=line0_c_nf_per_km, df=line0_df, from_bus=line0_from_bus,
g_us_per_km=line0_g_us_per_km, in_service=line0_in_service,
length_km=line0_length_km,
max_i_ka=line0_max_i_ka, max_loading_percent=line0_max_loading_percent,
parallel=line0_parallel, r_ohm_per_km=line0_r_ohm_per_km,
to_bus=line0_to_bus,
type=line0_type, x_ohm_per_km=line0_x_ohm_per_km)
l1 = pp.create_line_from_parameters(self.net, c_nf_per_km=line1_c_nf_per_km, df=line1_df, from_bus=line1_from_bus,
g_us_per_km=line1_g_us_per_km, in_service=line1_in_service,
length_km=line1_length_km,
max_i_ka=line1_max_i_ka, max_loading_percent=line1_max_loading_percent,
parallel=line1_parallel, r_ohm_per_km=line1_r_ohm_per_km,
to_bus=line1_to_bus,
type=line1_type, x_ohm_per_km=line1_x_ohm_per_km)
####Shunt FACTS device (bus 1)
# MV bus
bus_SVC = pp.create_bus(self.net, name='MV SVCtrafo bus', vn_kv=69, type='n', geodata=(4.04, 1.98), zone=2,
max_vm_pu=1.1,
min_vm_pu=0.9)
# Trafo
trafoSVC = pp.create_transformer_from_parameters(self.net, hv_bus=1, lv_bus=2, in_service=True,
name='trafoSVC', sn_mva=110, vn_hv_kv=230, vn_lv_kv=69,
vk_percent=12, vkr_percent=0.26, pfe_kw=55, i0_percent=0.06,
shift_degree=0, tap_side='hv', tap_neutral=0, tap_min=-9,
tap_max=9,
tap_step_percent=1.5, tap_step_degree=0,
tap_phase_shifter=False)
# TAP Changer on shunt device usually not used in Real life implementation.
#trafo_control = ct.DiscreteTapControl(net=self.net, tid=0, vm_lower_pu=0.95, vm_upper_pu=1.05)
# Breaker between grid HV bus and trafo HV bus to connect buses
sw_SVC = pp.create_switch(self.net, bus=1, element=0, et='t', type='CB', closed=False)
# Shunt devices connected with MV bus
shuntDev = pp.create_shunt(self.net, bus_SVC, 2, in_service=True, name='Shunt Device', step=1)
####Series device (at line 1, in middle between bus 0 and 1)
# Add intermediate buses for bypass and series compensation impedance
bus_SC1 = pp.create_bus(self.net, name='SC bus 1', vn_kv=230, type='n', geodata=(3.48, 2.05),
zone=2, max_vm_pu=1.1, min_vm_pu=0.9)
bus_SC2 = pp.create_bus(self.net, name='SC bus 2', vn_kv=230, type='n', geodata=(3.52, 2.05),
zone=2, max_vm_pu=1.1, min_vm_pu=0.9)
sw_SC_bypass = pp.create_switch(self.net, bus=3, element=4, et='b', type='CB', closed=True)
imp_SC = pp.create_impedance(self.net, from_bus=3, to_bus=4, rft_pu=0.0000001272, xft_pu=-0.0636*0.4,
rtf_pu=0.0000001272, xtf_pu=-0.0636*0.4, sn_mva=250,
in_service=True) # Just some default values
        # Adjust original Line 3 to connect to new buses instead.
self.net.line.at[1, ['length_km', 'to_bus', 'name']] = [0.5, 3, 'line1_SC']
self.nominalP=self.net.load.p_mw[0]
self.nominalQ=self.net.load.q_mvar[0]
## select a random state for the episode
#self.stateIndex = np.random.randint(len(self.loadProfile)-1-self.numberOfTimeStepsPerState, size=1)[0];
def setMode(self,mode):
if mode=='train':
self.source=self.trainIndices;
else:
self.source=self.testIndices;
self.stateIndex = self.getstartingIndex()
self.scaleLoadAndPowerValue(self.stateIndex);
try:
pp.runpp(self.net, run_control=False);
print('Environment has been successfully initialized');
# Create SHUNT controllers
self.shuntControl = ShuntFACTS(net=self.net, busVoltageInd=1, convLim=0.0005)
self.seriesControl = SeriesFACTS(net=self.net, lineLPInd=1, convLim=0.0005, x_line_pu=self.X_pu(1))
except:
print('Some error occurred while creating environment');
raise Exception('cannot proceed at these settings. Please fix the environment settings');
def getstartingIndex(self):
index = np.random.randint(len(self.source), size=1)[0];
if self.source[index] + self.numberOfTimeStepsPerState < len(self.loadProfile):
return self.source[index];
else:
return self.getstartingIndex()
# Power flow calculation, runControl = True gives shunt device trafo tap changer iterative control activated
def runEnv(self, runControl):
try:
pp.runpp(self.net, run_control=runControl);
#print('Environment has been successfully initialized');
except:
#print(self.net.load.p_mw[0],self.net.load.q_mvar[0]);
#print(self.stateIndex)
#print(len(self.powerProfile))
if runControl:
print('Some error occurred while running environment after load increment in runEnv Function in DQN');
else:
print('Some error occurred while running environment after reset in runEnv Function in DQN');
raise Exception('cannot proceed at these settings. Please fix the environment settings');
    ## Retrieve voltage and line loading percent as measurements of current state
def getCurrentState(self):
bus_index_shunt = 1
line_index = 1;
return [self.net.res_bus.vm_pu[bus_index_shunt], self.net.res_line.loading_percent[line_index]];
def getCurrentStateForDQN(self):
bus_index_shunt = 1
line_index = 1;
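        # The loading percent is scaled by 1/150 and the bus angle by 1/30, presumably so
        # that all three state features are of comparable magnitude; 30 degrees is also
        # the angle limit used in calculateReward().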
return [self.net.res_bus.vm_pu[bus_index_shunt], self.net.res_line.loading_percent[line_index]/150, self.net.res_bus.va_degree[bus_index_shunt]/30];
    # Return the mean line loading in the system, emulating what the system operator would have set the loading reference to.
def lp_ref_operator(self):
return stat.mean(self.net.res_line.loading_percent)
## Take epsilon-greedy action
## Return next state measurements, reward, done (boolean)
def takeAction(self, lp_ref, v_ref_pu):
# print('taking action')
stateAfterAction = copy.deepcopy(self.errorState);
stateAfterEnvChange = copy.deepcopy(self.errorState);
measAfterAction = [-2, -1000, -1000]
self.net.switch.at[0, 'closed'] = True
self.net.switch.at[1, 'closed'] = False
if lp_ref != 'na' and v_ref_pu != 'na':
self.shuntControl.ref = v_ref_pu;
self.seriesControl.ref = lp_ref;
networkFailure = False
done = False;
bus_index_shunt = 1;
line_index = 1;
if self.stateIndex < min(len(self.powerProfile), len(self.loadProfile)):
try:
dummyRes = (self.net.res_bus.vm_pu, self.net.res_line.loading_percent)
## state = (voltage,ll,angle,p,q)
pp.runpp(self.net, run_control=True);
if self.method in ('dqn', 'ddqn','td3'):
reward1 = self.calculateReward(self.net.res_bus.vm_pu, self.net.res_line.loading_percent,
self.net.res_bus.va_degree[bus_index_shunt]);
stateAfterAction = self.getCurrentStateForDQN()
else:
reward1 = self.calculateReward(self.net.res_bus.vm_pu, self.net.res_line.loading_percent);
stateAfterAction = self.getCurrentState()
#print('rew1: ', reward1)
measAfterAction = [self.net.res_bus.vm_pu[1], max(self.net.res_line.loading_percent), np.std(self.net.res_line.loading_percent)]
done = self.stateIndex == (len(self.powerProfile) - 1)
if done == False:
self.incrementLoadProfile()
if self.method in ('dqn', 'ddqn','td3'):
reward2 = self.calculateReward(self.net.res_bus.vm_pu, self.net.res_line.loading_percent,
self.net.res_bus.va_degree[bus_index_shunt]);
stateAfterEnvChange = self.getCurrentStateForDQN()
else:
reward2 = self.calculateReward(self.net.res_bus.vm_pu, self.net.res_line.loading_percent);
stateAfterEnvChange = self.getCurrentState()
#print('rew2: ',reward2)
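                    # Weighted blend: the state reached right after the FACTS action
                    # (reward1) carries 70 % of the reward, the state after the next
                    # load-profile increment (reward2) the remaining 30 %.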
reward = 0.7 * reward1 + 0.3 * reward2;
except:
print('Unstable environment settings in takeAction(). Action: ', (lp_ref, v_ref_pu), 'p_mw: ', self.net.load.p_mw[0]);
print('shunt, series, series switch: ', self.net.shunt.q_mvar[0], self.net.impedance.loc[0, ['xft_pu']], self.net.switch.closed[1])
#print(stateAfterEnvChange)
#print(stateAfterAction)
#print(lp_ref,v_ref_pu)
# print(dummyRes)
#print(self.net.load.p_mw[0],self.net.load.q_mvar[0]);
networkFailure = True;
reward = 0;
# return stateAfterAction, reward, networkFailure,stateAfterEnvChange ;
else:
print('wrong block!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
stateAfterEnvChange.extend(stateAfterAction)
# print(self.errorState)
# print(reward2)
#print('totrew: ', reward)
return stateAfterEnvChange, reward, done or networkFailure, measAfterAction;
    ## Same as takeAction but without the try/except, for debugging
def takeAction_noTry(self, lp_ref, v_ref_pu):
# print('taking action')
stateAfterAction = copy.deepcopy(self.errorState);
stateAfterEnvChange = copy.deepcopy(self.errorState);
measAfterAction = [-2, -1000, -1000]
self.net.switch.at[0, 'closed'] = True
self.net.switch.at[1, 'closed'] = False
if lp_ref != 'na' and v_ref_pu != 'na':
self.shuntControl.ref = v_ref_pu;
self.seriesControl.ref = lp_ref;
networkFailure = False
done = False;
bus_index_shunt = 1;
line_index = 1;
if self.stateIndex < min(len(self.powerProfile), len(self.loadProfile)):
dummyRes = (self.net.res_bus.vm_pu, self.net.res_line.loading_percent)
## state = (voltage,ll,angle,p,q)
pp.runpp(self.net, run_control=True);
if self.method in ('dqn', 'ddqn', 'td3'):
reward1 = self.calculateReward(self.net.res_bus.vm_pu, self.net.res_line.loading_percent,
self.net.res_bus.va_degree[bus_index_shunt]);
stateAfterAction = self.getCurrentStateForDQN()
else:
reward1 = self.calculateReward(self.net.res_bus.vm_pu, self.net.res_line.loading_percent);
stateAfterAction = self.getCurrentState()
# print('rew1: ', reward1)
measAfterAction = [self.net.res_bus.vm_pu[1], max(self.net.res_line.loading_percent),
np.std(self.net.res_line.loading_percent)]
done = self.stateIndex == (len(self.powerProfile) - 1)
if done == False:
self.incrementLoadProfile()
if self.method in ('dqn', 'ddqn', 'td3'):
reward2 = self.calculateReward(self.net.res_bus.vm_pu, self.net.res_line.loading_percent,
self.net.res_bus.va_degree[bus_index_shunt]);
stateAfterEnvChange = self.getCurrentStateForDQN()
else:
reward2 = self.calculateReward(self.net.res_bus.vm_pu, self.net.res_line.loading_percent);
stateAfterEnvChange = self.getCurrentState()
# print('rew2: ',reward2)
reward = 0.7 * reward1 + 0.3 * reward2;
else:
print('wrong block!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
stateAfterEnvChange.extend(stateAfterAction)
# print(self.errorState)
# print(reward2)
# print('totrew: ', reward)
return stateAfterEnvChange, reward, done or networkFailure, measAfterAction;
def incrementLoadProfile(self):
self.stateIndex += 1;
self.scaleLoadAndPowerValue(self.stateIndex);
self.runEnv(True);
"""
try:
pp.runpp(self.net);
reward = self.calculateReward(self.net.res_bus.vm_pu, self.net.res_line.loading_percent);
except:
networkFailure=True;
self.net.shunt.q_mvar=shuntBackup;
self.net.impedance.loc[0, ['xft_pu', 'xtf_pu']]=impedenceBackup;
pp.runpp(self.net);
reward=1000;
return self.net.res_bus,reward,True;
self.stateIndex += 1;
if self.stateIndex < len(self.powerProfile):
if (self.scaleLoadAndPowerValue(self.stateIndex, self.stateIndex - 1) == False):
networkFailure = True;
reward = 1000;
# self.stateIndex -= 1;
return self.net.res_bus, reward, self.stateIndex == len(self.powerProfile) or networkFailure;
"""
##Function to calculate line reactance in pu
def X_pu(self, line_index):
s_base = 100e6
v_base = 230e3
x_base = pow(v_base, 2) / s_base
x_line_ohm = self.net.line.x_ohm_per_km[line_index]
        x_line_pu = x_line_ohm / x_base  # Can take one since this line is divided into
# 2 identical lines with length 0.5 km
#print(x_line_pu)
return x_line_pu
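    # For reference: with v_base = 230 kV and s_base = 100 MVA, x_base = (230e3)**2 / 100e6
    # = 529 ohm, so the per-unit value returned above is simply x_ohm_per_km / 529 for
    # this line.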
## Resets environment choosing new starting state, used for beginning of each episode
def reset(self):
self.stateIndex = self.getstartingIndex()
#Disable FACTS
self.net.switch.at[0, 'closed'] = False
self.net.switch.at[1, 'closed'] = True
# Make sure FACTS output is reset for controllers to work properly
#print(self.net.shunt.q_mvar[0])
#self.net.shunt.q_mvar[0] = 0
#print(self.net.impedance.loc[0, ['xft_pu']])
#self.net.impedance.loc[0, ['xft_pu', 'xtf_pu']] =
#self.net.shunt.q_mvar
self.scaleLoadAndPowerValue(self.stateIndex);
try:
pp.runpp(self.net, run_control=False);
except:
print('Some error occurred while resetting the environment');
raise Exception('cannot proceed at these settings. Please fix the environment settings');
## Calculate immediate reward
def calculateReward(self, voltages, loadingPercent,loadAngle=10):
try:
rew=0;
for i in range(1,2):
if voltages[i] > 1:
rew=voltages[i]-1;
else:
rew=1-voltages[i];
rewtemp = rew # For storage to set reward to 0
rew = math.exp(rew*10)*-20;
#print(rew)
loadingPercentInstability=np.std(loadingPercent)# Think it works better without this addition: * len(loadingPercent);
rew = rew - loadingPercentInstability;
# (math.exp(abs(1-voltages[i])*10)*-20)-std ;
#print(rew)
#rew=rew if abs(loadAngle)<30 else rew-200;
except:
print('exception in calculate reward')
print(voltages);
print(loadingPercent)
return 0;
rew = (200+rew)/200 # normalise between 0-1
if rewtemp > 0.15 or abs(loadAngle)>=30: # IF voltage deviating more than 0.15 pu action is very very bad.
rew = 0.001 #Also makes sure that final rew >=0
if rew < 0:
rew = 0
return rew
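    # Numerical sketch of the shaping above (assumed inputs, not results from a simulation):
    # for voltages[1] = 1.05 pu the deviation is 0.05, so rew = exp(0.05 * 10) * -20 ~= -32.97;
    # with np.std(loadingPercent) = 5 this becomes -37.97, normalised to (200 - 37.97) / 200 ~= 0.81.
    # The 0.05 pu deviation is below the 0.15 cut-off and loadAngle defaults to 10 < 30,
    # so roughly 0.81 would be returned for this hypothetical state.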
## Simple plot diagram
def plotGridFlow(self):
print('plotting powerflow for the current state')
plot.simple_plot(self.net)
## Scale load and generation from load and generation profiles
def scaleLoadAndPowerValue(self,index):
scalingFactorLoad = self.loadProfile[index] / (sum(self.loadProfile)/len(self.loadProfile));
scalingFactorPower = self.powerProfile[index] / max(self.powerProfile);
self.net.load.p_mw[0] = self.nominalP * scalingFactorLoad;
self.net.load.q_mvar[0] = self.nominalQ * scalingFactorLoad;
#self.net.sgen.p_mw = self.net.sgen.p_mw * scalingFactorPower;
#self.net.sgen.q_mvar = self.net.sgen.q_mvar * scalingFactorPower;
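    # Example of the scaling (hypothetical profile): with loadProfile = [80, 100, 120] (mean 100)
    # and index = 2, scalingFactorLoad = 120 / 100 = 1.2, so the load is set to
    # 1.2 * nominalP MW and 1.2 * nominalQ Mvar for that time step.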
def runNoFACTS(self, busVoltageInd):
        # Bypass FACTS devices if wanted
self.net.switch.at[0, 'closed'] = True
self.net.switch.at[1, 'closed'] = False
self.net.controller.in_service[0] = True
self.net.controller.in_service[1] = True
self.shuntControl.ref = 1
self.seriesControl.ref = 50
# Create array
v_arr = []
l_arr = []
# Loop through all loadings
for i in range(0, 600): #len(self.loadProfile)
# Increment and run environment
self.stateIndex += 1;
self.scaleLoadAndPowerValue(self.stateIndex);
self.runEnv(True);
# Store result for current settings
v_arr.append(self.net.res_bus.vm_pu[busVoltageInd])
l_arr.append(self.stateIndex)
# Plot result
print(max(v_arr))
print(min(v_arr))
plt.plot(l_arr, v_arr)
plt.grid()
plt.xlabel('Time step on load profile [-]', fontsize= 18 )
plt.ylabel('Voltage [pu]', fontsize= 18)
plt.title('Bus 2 Voltage with shunt+series FACTS ', fontsize= 22)
plt.show()
def runNoRL(self, busVoltageInd):
# Print the load profile:
# loadProfilesScaled = self.loadProfile / (sum(self.loadProfile) / len(self.loadProfile))
# P = loadProfilesScaled * self.nominalP
# Q = loadProfilesScaled * self.nominalQ
# xaxis = range(0, len(self.loadProfile))
# fig, ax1 = plt.subplots()
# ax1.set_title('Load profile', fontsize=24)
# ax1.set_xlabel('Time step on load profile [-]', fontsize=20)
# ax1.set_ylabel('Active power [MW] ', color='r', fontsize=20)
# ax1.plot(xaxis, P, color='r')
# ax1.set_ylim(0, 500)
# plt.xticks(fontsize=16)
# plt.yticks(fontsize=16)
# ax2 = ax1.twinx()
# ax2.set_ylabel('Reactive power [Mvar] ', color='tab:blue', fontsize=20)
# ax2.plot(xaxis, Q, color='tab:blue')
# ax2.set_ylim(0,500)
# plt.xticks(fontsize=16)
# plt.yticks(fontsize=16)
# plt.grid()
# plt.show()
#
# #Zoomed in version:
# fig, ax1 = plt.subplots()
# ending = 1000-1
# ax1.set_title('Load profile', fontsize=24)
# ax1.set_xlabel('Time step on load profile [-]', fontsize=20)
# ax1.set_ylabel('Active power [MW] ', color='r', fontsize=20)
# ax1.plot(xaxis[0:ending], P[0:ending], color='r')
# ax1.set_ylim(0,500)
# plt.xticks(fontsize=16)
# plt.yticks(fontsize=16)
# ax2 = ax1.twinx()
# ax2.set_ylabel('Reactive power [Mvar] ', color='tab:blue', fontsize=20)
# ax2.plot(xaxis[0:ending], Q[0:ending], color='tab:blue')
# ax2.set_ylim(0,500)
# plt.xticks(fontsize=16)
# plt.yticks(fontsize=16)
# plt.grid()
# plt.show()
#SHUNT+SERIES:
        # Bypass FACTS devices if wanted
self.net.switch.at[0, 'closed'] = True
self.net.switch.at[1, 'closed'] = True
self.net.controller.in_service[0] = True
self.net.controller.in_service[1] = False
self.shuntControl.ref = 1
self.seriesControl.ref = 50
# Create array
v_arr = []
v_arr_so = []
l_arr = []
# Loop through all loadings
for i in range(0, 600): # len(self.loadProfile)
# Increment and run environment
self.stateIndex += 1;
self.scaleLoadAndPowerValue(self.stateIndex);
self.runEnv(True);
# Store result for current settings
v_arr_so.append(self.net.res_bus.vm_pu[busVoltageInd])
l_arr.append(self.stateIndex)
#SHUNT ONLY
self.setMode('test')
self.net.switch.at[0, 'closed'] = True
self.net.switch.at[1, 'closed'] = False
self.net.controller.in_service[0] = True
self.net.controller.in_service[1] = True
for i in range(0, 600): # len(self.loadProfile)
# Increment and run environment
self.stateIndex += 1;
self.scaleLoadAndPowerValue(self.stateIndex);
self.runEnv(True);
# Store result for current settings
v_arr.append(self.net.res_bus.vm_pu[busVoltageInd])
# Plot result
print(max(v_arr))
print(min(v_arr))
print(max(v_arr_so))
print(min(v_arr_so))
plt.plot(l_arr, v_arr)
plt.plot(l_arr, v_arr_so)
plt.grid()
plt.xlabel('Time step on load profile [-]', fontsize=20)
plt.ylabel('Voltage [pu]', fontsize=20)
plt.title('Bus 2 Voltage with non-RL FACTS ', fontsize=24)
plt.legend(['shunt+series','shunt only'], fontsize=12)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.show()
##Load Profile data has been pickled already, do not run this function for now
def createLoadProfile():
ML = (np.cos(2 * np.pi/12 * np.linspace(0,11,12)) * 50 + 100 ) * 1000 # monthly load
ML = el.make_timeseries(ML) #convenience wrapper around pd.DataFrame with pd.DateTimeindex
#print(ML)
DWL = el.gen_daily_stoch_el() #daily load working
DNWL = el.gen_daily_stoch_el() #daily load non working
#print(sum(DNWL))
        Weight = .60  # i.e. energy will be split 60% on working days and 40% on non-working days
Load1 = el.gen_load_from_daily_monthly(ML, DWL, DNWL, Weight)
Load1.name = 'L1'
Load1=Load1.round();
#print(Load1)
disag_profile = np.random.rand(60)
JanLoadEveryMinute=el.generate.disag_upsample(Load1[0:744],disag_profile, to_offset='min');
JanLoadEvery5mins=[];
l=0;
for i in range(0,JanLoadEveryMinute.shape[0]):
l=l+JanLoadEveryMinute[i];
if np.mod(i+1,5) == 0:
JanLoadEvery5mins.append(l);
l=0;
windDataDF = pd.read_excel('Data/WindEnergyData.xlsx');
generatorValuesEvery5mins=[];
for i in range(1,windDataDF.shape[0]):
randomValue=np.random.choice(100, 1)[0]
randomValue_prob = np.random.random();
if randomValue > windDataDF.iloc[i]['DE_50hertz_wind_generation_actual'] or randomValue_prob < 0.4:
generatorValuesEvery5mins.append(windDataDF.iloc[i]['DE_50hertz_wind_generation_actual'])
generatorValuesEvery5mins.append(windDataDF.iloc[i]['DE_50hertz_wind_generation_actual'])
else :
generatorValuesEvery5mins.append(windDataDF.iloc[i]['DE_50hertz_wind_generation_actual'] - randomValue)
generatorValuesEvery5mins.append(windDataDF.iloc[i]['DE_50hertz_wind_generation_actual'] + randomValue)
generatorValuesEvery5mins.append(windDataDF.iloc[i]['DE_50hertz_wind_generation_actual'])
print(len(generatorValuesEvery5mins))
print(len(JanLoadEvery5mins))
pickle.dump(generatorValuesEvery5mins, open("Data/generatorValuesEvery5mins.pkl", "wb"))
pickle.dump(JanLoadEvery5mins, open("Data/JanLoadEvery5mins.pkl", "wb"))
def trainTestSplit():
with open('Data/JanLoadEvery5mins.pkl', 'rb') as pickle_file:
loadProfile = pickle.load(pickle_file)
numOFTrainingIndices = int(np.round(0.8*len(loadProfile)))
trainIndices=np.random.choice(range(0,len(loadProfile)),numOFTrainingIndices,replace=False)
trainIndicesSet=set(trainIndices)
testIndices=[x for x in range(0,len(loadProfile)) if x not in trainIndicesSet]
pickle.dump(trainIndices, open("Data/trainIndices.pkl", "wb"))
pickle.dump(testIndices, open("Data/testIndices.pkl", "wb"))
#print(len(loadProfile))
#print(len(trainIndicesSet))
#print(len(trainIndices))
#print(len(testIndices))
#createLoadProfile()
#trainTestSplit()
| 48.436883 | 173 | 0.604496 | 40,424 | 0.916104 | 0 | 0 | 0 | 0 | 0 | 0 | 12,243 | 0.277455 |
9e66515414c951c5a5647702f8a347abcfdec43d | 10,659 | py | Python | unittests/TestGameServerController.py | dgsd-consulting/python_cowbull_server | b3f5e36c98c29701b0faf0adcf5d7b56a91a7402 | ["Apache-2.0"] | 1 | 2019-01-22T03:48:30.000Z | 2019-01-22T03:48:30.000Z | unittests/TestGameServerController.py | dgsd-consulting/python_cowbull_server | b3f5e36c98c29701b0faf0adcf5d7b56a91a7402 | ["Apache-2.0"] | 1 | 2019-04-14T21:15:17.000Z | 2019-08-08T01:25:29.000Z | unittests/TestGameServerController.py | davidjsanders/python_cowbull_server | b3f5e36c98c29701b0faf0adcf5d7b56a91a7402 | ["Apache-2.0"] | 2 | 2018-09-20T20:28:48.000Z | 2018-10-02T20:57:45.000Z |
import json
from unittest import TestCase
from flask import Flask
from flask_controllers.GameServerController import GameServerController
from flask_helpers.VersionHelpers import VersionHelpers
from python_cowbull_server import app
from python_cowbull_server.Configurator import Configurator
from flask_helpers.ErrorHandler import ErrorHandler
from Persistence.PersistenceEngine import PersistenceEngine
class TestGameServerController(TestCase):
def setUp(self):
self.info = VersionHelpers()
app.testing = True
self.app = app.test_client()
self.c = Configurator()
self.c.execute_load(self.app.application)
# Force use of File persister
p = {"engine_name": "file", "parameters": {}}
self.app.application.config["PERSISTER"] = PersistenceEngine(**p)
if self.info.major < 3:
self.json_raises = ValueError
else:
self.json_raises = json.JSONDecodeError
def test_gsc_init(self):
GameServerController()
def test_gsc_bad_init(self):
self.app.application.config["PERSISTER"] = None
try:
GameServerController()
except ValueError as ve:
self.assertIn("No persistence engine is defined", str(ve))
def test_gsc_valid_init(self):
gsc = GameServerController()
self.assertIsNone(gsc.game_version)
self.assertIsInstance(gsc.handler, ErrorHandler)
def test_gsc_get_game(self):
with self.app as c:
response = c.get('/v1/game')
self.assertEqual(response.status, '200 OK')
def test_gsc_get_game_bad_mode(self):
gsc = GameServerController()
with self.app as c:
response = c.get('/v1/game?mode=reallyreallytough')
self.assertEqual(response.status, '400 BAD REQUEST')
self.assertIn("Mode reallyreallytough not found", str(response.data))
def test_gsc_get_game_bad_persister(self):
p = self.app.application.config["PERSISTER"]
with self.app:
with self.assertRaises(TypeError):
self.app.application.config["PERSISTER"] = PersistenceEngine(
engine_name="foobar",
parameters={
"host": "foobar",
"port": 27017,
"db": "cowbull"
}
)
self.app.application.config["PERSISTER"] = p
def test_gsc_get_game_no_persister(self):
p = self.app.application.config["PERSISTER"]
with self.app as c:
with self.assertRaises(KeyError):
self.app.application.config["PERSISTER"] = PersistenceEngine(
engine_name="redis",
parameters={
"host": "local",
"port": 6379,
"db": "cowbull"
}
)
c.get('/v1/game')
self.app.application.config["PERSISTER"] = p
def test_gsc_get_game_badparam_persister(self):
p = self.app.application.config["PERSISTER"]
with self.app:
with self.assertRaises(TypeError):
self.app.application.config["PERSISTER"] = PersistenceEngine(
engine_name="redis",
parameters={
"host": "local",
"port": 6379,
"db": "cowbull",
"foo": "bar"
}
)
self.app.application.config["PERSISTER"] = p
def test_gsc_post_game(self):
with self.app as c:
response = c.get('/v1/game')
self.assertEqual(response.status[0:3], '200')
key = json.loads(response.data)["key"]
game_data = {
"key": key,
"digits": [0, 1, 2, 3]
}
response = c.post(
'/v1/game',
data=json.dumps(game_data),
content_type="application/json"
)
self.assertEqual(response.status[0:3], '200')
def test_gsc_post_bad_key(self):
with self.app as c:
key = '1234'
game_data = {
"key": key,
"digits": [0, 1, 2, 3]
}
response = c.post(
'/v1/game',
data=json.dumps(game_data),
content_type="application/json"
)
self.assertEqual(response.status[0:3], '400')
self.assertIn("The request must contain a valid game key", str(response.data))
def test_gsc_post_bad_digits(self):
with self.app as c:
response = c.get('/v1/game')
self.assertEqual(response.status[0:3], '200')
key = json.loads(response.data)["key"]
game_data = {
"key": key,
"digits": ['X', 'Y', 2, 3]
}
response = c.post(
'/v1/game',
data=json.dumps(game_data),
content_type="application/json"
)
self.assertEqual(response.status[0:3], '400')
def test_gsc_post_no_digits(self):
with self.app as c:
response = c.get('/v1/game')
self.assertEqual(response.status[0:3], '200')
key = json.loads(response.data)["key"]
game_data = {
"key": key
}
response = c.post(
'/v1/game',
data=json.dumps(game_data),
content_type="application/json"
)
self.assertEqual(response.status[0:3], '400')
self.assertIn("The request must contain an array of digits", str(response.data))
def test_gsc_post_num_digits(self):
with self.app as c:
response = c.get('/v1/game')
self.assertEqual(response.status[0:3], '200')
key = json.loads(response.data)["key"]
game_data = {
"key": key,
"digits": [0, 1, 2, 3, 4, 5]
}
response = c.post(
'/v1/game',
data=json.dumps(game_data),
content_type="application/json"
)
self.assertEqual(response.status[0:3], '400')
self.assertIn("The DigitWord objects are of different lengths", str(response.data))
def test_gsc_post_hilo_digits(self):
with self.app as c:
response = c.get('/v1/game')
self.assertEqual(response.status[0:3], '200')
key = json.loads(response.data)["key"]
game_data = {
"key": key,
"digits": [-10, 21, 32, 43]
}
response = c.post(
'/v1/game',
data=json.dumps(game_data),
content_type="application/json"
)
self.assertEqual(response.status[0:3], '400')
self.assertIn("A digit must be a string representation or integer of a number", str(response.data))
def test_gsc_post_type_digits(self):
with self.app as c:
response = c.get('/v1/game')
self.assertEqual(response.status[0:3], '200')
key = json.loads(response.data)["key"]
game_data = {
"key": key,
"digits": {"foo": "bar"}
}
response = c.post(
'/v1/game',
data=json.dumps(game_data),
content_type="application/json"
)
self.assertEqual(response.status[0:3], '400')
self.assertIn("A digit must be a string representation or integer of a number", str(response.data))
def test_gsc_post_no_json(self):
with self.app as c:
response = c.post(
'/v1/game',
content_type="application/json"
)
self.assertEqual(response.status[0:3], '400')
self.assertIn("For some reason the json_dict is None!", str(response.data))
def test_gsc_post_bad_json(self):
with self.app as c:
response = c.post(
'/v1/game',
data=json.dumps({"keys": "1234"}),
content_type="application/json"
)
self.assertEqual(response.status[0:3], '400')
self.assertIn("For some reason the json_dict does not contain a key", str(response.data))
def test_gsc_post_bad_gamekey(self):
with self.app as c:
key = '1234'
game_data = {
"key": key,
"digits": ['X', 'Y', 2, 3]
}
response = c.post(
'/v1/game',
data=json.dumps(game_data),
content_type="application/json"
)
self.assertEqual(response.status[0:3], '400')
self.assertIn("Unable to open the key file", str(response.data))
def test_gsc_post_badtype_gamekey(self):
with self.app as c:
key = 1234
game_data = {
"key": key,
"digits": ['X', 'Y', 2, 3]
}
response = c.post(
'/v1/game',
data=json.dumps(game_data),
content_type="application/json"
)
self.assertEqual(response.status[0:3], '400')
self.assertIn("For some reason the json_dict does not contain a key!", str(response.data))
def test_gsc_post_no_gamekey(self):
with self.app as c:
game_data = {
"digits": ['X', 'Y', 2, 3]
}
response = c.post(
'/v1/game',
data=json.dumps(game_data),
content_type="application/json"
)
self.assertEqual(response.status[0:3], '400')
self.assertIn("For some reason the json_dict does not contain a key", str(response.data))
def test_gsc_post_type_gamekey(self):
with self.app as c:
game_data = {
"key": None,
"digits": ['X', 'Y', 2, 3]
}
response = c.post(
'/v1/game',
data=json.dumps(game_data),
content_type="application/json"
)
self.assertEqual(response.status[0:3], '400')
self.assertIn("For some reason the json_dict does not contain a key!", str(response.data))
| 36.628866 | 111 | 0.512149 | 10,251 | 0.961722 | 0 | 0 | 0 | 0 | 0 | 0 | 1,750 | 0.164181 |
9e66f7324f463b84e3db235287a63c2e184564ad | 10,104 | py | Python | python_flights/client.py | sylvaus/python_flights | 613f1ad294ecb53a54af1fa3ca78fa83b0badc30 | ["MIT"] | 1 | 2020-01-12T18:55:45.000Z | 2020-01-12T18:55:45.000Z | python_flights/client.py | sylvaus/python_flights | 613f1ad294ecb53a54af1fa3ca78fa83b0badc30 | ["MIT"] | null | null | null | python_flights/client.py | sylvaus/python_flights | 613f1ad294ecb53a54af1fa3ca78fa83b0badc30 | ["MIT"] | null | null | null |
import logging
import time
from datetime import datetime, timedelta
from itertools import product
from typing import List
import requests
from python_flights.itinerary import Itinerary
from python_flights.pods import Country, Currency, Airport, Place, Agent, Carrier, Direction, Trip, Segment, Price, \
CabinClass, SortType, SortOrder
PARAM_DATE_FORMATTING = "%Y-%m-%d"
JSON_DATE_FORMATTING = "%Y-%m-%dT%H:%M:%S"
API_ADDRESS = "https://skyscanner-skyscanner-flight-search-v1.p.rapidapi.com/apiservices"
LOCALES = [
'de-DE', 'el-GR', 'en-GB', 'en-US', 'es-ES', 'es-MX', 'et-EE', 'fi-FI', 'fr-FR', 'hr-HR', 'hu-HU', 'id-ID', 'it-IT',
'ja-JP', 'ko-KR', 'lt-LT', 'lv-LV', 'ms-MY', 'nb-NO', 'nl-NL', 'pl-PL', 'pt-BR', 'pt-PT', 'ro-RO', 'ru-RU', 'sk-SK',
'sv-SE', 'th-TH', 'tr-TR', 'uk-UA', 'vi-VN', 'zh-CN', 'zh-HK', 'zh-SG', 'zh-TW'
]
class FlightBrowser:
def __init__(self, api_key: str, locale="en-US", country="CA", currency="CAD"):
self._get_headers = {
'x-rapidapi-host': "skyscanner-skyscanner-flight-search-v1.p.rapidapi.com",
'x-rapidapi-key': f"{api_key}"
}
self._post_headers = {
'x-rapidapi-host': "skyscanner-skyscanner-flight-search-v1.p.rapidapi.com",
'x-rapidapi-key': f"{api_key}",
'content-type': "application/x-www-form-urlencoded"
}
self._locale = locale
self._country = country
self._currency = currency
self._currencies = None
self._logger = logging.getLogger(__name__ + "." + self.__class__.__name__)
@property
def currencies(self):
if self._currencies is None:
response = self._get(f"reference/v1.0/currencies")
if response.status_code != 200:
self._logger.warning(f"Request failed with status {response.status_code}")
return []
json = response.json()
self._currencies = [
Currency.from_json(currency_json)
for currency_json in json.get("Currencies", [])
]
return self._currencies
@property
def countries(self):
response = self._get(f"reference/v1.0/countries/{self._locale}")
if response.status_code != 200:
return []
json = response.json()
return [
Country.from_json(country_json)
for country_json in json.get("Countries", [])
]
def _get(self, url: str, params: dict = None):
if params is None:
params = {}
return requests.get(f"{API_ADDRESS}/{url}", headers=self._get_headers, params=params)
def _post(self, url: str, params: dict = None, data: str = ""):
if params is None:
params = {}
return requests.post(
f"{API_ADDRESS}/{url}", headers=self._post_headers
, params=params, data=data
)
def get_airports(self, keyword):
response = self._get(
f"autosuggest/v1.0/{self._country}/{self._currency}/{self._locale}/"
, params={"query": f"{keyword}"}
)
if response.status_code != 200:
return []
response_json = response.json()
return [
Airport.from_json(airport_json)
for airport_json in response_json.get("Places", [])
]
def get_flights(
self, departure_date: datetime, departure_id: str
, arrival_date: datetime, arrival_id: str
, cabin_class: CabinClass = None
, adults: int = 1, children: int = 0
, infants: int = 0, stops: int = None
, duration_mins: int = None, number_results: int = 10
, sort_type: SortType = None, sort_order: SortOrder = SortOrder.ASCENDING
) -> List[Itinerary]:
params = \
f"inboundDate={arrival_date.strftime(PARAM_DATE_FORMATTING)}" \
f"&country={self._country}¤cy={self._currency}" \
f"&locale={self._locale}&originPlace={departure_id}-sky&destinationPlace={arrival_id}-sky" \
f"&outboundDate={departure_date.strftime(PARAM_DATE_FORMATTING)}" \
f"&adults={adults}&children={children}&infants={infants}"
if cabin_class:
params += f"&cabinClass={cabin_class.value}"
self._logger.debug(f"Creating session with parameters {params}")
response = self._post("pricing/v1.0", data=params)
if response.status_code != 201:
return []
_, url = response.headers["Location"].split("/apiservices/")
params = {"pageIndex": "0", "pageSize": f"{number_results}"}
if duration_mins:
params["duration"] = f"{duration_mins}"
if stops:
params["stops"] = f"{stops}"
if sort_type:
params["sortType"] = f"{sort_type.value}"
params["sortOrder"] = f"{sort_order.value}"
self._logger.debug("Polling session")
response = self._get(url, params)
if response.status_code != 200:
return []
return self._extract_itineraries(response.json())
def _extract_itineraries(self, response_json) -> List[Itinerary]:
currencies = [
Currency.from_json(json_dict)
for json_dict in response_json.get("Currencies", [])
]
id_places = {
json_dict["Id"]: Place.from_json(json_dict)
for json_dict in response_json.get("Places", [])
}
id_agents = {
json_dict["Id"]: Agent.from_json(json_dict)
for json_dict in response_json.get("Agents", [])
}
id_carriers = {
json_dict["Id"]: Carrier.from_json(json_dict)
for json_dict in response_json.get("Carriers", [])
}
id_segments = {}
for json_dict in response_json.get("Segments", []):
id_ = json_dict["Id"]
departure_place = id_places[json_dict["OriginStation"]]
departure_time = datetime.strptime(json_dict["DepartureDateTime"], JSON_DATE_FORMATTING)
arrival_place = id_places[json_dict["DestinationStation"]]
arrival_time = datetime.strptime(json_dict["ArrivalDateTime"], JSON_DATE_FORMATTING)
carrier = id_carriers[json_dict["Carrier"]]
operating_carrier = id_carriers[json_dict["OperatingCarrier"]]
duration = timedelta(minutes=json_dict["Duration"])
flight_number = json_dict["FlightNumber"]
trip_type = json_dict["JourneyMode"]
direction = Direction.OUTBOUND if json_dict["Directionality"] == "Outbound" else Direction.INBOUND
id_segments[id_] = Segment(
id_, departure_place, departure_time, arrival_place, arrival_time,
carrier, operating_carrier, duration, flight_number, trip_type, direction
)
id_trips = {}
for json_dict in response_json.get("Legs", []):
id_ = json_dict["Id"]
segments = [
id_segments[segment_id]
for segment_id in json_dict.get("SegmentIds", [])
]
departure_place = id_places[json_dict["OriginStation"]]
departure_date = datetime.strptime(json_dict["Departure"], JSON_DATE_FORMATTING)
arrival_place = id_places[json_dict["DestinationStation"]]
arrival_date = datetime.strptime(json_dict["Arrival"], JSON_DATE_FORMATTING)
duration = timedelta(minutes=json_dict["Duration"])
stops = [
id_places[place_id]
for place_id in json_dict.get("Stops", [])
]
carriers = [
id_carriers[carrier_id]
for carrier_id in json_dict.get("Carriers", [])
]
operating_carriers = [
id_carriers[carrier_id]
for carrier_id in json_dict.get("Carriers", [])
]
direction = Direction.OUTBOUND if json_dict["Directionality"] == "Outbound" else Direction.INBOUND
id_trips[id_] = Trip(
id_, segments, departure_place, departure_date, arrival_place, arrival_date
, duration, stops, carriers, operating_carriers, direction
)
itineraries = []
for json_dict in response_json.get("Itineraries", []):
outbound_trip = id_trips[json_dict["OutboundLegId"]]
inbound_trip = id_trips[json_dict["InboundLegId"]]
prices = []
for price_dict in json_dict.get("PricingOptions", []):
agents = [id_agents[agent_id] for agent_id in price_dict["Agents"]]
quote_age = timedelta(minutes=price_dict["QuoteAgeInMinutes"])
price = price_dict["Price"]
url = price_dict["DeeplinkUrl"]
prices.append(Price(agents, quote_age, price, url))
itineraries.append(Itinerary(outbound_trip, inbound_trip, prices))
return itineraries
def get_flights_ranges(
self, departure_dates: List[datetime], departure_ids: List[str]
, arrival_dates: List[datetime], arrival_ids: List[str]
, *args, rate_limit_per_min: int = 40, **kwargs
) -> List[Itinerary]:
itineraries = []
# The time in between calls is multiplied by two because two requests are made to get flights
in_between_call_s = (60 / rate_limit_per_min) * 2
combinations = list(product(departure_dates, departure_ids, arrival_dates, arrival_ids))
nb_combinations = len(combinations)
for index, (departure_date, departure_id, arrival_date, arrival_id) in enumerate(combinations):
self._logger.debug(f"Getting itineraries {index} out of {nb_combinations}")
start_time = time.time()
itineraries.extend(
self.get_flights(departure_date, departure_id, arrival_date, arrival_id, *args, **kwargs)
)
time.sleep(max([0, in_between_call_s - (time.time() - start_time)]))
return itineraries
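
# Minimal usage sketch (hypothetical API key and airport ids; real ids come from get_airports()):
#   browser = FlightBrowser("my-rapidapi-key")
#   itineraries = browser.get_flights(datetime(2020, 6, 1), "YUL",
#                                     datetime(2020, 6, 15), "CDG",
#                                     adults=1, number_results=5)
# The result is a list of Itinerary objects built by _extract_itineraries() above.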
| 42.1 | 120 | 0.597387 | 9,249 | 0.91538 | 0 | 0 | 854 | 0.084521 | 0 | 0 | 2,131 | 0.210907 |
9e675b79e0383d49ce47e747d971a54a4f4b735e | 8,636 | py | Python | python/monitor.py | ChrisArnault/fink_data_monitor | 3ef3167204711222fb71d6d6f828bce4094ad21a | ["Apache-2.0"] | null | null | null | python/monitor.py | ChrisArnault/fink_data_monitor | 3ef3167204711222fb71d6d6f828bce4094ad21a | ["Apache-2.0"] | 8 | 2019-03-30T13:27:46.000Z | 2019-06-05T13:55:26.000Z | python/monitor.py | ChrisArnault/fink_data_monitor | 3ef3167204711222fb71d6d6f828bce4094ad21a | ["Apache-2.0"] | 1 | 2019-03-22T12:38:32.000Z | 2019-03-22T12:38:32.000Z |
#!/usr/bin/python
# coding: utf-8
# Copyright 2018 AstroLab Software
# Author: Chris Arnault
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Dataset monitor
This is the client part.
The monitor.py script has to be present on the <host> machine
where the minimal HTML server has been activated as
> python server.py
Then, call in a web navigator the URL
http://<host>:24701/monitor.py
"""
import cgi
from pylivy.session import *
from pylivy.client import *
from variables import HTMLVariableSet
# ======================================================
LIVY_URL = "http://vm-75222.lal.in2p3.fr:21111"
form = cgi.FieldStorage()
print("Content-type: text/html; charset=utf-8\n")
client = LivyClient(LIVY_URL)
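# For reference, the bare pylivy flow that the page below drives (a commented sketch only;
# every call used here also appears further down in the real handler code):
#   session = client.create_session(SessionKind.PYSPARK)
#   while client.get_session(session.session_id).state != SessionState.IDLE:
#       pass  # poll until the Spark session is ready
#   st = client.create_statement(session.session_id, "1 + 1")
#   while client.get_statement(session.session_id, st.statement_id).state != StatementState.AVAILABLE:
#       pass  # poll until the statement has finished
#   print(client.get_statement(session.session_id, st.statement_id).output.text)
#   client.delete_session(session.session_id)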
# init data
html = HTMLVariableSet(["started",
"simul",
"change_simul",
"livy_session",
"waiting_session",
"waiting_statement",
"livy_statement",
"kill_session"],
["new_statement", "result"])
url = "/monitor.py"
method = "POST"
# ======================================================
def html_header():
"""
    Global & common html header. Should be used everywhere
Returns:
--------
out: str
"""
return """
<!DOCTYPE html>
<head>
<link rel="stylesheet" type="text/css" href="css/finkstyle.css">
    <title>My test program</title>
</head>
<body>
<div class="hero-image">
<div class="hero-text">
<h1 style="font-size:50px">Fink</h1>
<h3>Alert dataset monitor</h3>
<div class="topnav"> """
def html_trailer():
"""
    Global & common html trailer. Should be used everywhere
Returns:
--------
out: str
"""
return """
</div>
<p>© AstroLab Software 2018-2019</p>
</div>
</div>
</body>
</html>
"""
def html_manage_simulation_mode(out: str) -> str:
# manage Livy simulation
will_change_simul = html.change_simul.is_set()
print("<br>change simul = {}".format(will_change_simul))
html.change_simul.reset()
if will_change_simul:
if html.simul.is_set():
out += """<form action="{}" method="{}">""".format(url, method)
out += """
<br> Currently using real Livy"""
html.simul.reset()
out += html.to_form()
out += """<button type="submit">Simul Livy</button>
</form>
"""
else:
out += """<form action="{}" method="{}">""".format(url, method)
out += """
<br> Currently simulate Livy """
html.simul.set(1)
out += html.to_form()
out += """<button type="submit">Use real Livy</button>
</form>
"""
else:
if html.simul.is_set():
out += """<form action="{}" method="{}">""".format(url, method)
out += """
<br> Currently simulate Livy """
html.change_simul.set(1)
out += html.to_form()
out += """
<button type="submit">Use real Livy</button>
</form>
"""
else:
out += """<form action="{}" method="{}">""".format(url, method)
out += """
<br> Currently using real Livy"""
html.change_simul.set(1)
out += html.to_form()
out += """
<button type="submit">Simul Livy</button>
</form>
"""
# out += html.debug()
html.change_simul.reset()
return out
# Read all HTML POST variables
html.read(form)
if not html.started.is_set():
# Handle the very first launch to set the default
html.simul.set(1)
html.started.set(1)
# ======================================================
# the start of the WEB page
# ======================================================
out = html_header()
out = html_manage_simulation_mode(out)
# out += html.debug()
# Manage Livy session & Spark statements
out += """<form action="{}" method="{}">""".format(url, method)
if html.simul.is_set():
if html.waiting_session.above(5):
print("<br> session is now idle")
html.waiting_session.reset()
html.waiting_statement.reset()
html.livy_statement.reset()
html.livy_session.set(1)
if html.waiting_statement.above(5):
print("<br> statement just finished")
html.waiting_session.reset()
html.waiting_statement.reset()
html.livy_statement.incr()
# debugging
# print("<br>")
# print("Keys = [", ",".join(form.keys()), "]")
# print(html.debug())
"""
Command interface
- select Livy simulation
- open session & wait for idle
- start statement & wait for completion
"""
if html.kill_session.is_set():
session_id = html.livy_session.value
try:
client.delete_session(session_id)
except:
print("error killing session ", session_id)
html.livy_session.reset()
html.waiting_session.reset()
html.kill_session.reset()
if html.livy_session.is_set():
# statement management
if not html.waiting_statement.is_set():
out += """<br>session is idle: we may start a statement<br>"""
html.waiting_statement.set(0)
out += html.to_form()
out += """
Enter a Spark statement
<input type="text" name="new_statement" value="{}" />
<input type="text" name="result" value="{}" />
<button type="submit">Run</button>
""".format(html.new_statement.value, html.result.value)
else:
out += """<br>session is idle, we do wait a statement to complete<br>"""
html.waiting_statement.incr()
s = client.get_session(html.livy_session.value)
if not html.livy_statement.is_set():
st = client.create_statement(s.session_id, html.new_statement.value)
html.livy_statement.set(st.statement_id)
else:
st = client.get_statement(s.session_id, html.livy_statement.value)
if st.state == StatementState.AVAILABLE:
html.waiting_statement.reset()
html.result.set(st.output.text)
print("<br>", html.result.value)
html.livy_statement.reset()
out += html.to_form()
out += """<button type="submit">waiting statement to complete</button>"""
else:
# session management
if not html.waiting_session.is_set():
out += """<br>No session<br>"""
html.waiting_session.set(0)
# print(html.waiting_session.debug())
html.waiting_statement.reset()
out += html.to_form()
out += """<button type="submit">Open a session</button>"""
else:
# we have requested a new session thus waiting_session is set
if html.simul.is_set():
html.waiting_session.incr()
else:
if not html.livy_session.is_set():
print("Create a session ")
s = client.create_session(SessionKind.PYSPARK)
print("<br> session {} <br>".format(s.session_id))
html.livy_session.set(s.session_id)
# we test if the session is already idle
s = client.get_session(html.livy_session.value)
if s.state == SessionState.IDLE:
print("<br> session is now idle")
html.waiting_session.reset()
html.waiting_statement.reset()
html.livy_statement.reset()
html.new_statement.reset()
out += """<br>Waiting session to become idle<br>"""
out += html.to_form()
out += """<button type="submit">waiting session</button>"""
out += """</form>"""
if html.livy_session.is_set():
out += """<form action="{}" method="{}">""".format(url, method)
html.kill_session.set(1)
out += html.to_form()
out += """
<button type="submit">Delete the session</button>
</form>
"""
out += html_trailer()
print(out)
| 28.50165 | 81 | 0.559287 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,274 | 0.494905 |
9e687cbd3bdfdf17c399fa781c8f96210ee0138e | 8,457 | py | Python | python/src/buildXyzMapCommand.py | kylemcdonald/LightLeaks | f72719c4f46e4ec0cf8f37b520f7be859381d43b | ["MIT"] | 57 | 2015-01-06T13:07:04.000Z | 2022-03-26T04:05:50.000Z | python/src/buildXyzMapCommand.py | kylemcdonald/LightLeaks | f72719c4f46e4ec0cf8f37b520f7be859381d43b | ["MIT"] | 34 | 2015-01-01T21:18:50.000Z | 2021-09-02T16:28:10.000Z | python/src/buildXyzMapCommand.py | kylemcdonald/LightLeaks | f72719c4f46e4ec0cf8f37b520f7be859381d43b | ["MIT"] | 11 | 2015-02-23T18:56:22.000Z | 2020-07-19T07:50:11.000Z |
import click
import json
import os
import re
from tqdm import tqdm
from utils.imutil import *
import numpy as np
import math
PROCESSED_SCAN_FOLDER = 'processedScan'
def buildXyzMap(data_dir, prefix):
projector_size = get_projector_size(data_dir)
click.echo("Projector resolution %i x %i (from settings.json)" %
(projector_size[0], projector_size[1]))
if not os.path.exists(os.path.join(data_dir, 'mask-0.png')):
click.secho(
f'Error: Projector mask not found at path {os.path.join(data_dir, "mask-0.png")}', err=True, fg='red')
return
scan_folders = sorted(
[f for f in os.listdir(data_dir) if re.match('^'+prefix, f)])
scan_folders = list(filter(lambda x: os.path.isdir(
os.path.join(data_dir, x, PROCESSED_SCAN_FOLDER)), scan_folders))
if len(scan_folders) == 0:
click.secho(
f"No scans found {data_dir} with prefix {prefix}", err=True, fg="red")
return
deduped = None
for i, folder in tqdm(enumerate(scan_folders), total=len(scan_folders)):
tqdm.write(folder + f": Loading processed scan")
processed_path = os.path.join(data_dir, folder, PROCESSED_SCAN_FOLDER)
cam_confidence = imread(os.path.join(
processed_path, 'camConfidence.exr'))
cam_binary_map = np.load(os.path.join(processed_path, 'camBinary.npy'))
cam_width = cam_confidence.shape[1]
cam_height = cam_confidence.shape[0]
# tqdm.write(f"{folder}: Camera size {cam_width}x{cam_height}")
# Load binary file from camamok
cam_xyz_map = np.fromfile(os.path.join(
data_dir, folder, 'camamok', 'xyzMap.raw'), np.float32)
# Determine scale factor of binary file (probably 4 if code hasnt changed in camamok)
scale_factor = math.floor(
1/math.sqrt((len(cam_xyz_map) / 4) / (cam_width * cam_height)))
tqdm.write(folder + f": upscaling xyz map by {scale_factor}")
# Reshape camamok xyz map
cam_xyz_map = cam_xyz_map.reshape(
int(cam_height / scale_factor), int(cam_width / scale_factor), 4)[:, :, 0:3]
cam_xyz_map = upsample(cam_xyz_map, scale=scale_factor)
tqdm.write(folder + f": xyz map size {cam_xyz_map.shape}")
# tqdm.write(f'{folder}: cam xyz minimum: {np.min(cam_xyz_map)}, max: {np.max(cam_xyz_map)}')
assert len(cam_confidence) > 0
assert len(cam_binary_map) > 0
assert len(cam_xyz_map) > 0
tqdm.write(folder + f": Packing data")
packed = pack_maps(cam_confidence, cam_binary_map,
cam_xyz_map, i, projector_size)
tqdm.write(
f'{folder}: Packed {packed.shape[0]:,} pixels. Removing duplicate pixels')
if deduped is not None:
# print('deduped before:', deduped.shape)
packed = np.vstack((packed, deduped))
# print('packed after:', packed.shape)
deduped = dedupe(packed)
tqdm.write(
f'{folder}: {deduped.shape[0]:,} pixels in deduplicated stack')
    click.echo("Done processing scans. Unpacking projector map")
projector_xyz, projector_confidence, cam_index_map, cam_pixel_index = unpack_maps(
deduped, projector_size)
cam_index_map_colored = np.copy(cam_index_map)
cam_index_map_colored[projector_confidence < 0.1] = -1
cam_index_map_colored = cam_index_map_colored * \
255 / (cam_index_map.max()+1)
cam_index_map_colored = cv2.applyColorMap(
cam_index_map_colored.astype(np.uint8), cv2.COLORMAP_JET)
# imshow(cam_index_map_colored, fmt='jpg')
# Store result
debug_out_path = os.path.join(data_dir, 'BuildXYZ')
if not os.path.exists(debug_out_path):
os.makedirs(debug_out_path)
projector_mask = imread(os.path.join(
data_dir, 'mask-0.png')).mean(axis=2) / 255
projector_confidence_masked = projector_confidence * \
projector_mask[:, :, np.newaxis]
imwrite(os.path.join(debug_out_path, 'confidenceMap-0.exr'),
projector_confidence_masked.astype(np.float32))
imwrite(os.path.join(debug_out_path, 'xyzMap-0.exr'),
projector_xyz.astype(np.float32))
imwrite(os.path.join(debug_out_path, 'camIndexMap.png'), cam_index_map)
imwrite(os.path.join(debug_out_path, 'camIndexMapColored.png'),
cam_index_map_colored)
with open(os.path.join(debug_out_path, "BuildXYZOutput.txt"), "w") as text_file:
def t(text):
text_file.write("%s\n" % text)
click.echo(text)
t("Scans used:")
for s in scan_folders:
t("\t%s" % s)
t("Resolution: %ix%i" % (projector_size[0], projector_size[1]))
threshold = 0.05
t("\nCoverage (threshold %.2f):" % threshold)
masked_camIndexMap = np.copy(cam_index_map)
masked_camIndexMap[projector_confidence < threshold] = -1
u, c = np.unique(masked_camIndexMap, return_counts=True)
for _u, _c in zip(u, c):
if _u != -1:
t("\tScan %i (%s): %.2f%% (%i)" %
(_u, scan_folders[int(_u)], 100*_c / sum(c), _c))
else:
t("\tNo scan: %.2f%% (%i)" % (100*_c / sum(c), _c))
def get_projector_size(data_dir):
with open(os.path.join(data_dir, 'settings.json')) as json_file:
data = json.load(json_file)
proj_width = data['projectors'][0]['width']
proj_height = data['projectors'][0]['height']
return proj_width, proj_height
def overflow_fix(cam_binary_map, proj_size):
cam_binary_map[(cam_binary_map[:, 0] >= proj_size[0]) | (
cam_binary_map[:, 1] >= proj_size[1])] = [0, 0]
# rows, cols = camHeight, camWidth
# confidence.shape: rows, cols (float)
# cam_binary_map.shape: rows, cols, 2 (int)
# cam_xyz_map.shape: rows, cols, 3 (float)
# cam_index: int
def pack_maps(confidence, cam_binary_map, cam_xyz_map, cam_index, proj_size):
""" Pack camera confidence, cam binary projector map and camera xyz map """
# prepare confidence_flat
confidence_flat = confidence.reshape(-1, 1)
# prepare cam_binary_mapFlat
cam_binary_map_flat = cam_binary_map.reshape((-1, 2))
overflow_fix(cam_binary_map_flat, proj_size)
cam_binary_map_flat = np.ravel_multi_index(cam_binary_map_flat.transpose()[
::-1], (proj_size[1], proj_size[0])).reshape(-1, 1)
# prepare cam_xyz_map_flat
# scale = len(cam_binary_map) / len(cam_xyz_map)
cam_xyz_map_flat = cam_xyz_map.reshape(-1, 3)
# DEBUG STUFF
# Pack camera index into array
cam_index_flat = np.full((cam_xyz_map_flat.shape[0], 1), cam_index)
# Cam Pixel Index
cam_pixel_index = np.arange(cam_xyz_map_flat.shape[0])[:, np.newaxis]
# stack and return everything in shape: (rows x cols), 7
return np.hstack((confidence_flat, cam_binary_map_flat, cam_xyz_map_flat, cam_index_flat, cam_pixel_index))
def dedupe(packed):
# get indices sorted by confidence, use ::-1 to put max confidence first
packedSortedIndices = packed[:, 0].argsort()[::-1]
packedSorted = packed[packedSortedIndices]
# get unique packedSorted indices
_, indices = np.unique(packedSorted.transpose()[1], return_index=True)
return packedSorted[indices]
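# Toy example of the dedupe step (made-up numbers): if two packed rows map to the same
# projector-pixel code 37 with confidences 0.9 and 0.4, sorting by confidence (descending)
# and taking np.unique over the code column keeps only the 0.9 row, so every projector
# pixel ends up with its single best camera sample.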
def unpack_maps(packed, proj_size):
""" Unpack projector xyz map and projector confidence """
proj_width = proj_size[0]
proj_height = proj_size[1]
projector_xyz = np.zeros((proj_height, proj_width, 3))
projector_confidence = np.zeros((proj_height, proj_width, 1))
cam_index = np.full((proj_height, proj_width, 1), -1)
cam_pixel_index = np.zeros((proj_height, proj_width, 1))
# assign xyzMap values use proMapFlat indices
# packed[:,0] contains confidence value
# packed[:,1] contains binary code (projector pixel coordinate)
# packed[:,2:5] contains xyz coordinate
# packed[:,5] contains camera index (debug)
# packed[:,6] contains camera pixel index (debug)
proMapFlat = packed[:, 1].astype(np.int32)
projector_confidence.reshape(-1)[proMapFlat] = packed[:, 0]
projector_xyz.reshape(-1, 3)[proMapFlat] = packed[:, 2:5]
# DEBUG STUFF
cam_index.reshape(-1)[proMapFlat] = packed[:, 5]
cam_pixel_index.reshape(-1)[proMapFlat] = packed[:, 6].astype(np.uint64)
return projector_xyz, projector_confidence, cam_index, cam_pixel_index
| 38.793578 | 114 | 0.653541 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,275 | 0.269008 |
9e698f12281208ec9285a26a2656c4de0a23f99f | 3,383 | py | Python | api/tests/test_bad_queries.py | jpclark6/datalake | d9dceabe889f55ce589494fae5d00a27985e8088 | [
"Apache-2.0"
]
| 2 | 2016-12-11T18:00:08.000Z | 2017-12-26T22:47:15.000Z | api/tests/test_bad_queries.py | jpclark6/datalake | d9dceabe889f55ce589494fae5d00a27985e8088 | [
"Apache-2.0"
]
| 10 | 2015-09-24T00:32:55.000Z | 2017-09-14T02:15:53.000Z | api/tests/test_bad_queries.py | jpclark6/datalake | d9dceabe889f55ce589494fae5d00a27985e8088 | [
"Apache-2.0"
]
| 2 | 2016-12-21T16:49:47.000Z | 2019-02-24T23:58:11.000Z | # Copyright 2015 Planet Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import simplejson as json
import base64
def get_bad_request(client, params):
uri = '/v0/archive/files/'
q = '&'.join(['{}={}'.format(k, v) for k, v in params.iteritems()])
if q:
uri += '?' + q
res = client.get(uri)
assert res.status_code == 400
response = json.loads(res.get_data())
assert 'code' in response
assert 'message' in response
return response
def test_no_parameters(client):
res = get_bad_request(client, {})
assert res['code'] == 'NoArgs'
def test_no_what_parameter(client):
res = get_bad_request(client, {'start': 123})
assert res['code'] == 'NoWhat'
def test_no_work_id_or_interval(client):
res = get_bad_request(client, {'what': 'syslog'})
assert res['code'] == 'NoWorkInterval'
def test_work_id_and_start(client):
params = {
'what': 'syslog',
'work_id': 'work123',
'start': 123
}
res = get_bad_request(client, params)
assert res['code'] == 'InvalidWorkInterval'
def test_work_id_and_end(client):
params = {
'what': 'syslog',
'work_id': 'work123',
'end': 345
}
res = get_bad_request(client, params)
assert res['code'] == 'InvalidWorkInterval'
def test_start_without_end(client):
params = {
'what': 'syslog',
'start': 123
}
res = get_bad_request(client, params)
assert res['code'] == 'InvalidWorkInterval'
def test_end_without_start(client):
params = {
'what': 'syslog',
'end': 345
}
res = get_bad_request(client, params)
assert res['code'] == 'InvalidWorkInterval'
def test_invalid_start(client):
params = {
'what': 'syslog',
'start': 'notaninteger',
'end': 123
}
res = get_bad_request(client, params)
assert res['code'] == 'InvalidTime'
def test_invalid_end(client):
params = {
'what': 'syslog',
'end': 'notaninteger',
'start': 123
}
res = get_bad_request(client, params)
assert res['code'] == 'InvalidTime'
def test_start_after_end(client):
params = {
'what': 'syslog',
'end': 100,
'start': 200,
}
res = get_bad_request(client, params)
assert res['code'] == 'InvalidWorkInterval'
def test_invalid_cursor(client):
params = {
'what': 'syslog',
'start': 100,
'end': 200,
'cursor': 'foobar',
}
res = get_bad_request(client, params)
assert res['code'] == 'InvalidCursor'
def test_bad_cursor_valid_json(client):
cursor = base64.b64encode('{"valid": "json", "invalid": "cursor"}')
params = {
'what': 'syslog',
'start': 100,
'end': 200,
'cursor': cursor,
}
res = get_bad_request(client, params)
assert res['code'] == 'InvalidCursor'
| 24.875 | 79 | 0.617204 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,241 | 0.366834 |
9e69ba962e4d092d4863d5beb5b0972723e70fc5 | 936 | py | Python | books/urls.py | ravenda900/bookshop-django | d66308a75c69854d55f8093aa8d35d4940cb5689 | ["MIT"] | null | null | null | books/urls.py | ravenda900/bookshop-django | d66308a75c69854d55f8093aa8d35d4940cb5689 | ["MIT"] | null | null | null | books/urls.py | ravenda900/bookshop-django | d66308a75c69854d55f8093aa8d35d4940cb5689 | ["MIT"] | null | null | null |
from django.urls import path, include
from . import views
urlpatterns = [
path('', views.home, name="home"),
path('signup', views.signup, name="signup"),
path('activate/<uidb64>/<token>/', views.activate_account, name='activate'),
path('sell-book', views.sell_book, name='sell_book'),
path('book/<int:id>/detail', views.book_detail, name='book_detail'),
path('add-balance', views.add_balance, name='add_balance'),
path('books-for-sale', views.books_for_sale, name='books_for_sale'),
path('purchased-books', views.purchased_books, name='purchased_books'),
path('profile/<str:username>', views.profile, name='profile'),
path('cart-items', views.cart_items, name='cart_items'),
path('add-items-to-cart/<int:book_item>', views.add_items_to_cart, name="add_items_to_cart"),
path('cancel-items', views.cancel_items, name="cancel_items"),
path('checkout', views.checkout, name='checkout')
]
| 52 | 97 | 0.698718 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 370 | 0.395299 |
9e6aea59173b99844868e9ef768a9d1a693c85e4 | 2,772 | py | Python | tests/test_models/test_exception.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | ["Apache-2.0"] | null | null | null | tests/test_models/test_exception.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | ["Apache-2.0"] | null | null | null | tests/test_models/test_exception.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | ["Apache-2.0"] | null | null | null |
import pytest
from pathlib import Path
import json
if __name__ == "__main__":
pytest.main([__file__])
@pytest.fixture(scope="session")
def fixture_exception(fixture_json_path: Path) -> Path:
return fixture_json_path / 'exception' / 'Exception.json'
def test_exception(fixture_exception):
from src.model.ex.model_ex import ModelException
from src.model.shared.ooo_type import OooType
with open(fixture_exception, 'r') as f:
f_json = json.load(f)
obj = ModelException(**f_json)
assert obj is not None
assert obj.id == 'uno-ooo-parser'
assert obj.version == "0.1.24"
assert obj.libre_office_ver == "7.2"
assert obj.name == "Exception"
assert obj.type == OooType.EXCEPTION
assert obj.type == "exception"
assert obj.namespace == "com.sun.star.uno"
assert obj.parser_args.sort == False
assert obj.parser_args.long_names == True
assert obj.parser_args.remove_parent_inherited == True
assert obj.writer_args.include_desc == True
assert obj.data.allow_db == True
assert obj.data.requires_typing == True
assert obj.data.name == "Exception"
assert obj.data.namespace == "com.sun.star.uno"
assert obj.data.url == "https://api.libreoffice.org/docs/idl/ref/exceptioncom_1_1sun_1_1star_1_1uno_1_1Exception.html"
assert len(obj.data.from_imports) == 0
assert len(obj.data.from_imports_typing) == 1
imp = obj.data.from_imports_typing[0]
assert imp.frm == ".x_interface"
assert imp.imp == "XInterface"
assert imp.az == "XInterface_8f010a43"
assert len(obj.data.extends_map) == 0
assert len(obj.data.quote) == 1
assert obj.data.quote[0] == "XInterface_8f010a43"
assert len(obj.data.typings) == 0
assert len(obj.data.full_imports.general) == 0
assert len(obj.data.full_imports.typing) == 1
assert obj.data.full_imports.typing[0] == "com.sun.star.uno.XInterface"
assert len(obj.data.imports) == 0
assert len(obj.data.extends) == 1
assert obj.data.extends[0] == "Exception"
assert len(obj.data.desc) == 3
assert obj.data.desc[0] == "the base of all UNO exceptions"
assert obj.data.items.properties is not None
p = obj.data.items.properties[0]
assert p.name == "Message"
assert p.returns == "str"
assert p.origin == "string"
assert p.origtype is None
assert len(p.desc) == 3
assert p.raises_get == ""
assert p.raises_set == ""
p = obj.data.items.properties[1]
assert p.name == "Context"
assert p.returns == "XInterface_8f010a43"
assert p.origin == "com.sun.star.uno.XInterface"
assert p.origtype == "com.sun.star.uno.XInterface"
assert len(p.desc) == 3
assert p.raises_get == ""
assert p.raises_set == ""
assert obj.data.items.methods is None
| 36.473684 | 122 | 0.683622 | 0 | 0 | 0 | 0 | 150 | 0.054113 | 0 | 0 | 500 | 0.180375 |
9e6d3fb617d1c39df17947d6364fa31a8d56f02f | 4,135 | py | Python | automlapi/automl_cognito.py | GFuentesBSC/automlapi | 575c23bc4a159ee19d97074762ec299c80578d10 | ["Unlicense"] | 1 | 2021-04-27T06:08:34.000Z | 2021-04-27T06:08:34.000Z | automlapi/automl_cognito.py | GFuentesBSC/automlapi | 575c23bc4a159ee19d97074762ec299c80578d10 | ["Unlicense"] | null | null | null | automlapi/automl_cognito.py | GFuentesBSC/automlapi | 575c23bc4a159ee19d97074762ec299c80578d10 | ["Unlicense"] | 1 | 2021-05-17T17:58:45.000Z | 2021-05-17T17:58:45.000Z |
import boto3
import base64
import hmac
import hashlib
from .automl import AWS_ACC_KEY_ID, AWS_SEC_ACC_KEY, USER_POOL_ID, CLIENT_ID, CLIENT_SECRET, AWS_REGION_NAME
client_cognito = boto3.client('cognito-idp',
aws_access_key_id=AWS_ACC_KEY_ID,
aws_secret_access_key=AWS_SEC_ACC_KEY,
region_name=AWS_REGION_NAME)
def get_secret_hash(username):
msg = username + CLIENT_ID
dig = hmac.new(str(CLIENT_SECRET).encode('utf-8'),
msg = str(msg).encode('utf-8'), digestmod=hashlib.sha256).digest()
d2 = base64.b64encode(dig).decode()
return d2
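# The value above is the Cognito SECRET_HASH: Base64(HMAC-SHA256(key=CLIENT_SECRET, msg=username + CLIENT_ID)).
# Example call (hypothetical username):
#   get_secret_hash("alice")  # -> a 44-character base64 string passed along with every auth request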
def sign_up_user(username, password, email):
try:
resp = client_cognito.sign_up(
ClientId=CLIENT_ID,
SecretHash=get_secret_hash(username),
Username=username,
Password=password,
UserAttributes=[
{
'Name': "email",
'Value': email
}
],
ValidationData=[
{
'Name': "email",
'Value': email
},
{
'Name': "custom:username",
'Value': username
}
])
except client_cognito.exceptions.UsernameExistsException as e:
return {"error": False,
"success": True,
"message": "This username already exists",
"data": None}
except client_cognito.exceptions.InvalidPasswordException as e:
return {"error": False,
"success": True,
"message": "Password should have Caps, Special chars, Numbers",
"data": None}
except client_cognito.exceptions.UserLambdaValidationException as e:
return {"error": False,
"success": True,
"message": "Email already exists",
"data": None}
except Exception as e:
return {"error": False,
"success": True,
"message": str(e),
"data": None}
return {"error": False,
"success": True,
"message": "Please confirm your signup, check Email for validation code",
"data": None}
def confirm_sign_up(username, code):
try:
response = client_cognito.confirm_sign_up(
ClientId=CLIENT_ID,
SecretHash=get_secret_hash(username),
Username=username,
ConfirmationCode=code,
ForceAliasCreation=False,
)
except client_cognito.exceptions.UserNotFoundException:
return {"error": True, "success": False, "message": "Username doesnt exists"}
# return event
except client_cognito.exceptions.CodeMismatchException:
return {"error": True, "success": False, "message": "Invalid Verification code"}
except client_cognito.exceptions.NotAuthorizedException:
return {"error": True, "success": False, "message": "User is already confirmed"}
except Exception as e:
return {"error": True, "success": False, "message": f"Unknown error {e.__str__()} "}
# return event
return {"error": False, "success": True, "message": "Username confirmed"}
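# Typical flow (hypothetical values): sign_up_user("alice", "S3cure!Pass", "alice@example.com"),
# then confirm_sign_up("alice", "123456") with the emailed code, and finally
# signin_user("alice", "S3cure!Pass") below to obtain the id/access/refresh tokens.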
def initiate_auth(username, password):
secret_hash = get_secret_hash(username)
try:
resp = client_cognito.initiate_auth(
# AuthFlow='USER_SRP_AUTH'|'REFRESH_TOKEN_AUTH'|'REFRESH_TOKEN'|'CUSTOM_AUTH'|'ADMIN_NO_SRP_AUTH'|'USER_PASSWORD_AUTH'|'ADMIN_USER_PASSWORD_AUTH',
AuthFlow='USER_PASSWORD_AUTH',
AuthParameters={
'USERNAME': username,
'SECRET_HASH': secret_hash,
'PASSWORD': password
},
ClientId=CLIENT_ID,
)
except client_cognito.exceptions.NotAuthorizedException:
return None, "The username or password is incorrect"
except client_cognito.exceptions.UserNotConfirmedException:
return None, "User is not confirmed"
except Exception as e:
return None, e.__str__()
return resp, None
def signin_user(username, password):
resp, msg = initiate_auth(username, password)
if msg != None:
return {'message': msg,
"error": True,
"success": False,
"data": None}
if resp.get("AuthenticationResult"):
return {'message': "success",
"error": False,
"success": True,
"data": {
"id_token": resp["AuthenticationResult"]["IdToken"],
"refresh_token": resp["AuthenticationResult"]["RefreshToken"],
"access_token": resp["AuthenticationResult"]["AccessToken"],
"expires_in": resp["AuthenticationResult"]["ExpiresIn"],
"token_type": resp["AuthenticationResult"]["TokenType"]
}}
else: #this code block is relevant only when MFA is enabled
return {"error": True,
"success": False,
"data": None, "message": None}
| 29.119718 | 149 | 0.708343 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,367 | 0.330593 |
9e6d433ebfb2152c9c032a7b2793db23253d6dbb | 10,464 | py | Python | Scripts/Genetic Algorithm Optimizations/gazebo_walk_ga.py | Bittu96/humanoid | 3b5cfaee25207c3bfe3a47339ec1bd0f8836689a | ["Apache-2.0"] | 1 | 2020-09-09T15:02:31.000Z | 2020-09-09T15:02:31.000Z | Scripts/Genetic Algorithm Optimizations/gazebo_walk_ga.py | Bittu96/humanoid | 3b5cfaee25207c3bfe3a47339ec1bd0f8836689a | ["Apache-2.0"] | null | null | null | Scripts/Genetic Algorithm Optimizations/gazebo_walk_ga.py | Bittu96/humanoid | 3b5cfaee25207c3bfe3a47339ec1bd0f8836689a | ["Apache-2.0"] | 1 | 2020-09-09T15:02:31.000Z | 2020-09-09T15:02:31.000Z |
#!/usr/bin/env python3
from LIPM_with_dsupport import *
import random
import subprocess
from mono_define import *
from nav_msgs.msg import Odometry
from std_srvs.srv import Empty
def walk_test(initiate_time, T_dbl, zc, foot_height):
rospy.init_node('mono_move')
print('function called')
l_2.pub = rospy.Publisher('/mono/l_hip_roll_position/command', Float64, queue_size=1)
l_3.pub = rospy.Publisher('/mono/l_hip_pitch_position/command', Float64, queue_size=1)
l_4.pub = rospy.Publisher('/mono/l_knee_pitch_position/command', Float64, queue_size=1)
l_5.pub = rospy.Publisher('/mono/l_ankle_pitch_position/command', Float64, queue_size=1)
l_6.pub = rospy.Publisher('/mono/l_ankle_roll_position/command', Float64, queue_size=1)
r_2.pub = rospy.Publisher('/mono/r_hip_roll_position/command', Float64, queue_size=1)
r_3.pub = rospy.Publisher('/mono/r_hip_pitch_position/command', Float64, queue_size=1)
r_4.pub = rospy.Publisher('/mono/r_knee_pitch_position/command', Float64, queue_size=1)
r_5.pub = rospy.Publisher('/mono/r_ankle_pitch_position/command', Float64, queue_size=1)
r_6.pub = rospy.Publisher('/mono/r_ankle_roll_position/command', Float64, queue_size=1)
# reset_simulation = rospy.ServiceProxy('/gazebo/reset_simulation', Empty)
fall = False
def callback(data):
nonlocal fall
height = data.pose.pose.position.z
if height < 0.5:
fall = True
else:
fall = False
# print(fall,height)
odom_sub = rospy.Subscriber("/mono/odom", Odometry, callback, queue_size=1)
def initiate_robot():
nonlocal fall
initiate_time = 5
speed = 0.01
angles_l = [0, 0, pi / 2, 0, 0, 0, 0]
angles_r = [0, 0, pi / 2, 0, 0, 0, 0]
body.set_angle(angles_l, 'Left')
body.set_angle(angles_r, 'Right')
body.get_all_pos()
s = subprocess.check_call("rosservice call /gazebo/reset_simulation \"{}\"", shell=True)
rospy.sleep(0.1)
body.ros_publish()
s = subprocess.check_call("rosservice call /gazebo/reset_simulation \"{}\"", shell=True)
rospy.sleep(0.4)
r = subprocess.check_call("rosservice call gazebo/unpause_physics", shell=True)
rospy.sleep(0.4)
# reset_simulation()
return pose_robot()
def pose_robot():
nonlocal fall
t = 0
initiate_time = 5
speed = 0.01
rate = rospy.Rate(1 / speed)
initial_height = 0.70
body.CoM = array([[0.015 - 0.09, 0, initial_height]])
spline_1, spline_2, spline_3 = body.transition_angle([pi / 2, 0, 0],
body.inverse_kinematics([0.09, 0, 0], "Left")[2:],
initiate_time)
while t <= initiate_time:
angles_l = [0, 0, spline_1(t), spline_2(t), spline_3(t), pi / 2]
angles_r = [0, 0, spline_1(t), spline_2(t), spline_3(t), pi / 2]
odom_sub = rospy.Subscriber("/mono/odom", Odometry, callback, queue_size=1)
if t >= 3 and fall:
print('-------------robot has fallen--------')
return False
body.set_angle(angles_l, 'Left')
body.set_angle(angles_r, 'Right')
body.get_all_pos()
body.ros_publish()
t += speed
rate.sleep()
return True
for i in range(3):
if initiate_robot():
break
else:
continue
t = 0
# foot_height = 0.05
step_size = .1
iteration = 0
switch_timer = 0
left_l = True
foot_origin_ds = 0.09
initial_height = 0.70
foot_last_pos = [0, 0]
body.CoM = array([[0.015 - 0.09, 0, initial_height]])
# these are the best results initiate_time = 0.65 T_dbl = 0.1 zc = 0.6
# initiate_time = 0.63
# T_dbl = 0.08
# speed = 0.01
# zc = 0.6
speed = 0.01
try:
print(initiate_time, T_dbl, zc, foot_height)
xsolve, vxsolve, ysolve, vysolve, p_mod = LIPM(speed, initiate_time, T_dbl, zc)
body.time_step = speed
rate = rospy.Rate(1 / speed)
print("---------------started walking------------------------------------------")
while not rospy.is_shutdown():
odom_sub = rospy.Subscriber("/mono/odom", Odometry, callback, queue_size=1)
if fall == True:
odom_sub.unregister()
l_2.pub.unregister()
l_3.pub.unregister()
l_4.pub.unregister()
l_5.pub.unregister()
l_6.pub.unregister()
r_2.pub.unregister()
r_3.pub.unregister()
r_4.pub.unregister()
r_5.pub.unregister()
r_6.pub.unregister()
return iteration
if iteration >= len(ysolve) - 20:
break
body.CoM = array([[ysolve[iteration] - 0.09, -xsolve[iteration], initial_height]])
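            # Step switching: when switch_timer reaches zero, swap the swing leg and build fresh cubic splines
            # for the swing foot's height and forward travel; T_dbl adds a double-support hold after each swing.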
if abs(round(switch_timer, 3)) == 0:
if t == 0:
step_multi = 0 # first step takes only 1 step size but the second step covers twice the d/s
second_step = True
else:
step_multi = 2
if second_step == True and t != 0:
second_step = False
step_multi = 1
if left_l:
spline_h_l = CubicSpline([0, initiate_time / 2, initiate_time], [0, foot_height, 0],
bc_type=(((1, 0)), (1, 0)))
spline_y_l = CubicSpline([0, initiate_time],
[body.links_l[6].end[0, 1],
-step_multi * step_size + body.links_l[6].end[0, 1]],
bc_type=(((1, 0)), (1, 0)))
swing_leg = 'Left'
switch_timer = initiate_time + T_dbl
ds_timer = T_dbl
dbl_phase = False
foot_last_pos[0] = r_6.end[0, 0]
foot_last_pos[1] = r_6.end[0, 1]
angles_r = body.inverse_kinematics([foot_last_pos[0], foot_last_pos[1], 0], 'Right')
angles_l = body.inverse_kinematics([foot_origin_ds, spline_y_l(0), spline_h_l(0)], 'Left')
# k = speed
if not left_l:
spline_h_r = CubicSpline([0, initiate_time / 2, initiate_time], [0, foot_height, 0],
bc_type=((1, 0), (1, 0)))
spline_y_r = CubicSpline([0, initiate_time],
[body.links_r[6].end[0, 1],
-step_multi * step_size + body.links_r[6].end[0, 1]],
bc_type=((1, 0), (1, 0)))
swing_leg = 'Right'
switch_timer = initiate_time + T_dbl
ds_timer = T_dbl
dbl_phase = False
# k = speed
foot_last_pos[0] = l_6.end[0, 0]
foot_last_pos[1] = l_6.end[0, 1]
angles_l = body.inverse_kinematics([foot_last_pos[0], foot_last_pos[1], 0], 'Left')
angles_r = body.inverse_kinematics([-foot_origin_ds, spline_y_r(0), spline_h_r(0)], 'Right')
# print(foot_last_pos)
left_l = not left_l
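            # Swing phase: advance k along the splines; once k reaches initiate_time the foot is held
            # (double support) until switch_timer expires and the legs swap again.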
elif abs(round(switch_timer, 4)) > 0:
switch_timer -= speed
if swing_leg == 'Left':
k = initiate_time + T_dbl - switch_timer
# k +=speed
if round(k, 2) == initiate_time:
dbl_phase = True
if dbl_phase == True:
k = initiate_time
if abs(round(switch_timer, 4)) == 0:
switch_timer = 0
t += speed
continue
angles_l = body.inverse_kinematics([foot_origin_ds, spline_y_l(k), spline_h_l(k)], 'Left')
angles_r = body.inverse_kinematics([foot_last_pos[0], foot_last_pos[1], 0], 'Right')
# print("this is in main", end=" ")
# print(rad2deg(angles_l))
elif swing_leg == 'Right':
k = initiate_time + T_dbl - switch_timer
# k+=speed
if round(k, 2) == initiate_time:
dbl_phase = True
if dbl_phase == True:
k = initiate_time
if abs(round(switch_timer, 4)) == 0:
switch_timer = 0
t += speed
continue
angles_r = body.inverse_kinematics([-foot_origin_ds, spline_y_r(k), spline_h_r(k)], 'Right')
angles_l = body.inverse_kinematics([foot_last_pos[0], foot_last_pos[1], 0], 'Left')
# print("this is in main after hit", end=" ")
# print(rad2deg(angles_l))
if np.isnan(np.sum(angles_r)) or np.isnan(np.sum(angles_l)):
print("----------------NaN----------------------------")
return 0
body.set_angle(angles_l, 'Left')
body.set_angle(angles_r, 'Right')
body.get_all_pos()
body.ros_publish()
iteration += 1
rate.sleep()
    except Exception:
odom_sub.unregister()
l_2.pub.unregister()
l_3.pub.unregister()
l_4.pub.unregister()
l_5.pub.unregister()
l_6.pub.unregister()
r_2.pub.unregister()
r_3.pub.unregister()
r_4.pub.unregister()
r_5.pub.unregister()
r_6.pub.unregister()
print('-----------------walk_error-------------------')
return 0
i = 0
# while True:
# # initiate_time = random.choice([x / 100 for x in range(40, 71)])
# # T_dbl = random.choice([0.09, 0.1])
# # zc = random.choice([x / 100 for x in range(40, 71)])
# # i+=1
# # print(i)
# print(walk_test(0.48, 0.08, 0.41,0.05))
# # print(walk_test(initiate_time,T_dbl, zc))
# #
| 39.338346 | 112 | 0.503727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,771 | 0.169247 |
9e6ee084797d0ef64a6ff35e8d531e000c40a386 | 781 | py | Python | extract_annotations.py | milesroberts-123/extract-annotations | dde5733835607c80d45a48e4d097cd7322db84e6 | [
"MIT"
]
| null | null | null | extract_annotations.py | milesroberts-123/extract-annotations | dde5733835607c80d45a48e4d097cd7322db84e6 | [
"MIT"
]
| null | null | null | extract_annotations.py | milesroberts-123/extract-annotations | dde5733835607c80d45a48e4d097cd7322db84e6 | [
"MIT"
]
| null | null | null | from BCBio import GFF
from Bio import SeqIO
import csv
import sys
in_gff_file = sys.argv[1]
out_file = sys.argv[2]
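# Expected invocation (assumption): python extract_annotations.py <input.gff> <output.tsv>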
#Add annotations to sequences
print("Parsing .gff file...")
in_handle = open(in_gff_file)
limit_info = dict(gff_type = ["mRNA"])
protnames = []
protanno = []
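# Stream the GFF one record at a time (target_lines=1), keep only mRNA features, and collect each
# feature's Name and Note qualifiers.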
for rec in GFF.parse(in_handle, limit_info = limit_info, target_lines = 1):
feat = rec.features[0]
protnames.append(feat.qualifiers["Name"][0])
protanno.append(feat.qualifiers["Note"][0])
in_handle.close()
#Write lists of sequences and annotations to .tsv file
print("Writing annotations to %s ..." % out_file)
with open(out_file, "w") as f:
for protname, protan in zip(protnames, protanno):
entry = [protname, protan]
f.write("\t".join(entry) + "\n")
print("Extraction complete.")
| 23.666667 | 75 | 0.713188 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 187 | 0.239437 |
9e6ee92ffbfcbd13c35e3bca05e4f1adb80adce8 | 1,657 | py | Python | alienLanguageSort.py | syeddabeer/0projects | e132628f3693ed40c5ea9055a6c79f8266196bae | [
"Apache-2.0"
]
| null | null | null | alienLanguageSort.py | syeddabeer/0projects | e132628f3693ed40c5ea9055a6c79f8266196bae | [
"Apache-2.0"
]
| null | null | null | alienLanguageSort.py | syeddabeer/0projects | e132628f3693ed40c5ea9055a6c79f8266196bae | [
"Apache-2.0"
]
| null | null | null | """
In an alien language, surprisingly, they also use English lowercase letters, but possibly in a different order. The order of the alphabet is some permutation of the lowercase letters.
Given a sequence of words written in the alien language, and the order of the alphabet, return true if and only if the given words are sorted lexicographically in this alien language.
Example 1:
Input: words = ["hello","luther"], order = "hlabcdefgijkmnopqrstuvwxyz"
Output: true
Explanation: Since 'h' comes before 'l' in this language, the sequence is sorted.
"""
class Solution:
def isAlienSorted(self, words, order):
order_map={}
for index, value in enumerate(order):
            # order_map maps each letter to its position (rank) in the alien alphabet.
order_map[value] = index
for i in range(0, len(words)-1, 1):
for j in range(0, len(words[i])):
                # The earlier word matches the next word so far but is longer (e.g. "apple" vs "app"), so the pair is out of order.
if j >= len(words[i+1]):
return False
if words[i][j] != words[i+1][j]:
if order_map[words[i][j]] > order_map[words[i+1][j]]:
return False
break
return True
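# Quick sanity checks; expected output: True, False, False.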
words1=["hello","luther"]
order1="hlabcdefgijkmnopqrstuvwxyz"
print(Solution().isAlienSorted(words1, order1))
words2=["word","world","row"]
order2="worldabcefghijkmnpqstuvxyz"
print(Solution().isAlienSorted(words2, order2))
words2=["apple","app"]
order2="abcdefghijklmnopqrstuvwxyz"
print(Solution().isAlienSorted(words2, order2)) | 35.255319 | 182 | 0.631261 | 766 | 0.462281 | 0 | 0 | 0 | 0 | 0 | 0 | 831 | 0.501509 |
9e6f2b92ac7a2ae064a50bab58b816c3b9c6230f | 163 | py | Python | urizen/__init__.py | misagai/urizen | ad756749ae7b0bb6db7024c6128998e56236ee6d | [
"Apache-2.0"
]
| 107 | 2020-01-08T21:27:59.000Z | 2022-03-19T07:59:23.000Z | urizen/__init__.py | misagai/urizen | ad756749ae7b0bb6db7024c6128998e56236ee6d | [
"Apache-2.0"
]
| 1 | 2020-05-22T17:54:12.000Z | 2021-06-27T01:02:39.000Z | urizen/__init__.py | misagai/urizen | ad756749ae7b0bb6db7024c6128998e56236ee6d | [
"Apache-2.0"
]
| 7 | 2020-01-08T21:12:11.000Z | 2022-03-19T07:59:27.000Z | import urizen.core
from urizen.core import *
import urizen.generators
from urizen.generators import *
import urizen.visualizers
from urizen.visualizers import *
| 18.111111 | 32 | 0.822086 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
9e714ffa033577119fdde50aec9e7885109ed239 | 3,524 | py | Python | osna/tmp/stats_Youtube.py | tapilab/elevate-osna-news | bffe6c9a8269ea1afba0d998b79c8db1b842b7bf | [
"MIT"
]
| 2 | 2019-08-14T08:17:33.000Z | 2019-11-13T18:03:11.000Z | osna/tmp/stats_Youtube.py | tapilab/elevate-osna-news | bffe6c9a8269ea1afba0d998b79c8db1b842b7bf | [
"MIT"
]
| null | null | null | osna/tmp/stats_Youtube.py | tapilab/elevate-osna-news | bffe6c9a8269ea1afba0d998b79c8db1b842b7bf | [
"MIT"
]
| 2 | 2020-05-26T05:11:15.000Z | 2021-10-08T08:01:21.000Z | import pandas as pd
from collections import Counter
import re
def Mystats(directory):
df=pd.read_csv(directory)
id=df['social_id'].unique()
#1
print('Q1:Number of unique users:',len(id))
mes=df['comment_tokens']
#2
print('Q2:Number of unique messages:',len(mes.unique()))
#4
word=[]
for m in mes.astype(str):
mes=m.split()
for mes1 in mes:
mes1=re.sub("[0-9\W+]","",mes1)
# print(mes1)
if(mes1!=""):
word.append(mes1)
word1=list(set(word))
print('Q4:Number of unique words:',len(word1))
#5
print('Q5:Number of tokens:', len(mes))
#6
c=Counter(word)
print('Q6:50 most common words:',c.most_common(50))
word1 = []
df1=pd.read_csv('D:\\news\\training_data\\factchecks.csv')
true=df1[(df1.site=='youtube')&(df1.ruling=='TRUE')]
msgtrue=true['social_id']
print('Q3:Number of users/message in class TRUE:', len(msgtrue))
pd1=pd.merge(df,true,on=['social_id','site'],how='inner')
word1=tweet_tokenizer(pd1)
print('Q7:50 most common words:', Counter(word1).most_common(50))
false = df1[(df1.site == 'youtube') & (df1.ruling == 'FALSE')]
msgfalse = false['social_id']
print('Q3:Number of users/message in class FALSE:', len(msgfalse))
pd1 = pd.merge(df, false, on=['social_id', 'site'], how='inner')
word1 = tweet_tokenizer(pd1)
print('Q7:50 most common words:', Counter(word1).most_common(50))
fire= df1[(df1.site == 'youtube') & (df1.ruling == 'Pants on Fire!')]
msgfire = fire['social_id']
print('Number of users/message in class Pants on Fire:', len(msgfire))
pd1 = pd.merge(df, fire, on=['social_id', 'site'], how='inner')
word1 = tweet_tokenizer(pd1)
print('Q7:50 most common words:', Counter(word1).most_common(50))
mt = df1[(df1.site == 'youtube') & (df1.ruling == 'Mostly True')]
msgmt = mt['social_id']
print('Number of users/message in class Mostly True:', len(msgmt))
pd1 = pd.merge(df, mt, on=['social_id', 'site'], how='inner')
word1 = tweet_tokenizer(pd1)
print('Q7:50 most common words:', Counter(word1).most_common(50))
mf = df1[(df1.site == 'youtube') & (df1.ruling == 'Mostly False')]
msgmf = mf['social_id']
print('Number of users/message in class Mostly False:', len(msgmf))
pd1 = pd.merge(df, mf, on=['social_id', 'site'], how='inner')
word1 = tweet_tokenizer(pd1)
print('Q7:50 most common words:', Counter(word1).most_common(50))
ht = df1[(df1.site == 'youtube') & (df1.ruling == 'Half-True')]
msgfire = ht['social_id']
print('Number of users/message in class Half-True:', len(ht))
pd1 = pd.merge(df, ht, on=['social_id', 'site'], how='inner')
word1 = tweet_tokenizer(pd1)
print('Q7:50 most common words:', Counter(word1).most_common(50))
mx = df1[(df1.site == 'youtube') & (df1.ruling == 'MIXTURE')]
msgfire = mx['social_id']
print('Number of users/message in class MIXTURE:', len(mx))
pd1 = pd.merge(df, mx, on=['social_id', 'site'], how='inner')
word1 = tweet_tokenizer(pd1)
print('Q7:50 most common words:', Counter(word1).most_common(50))
def tweet_tokenizer(df):
list=[]
msg = df['comment_tokens']
for m in msg.astype(str):
mes = m.split()
for mes1 in mes:
mes1 = re.sub("[0-9\W+]", "", mes1)
if (mes1!= ""):
list.append(mes1)
print(list)
return list
if __name__=='__main__':
Mystats(directory)
| 34.54902 | 74 | 0.607832 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,165 | 0.33059 |
9e71ac9c81a289cfab5784c2ca72d59fdcd7d4d0 | 3,300 | py | Python | tests/test_css_parsing_tests.py | cmulders/styler | cffc6b99cc97e6299b75e84fe74e39216bd0109e | [
"Apache-2.0"
]
| null | null | null | tests/test_css_parsing_tests.py | cmulders/styler | cffc6b99cc97e6299b75e84fe74e39216bd0109e | [
"Apache-2.0"
]
| null | null | null | tests/test_css_parsing_tests.py | cmulders/styler | cffc6b99cc97e6299b75e84fe74e39216bd0109e | [
"Apache-2.0"
]
| null | null | null | import codecs
import re
from collections import namedtuple
import unittest
from typing import Collection, Iterable, Sequence, Tuple, Type
import io
from pathlib import Path
from styler import decode
import json
import logging
from itertools import islice
logger = logging.getLogger(__name__)
CSS_PARSING_TESTS_DIR = Path(__file__).parent / "css-parsing-tests"
JSONCase = namedtuple("JSONCase", "case, expectation")
def pairs(iterable):
"s -> (s0,s1), (s2,s3), (s4, s5), ..."
return zip(
islice(iterable, 0, None, 2),
islice(iterable, 1, None, 2),
)
class CSSParseTestCaseMeta(type):
"""Metaclass for dynanic test loading"""
@classmethod
def __prepare__(cls, clsname, bases, **kwargs):
namespace = dict()
if not "cases" in kwargs or unittest.TestCase not in bases:
logger.warning(
f"Class `{cls}` should specify a name as intialize argument and must base unittest.TestCase, nothing loaded"
)
return namespace
namespace["cases"] = list(cls.load_cases(kwargs["cases"]))
for idx, case in enumerate(namespace["cases"]):
name, fn = cls.create_test(idx, case)
namespace[name] = fn
return namespace
def __new__(cls, name, bases, namespace, **kwargs):
kwargs.pop("cases") # Already processd this in the __prepare__
return super().__new__(cls, name, bases, namespace, **kwargs)
@classmethod
def load_cases(cls, name) -> Iterable[JSONCase]:
json_path = (CSS_PARSING_TESTS_DIR / name).with_suffix(".json")
assert json_path.exists(), f"JSON cases file does not exists: {json_path}."
with json_path.open("rb") as fd:
raw_cases = json.load(fd)
return map(JSONCase._make, pairs(raw_cases))
@staticmethod
def create_test(idx, case: JSONCase):
def inner(self):
self.run_case(case.case, case.expectation)
if isinstance(case.case, dict) and "comment" in case.case:
case_str = case.case["comment"]
elif isinstance(case.case, dict) and "css_bytes" in case.case:
case_str = case.case["css_bytes"]
else:
case_str = ""
case_str = re.sub(r"[^\w]+", "_", case_str).strip("_").strip()
if case_str:
return f"test_{idx:03}_{case_str}", inner
else:
return f"test_{idx:03}", inner
class StylesheetBytesTestCase(
unittest.TestCase,
metaclass=CSSParseTestCaseMeta,
cases="stylesheet_bytes",
):
def run_case(self, case, expectation: Tuple[Iterable, str]):
css_bytes = str(case["css_bytes"]).encode("latin1")
protocol_encoding = case.get("protocol_encoding")
environment_encoding = case.get("environment_encoding")
expected_ast, expected_encoding = expectation
stream = decode(
io.BytesIO(css_bytes),
protocol_encoding=protocol_encoding,
environment_encoding=environment_encoding,
)
# Encoding matches with expectation
self.assertEqual(
codecs.lookup(expected_encoding).name,
codecs.lookup(stream.encoding).name,
f"Detected encoding {stream.encoding} instead of {expected_encoding}",
)
| 30.841121 | 124 | 0.638788 | 2,710 | 0.821212 | 0 | 0 | 1,552 | 0.470303 | 0 | 0 | 652 | 0.197576 |
9e74579632486a6b6e9af658505be492f28cf2a0 | 1,886 | py | Python | Callum/Day3/Day3.py | JackDanielHarding/advent-of-code-2021 | 5b860e36b4ac1af205c992763167ffef41a81a1b | [
"CC0-1.0"
]
| null | null | null | Callum/Day3/Day3.py | JackDanielHarding/advent-of-code-2021 | 5b860e36b4ac1af205c992763167ffef41a81a1b | [
"CC0-1.0"
]
| null | null | null | Callum/Day3/Day3.py | JackDanielHarding/advent-of-code-2021 | 5b860e36b4ac1af205c992763167ffef41a81a1b | [
"CC0-1.0"
]
| null | null | null | from collections import Counter
from functools import reduce
with open("./input.txt", "r") as inputFile:
readingsStr = inputFile.read().splitlines()
columnsRange = range(len(readingsStr[0]))
columns = map(lambda columnIndex : map(lambda row : row[columnIndex], readingsStr), columnsRange)
multiModes = map(lambda column: Counter(column).most_common(), columns)
multiModesWithoutCount = map(lambda mm: (mm[0][0], mm[1][0]), multiModes)
rates = reduce(lambda multiModeX, multiModeY: [multiModeX[0] + multiModeY[0], multiModeX[1] + multiModeY[1]], multiModesWithoutCount)
gamma = int(rates[0], 2)
epsilon = int(rates[1], 2)
print(f'Gamma: {gamma}, Epsilon: {epsilon}, Power: {gamma * epsilon}')
# Part 2
oxygenFilteredReadings = readingsStr.copy()
co2FilteredReadings = readingsStr.copy()
for columnIndex in range(len(readingsStr[0])):
oxygenColumns = map(lambda row : row[columnIndex], oxygenFilteredReadings)
oxygenCounter = Counter(oxygenColumns)
oxygenMostCommon = oxygenCounter.most_common()[0]
oxygenMostCommonVal = oxygenMostCommon[0]
if oxygenMostCommon[1] == oxygenCounter.total() / 2:
oxygenMostCommonVal = '1'
oxygenFilteredReadings = list(filter(lambda row : row[columnIndex] == oxygenMostCommonVal, oxygenFilteredReadings))
co2Columns = map(lambda row : row[columnIndex], co2FilteredReadings)
co2Counter = Counter(co2Columns)
co2MostCommon = co2Counter.most_common()
co2LeastCommon = co2MostCommon[len(co2MostCommon)-1]
co2LeastCommonVal = co2LeastCommon[0]
if co2LeastCommon[1] == co2Counter.total() / 2:
co2LeastCommonVal = '0'
co2FilteredReadings = list(filter(lambda row : row[columnIndex] == co2LeastCommonVal, co2FilteredReadings))
oxygen = int(oxygenFilteredReadings[0], 2)
co2 = int(co2FilteredReadings[0], 2)
print(f'Oxygen: {oxygen}, CO2: {co2}, Life Support Rating: {oxygen * co2}') | 46 | 133 | 0.73701 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 161 | 0.085366 |
9e753ccf2f01c17789c789b78559c01a411800d2 | 2,637 | py | Python | shell/shell.py | utep-cs-systems-courses/1-shell-EdwinTomy | 5e15372a49712584bc6a1bf3d8a508eb5328287a | [
"BSD-3-Clause"
]
| null | null | null | shell/shell.py | utep-cs-systems-courses/1-shell-EdwinTomy | 5e15372a49712584bc6a1bf3d8a508eb5328287a | [
"BSD-3-Clause"
]
| null | null | null | shell/shell.py | utep-cs-systems-courses/1-shell-EdwinTomy | 5e15372a49712584bc6a1bf3d8a508eb5328287a | [
"BSD-3-Clause"
]
| null | null | null | import os, sys, re
while True:
path = os.getcwd() + " $"
# User input
os.write(1, path.encode())
args = os.read(0, 1000).decode().split()
# Exit
if args[0] == "exit":
if len(args) > 1:
print("Program terminated with exit code", args[1])
sys.exit(int(args[1]))
print("Program terminated without exit code")
sys.exit(1)
# Change Directory
if args[0] == "cd":
try:
if len(args) < 2:
os.chdir(os.path.expanduser("~"))
else:
os.chdir(args[1])
except FileNotFoundError:
print("File not found!")
pass
continue
# Forking
rc = os.fork()
if rc < 0:
os.write(1, "Fork failure :( !")
sys.exit(1)
# Child process for redirect & piping
elif rc == 0:
# Redirect output
if '>' in args:
i = args.index('>')
os.close(1)
os.open(args[i+1], os.O_CREAT | os.O_WRONLY)
os.set_inheritable(1, True)
child_command = args[:i]
# Redirect output
elif '<' in args:
i = args.index('<')
os.close(1)
os.open(args[i-1], os.O_CREAT | os.O_WRONLY)
os.set_inheritable(1, True)
child_command = args[i:]
# Piping
elif '|' in args:
i = args.index('|')
pipe1 = args[:i]
pipe2 = args[(i + 1):]
pr, pw = os.pipe()
os.set_inheritable(pr, True)
os.set_inheritable(pw, True)
pipe_child = os.fork()
if pipe_child < 0:
sys.exit(1)
if pipe_child == 0:
os.close(1)
os.dup(pw)
os.set_inheritable(1, True)
os.close(pr)
os.close(pw)
child_command = pipe1
else:
os.close(0)
os.dup(pr)
os.set_inheritable(0, True)
os.close(pr)
os.close(pw)
child_command = pipe2
# Command not found
else:
print("Command not found")
sys.exit(1)
# Try each directory in path
for directory in re.split(":", os.environ['PATH']):
program = "%s/%s" % (directory, args[0])
try:
os.execve(program, child_command, os.environ)
except FileNotFoundError:
pass
sys.exit(1)
# Check for background processes
else:
childPidCode = os.wait()
| 24.877358 | 63 | 0.454683 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 382 | 0.144862 |
9e7720d00dac0986b6a6877d0a71575810560a55 | 528 | py | Python | lexicographic_order.py | YukiShinonome/NLP | 2ac59b0adc777882f8183cdca360bc277046d42c | [
"MIT"
]
| 4 | 2018-08-07T02:31:27.000Z | 2020-07-18T15:43:28.000Z | lexicographic_order.py | yukishinonome/NLP | 2ac59b0adc777882f8183cdca360bc277046d42c | [
"MIT"
]
| null | null | null | lexicographic_order.py | yukishinonome/NLP | 2ac59b0adc777882f8183cdca360bc277046d42c | [
"MIT"
]
| null | null | null | def lexicographic_order(w_list):
"""
単語のリストを辞書式順序(五十音順)に並び替える。
優先度:半角記号・半角数字 > アルファベット > ひらがな > カタカナ > 漢字 > 全角記号・全角数字
注意:漢字を意図した読みで認識しているとは限らず、人間が使う辞書の並びと異なる場合がある。
"""
w_list = sorted(w_list)
# もう一つ方法がある
# w_list.sort()
print(w_list)
if __name__ == '__main__':
w_list = ["おはよう", "こんにちは", "?", "?", "ありがとう", "japan", "!", "!", "りんご", \
"あんこ", "01", "25", "012", "01", "カタカナ", "本", "さんぽ", "日本", "アイス", \
"花", "星", "abc", "def"]
lexicographic_order(w_list) | 31.058824 | 81 | 0.530303 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 615 | 0.721831 |
9e78a464d85758a6410cf9ef2916db721432642c | 4,860 | py | Python | radar_label_convert_kitti_format.py | wzan0001/Astyx-radar-dataset-convert-to-kitti-format | f0e6bf04fc9cd7b49c96f09803598a2c8561bf5a | [
"MIT"
]
| 12 | 2019-11-04T08:56:41.000Z | 2022-03-29T05:47:14.000Z | radar_label_convert_kitti_format.py | paland3/Astyx-radar-dataset-convert-to-kitti-format | f0e6bf04fc9cd7b49c96f09803598a2c8561bf5a | [
"MIT"
]
| 3 | 2019-12-04T18:19:06.000Z | 2020-10-08T12:34:21.000Z | radar_label_convert_kitti_format.py | paland3/Astyx-radar-dataset-convert-to-kitti-format | f0e6bf04fc9cd7b49c96f09803598a2c8561bf5a | [
"MIT"
]
| 3 | 2019-12-04T18:06:37.000Z | 2020-10-01T09:25:10.000Z | #####################################################
##将radar 数据转为kitti格式 ##
#####################################################
import json
import math
import os
import numpy as np
import utils
def rotMat2quatern(R):
# transform the rotation matrix into quatern
q = np.zeros(4)
K = np.zeros([4, 4])
K[0, 0] = 1 / 3 * (R[0, 0] - R[1, 1] - R[2, 2])
K[0, 1] = 1 / 3 * (R[1, 0] + R[0, 1])
K[0, 2] = 1 / 3 * (R[2, 0] + R[0, 2])
K[0, 3] = 1 / 3 * (R[1, 2] - R[2, 1])
K[1, 0] = 1 / 3 * (R[1, 0] + R[0, 1])
K[1, 1] = 1 / 3 * (R[1, 1] - R[0, 0] - R[2, 2])
K[1, 2] = 1 / 3 * (R[2, 1] + R[1, 2])
K[1, 3] = 1 / 3 * (R[2, 0] - R[0, 2])
K[2, 0] = 1 / 3 * (R[2, 0] + R[0, 2])
K[2, 1] = 1 / 3 * (R[2, 1] + R[1, 2])
K[2, 2] = 1 / 3 * (R[2, 2] - R[0, 0] - R[1, 1])
K[2, 3] = 1 / 3 * (R[0, 1] - R[1, 0])
K[3, 0] = 1 / 3 * (R[1, 2] - R[2, 1])
K[3, 1] = 1 / 3 * (R[2, 0] - R[0, 2])
K[3, 2] = 1 / 3 * (R[0, 1] - R[1, 0])
K[3, 3] = 1 / 3 * (R[0, 0] + R[1, 1] + R[2, 2])
D, V = np.linalg.eig(K)
pp = 0
for i in range(1, 4):
if(D[i] > D[pp]):
pp = i
q = V[:, pp]
q = np.array([q[3], q[0], q[1], q[2]])
#print(q)
return q
def qaut_to_angle(quat):
x=quat[0]
y=quat[1]
z=quat[2]
w=quat[3]
rol = math.atan2(2*(w*x+y*z),1-2*(x*x+y*y))#the rol is the yaw angle!
#pith = math.asin(2*(w*y-z*z))
#yaw = math.atan2(2*(w*z+x*y),1-2*(z*z+y*y))
return rol
def quaternionToRotationMatrix(quat):
q = quat.copy()
q=np.array(q)
n = np.dot(q, q)
if n < np.finfo(q.dtype).eps:
rot_matrix=np.identity(4)
return rot_matrix
q = q * np.sqrt(2.0 / n)
q = np.outer(q, q)
rot_matrix = np.array(
[[1.0 - q[2, 2] - q[3, 3], q[1, 2] + q[3, 0], q[1, 3] - q[2, 0]],
[q[1, 2] - q[3, 0], 1.0 - q[1, 1] - q[3, 3], q[2, 3] + q[1, 0]],
[q[1, 3] + q[2, 0], q[2, 3] - q[1, 0], 1.0 - q[1, 1] - q[2, 2]]],
dtype=q.dtype)
return rot_matrix
def radarcoordToCameracoordYaw(quat,frame_calib):
radar_quat_to_mat=quaternionToRotationMatrix(quat)
radar_to_camera_mat=np.array(frame_calib.tr_velodyne_to_cam)
radar_to_camera_mat=radar_to_camera_mat[:,0:3]
rot_mat=np.dot(radar_to_camera_mat,radar_quat_to_mat)
rot_quat=rotMat2quatern(rot_mat)
angles=qaut_to_angle(rot_quat)
return angles
def label_convert(save_dir,read_dir,calib_dir):
name_list=[]
for file in os.listdir(read_dir):
name_list.append(file)
for name in name_list:
read_name=read_dir+name
save_name=save_dir+name[0:6]+'.txt'
img_idx=int(name[0:6])
print(save_name)
frame_calib = utils.read_calibration(calib_dir, img_idx)
with open(save_name,mode='w')as save_txt_file_name:
with open(read_name,mode='r')as read_json_file_name:
read_object=json.load(read_json_file_name)#dict
objts=read_object['objects']#list
for oo in objts:
obj=oo#dict
anotation=[]
if obj['classname']=='Other Vehicle':
anotation.append('Other_Vehicle')
else:
anotation.append(obj['classname'])
anotation.append('0')#truncated unused
anotation.append(str(obj['occlusion']))
anotation.append('-10')#alpha unused
anotation.append('0')#2d box unuseds
anotation.append('0')
anotation.append('0')
anotation.append('0')
dim=obj['dimension3d']
anotation.append(str(dim[2]))#h
anotation.append(str(dim[1]))#w
anotation.append(str(dim[0]))#l
centerpoint=np.array(obj['center3d'])
centerpoint=np.reshape(centerpoint,(1,3))
camera_centerpoint = utils.radar_to_cam_frame(centerpoint, frame_calib)#transform to camera coordinate
anotation.append(str(camera_centerpoint[0][0]))
anotation.append(str(camera_centerpoint[0][1]+dim[2]*0.5))#top centor point
anotation.append(str(camera_centerpoint[0][2]))
orientation_quat=obj['orientation_quat']#quaterns
yaw_ang=radarcoordToCameracoordYaw(orientation_quat,frame_calib)
anotation.append(str(yaw_ang))
anotation.append('0')
str_anot=' '.join(anotation)
#print(str_anot)
save_txt_file_name.write(str_anot+'\n')
| 37.384615 | 122 | 0.480864 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 615 | 0.12618 |
9e7a0da2b81a2065d69c0b76472c3f6bc721ee3a | 2,739 | py | Python | wb/main/jobs/accuracy_analysis/per_tensor/create_per_tensor_scripts_job.py | apaniukov/workbench | 2f2653ecfd0143d2d53e33ad84379f13443fdfaa | [
"Apache-2.0"
]
| 23 | 2022-03-17T12:24:09.000Z | 2022-03-31T09:13:30.000Z | wb/main/jobs/accuracy_analysis/per_tensor/create_per_tensor_scripts_job.py | apaniukov/workbench | 2f2653ecfd0143d2d53e33ad84379f13443fdfaa | [
"Apache-2.0"
]
| 18 | 2022-03-21T08:17:44.000Z | 2022-03-30T12:42:30.000Z | wb/main/jobs/accuracy_analysis/per_tensor/create_per_tensor_scripts_job.py | apaniukov/workbench | 2f2653ecfd0143d2d53e33ad84379f13443fdfaa | [
"Apache-2.0"
]
| 16 | 2022-03-17T12:24:14.000Z | 2022-03-31T12:15:12.000Z | """
OpenVINO DL Workbench
Class for creating per tensor scripts job
Copyright (c) 2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from contextlib import closing
from pathlib import Path
from sqlalchemy.orm import Session
from config.constants import (ACCURACY_ARTIFACTS_FOLDER, JOBS_SCRIPTS_FOLDER_NAME, JOB_SCRIPT_NAME)
from wb.extensions_factories.database import get_db_session_for_celery
from wb.main.enumerates import JobTypesEnum, StatusEnum
from wb.main.jobs.interfaces.ijob import IJob
from wb.main.models import (PerTensorReportJobsModel, CreatePerTensorScriptsJobModel)
from wb.main.scripts.job_scripts_generators.tensor_distance_job_script_generator import \
get_tensor_distance_job_script_generator
from wb.main.utils.utils import create_empty_dir
class CreatePerTensorScriptsJob(IJob):
job_type = JobTypesEnum.create_per_tensor_scripts_type
_job_model_class = CreatePerTensorScriptsJobModel
def __init__(self, job_id: int, **unused_kwargs):
super().__init__(job_id=job_id)
self._attach_default_db_and_socket_observers()
def run(self):
self._job_state_subject.update_state(status=StatusEnum.running, progress=0)
with closing(get_db_session_for_celery()) as session:
session: Session
job_model: CreatePerTensorScriptsJobModel = self.get_job_model(session)
accuracy_artifacts_path = Path(ACCURACY_ARTIFACTS_FOLDER) / str(job_model.pipeline_id)
scripts_path = accuracy_artifacts_path / JOBS_SCRIPTS_FOLDER_NAME
job_script_file_path = scripts_path / JOB_SCRIPT_NAME
create_empty_dir(scripts_path)
pipeline_id = job_model.pipeline_id
per_tensor_report_job_model: PerTensorReportJobsModel = (
session.query(PerTensorReportJobsModel).filter_by(pipeline_id=pipeline_id).first()
)
job_script_generator = get_tensor_distance_job_script_generator(per_tensor_report_job_model)
job_script_generator.create(job_script_file_path)
self.on_success()
def on_success(self):
self._job_state_subject.update_state(status=StatusEnum.ready, progress=100)
self._job_state_subject.detach_all_observers()
| 44.177419 | 104 | 0.775831 | 1,454 | 0.530851 | 0 | 0 | 0 | 0 | 0 | 0 | 645 | 0.235487 |
9e7a19b95d053efb0d88b148936622f138516c6b | 862 | py | Python | src/products/migrations/0010_auto_20201201_0119.py | xistadi/BookStore | 878c27e0c53ac0434d3866e4a27ffb0e460e4363 | [
"Apache-2.0"
]
| null | null | null | src/products/migrations/0010_auto_20201201_0119.py | xistadi/BookStore | 878c27e0c53ac0434d3866e4a27ffb0e460e4363 | [
"Apache-2.0"
]
| null | null | null | src/products/migrations/0010_auto_20201201_0119.py | xistadi/BookStore | 878c27e0c53ac0434d3866e4a27ffb0e460e4363 | [
"Apache-2.0"
]
| null | null | null | # Generated by Django 3.1.2 on 2020-11-30 22:19
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0009_auto_20201201_0038'),
]
operations = [
migrations.RemoveField(
model_name='book',
name='rating',
),
migrations.AddField(
model_name='book',
name='number_of_orders',
field=models.PositiveIntegerField(default=0, verbose_name='Количество заказазов'),
),
migrations.AlterField(
model_name='book',
name='avr_rating',
field=models.SmallIntegerField(default=0, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(5)], verbose_name='Средний рейтинг'),
),
]
| 29.724138 | 189 | 0.62993 | 772 | 0.86257 | 0 | 0 | 0 | 0 | 0 | 0 | 210 | 0.234637 |
9e7f4a260998bd0657b9e3609f0b0e379a30df8c | 212 | py | Python | Integrators/leap.py | chaosandcomplexity/Classical-Propagation | 2180d1aafd5e0b5c378382b9fdbeb21d759b6ce2 | [
"MIT"
]
| null | null | null | Integrators/leap.py | chaosandcomplexity/Classical-Propagation | 2180d1aafd5e0b5c378382b9fdbeb21d759b6ce2 | [
"MIT"
]
| null | null | null | Integrators/leap.py | chaosandcomplexity/Classical-Propagation | 2180d1aafd5e0b5c378382b9fdbeb21d759b6ce2 | [
"MIT"
]
| null | null | null | def method(q1,p1,dq,dp,t1,dt):
a1=[0.5,0.5]
b1=[0,1]
A=[dq,dp]
for i in range(len(a1)):
q1+=b1[i]*dt*A[0](q1,p1,t1)
p1+=a1[i]*dt*A[1](q1,p1,t1)
t1+=dt
return q1,p1,t1
| 19.272727 | 35 | 0.462264 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
9e7f57ad27d934ffd652f467c3d73fde22074499 | 1,217 | py | Python | pandora/queue.py | shwetabhsharan/leetcode | 6630592b1f962bb4c4bb3c83162a8ff12b2074b3 | [
"MIT"
]
| null | null | null | pandora/queue.py | shwetabhsharan/leetcode | 6630592b1f962bb4c4bb3c83162a8ff12b2074b3 | [
"MIT"
]
| null | null | null | pandora/queue.py | shwetabhsharan/leetcode | 6630592b1f962bb4c4bb3c83162a8ff12b2074b3 | [
"MIT"
]
| null | null | null | """
enqueue
dequeue
size
traverse
Queue Implementation using SLL
"""
class Node(object):
def __init__(self, value):
self.value = value
self.next = None
class Queue(object):
def __init__(self):
self.head = None
def enqueue(self, value):
if self.head is None:
self.head = Node(value)
else:
node = Node(value)
node.next = self.head
self.head = node
def dequeue(self):
cnt = 0
curr = self.head
prev = None
while curr is not None:
cnt = cnt + 1
if cnt == self.size():
prev.next = None
curr.value = None
else:
prev = curr
curr = curr.next
def traverse(self):
curr = self.head
while curr is not None:
print curr.value
curr = curr.next
def size(self):
cnt = 0
curr = self.head
while curr is not None:
cnt = cnt + 1
curr = curr.next
return cnt
obj = Queue()
obj.enqueue(1)
obj.enqueue(2)
obj.enqueue(3)
obj.enqueue(4)
obj.enqueue(5)
obj.traverse()
obj.dequeue()
obj.traverse() | 19.015625 | 35 | 0.507806 | 1,008 | 0.828266 | 0 | 0 | 0 | 0 | 0 | 0 | 70 | 0.057518 |
9e80b42b52475d6e15054bfeda78fadd12468c69 | 2,133 | py | Python | spotify/v1/track.py | geekonedge/spotify | 1f4cf733a1fb11ab96259ed1e229b141e5c696f3 | [
"MIT"
]
| 2 | 2018-10-10T08:00:47.000Z | 2021-10-12T04:15:33.000Z | spotify/v1/track.py | geekonedge/spotify | 1f4cf733a1fb11ab96259ed1e229b141e5c696f3 | [
"MIT"
]
| 2 | 2018-08-31T21:59:47.000Z | 2018-08-31T22:27:57.000Z | spotify/v1/track.py | geekonedge/spotify | 1f4cf733a1fb11ab96259ed1e229b141e5c696f3 | [
"MIT"
]
| 1 | 2018-08-31T21:18:58.000Z | 2018-08-31T21:18:58.000Z | from spotify import values
from spotify.page import Page
from spotify.resource import Resource, UpgradableInstance
class TrackContext(Resource):
def __init__(self, version, id):
super(TrackContext, self).__init__(version)
self.id = id
def fetch(self, market=values.UNSET):
params = values.of({
'market': market
})
response = self.version.request('GET', '/tracks/{}'.format(self.id), params=params)
return TrackInstance(self.version, response.json())
class TrackInstance(UpgradableInstance):
@property
def artists(self):
from spotify.v1.artist import ArtistInstance
return [ArtistInstance(self.version, artist) for artist in self.property('artists')]
@property
def available_markets(self):
return self.property('available_markets')
@property
def disc_number(self):
return self.property('disc_number')
@property
def duration_ms(self):
return self.property('duration_ms')
@property
def explicit(self):
return self.property('explicit')
@property
def external_urls(self):
return self.property('external_urls')
@property
def id(self):
return self.property('id')
@property
def name(self):
return self.property('name')
@property
def preview_url(self):
return self.property('preview_url')
@property
def track_number(self):
return self.property('track_number')
@property
def type(self):
return self.property('type')
@property
def uri(self):
return self.property('uri')
class TrackList(Resource):
def get(self, id):
return TrackContext(self.version, id)
def list(self, ids, market=values.UNSET):
params = values.of({
'ids': ','.join(ids),
'market': market
})
response = self.version.request('GET', '/tracks', params=params)
return TrackPage(self.version, response.json(), 'tracks')
class TrackPage(Page):
@property
def instance_class(self):
return TrackInstance
| 23.184783 | 92 | 0.635724 | 2,006 | 0.940459 | 0 | 0 | 1,076 | 0.504454 | 0 | 0 | 190 | 0.089076 |
9e82bb1c42a0dd7d3d0090469ffab04c743997a6 | 3,526 | py | Python | basic/wordcount.py | duyduc27/Google-s-Python-Class | 1ea9ab6e4d4f60564f4226b9ff9aaf94b1854a7d | [
"Apache-2.0"
]
| null | null | null | basic/wordcount.py | duyduc27/Google-s-Python-Class | 1ea9ab6e4d4f60564f4226b9ff9aaf94b1854a7d | [
"Apache-2.0"
]
| null | null | null | basic/wordcount.py | duyduc27/Google-s-Python-Class | 1ea9ab6e4d4f60564f4226b9ff9aaf94b1854a7d | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""Wordcount exercise
Google's Python class
The main() below is already defined and complete. It calls print_words()
and print_top() functions which you write.
1. For the --count flag, implement a-- print_words(filename) function that counts
how often each word appears in the text and prints:
word1 count1
word2 count2
...
Print the above list in order sorted by word (python will sort punctuation to
come before letters -- that's fine). Store all the words as lowercase,
so 'The' and 'the' count as the same word.
2. For the --topcount flag, implement a print_top(filename) which is similar
to print_words() but which prints just the top 20 most common words sorted
so the most common word is first, then the next most common, and so on.
Use str.split() (no arguments) to split on all whitespac
Workflow: don't build the whole program at once. Get it to an intermediate
milestone and print your data structure and sys.exit(0).
When that's working, try for the next milestone.
Optional: define a helper function to avoid code duplication inside
print_words() and print_top().
"""
import sys
# +++your code here+++
# Define print_words(filename) and print_top(filename) functions.
# You could write a helper utility function that reads a file
# and builds and returns a word/count dict for it.
# Then print_words() and print_top() can just call the utility function.
###
# This basic command line argument parsing code is provided and
# calls the print_words() and print_top() functions which you must define.
def text_to_words(the_text):
my_substitutions = the_text.maketrans(
# If you find any of these
"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&()*+,-./:;<=>?@[]^_`{|}~'\\",
# Replace them by these
"abcdefghijklmnopqrstuvwxyz ")
# Translate the text now.
cleaned_text = the_text.translate(my_substitutions)
wds = cleaned_text.split()
return wds
def get_words_in_file(file):
f = open(file, 'r')
content= f.read()
wds = text_to_words(content)
f.close()
return wds
def make_dic_from_wds(file):
dic = {} # initial dictionary
lis_wds= get_words_in_file(file)
lis_wds.sort()
for word in lis_wds:
if word not in dic:
dic[word] = 1
else:
dic[word] += 1
return dic
def print_words(filename):
"""Analyse text file. Print words and their counts
Args:
Return:
"""
dic = make_dic_from_wds(filename)
print("Word Count")
print("=======================")
for k, v in dic.items():
print(k," " ,v)
def print_top(filename):
"""Print 20 most common words sorted. So the most common word is first, so on..."""
dic = make_dic_from_wds(filename)
print("=======================")
print("20 most common words")
n= 0
for key, value in sorted(dic.items(), key=lambda kv:kv[1], reverse=True):
print(key," ", value)
n += 1
if n>= 20:
break
def main():
if len(sys.argv) != 3:
print ('usage: ./wordcount.py {--count | --topcount} file')
sys.exit(1)
option = sys.argv[1]
filename = sys.argv[2]
if option == '--count':
print_words(filename)
elif option == '--topcount':
print_top(filename)
else:
print ('unknown option: ' + option)
sys.exit(1)
if __name__ == '__main__':
main()
| 28.208 | 85 | 0.67612 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,340 | 0.663642 |
9e8502300566fe834355583417c7c53166b5b4bb | 871 | py | Python | tests/test_multicollinearity_test.py | flor14/lrasm | dd3a05f34319049f51fdfa9407ab4d5906ea82ed | [
"MIT"
]
| null | null | null | tests/test_multicollinearity_test.py | flor14/lrasm | dd3a05f34319049f51fdfa9407ab4d5906ea82ed | [
"MIT"
]
| 21 | 2022-01-16T23:56:32.000Z | 2022-02-05T18:51:49.000Z | tests/test_multicollinearity_test.py | flor14/lrasm | dd3a05f34319049f51fdfa9407ab4d5906ea82ed | [
"MIT"
]
| 2 | 2022-01-27T20:30:01.000Z | 2022-02-26T01:32:21.000Z | from lrasm.multicollinearity_tst import multicollinearity_test
import numpy as np
import pandas as pd
from statsmodels.stats.outliers_influence import variance_inflation_factor
import pytest
def test_multicollinearity_test():
"""Test multicollinearity test outputs from dataset"""
X_proper = pd.DataFrame({"head": [1,2,3,3,5,8,7],"Feet": [7,6,5,4,3,2,1], 'Random': [12,24,25,26,29,55,99]})
X_str_df = pd.DataFrame({"head": ["str",2,3,4,5,6,7]})
X_series = pd.Series([1,2,3,4,5,6,7])
with pytest.raises(TypeError):
multicollinearity_test(X_str_df, 10)
multicollinearity_test(X_series, 10)
assert round(multicollinearity_test(X_proper, 10)['VIF'][0], 2) == 9.04
assert round(multicollinearity_test(X_proper, 10)['VIF'][2], 2) == 8.37
assert isinstance(multicollinearity_test(X_proper, 10), pd.DataFrame) | 39.590909 | 112 | 0.699196 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 95 | 0.10907 |
9e86f093b3ddd416fb693a33a299a63023c78c4a | 1,014 | py | Python | src/entry_point.py | TaikiInoue/KaoruRecognition | 9e42944d89abeea3a754b8ce858b0aa66119565f | [
"MIT"
]
| null | null | null | src/entry_point.py | TaikiInoue/KaoruRecognition | 9e42944d89abeea3a754b8ce858b0aa66119565f | [
"MIT"
]
| null | null | null | src/entry_point.py | TaikiInoue/KaoruRecognition | 9e42944d89abeea3a754b8ce858b0aa66119565f | [
"MIT"
]
| null | null | null | # References
# https://docs.aws.amazon.com/sagemaker/latest/dg/adapt-inference-container.html
import logging
import numpy as np
import PIL
from numpy import ndarray as NDArray
from PIL.Image import Image
from six import BytesIO
from torch.nn import Module
from facenet_pytorch import MTCNN
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def model_fn(model_dir: str) -> Module:
return MTCNN(image_size=160, margin=0, device="cuda:0")
def input_fn(request_body: bytes, content_type: str = "application/x-npy") -> Image:
stream = BytesIO(request_body)
np_img = np.load(stream, allow_pickle=True)
return PIL.Image.fromarray(np_img)
def predict_fn(input_data: Image, model: Module) -> NDArray:
face = model(input_data)
face = face.permute(1, 2, 0)
return face.detach().cpu().numpy()
def output_fn(prediction: NDArray, content_type: str = "application/x-npy") -> bytes:
buffer = BytesIO()
np.save(buffer, prediction)
return buffer.getvalue()
| 23.045455 | 85 | 0.732742 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 138 | 0.136095 |
9e87ca188b43074a3794e37a50617be88767b932 | 2,528 | py | Python | opentamp/domains/namo_domain/generate_simple_sort.py | Algorithmic-Alignment-Lab/openTAMP | f0642028d551d0436b3a3dbc3bfb2f23a00adc14 | [
"MIT"
]
| 4 | 2022-02-13T15:52:18.000Z | 2022-03-26T17:33:13.000Z | opentamp/domains/namo_domain/generate_simple_sort.py | Algorithmic-Alignment-Lab/OpenTAMP | eecb950bd273da8cbed4394487630e8453f2c242 | [
"MIT"
]
| 1 | 2022-02-13T22:48:09.000Z | 2022-02-13T22:48:09.000Z | opentamp/domains/namo_domain/generate_simple_sort.py | Algorithmic-Alignment-Lab/OpenTAMP | eecb950bd273da8cbed4394487630e8453f2c242 | [
"MIT"
]
| null | null | null | import itertools
import random
NUM_CANS = 1
filename = "namo_probs/sort_prob_{0}.prob".format(NUM_CANS)
GOAL = "(RobotAt pr2 robot_end_pose)"
HEIGHT = 5
WIDTH = 5
def main():
s = "# AUTOGENERATED. DO NOT EDIT.\n# Configuration file for NAMO problem instance. Blank lines and lines beginning with # are filtered out.\n\n"
coords = list(itertools.product(list(range(-HEIGHT, HEIGHT)), list(range(-WIDTH, WIDTH))))
random.shuffle(coords)
coord_ind = 0
s += "# The values after each attribute name are the values that get passed into the __init__ method for that attribute's class defined in the domain configuration.\n"
s += "Objects: "
for n in range(NUM_CANS):
s += "Target (name can%d_init_target); "%(n)
s += "RobotPose (name pdp_target%d); "%(n)
s += "Can (name can%d); "%(n)
s += "Target (name can%d_end_target); "%(n)
s += "Robot (name %s); "%"pr2"
s += "Grasp (name {}); ".format("grasp0")
s += "RobotPose (name %s); "%"robot_init_pose"
s += "RobotPose (name %s); "%"robot_end_pose"
s += "Target (name %s) \n\n"%"middle_target"
s += "Init: "
for i in range(NUM_CANS):
s += "(geom can%d_init_target 0.2), (value can%d_init_target %s), "%(i, i, list(coords[i]))
s += "(value pdp_target%d undefined), "%i
s += "(gripper pdp_target%d undefined), "%i
s += "(geom can%d 0.2), (pose can%d %s), "%(i, i, list(coords[i]))
s += "(geom can%d_end_target 0.2), (value can%d_end_target %s), "%(i, i, list(coords[i]))
s += "(value grasp0 undefined), "
s += "(geom %s 0.2), (pose %s %s), "%("pr2", "pr2", [0, 0])
s += "(gripper pr2 [0.]), "
s += "(value %s %s), "%("robot_init_pose", [0., 0.])
s += "(value %s %s), "%("robot_end_pose", [0., 0.])
s += "(gripper %s [0.]), "%("robot_init_pose")
s += "(gripper %s [0.]), "%("robot_end_pose")
s += "(value %s [0., 0.]); "%("middle_target")
for i in range(NUM_CANS):
s += "(At can{} can{}_init_target), ".format(i, i)
s += "(Stationary can{}), ".format(i)
for j in range(NUM_CANS):
s += "(StationaryNEq can{} can{}), ".format(i, j)
# s += "(InContact pr2 pdp_target{} can{}_init_target), ".format(i, i)
# s += "(GraspValid pdp_target{} can{}_init_target grasp0), ".format(i, i)
s += "(RobotAt pr2 robot_init_pose), "
s += "(IsMP pr2) \n\n"
s += "Goal: %s"%GOAL
with open(filename, "w") as f:
f.write(s)
if __name__ == "__main__":
main()
| 37.731343 | 171 | 0.561709 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,475 | 0.583465 |
9e87cdddbb6985c539e2f3fd8f43bf67a78297aa | 862 | py | Python | setup.py | al45tair/pygeon | 70e95f6ffc8988fa212e312452d4688e0e544966 | [
"MIT"
]
| 1 | 2022-02-26T17:14:38.000Z | 2022-02-26T17:14:38.000Z | setup.py | al45tair/pygeon | 70e95f6ffc8988fa212e312452d4688e0e544966 | [
"MIT"
]
| null | null | null | setup.py | al45tair/pygeon | 70e95f6ffc8988fa212e312452d4688e0e544966 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
from setuptools import setup
with open('README.rst', 'rb') as f:
long_desc = f.read().decode('utf-8')
setup(name='pygeon',
version='0.1.0',
description='IP Geolocation in Python',
long_description=long_desc,
author='Alastair Houghton',
author_email='[email protected]',
url='http://bitbucket.org/al45tair/pygeon',
license='MIT License',
packages=['pygeon'],
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Topic :: Software Development :: Libraries',
'Topic :: System :: Networking'
],
scripts=['scripts/pygeon'],
install_requires=[
'sqlalchemy >= 0.9.8',
'IPy >= 0.82',
'bintrees >= 2.0.1'
],
provides=['pygeon']
)
| 28.733333 | 55 | 0.558005 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 419 | 0.486079 |
9e87ed3751d6a84cde898423e624e0e29e5bc397 | 357 | py | Python | api/static_api.py | SachinKalsi/face-detection-api | 93d012a1b315d3898dbff2612e7beffabdf7d9f7 | [
"MIT"
]
| 9 | 2019-02-28T09:32:39.000Z | 2021-07-06T23:12:47.000Z | api/static_api.py | SachinKalsi/face-detection-api | 93d012a1b315d3898dbff2612e7beffabdf7d9f7 | [
"MIT"
]
| 2 | 2022-01-13T01:00:20.000Z | 2022-03-11T23:37:01.000Z | api/static_api.py | SachinKalsi/face-api | 93d012a1b315d3898dbff2612e7beffabdf7d9f7 | [
"MIT"
]
| 4 | 2020-02-02T17:04:33.000Z | 2020-09-14T05:25:59.000Z | from flask import Blueprint, render_template, send_file
from flask_app import app
static_api = Blueprint('static_api', __name__)
# @static_api.route('/', methods=['GET'])
# def index():
# return render_template('index.html')
@static_api.route('/<image_id>', methods=['GET'])
def get_image(image_id):
return send_file('static/' +image_id + '.jpg') | 29.75 | 55 | 0.714286 | 0 | 0 | 0 | 0 | 125 | 0.35014 | 0 | 0 | 142 | 0.397759 |
9e8817627535df6f0d585998aa24f60ff7d9791c | 365 | py | Python | skp_edu_docker/code/cluster/preprocess/pre_node_feed_fr2cnn.py | TensorMSA/hoyai_docker | 12f0041e6306d8a6421585a4b51666bad30be442 | [
"MIT"
]
| 8 | 2017-06-16T00:19:12.000Z | 2020-08-13T03:15:57.000Z | skp_edu_docker/code/cluster/preprocess/pre_node_feed_fr2cnn.py | TensorMSA/tensormsa_docker | 12f0041e6306d8a6421585a4b51666bad30be442 | [
"MIT"
]
| 21 | 2017-06-09T10:15:14.000Z | 2018-03-29T07:51:02.000Z | skp_edu_docker/code/cluster/preprocess/pre_node_feed_fr2cnn.py | TensorMSA/hoyai_docker | 12f0041e6306d8a6421585a4b51666bad30be442 | [
"MIT"
]
| 4 | 2017-10-25T09:59:53.000Z | 2020-05-07T09:51:11.000Z | from cluster.preprocess.pre_node_feed import PreNodeFeed
class PreNodeFeedFr2Cnn(PreNodeFeed):
"""
"""
def run(self, conf_data):
"""
override init class
"""
super(PreNodeFeedFr2Cnn, self).run(conf_data)
self._init_node_parm(conf_data['node_id'])
def _convert_data_format(self, obj, index):
pass
| 19.210526 | 56 | 0.635616 | 304 | 0.832877 | 0 | 0 | 0 | 0 | 0 | 0 | 64 | 0.175342 |
9e8906fbd78257ce287c1863743dd186ef2262c2 | 3,535 | py | Python | Multi_Page_WebApp/services/python_worker/receive.py | Anthogr/netcdf_editor_app | e1d5fe9bcb5e9374dceec517c3532743dd7f2539 | [
"MIT"
]
| 8 | 2020-11-04T15:55:02.000Z | 2021-09-02T11:12:50.000Z | Multi_Page_WebApp/services/python_worker/receive.py | Anthogr/netcdf_editor_app | e1d5fe9bcb5e9374dceec517c3532743dd7f2539 | [
"MIT"
]
| 88 | 2020-10-09T14:32:12.000Z | 2021-07-21T14:09:58.000Z | Multi_Page_WebApp/services/python_worker/receive.py | Anthogr/netcdf_editor_app | e1d5fe9bcb5e9374dceec517c3532743dd7f2539 | [
"MIT"
]
| 5 | 2020-11-10T17:10:24.000Z | 2021-10-05T03:11:47.000Z | #!/usr/bin/env python
from datetime import datetime
import pika
import os
import sys
import steps # noqa: F401
import json
from climate_simulation_platform.db import step_parameters, save_step, step_seen
from climate_simulation_platform import create_app
def func_params(func, body):
# If invalidated isn't in keys then this is a "root" call meaning it should be run
if "invalidated" not in body.keys():
return body
# If 'invalidated': 'y(es)' in the body then this means the step has been invalidated
# It should be rerun IF it has already been run before OR has no params
# We will rerun it with the same parameters
if "invalidated" in body.keys() and body["invalidated"].lower() in ["yes", "y"]:
if "has_params" in body.keys() and body["has_params"].lower() in ["no", "n"]:
return body
app = create_app()
with app.app_context():
if step_seen(body["id"], func):
return step_parameters(body["id"], func)
return None
def main():
connection = pika.BlockingConnection(
pika.ConnectionParameters(host=os.environ["BROKER_HOSTNAME"])
)
app = create_app()
channel = connection.channel()
channel.exchange_declare(exchange="preprocessing", exchange_type="topic")
channel.queue_declare(queue="preprocessing_python_task_queue", durable=True)
channel.queue_bind(
exchange="preprocessing",
queue="preprocessing_python_task_queue",
routing_key="preprocessing.*.python",
)
def callback(ch, method, properties, body):
routing_key = method.routing_key
print(
f" [x] {datetime.now()} Received message from {routing_key} with body: {body.decode()}",
flush=True,
)
func = routing_key.split(".")[1]
body = json.loads(body.decode())
params = func_params(func, body)
print(f"{datetime.now()} Params: {params}", flush=True)
if params is not None:
_id = body["id"]
if func != "invalidate":
with app.app_context():
save_step(_id, func, params, up_to_date=False)
eval(f"steps.{func}({params})")
if func != "invalidate":
with app.app_context():
save_step(_id, func, params, up_to_date=True)
routing_key_done = ".".join([*routing_key.split(".")[:2], "done"])
channel.basic_publish(
exchange="preprocessing",
routing_key=routing_key_done,
body=json.dumps(body),
properties=pika.BasicProperties(
delivery_mode=2, # make message persistent
),
)
print(
" [x] {} Sent message to {} {}".format(
datetime.now(), routing_key_done, body
),
flush=True,
)
print(f" [x] {datetime.now()} Done", flush=True)
ch.basic_ack(delivery_tag=method.delivery_tag)
channel.basic_qos(prefetch_count=1)
channel.basic_consume(
queue="preprocessing_python_task_queue", on_message_callback=callback
)
print(
f" [*] {datetime.now()} Waiting for messages. To exit press CTRL+C", flush=True
)
channel.start_consuming()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print("Interrupted")
try:
sys.exit(0)
except SystemExit:
os._exit(0)
| 33.037383 | 100 | 0.595474 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 958 | 0.271004 |
9e8bb6044559a80cc3e9ba40b40090e9b9222e9d | 7,764 | py | Python | run_cqa_inference.py | SeonjeongHwang/coqa_cqa | 67169b62e4d213d0e61cd31d844ad9665918049b | [
"Apache-2.0"
]
| 1 | 2022-02-22T07:05:40.000Z | 2022-02-22T07:05:40.000Z | run_cqa_inference.py | SeonjeongHwang/coqa_cqa | 67169b62e4d213d0e61cd31d844ad9665918049b | [
"Apache-2.0"
]
| null | null | null | run_cqa_inference.py | SeonjeongHwang/coqa_cqa | 67169b62e4d213d0e61cd31d844ad9665918049b | [
"Apache-2.0"
]
| null | null | null | import os
import sys
import random
import json
import tqdm
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
import numpy as np
from transformers import BertTokenizer, BertModel, AdamW, get_linear_schedule_with_warmup
from tool.data_process import *
from tool.inference_utils import write_predictions
MIN_FLOAT = -1e30
import argparse
parser = argparse.ArgumentParser(description="CQA")
### Arguments for Traning
parser.add_argument("--batch-size", type=int)
### Directories
parser.add_argument("--output-dir", type=str)
parser.add_argument("--result-dir", type=str)
### Arguments for Dataset
parser.add_argument("--num-turn", type=int, default=3)
parser.add_argument("--max-seq-length", type=int, default=512)
parser.add_argument("--max-history-length", type=int, default=128)
parser.add_argument("--doc-stride", type=int, default=192)
parser.add_argument("--model-name", type=str, default="bert-cased-large")
### Inference Setting
parser.add_argument("--n-best-size", type=int, default=5)
parser.add_argument("--max-answer-length", type=int, default=30)
args = parser.parse_args()
exp_dir = os.path.join(args.output_dir, args.result_dir)
model_file=exp_dir+"/model/model.pth"
tokenizer_dir=exp_dir+"/tokenizer"
config = exp_dir+"/config.json"
with open(config, "r") as f:
config_items = json.load(f)
model_name = config_items["model_name"]
max_seq_length = config_items["max_seq_length"]
max_history_length = config_items["max_history_length"]
doc_stride = config_items["doc_stride"]
num_turn = config_items["num_turn"]
test_data = f"data/coqa/coqa-dev-v1.0.json"
test_example = f"data/coqa/dev_{args.num_turn}_examples.pkl"
test_feature = f"data/coqa/dev_{args.num_turn}_features.pkl"
def seed_everything(seed):
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
seed = 2022
seed_everything(seed)
class Dataset(Dataset):
def __init__(self, data_file, example_file, feature_file, tokenizer, mode):
if os.path.exists(example_file):
print(f"Loading {mode} examples from {example_file}...")
with open(example_file, "rb") as f:
self.examples = pickle.load(f)
else:
print(f"Generating {mode} examples...")
self.examples = read_manmade_example(input_file=data_file, is_training=False, num_turn=num_turn)
print(f"Save the examples to {example_file}...")
with open(example_file, "wb") as f:
pickle.dump(self.examples, f, pickle.HIGHEST_PROTOCOL)
if os.path.exists(feature_file):
print(f"Loading {mode} features from {feature_file}...")
with open(feature_file, "rb") as f:
self.features = pickle.load(f)
else:
with open(example_file, "wb") as f:
pickle.dump(self.examples, f, pickle.HIGHEST_PROTOCOL)
print(f"Generating {mode} features...")
self.features = convert_examples_to_features(examples=self.examples,
tokenizer=tokenizer,
max_seq_length=max_seq_length,
max_history_length=max_history_length,
doc_stride=doc_stride,
is_training=False)
print(f"Save the features to {feature_file}...")
with open(feature_file, "wb") as f:
pickle.dump(self.features, f, pickle.HIGHEST_PROTOCOL)
self.unique_id = self.features["unique_id"]
self.input_ids = self.features["input_ids"]
self.attention_mask = self.features["attention_mask"]
self.segment_ids = self.features["segment_ids"]
def __len__(self):
return len(self.input_ids)
def __getitem__(self, idx):
unique_id = self.unique_id[idx]
input_ids = torch.tensor(self.input_ids[idx])
attention_mask = torch.tensor(self.attention_mask[idx])
segment_ids = torch.tensor(self.segment_ids[idx])
return input_ids, attention_mask, segment_ids, unique_id
class CQA(nn.Module):
def __init__(self, bert_model_name, tokenizer):
super().__init__()
self.BertEncoder = BertModel.from_pretrained(bert_model_name)
self.BertEncoder.resize_token_embeddings(len(tokenizer))
### CODE ###
def forward(self, input_ids, segment_ids, attention_mask, history_ids, p_mask):
bert_output = self.BertEncoder(input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=segment_ids).last_hidden_state
### CODE ###
def prediction(model, test_dataset, device):
progress_bar = tqdm.tqdm
model = model.to(device)
test_loader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False)
test_pbar = progress_bar(test_loader, total=len(test_loader))
RawResult = collections.namedtuple("RawResult",
["unique_id", "start_logits", "end_logits"])
all_results = []
print("Predicting answers...")
for input_ids, attention_mask, p_mask, segment_ids, history_ids, unique_id in test_pbar:
start_logits, end_logits = model(input_ids=input_ids.to(device),
segment_ids=segment_ids.to(device),
attention_mask=attention_mask.to(device))
batch_num = start_logits.size(0)
for idx in range(batch_num):
start_logit = [float(x) for x in start_logits[idx].tolist()]
end_logit = [float(x) for x in end_logits[idx].tolist()]
all_results.append(RawResult(unique_id=int(unique_id[idx]),
start_logits=start_logit,
end_logits=end_logit))
return all_results
print(f"Loading tokenizer from {tokenizer_dir}...")
tokenizer = BertTokenizer.from_pretrained(tokenizer_dir)
print(f"Loading trained model from {model_file}...")
device = torch.device("cuda")
model = CQA(model_name, tokenizer, args.batch_size, device)
model.load_state_dict(torch.load(model_file))
test_dataset = Dataset(data_file=test_data,
example_file=test_example,
feature_file=test_feature,
tokenizer=tokenizer,
mode="test")
all_results = prediction(model, test_dataset, device)
output_prediction_file = os.path.join(exp_dir, "predictions.json")
output_nbest_file = os.path.join(exp_dir, "nbest_predictions.json")
print("Writing predictions...")
write_predictions(all_examples=test_dataset.examples,
features_dict=test_dataset.features,
all_results=all_results,
n_best_size=args.n_best_size,
max_answer_length=args.max_answer_length,
do_lower_case=True,
tokenizer=tokenizer,
output_prediction_file=output_prediction_file,
output_nbest_file=output_nbest_file)
print("Done")
| 39.015075 | 109 | 0.6212 | 3,032 | 0.39052 | 0 | 0 | 0 | 0 | 0 | 0 | 1,110 | 0.142968 |
9e8bde4f4893f69df667f132646ec28b77e6aaf9 | 1,542 | py | Python | anywayapp/base.py | ronreiter/anyway | 90326b7defaec062d75653729fd63a1913074064 | ["BSD-3-Clause"] | 8 | 2016-09-14T11:31:04.000Z | 2021-02-23T22:29:55.000Z | anywayapp/base.py | ronreiter/anyway | 90326b7defaec062d75653729fd63a1913074064 | ["BSD-3-Clause"] | 2 | 2015-03-02T15:16:09.000Z | 2016-11-16T11:20:15.000Z | anywayapp/base.py | ronreiter/anyway | 90326b7defaec062d75653729fd63a1913074064 | ["BSD-3-Clause"] | 4 | 2015-03-01T09:50:57.000Z | 2020-08-28T12:03:37.000Z |
import webapp2
from models import *
from webapp2_extras import sessions
def user_optional(handler):
def check_login(self, *args, **kwargs):
self.user = self.get_user()
return handler(self, *args, **kwargs)
return check_login
def user_required(handler):
def check_login(self, *args, **kwargs):
user = self.get_user()
if not user:
self.session["last_page_before_login"] = self.request.path + "?" + self.request.query_string
self.redirect("/")
else:
self.user = user
return handler(self, *args, **kwargs)
return check_login
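# Example usage (hypothetical handler, not part of the original module):
#
#   class SecretHandler(BaseHandler):
#       @user_required
#       def get(self):
#           self.response.write("hello user %s" % self.user.key().id())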
class BaseHandler(webapp2.RequestHandler):
def dispatch(self):
# Get a session store for this request.
self.session_store = sessions.get_store(request=self.request)
try:
# Dispatch the request.
webapp2.RequestHandler.dispatch(self)
finally:
# Save all sessions.
self.session_store.save_sessions(self.response)
@webapp2.cached_property
def session(self):
# Returns a session using the default cookie key.
return self.session_store.get_session()
def get_user(self):
if "user_id" in self.session and self.session["user_id"] is not None:
return User.get_by_id(self.session["user_id"])
def set_user(self, user):
self.session["user_id"] = user.key().id()
def logout(self):
self.session["user_id"] = None
| 29.653846 | 105 | 0.610246 | 883 | 0.572633 | 0 | 0 | 156 | 0.101167 | 0 | 0 | 210 | 0.136187 |
9e8d0d88791289330a7412e20650652419814d5a | 9,447 | py | Python | datasets/kitti.py | ShengyuH/PredateOverlap | 770c3063399f08b3836935212ab4c84d355b4704 | ["MIT"] | 153 | 2020-11-30T09:47:11.000Z | 2021-04-28T00:58:10.000Z | datasets/kitti.py | ShengyuH/PredateOverlap | 770c3063399f08b3836935212ab4c84d355b4704 | ["MIT"] | 31 | 2021-05-10T12:39:19.000Z | 2022-03-27T03:07:45.000Z | datasets/kitti.py | ShengyuH/PredateOverlap | 770c3063399f08b3836935212ab4c84d355b4704 | ["MIT"] | 22 | 2020-11-30T13:50:55.000Z | 2021-04-28T09:47:40.000Z |
# Basic libs
import os, time, glob, random, pickle, copy, torch
import numpy as np
import open3d
from scipy.spatial.transform import Rotation
# Dataset parent class
from torch.utils.data import Dataset
from lib.benchmark_utils import to_tsfm, to_o3d_pcd, get_correspondences
class KITTIDataset(Dataset):
"""
    We follow D3Feat for the data augmentation part.
    We first voxelize the point clouds and compute the correspondences,
    then apply data augmentation to the point clouds. KPConv runs over the augmented point clouds, but the loss is later computed on the point clouds before augmentation.
"""
DATA_FILES = {
'train': './configs/kitti/train_kitti.txt',
'val': './configs/kitti/val_kitti.txt',
'test': './configs/kitti/test_kitti.txt'
}
def __init__(self,config,split,data_augmentation=True):
super(KITTIDataset,self).__init__()
self.config = config
self.root = os.path.join(config.root,'dataset')
self.icp_path = os.path.join(config.root,'icp')
if not os.path.exists(self.icp_path):
os.makedirs(self.icp_path)
self.voxel_size = config.first_subsampling_dl
self.matching_search_voxel_size = config.overlap_radius
self.data_augmentation = data_augmentation
self.augment_noise = config.augment_noise
self.IS_ODOMETRY = True
self.max_corr = config.max_points
self.augment_shift_range = config.augment_shift_range
self.augment_scale_max = config.augment_scale_max
self.augment_scale_min = config.augment_scale_min
# Initiate containers
self.files = []
self.kitti_icp_cache = {}
self.kitti_cache = {}
self.prepare_kitti_ply(split)
self.split = split
def prepare_kitti_ply(self, split):
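        # Build (drive, t0, t1) frame pairs whose ground-truth positions are more than 10 m apart, following the D3Feat protocol.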
assert split in ['train','val','test']
subset_names = open(self.DATA_FILES[split]).read().split()
for dirname in subset_names:
drive_id = int(dirname)
fnames = glob.glob(self.root + '/sequences/%02d/velodyne/*.bin' % drive_id)
assert len(fnames) > 0, f"Make sure that the path {self.root} has data {dirname}"
inames = sorted([int(os.path.split(fname)[-1][:-4]) for fname in fnames])
# get one-to-one distance by comparing the translation vector
all_odo = self.get_video_odometry(drive_id, return_all=True)
all_pos = np.array([self.odometry_to_positions(odo) for odo in all_odo])
Ts = all_pos[:, :3, 3]
pdist = (Ts.reshape(1, -1, 3) - Ts.reshape(-1, 1, 3)) ** 2
pdist = np.sqrt(pdist.sum(-1))
######################################
# D3Feat script to generate test pairs
more_than_10 = pdist > 10
curr_time = inames[0]
while curr_time in inames:
next_time = np.where(more_than_10[curr_time][curr_time:curr_time + 100])[0]
if len(next_time) == 0:
curr_time += 1
else:
next_time = next_time[0] + curr_time - 1
if next_time in inames:
self.files.append((drive_id, curr_time, next_time))
curr_time = next_time + 1
# remove bad pairs
if split=='test':
self.files.remove((8, 15, 58))
print(f'Num_{split}: {len(self.files)}')
def __len__(self):
return len(self.files)
def __getitem__(self, idx):
drive = self.files[idx][0]
t0, t1 = self.files[idx][1], self.files[idx][2]
all_odometry = self.get_video_odometry(drive, [t0, t1])
positions = [self.odometry_to_positions(odometry) for odometry in all_odometry]
fname0 = self._get_velodyne_fn(drive, t0)
fname1 = self._get_velodyne_fn(drive, t1)
# XYZ and reflectance
xyzr0 = np.fromfile(fname0, dtype=np.float32).reshape(-1, 4)
xyzr1 = np.fromfile(fname1, dtype=np.float32).reshape(-1, 4)
xyz0 = xyzr0[:, :3]
xyz1 = xyzr1[:, :3]
        # use ICP to refine the ground-truth pose; for ICP we don't voxelize the point clouds
key = '%d_%d_%d' % (drive, t0, t1)
filename = self.icp_path + '/' + key + '.npy'
if key not in self.kitti_icp_cache:
if not os.path.exists(filename):
print('missing ICP files, recompute it')
M = (self.velo2cam @ positions[0].T @ np.linalg.inv(positions[1].T)
@ np.linalg.inv(self.velo2cam)).T
xyz0_t = self.apply_transform(xyz0, M)
pcd0 = to_o3d_pcd(xyz0_t)
pcd1 = to_o3d_pcd(xyz1)
reg = open3d.registration.registration_icp(pcd0, pcd1, 0.2, np.eye(4),
open3d.registration.TransformationEstimationPointToPoint(),
open3d.registration.ICPConvergenceCriteria(max_iteration=200))
pcd0.transform(reg.transformation)
M2 = M @ reg.transformation
np.save(filename, M2)
else:
M2 = np.load(filename)
self.kitti_icp_cache[key] = M2
else:
M2 = self.kitti_icp_cache[key]
# refined pose is denoted as trans
tsfm = M2
rot = tsfm[:3,:3]
trans = tsfm[:3,3][:,None]
# voxelize the point clouds here
pcd0 = to_o3d_pcd(xyz0)
pcd1 = to_o3d_pcd(xyz1)
pcd0 = pcd0.voxel_down_sample(self.voxel_size)
pcd1 = pcd1.voxel_down_sample(self.voxel_size)
src_pcd = np.array(pcd0.points)
tgt_pcd = np.array(pcd1.points)
# Get matches
matching_inds = get_correspondences(pcd0, pcd1, tsfm, self.matching_search_voxel_size)
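        # During training, pairs with too few correspondences are discarded by resampling a different pair.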
if(matching_inds.size(0) < self.max_corr and self.split == 'train'):
return self.__getitem__(np.random.choice(len(self.files),1)[0])
src_feats=np.ones_like(src_pcd[:,:1]).astype(np.float32)
tgt_feats=np.ones_like(tgt_pcd[:,:1]).astype(np.float32)
rot = rot.astype(np.float32)
trans = trans.astype(np.float32)
# add data augmentation
src_pcd_input = copy.deepcopy(src_pcd)
tgt_pcd_input = copy.deepcopy(tgt_pcd)
if(self.data_augmentation):
# add gaussian noise
src_pcd_input += (np.random.rand(src_pcd_input.shape[0],3) - 0.5) * self.augment_noise
tgt_pcd_input += (np.random.rand(tgt_pcd_input.shape[0],3) - 0.5) * self.augment_noise
# rotate the point cloud
euler_ab=np.random.rand(3)*np.pi*2 # anglez, angley, anglex
rot_ab= Rotation.from_euler('zyx', euler_ab).as_matrix()
if(np.random.rand(1)[0]>0.5):
src_pcd_input = np.dot(rot_ab, src_pcd_input.T).T
else:
tgt_pcd_input = np.dot(rot_ab, tgt_pcd_input.T).T
# scale the pcd
scale = self.augment_scale_min + (self.augment_scale_max - self.augment_scale_min) * random.random()
src_pcd_input = src_pcd_input * scale
tgt_pcd_input = tgt_pcd_input * scale
# shift the pcd
shift_src = np.random.uniform(-self.augment_shift_range, self.augment_shift_range, 3)
shift_tgt = np.random.uniform(-self.augment_shift_range, self.augment_shift_range, 3)
src_pcd_input = src_pcd_input + shift_src
tgt_pcd_input = tgt_pcd_input + shift_tgt
return src_pcd_input, tgt_pcd_input, src_feats, tgt_feats, rot, trans, matching_inds, src_pcd, tgt_pcd, torch.ones(1)
def apply_transform(self, pts, trans):
R = trans[:3, :3]
T = trans[:3, 3]
pts = pts @ R.T + T
return pts
@property
def velo2cam(self):
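        # Hard-coded KITTI velodyne-to-camera extrinsic calibration, computed once and cached.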
try:
velo2cam = self._velo2cam
except AttributeError:
R = np.array([
7.533745e-03, -9.999714e-01, -6.166020e-04, 1.480249e-02, 7.280733e-04,
-9.998902e-01, 9.998621e-01, 7.523790e-03, 1.480755e-02
]).reshape(3, 3)
T = np.array([-4.069766e-03, -7.631618e-02, -2.717806e-01]).reshape(3, 1)
velo2cam = np.hstack([R, T])
self._velo2cam = np.vstack((velo2cam, [0, 0, 0, 1])).T
return self._velo2cam
def get_video_odometry(self, drive, indices=None, ext='.txt', return_all=False):
if self.IS_ODOMETRY:
data_path = self.root + '/poses/%02d.txt' % drive
if data_path not in self.kitti_cache:
self.kitti_cache[data_path] = np.genfromtxt(data_path)
if return_all:
return self.kitti_cache[data_path]
else:
return self.kitti_cache[data_path][indices]
def odometry_to_positions(self, odometry):
if self.IS_ODOMETRY:
T_w_cam0 = odometry.reshape(3, 4)
T_w_cam0 = np.vstack((T_w_cam0, [0, 0, 0, 1]))
return T_w_cam0
def _get_velodyne_fn(self, drive, t):
if self.IS_ODOMETRY:
fname = self.root + '/sequences/%02d/velodyne/%06d.bin' % (drive, t)
return fname
def get_position_transform(self, pos0, pos1, invert=False):
T0 = self.pos_transform(pos0)
T1 = self.pos_transform(pos1)
return (np.dot(T1, np.linalg.inv(T0)).T if not invert else np.dot(
np.linalg.inv(T1), T0).T)
| 40.896104 | 146 | 0.592887 | 9,168 | 0.970467 | 0 | 0 | 555 | 0.058749 | 0 | 0 | 1,168 | 0.123637 |
9e8d10545762b08a28204f212d3c73b287afb2c3 | 1,344 | py | Python | bin/compare_versions.py | sdss/lvmmodel | 1ab52f51a172500f8a10e762c88b9929898e1b20 | ["BSD-3-Clause"] | 2 | 2017-07-18T19:22:38.000Z | 2021-12-17T16:02:01.000Z | bin/compare_versions.py | sdss/lvmmodel | 1ab52f51a172500f8a10e762c88b9929898e1b20 | ["BSD-3-Clause"] | 134 | 2016-02-07T03:48:48.000Z | 2022-02-21T17:50:09.000Z | bin/compare_versions.py | sdss/lvmmodel | 1ab52f51a172500f8a10e762c88b9929898e1b20 | ["BSD-3-Clause"] | 3 | 2017-07-12T21:36:19.000Z | 2022-01-11T16:15:44.000Z |
#!/usr/bin/env python
"""
Make plots to compare two different versions of desimodel
Stephen Bailey, LBL
July 2014
"""
import os, sys
import numpy as np
import pylab as P
import matplotlib.pyplot as plt
import fitsio
camcolor = dict(b='b', r='r', z='k')
def compare_throughput(dir1, dir2):
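    # Plot the throughput curves of both versions (top panel) and their relative difference (bottom panel) for each arm (b, r, z).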
P.figure()
p0 = plt.subplot2grid((3,1), (0,0), rowspan=2)
p1 = plt.subplot2grid((3,1), (2,0))
for x in ('b', 'r', 'z'):
d1 = fitsio.read(dir1+'/data/throughput/thru-'+x+'.fits')
d2 = fitsio.read(dir2+'/data/throughput/thru-'+x+'.fits')
w1 = d1['wavelength']
w2 = d2['wavelength']
t1 = d1['throughput']
t2 = d2['throughput']
p0.plot(w1, t1, '-', color=camcolor[x])
p0.plot(w2, t2, '--', color=camcolor[x])
p1.plot(w1, (t1-np.interp(w1, w2, t2))/t1, '-', color=camcolor[x])
p0.set_xlim(3500, 10000)
p0.set_ylim(0.0, 0.5)
p0.set_ylabel('Throughput')
p0.grid()
p1.set_xlim(3500, 10000)
### p1.set_ylim(-0.5, 0.5)
p1.set_xlabel('Wavelength [Angstroms]')
p1.set_ylabel('Relative difference')
p1.grid()
def compare_fiberloss(dir1, dir2):
pass
#-------------------------------------------------------------------------
dir1, dir2 = sys.argv[1:3]
compare_throughput(dir1, dir2)
plt.show()
| 23.578947 | 74 | 0.554315 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 412 | 0.306548 |
9e8e19f97e0eb39926f29ca476d7649b8872fc92 | 1,923 | py | Python | tests/graph/parallel_graphs.py | marcelotrevisani/acorns | 682749b0963ffc0a3998a7065ef505fc95123f50 | ["MIT"] | null | null | null | tests/graph/parallel_graphs.py | marcelotrevisani/acorns | 682749b0963ffc0a3998a7065ef505fc95123f50 | ["MIT"] | null | null | null | tests/graph/parallel_graphs.py | marcelotrevisani/acorns | 682749b0963ffc0a3998a7065ef505fc95123f50 | ["MIT"] | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
import os
import json
import seaborn as sns
import re
sns.set(style="darkgrid")
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
'''
alist.sort(key=natural_keys) sorts in human order
http://nedbatchelder.com/blog/200712/human_sorting.html
(See Toothy's implementation in the comments)
'''
return [ atoi(c) for c in re.split(r'(\d+)', text) ]
def convert_files_to_lists(file_location):
our_times = []
with open(file_location) as json_data:
data = json.load(json_data)
for i, key in enumerate(sorted(data)):
for num_cores in sorted(data[key],key=natural_keys):
our_times.append(data[key][num_cores]['us'])
return our_times
def get_speedup_list(time_list):
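    # Speedup of each multi-threaded run relative to the single-thread baseline (first entry of time_list).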
speedup_list = []
single_thread_time = time_list[0]
for time in time_list[1:]:
speedup_list.append( float(single_thread_time) / float(time) )
return speedup_list
def generate_two_graph(avg_us, denom, suffix="", ylabel="Time (s)"):
plt.plot(denom, avg_us, color='#1abc9c', linestyle='dashed', markersize=7)
# legend
plt.xlabel('Threads', fontfamily='monospace')
plt.ylabel('{} (s)'.format(ylabel), fontfamily='monospace')
plt.margins(0,0)
plt.savefig('./tests/results/hess/graphs/parallel/parallel-graph{}.pdf'.format(suffix), bbox_inches = 'tight',
pad_inches = 0)
# plt.savefig('./tests/complex/graphs/graph_by_128_speedup.pdf')
plt.clf()
our_times = convert_files_to_lists("./tests/results/grad/json/parallel/parallel_results_good.json")
print(our_times)
generate_two_graph(our_times, range(1, 48))
speedup_list = get_speedup_list(our_times)
generate_two_graph(speedup_list, range(1, 47), suffix="-speedup", ylabel="Speedup (Time Single Thread / Time X Threads)")
| 33.736842 | 122 | 0.680707 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 529 | 0.275091 |