max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content | score | int_score |
---|---|---|---|---|---|---|
src/ensae_teaching_cs/homeblog/filename_helper.py | Jerome-maker/ensae_teaching_cs | 73 | 12797651 | <filename>src/ensae_teaching_cs/homeblog/filename_helper.py
"""
@file
@brief Helpers around file names.
"""
import os
import re
from pyquickhelper.loghelper import noLOG
from pyquickhelper.filehelper import explore_folder
def get_file_per_folder(folder, deep=1):
"""
extract all folders in a folder and then all files in these folders
@param folder folder
    @param deep number of folder levels to consider before the filename
    @return dictionary mapping a folder (or tuple of folders) to the list of filenames
"""
files = explore_folder(folder)[1]
res = {}
for f in files:
spl = f.replace("\\", "/").split("/")
if deep == 1:
te = spl[-2]
fi = spl[-1]
if te not in res:
res[te] = []
res[te].append(fi)
        elif deep == 2:
            te = tuple(spl[-3:-1])  # tuple of the two parent folders (a list is not hashable)
fi = spl[-1]
if te not in res:
res[te] = []
res[te].append(fi)
else:
raise Exception("deep should be 1 or 2")
return res
def normalize_name_and_numbers(files):
"""
    tries to match names and numbers in a filename
@param files list of files
@return list of tuple (number, normalized name, extension, suggested name, original name)
"""
exp = re.compile(
"([0-9a-z;() ]+([-][a-z ]+)?) ?[-] ?([0-9]{2,3})[ .v_CF[]")
exp2 = re.compile("([0-9a-z;() ]+) episode ([0-9]{2,3})[ .v_CF[]")
exp3 = re.compile("([a-z0-9 ]+[.][0-9]+) ?[-] ?([0-9]{2,3})[ .v_CF[]")
res = []
for fi in files:
name = fi.lower().replace("_", " ").replace("!", " ")
ext = os.path.splitext(fi)[-1]
solution = None
for ex, ind in [(exp, 2), (exp2, 1), (exp3, 1)]:
num = ex.search(name)
if num:
grs = num.groups()
nam = grs[0].strip()
num = grs[ind]
words = nam.split()
for i in range(len(words)):
words[i] = words[i][0].upper() + words[i][1:]
nam = " ".join(words)
sugg = "{0} - {1}{2}".format(nam, num, ext)
if solution is None or len(nam) > len(solution[1]):
solution = (num, nam, ext, sugg, fi)
if solution is not None:
res.append(solution)
res.sort()
return res
def normalize_folder(folder, fLOG=noLOG):
"""
normalize the filename of a whole folder and subfolders
    @param folder folder
    @param fLOG logging function
    @return list of tuple (number, normalized name, extension, suggested name, original name)
"""
alls = []
files = get_file_per_folder(folder)
for d in sorted(files):
norm = normalize_name_and_numbers(files[d])
for r in norm:
if r[-2] != r[-1]:
pat = os.path.join(folder, d, r[-1])
nee = os.path.join(folder, d, r[-2])
fLOG("rename", pat, " in ", nee)
neelast = os.path.split(nee)[-1]
if neelast[0] < 'A' or neelast[0] > 'Z':
raise Exception("bad name for " +
neelast + "(" + nee + ")")
os.rename(pat, nee)
alls.extend(norm)
return alls
def music_statistics(folder):
"""
provides statistics on a folder
@param folder folder
@return dictionary { "folder": { "last": ..., "missing": } }
"""
res = {}
files = get_file_per_folder(folder)
for d in sorted(files):
norm = normalize_name_and_numbers(files[d])
for r in norm:
if d not in res:
res[d] = []
res[d].append(int(r[0]))
comp = {}
for k, v in res.items():
mi, ma = min(v), max(v)
ke = {_: 1 for _ in v}
li = [0 for i in range(ma + 1)]
for _ in ke:
li[_] = 1
missing = [i for i, _ in enumerate(li) if _ == 0 and i >= mi]
comp[k] = {"min": mi, "max": ma, "missing": missing}
return comp
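# Minimal usage sketch (not part of the original module; the folder below is hypothetical):
if __name__ == "__main__":
    stats = music_statistics("./music")  # hypothetical folder with sub-folders of numbered files
    for folder, info in sorted(stats.items()):
        print(folder, "min:", info["min"], "max:", info["max"], "missing:", info["missing"])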
| 2.953125 | 3 |
ifpd2/asserts.py | ggirelli/ifpd2 | 1 | 12797652 | """
@author: <NAME>
@contact: <EMAIL>
"""
import logging
import numpy as np # type: ignore
import sys
from typing import Callable
def ert_type(x, stype, label):
if not isinstance(x, stype):
raise AssertionError(f"{label} should be {stype}, {type(x)} instead")
def ert_multiTypes(x, types, label):
cond = any(isinstance(x, t) for t in types)
if not cond:
raise AssertionError(f"{label} should be one of {types}, {type(x)} instead")
def ert_nonNeg(x, label, include_zero=False):
if not include_zero:
if x <= 0:
raise AssertionError(f"{label} should be greater than 0")
elif x < 0:
raise AssertionError(f"{label} should be greater than or equal to 0")
def ert_inInterv(x, vmin, vmax, label, leftClose=False, rightClose=True):
if leftClose:
if rightClose:
if x < vmin or x > vmax:
raise AssertionError(f"expected {vmin}<={label}<={vmax}")
elif x < vmin or x >= vmax:
raise AssertionError(f"expected {vmin}<={label}<{vmax}")
elif rightClose:
if x <= vmin or x > vmax:
raise AssertionError(f"expected {vmin}<{label}<={vmax}")
elif x <= vmin or x >= vmax:
raise AssertionError(f"expected {vmin}<{label}<{vmax}")
def ert_in_dtype(x, dtype):
if dtype.startswith("f"):
if x > np.finfo(dtype).max:
raise AssertionError(
" ".join(
[
"expected to be lower than {dtype} max:",
f"{x} < {np.finfo(dtype).max}",
]
)
)
elif dtype.startswith("u") or dtype.startswith("i"):
if x > np.iinfo(dtype).max:
raise AssertionError(
" ".join(
[
"expected to be lower than {dtype} max:",
f"{x} < {np.iinfo(dtype).max}",
]
)
)
else:
logging.warning(f"assert not implemented for dtype '{dtype}'")
def enable_rich_assert(fun: Callable) -> Callable:
def wrapper(*args, **kwargs):
try:
return fun(*args, **kwargs)
except AssertionError as e:
logging.exception(e)
sys.exit()
return wrapper
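# Minimal usage sketch (not part of the original module): wrapping a function so that a
# failed assertion is logged and exits cleanly instead of printing a bare traceback.
@enable_rich_assert
def _example_check(value):
    ert_nonNeg(value, "value")
    ert_in_dtype(value, "u8")
    return value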
| 2.8125 | 3 |
common.py | lamhoangtung/densenet121-quickdraw-doodle-recognition-challenge | 3 | 12797653 | import ast
import os
import cv2
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from keras.applications.densenet import preprocess_input
from keras.metrics import (categorical_accuracy, top_k_categorical_accuracy)
from keras.models import Model, load_model
DP_DIR = './input/shuffle-csvs/'
INPUT_DIR = './input/quickdraw-doodle-recognition/'
BASE_SIZE = 256
NCSVS = 200
NCATS = 340
np.random.seed(seed=2018)
tf.set_random_seed(seed=2018)
def f2cat(filename: str) -> str:
return filename.split('.')[0]
def list_all_categories():
files = os.listdir(os.path.join(INPUT_DIR, 'train_simplified'))
return sorted([f2cat(f) for f in files], key=str.lower)
def apk(actual, predicted, k=3):
"""
Source: https://github.com/benhamner/Metrics/blob/master/Python/ml_metrics/average_precision.py
"""
if len(predicted) > k:
predicted = predicted[:k]
score = 0.0
num_hits = 0.0
for i, p in enumerate(predicted):
if p in actual and p not in predicted[:i]:
num_hits += 1.0
score += num_hits / (i + 1.0)
if not actual:
return 0.0
return score / min(len(actual), k)
def mapk(actual, predicted, k=3):
"""
Source: https://github.com/benhamner/Metrics/blob/master/Python/ml_metrics/average_precision.py
"""
return np.mean([apk(a, p, k) for a, p in zip(actual, predicted)])
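# Worked example (illustration only, not part of the original script): with
# actual=['cat'] and predicted=['dog', 'cat', 'bird'] the only hit is at index 1,
# so apk = (1 / 2) / min(1, 3) = 0.5, and mapk over that single pair is also 0.5.
#   apk(['cat'], ['dog', 'cat', 'bird'], k=3) == 0.5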
def preds2catids(predictions):
return pd.DataFrame(np.argsort(-predictions, axis=1)[:, :3], columns=['a', 'b', 'c'])
def top_3_accuracy(y_true, y_pred):
return top_k_categorical_accuracy(y_true, y_pred, k=3)
def draw_cv2(raw_strokes, size=256, lw=6, time_color=True):
img = np.zeros((BASE_SIZE, BASE_SIZE), np.uint8)
for t, stroke in enumerate(raw_strokes):
for i in range(len(stroke[0]) - 1):
color = 255 - min(t, 10) * 13 if time_color else 255
_ = cv2.line(img, (stroke[0][i], stroke[1][i]),
(stroke[0][i + 1], stroke[1][i + 1]), color, lw)
img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
if size != BASE_SIZE:
return cv2.resize(img, (size, size))
else:
return img
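# Illustrative note (not part of the original script): 'raw_strokes' follows the simplified
# QuickDraw drawing format, a list of strokes where each stroke is a pair of coordinate
# lists on a 256x256 canvas, e.g.
#   example_strokes = [[[0, 50, 100], [0, 80, 60]], [[10, 20], [200, 210]]]
#   example_img = draw_cv2(example_strokes, size=64)  # -> (64, 64, 3) uint8 image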
def image_generator_xd(size, batchsize, ks, lw=6, time_color=True):
while True:
for k in np.random.permutation(ks):
filename = os.path.join(DP_DIR, 'train_k{}.csv.gz'.format(k))
for df in pd.read_csv(filename, chunksize=batchsize):
df['drawing'] = df['drawing'].apply(ast.literal_eval)
x = np.zeros((len(df), size, size, 3))
for i, raw_strokes in enumerate(df.drawing.values):
x[i, :, :, :] = draw_cv2(raw_strokes, size=size, lw=lw,
time_color=time_color)
x = preprocess_input(x).astype(np.float32)
y = keras.utils.to_categorical(df.y, num_classes=NCATS)
yield x, y
def df_to_image_array_xd(df, size, lw=6, time_color=True):
df['drawing'] = df['drawing'].apply(ast.literal_eval)
x = np.zeros((len(df), size, size, 3))
for i, raw_strokes in enumerate(df.drawing.values):
x[i, :, :, :] = draw_cv2(
raw_strokes, size=size, lw=lw, time_color=time_color)
x = preprocess_input(x).astype(np.float32)
return x
class TTA_ModelWrapper():
"""A simple TTA wrapper for keras computer vision models.
Args:
model (keras model): A fitted keras model with a predict method.
"""
def __init__(self, model):
self.model = model
def predict(self, X):
"""Wraps the predict method of the provided model.
        Augments the test data with horizontally flipped copies and
        averages the two prediction sets.
Args:
X (numpy array of dim 4): The data to get predictions for.
"""
p0 = self.model.predict(X, batch_size=128, verbose=1)
        p1 = self.model.predict(X[:, :, ::-1, :], batch_size=128, verbose=1)  # horizontal flip of each image
p = (p0 + p1) / 2
return np.array(p)
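# Minimal usage sketch (hypothetical file names, not part of the original script):
#   model = load_model('weights.h5', custom_objects={'top_3_accuracy': top_3_accuracy})
#   tta_model = TTA_ModelWrapper(model)
#   probs = tta_model.predict(x_valid)  # x_valid: array of shape (n, size, size, 3)
#   submission = preds2catids(probs)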
| 2.328125 | 2 |
misc/zip/Cura-master/cura/Machines/Models/QualityManagementModel.py | criscola/G-Gen | 1 | 12797654 | # Copyright (c) 2018 <NAME>.
# Cura is released under the terms of the LGPLv3 or higher.
from PyQt5.QtCore import Qt, pyqtSlot
from UM.Qt.ListModel import ListModel
from UM.Logger import Logger
#
# This is the QML model for the quality management page.
#
class QualityManagementModel(ListModel):
NameRole = Qt.UserRole + 1
IsReadOnlyRole = Qt.UserRole + 2
QualityGroupRole = Qt.UserRole + 3
QualityChangesGroupRole = Qt.UserRole + 4
def __init__(self, parent = None):
super().__init__(parent)
self.addRoleName(self.NameRole, "name")
self.addRoleName(self.IsReadOnlyRole, "is_read_only")
self.addRoleName(self.QualityGroupRole, "quality_group")
self.addRoleName(self.QualityChangesGroupRole, "quality_changes_group")
from cura.CuraApplication import CuraApplication
self._container_registry = CuraApplication.getInstance().getContainerRegistry()
self._machine_manager = CuraApplication.getInstance().getMachineManager()
self._extruder_manager = CuraApplication.getInstance().getExtruderManager()
self._quality_manager = CuraApplication.getInstance().getQualityManager()
self._machine_manager.globalContainerChanged.connect(self._update)
self._quality_manager.qualitiesUpdated.connect(self._update)
self._update()
def _update(self):
Logger.log("d", "Updating {model_class_name}.".format(model_class_name = self.__class__.__name__))
global_stack = self._machine_manager.activeMachine
if not global_stack:
self.setItems([])
return
quality_group_dict = self._quality_manager.getQualityGroups(global_stack)
quality_changes_group_dict = self._quality_manager.getQualityChangesGroups(global_stack)
available_quality_types = set(quality_type for quality_type, quality_group in quality_group_dict.items()
if quality_group.is_available)
if not available_quality_types and not quality_changes_group_dict:
# Nothing to show
self.setItems([])
return
item_list = []
# Create quality group items
for quality_group in quality_group_dict.values():
if not quality_group.is_available:
continue
item = {"name": quality_group.name,
"is_read_only": True,
"quality_group": quality_group,
"quality_changes_group": None}
item_list.append(item)
# Sort by quality names
item_list = sorted(item_list, key = lambda x: x["name"].upper())
# Create quality_changes group items
quality_changes_item_list = []
for quality_changes_group in quality_changes_group_dict.values():
if quality_changes_group.quality_type not in available_quality_types:
continue
quality_group = quality_group_dict[quality_changes_group.quality_type]
item = {"name": quality_changes_group.name,
"is_read_only": False,
"quality_group": quality_group,
"quality_changes_group": quality_changes_group}
quality_changes_item_list.append(item)
# Sort quality_changes items by names and append to the item list
quality_changes_item_list = sorted(quality_changes_item_list, key = lambda x: x["name"].upper())
item_list += quality_changes_item_list
self.setItems(item_list)
# TODO: Duplicated code here from InstanceContainersModel. Refactor and remove this later.
#
## Gets a list of the possible file filters that the plugins have
# registered they can read or write. The convenience meta-filters
# "All Supported Types" and "All Files" are added when listing
# readers, but not when listing writers.
#
# \param io_type \type{str} name of the needed IO type
# \return A list of strings indicating file name filters for a file
# dialog.
@pyqtSlot(str, result = "QVariantList")
def getFileNameFilters(self, io_type):
from UM.i18n import i18nCatalog
catalog = i18nCatalog("uranium")
#TODO: This function should be in UM.Resources!
filters = []
all_types = []
for plugin_id, meta_data in self._getIOPlugins(io_type):
for io_plugin in meta_data[io_type]:
filters.append(io_plugin["description"] + " (*." + io_plugin["extension"] + ")")
all_types.append("*.{0}".format(io_plugin["extension"]))
if "_reader" in io_type:
# if we're listing readers, add the option to show all supported files as the default option
filters.insert(0, catalog.i18nc("@item:inlistbox", "All Supported Types ({0})", " ".join(all_types)))
filters.append(catalog.i18nc("@item:inlistbox", "All Files (*)")) # Also allow arbitrary files, if the user so prefers.
return filters
## Gets a list of profile reader or writer plugins
# \return List of tuples of (plugin_id, meta_data).
def _getIOPlugins(self, io_type):
from UM.PluginRegistry import PluginRegistry
pr = PluginRegistry.getInstance()
active_plugin_ids = pr.getActivePlugins()
result = []
for plugin_id in active_plugin_ids:
meta_data = pr.getMetaData(plugin_id)
if io_type in meta_data:
result.append( (plugin_id, meta_data) )
return result
| 2 | 2 |
BCI/BCI/SFS.py | RichardLeeK/BCI | 4 | 12797655 | <gh_stars>1-10
# Sequential Forward Selection
import warnings
import numpy as np
from sklearn.utils import check_X_y, safe_sqr
from sklearn.utils.metaestimators import if_delegate_has_method
from sklearn.base import BaseEstimator
from sklearn.base import MetaEstimatorMixin
from sklearn.base import clone
from sklearn.base import is_classifier
from sklearn.cross_validation import _check_cv as check_cv
from sklearn.cross_validation import _safe_split, _score
from sklearn.metrics.scorer import check_scoring
from sklearn.feature_selection.base import SelectorMixin
class FFS(BaseEstimator, MetaEstimatorMixin, SelectorMixin):
"""Feature ranking with forward feature selection.
Modification from backward feature selection from:
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/rfe.py
Given an external estimator that assigns weights to features (e.g., the
coefficients of a linear model), the goal of forward feature selection
(FFS) is to select features by recursively considering larger and larger
    sets of features. First, the estimator is trained on a small initial set of
    features and weights are assigned to each of them. Then, the features whose
    absolute weights are the largest are added to the current set of features.
    That procedure is recursively repeated on the growing set until the desired
    number of features to select is eventually reached.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
n_features_to_select : int or None (default=None)
The number of features to select. If `None`, half of the features
are selected.
step : int or float, optional (default=1)
        If greater than or equal to 1, then `step` corresponds to the (integer)
        number of features to add at each iteration.
        If within (0.0, 1.0), then `step` corresponds to the percentage
        (rounded down) of features to add at each iteration.
estimator_params : dict
Parameters for the external estimator.
This attribute is deprecated as of version 0.16 and will be removed in
0.18. Use estimator initialisation or set_params method instead.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that ``ranking_[i]`` corresponds to the
ranking position of the i-th feature. Selected (i.e., estimated
best) features are assigned rank 1.
estimator_ : object
The external estimator fit on the reduced dataset.
Examples
--------
    The following example shows how to retrieve the 5 most informative
    features in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFE
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFE(estimator, 5, step=1)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] <NAME>., <NAME>., <NAME>., & <NAME>., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, n_features_to_select=None, step=1,
estimator_params=None, verbose=0):
self.estimator = estimator
self.n_features_to_select = n_features_to_select
self.step = step
self.estimator_params = estimator_params
self.verbose = verbose
@property
def _estimator_type(self):
return self.estimator._estimator_type
def fit(self, X, y):
"""Fit the RFE model and then the underlying estimator on the selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values.
"""
return self._fit(X, y)
def _fit(self, X, y, step_score=None):
#X, y = check_X_y(X, y, "csc")
# Initialization
n_features = X.shape[1]
if self.n_features_to_select is None:
n_features_to_select = n_features // 2
else:
n_features_to_select = self.n_features_to_select
if 0.0 < self.step < 1.0:
step = int(max(1, self.step * n_features))
else:
step = int(self.step)
if step <= 0:
raise ValueError("Step must be >0")
if self.estimator_params is not None:
warnings.warn("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. The "
"parameter is no longer necessary because the value "
"is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
support_ = np.zeros(n_features, dtype=np.bool)
support_[0] = True
ranking_ = np.zeros(n_features, dtype=np.int)
ranking_[0] = 1
if step_score:
self.scores_ = []
        # Forward selection
        while np.sum(support_) < n_features_to_select:
            # Currently selected features
            features = np.arange(n_features)[support_]
            # Rank features using the estimator fitted on the current selection
estimator = clone(self.estimator)
if self.estimator_params:
estimator.set_params(**self.estimator_params)
if self.verbose > 0:
print("Fitting estimator with %d features." % np.sum(support_))
estimator.fit(X[:, features], y)
# Get coefs
if hasattr(estimator, 'coef_'):
coefs = estimator.coef_
elif hasattr(estimator, 'feature_importances_'):
coefs = estimator.feature_importances_
else:
raise RuntimeError('The classifier does not expose '
'"coef_" or "feature_importances_" '
'attributes')
# Get ranks
if coefs.ndim > 1:
ranks = np.argsort(safe_sqr(coefs).sum(axis=0))[::-1]
else:
ranks = np.argsort(safe_sqr(coefs))[::-1]
# for sparse case ranks is matrix
ranks = np.ravel(ranks)
            # Number of features to add in this iteration
threshold = min(step, n_features_to_select - np.sum(support_))
# Compute step score on the previous selection iteration
# because 'estimator' must use features
# that have not been eliminated yet
if step_score:
self.scores_.append(step_score(estimator, features))
support_[features[ranks][:threshold]] = True
ranking_[np.logical_not(support_)] += 1
# Set final attributes
features = np.arange(n_features)[support_]
self.estimator_ = clone(self.estimator)
if self.estimator_params:
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(X[:, features], y)
# Compute step score when only n_features_to_select features left
if step_score:
self.scores_.append(step_score(self.estimator_, features))
self.n_features_ = support_.sum()
self.support_ = support_
self.ranking_ = ranking_
return self
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Reduce X to the selected features and then predict using the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape [n_samples]
The predicted target values.
"""
return self.estimator_.predict(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def score(self, X, y):
"""Reduce X to the selected features and then return the score of the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The target values.
"""
return self.estimator_.score(self.transform(X), y)
def _get_support_mask(self):
return self.support_
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
return self.estimator_.decision_function(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
return self.estimator_.predict_proba(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
return self.estimator_.predict_log_proba(self.transform(X))
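# Minimal usage sketch (mirrors the RFE example in the class docstring; results not verified):
#   from sklearn.datasets import make_friedman1
#   from sklearn.svm import SVR
#   X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
#   selector = FFS(SVR(kernel="linear"), n_features_to_select=5, step=1).fit(X, y)
#   selector.support_   # boolean mask of the selected features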
| 3.21875 | 3 |
app/app/util/csv_transformer.py | Oblo-cit-sci/Oblo-backend | 0 | 12797656 | <gh_stars>0
from csv import DictWriter
from datetime import datetime
from typing import List
from app.models.orm import Entry
from app.util.consts import TYPE, NAME, TERMINAL_ASPECT_TYPES, VALUE
regular_entry_base_meta_columns = [
"uuid",
"creation_ts",
"domain",
"template",
"template_version",
"type",
"last_edit_ts",
"version",
"title",
"status",
"description",
"language",
"privacy",
"license",
"image",
"attached_files",
"actors",
]
straight_grab = [
"uuid",
"creation_ts",
"domain",
"template_version",
"type",
"last_edit_ts",
"version",
"title",
"status",
"description",
"language",
"privacy",
"license",
"image",
"template_version",
]
list_item_sep = "|"
inner_value_sep = ":"
transformer = {
"uuid": lambda uuid: str(uuid),
"creation_ts": lambda ts: datetime.strftime(ts, "%Y"),
"last_edit_ts": lambda ts: datetime.strftime(ts, "%Y"),
"template": lambda template: template.title,
"actors": lambda actors: cell_separated(
list(map(lambda entry_role: entry_role.csv_format(inner_value_sep), actors))
),
}
def cell_separated(values: List[str]):
return list_item_sep.join(values)
def transform_to_csv(entry: Entry, template: Entry):
res = {}
print(
cell_separated(
list(
map(
lambda entry_role: entry_role.csv_format(inner_value_sep),
entry.actors,
)
)
)
)
for col in regular_entry_base_meta_columns:
# if col in straight_grab:
val = getattr(entry, col)
if not val:
val = ""
if col in transformer:
val = transformer[col](val)
res[col] = val
    # temp file method. does not work at the moment
# fp = tempfile.TemporaryFile()
# bh = list(map(lambda v: v.encode("utf-8"), regular_entry_base_meta_columns))
no_t = open("t.csv", "w")
writer = DictWriter(no_t, regular_entry_base_meta_columns)
# print(bh)
# print(writer.fieldnames)
writer.writeheader()
writer.writerow(res)
no_t.close()
csv_text = open("t.csv").read()
# temp file method...
# fp.seek(0)
# csv_text = fp.read()
for aspect in template.aspects:
res = resolve_values(aspect, entry.values[aspect.name])
return csv_text
def resolve_values(aspect, value):
a_name = aspect.get(NAME)
a_type = aspect[TYPE]
if a_type in TERMINAL_ASPECT_TYPES:
return {a_name: value[VALUE]}
| 2.5 | 2 |
notification-api/modules/app/controllers/user.py | ibrahim-sakr/swvl_notifications | 1 | 12797657 | <filename>notification-api/modules/app/controllers/user.py
import os
from flask import request, jsonify
from app import app, mongo
import datetime
import random
import string
@app.route('/users', methods=['GET'])
def usersIndex():
data = list(mongo.db.users.find())
return jsonify({'result': data}), 200
@app.route('/users', methods=['POST'])
def usersCreate():
user = request.get_json()
if user.get('name', None) is not None and user.get('email', None) is not None:
now = datetime.datetime.now()
user['token'] = <PASSWORD>()
user['created_at'] = now
user['updated_at'] = now
mongo.db.users.insert_one(user)
return jsonify({'ok': True, 'message': 'User created successfully!', 'user': user}), 200
else:
return jsonify({'ok': False, 'message': 'Bad request parameters!'}), 400
@app.route('/users', methods=['DELETE'])
def usersDelete():
user = request.get_json()
if user.get('email', None) is not None:
db_response = mongo.db.users.delete_one({'email': user['email']})
if db_response.deleted_count == 1:
response = {'ok': True, 'message': 'record deleted'}
else:
response = {'ok': True, 'message': 'no record found'}
return jsonify(response), 200
else:
return jsonify({'ok': False, 'message': 'Bad request parameters!'}), 400
def randomString(stringLength=50):
"""Generate a random string of fixed length """
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(stringLength))
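# Example requests (illustration only; host and port depend on how the app is run):
#   curl http://localhost:5000/users
#   curl -X POST http://localhost:5000/users -H "Content-Type: application/json" \
#        -d '{"name": "Jane", "email": "jane@example.com"}'
#   curl -X DELETE http://localhost:5000/users -H "Content-Type: application/json" \
#        -d '{"email": "jane@example.com"}'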
| 2.84375 | 3 |
logcausality/config.py | cpflat/LogCausalAnalysis | 20 | 12797658 | #!/usr/bin/env python
# coding: utf-8
import sys
import os
import datetime
import logging
import collections
import ConfigParser
DEFAULT_CONFIG_NAME = "/".join((os.path.dirname(__file__),
"config.conf.default"))
LOGGER_NAME = __name__.rpartition(".")[-1]
_logger = logging.getLogger(LOGGER_NAME)
_ch = logging.StreamHandler()
_ch.setLevel(logging.WARNING)
_logger.setLevel(logging.WARNING)
_logger.addHandler(_ch)
class ExtendedConfigParser(ConfigParser.SafeConfigParser, object):
def __init__(self):
#super(ExtendedConfigParser, self).__init__(self)
ConfigParser.SafeConfigParser.__init__(self)
self.filename = None # source filename
def read(self, fn):
if not os.path.exists(fn):
raise IOError("{0} not found".format(fn))
else:
self.filename = fn
return super(ExtendedConfigParser, self).read(fn)
def update(self, conf, warn = False):
# Update existing configurations in defaults
# If new option found, warn and ignore it
def warn_opt(section, option, fn):
_logger.warning("""
Invalid option name {0} in section {1} in {2}, ignored
""".strip().format(option, section, fn))
def warn_sec(section, fn):
_logger.warning("Invalid section name {0} in {1}".format(
section, fn))
for section in conf.sections():
if not section in self.sections():
self.add_section(section)
if warn:
warn_sec(section, conf.filename)
for option in conf.options(section):
value = conf.get(section, option)
if not option in self.options(section):
if warn:
warn_opt(section, option, conf.filename)
self.set(section, option, value)
return self
def merge(self, conf):
# Do NOT allow updateing existing options in defaults
for section in conf.sections():
if not section in self.sections():
self.add_section(section)
for option in conf.options(section):
if not self.has_option(section, option):
value = conf.get(section, option)
self.set(section, option, value)
return self
def gettuple(self, section, name):
ret = self.get(section, name)
return tuple(e.strip() for e in ret.split(",")
if not e.strip() == "")
def getlist(self, section, name):
ret = self.get(section, name)
if ret == "":
return None
else:
return [e.strip() for e in ret.split(",")
if not e.strip() == ""]
def getdt(self, section, name):
ret = self.get(section, name)
if ret == "":
return None
else:
return datetime.datetime.strptime(ret.strip(),
"%Y-%m-%d %H:%M:%S")
def getterm(self, section, name):
ret = self.get(section, name)
if ret == "":
return None
else:
return tuple(datetime.datetime.strptime(e.strip(),
"%Y-%m-%d %H:%M:%S") for e in ret.split(","))
def getdur(self, section, name):
ret = self.get(section, name)
if ret == "":
return None
else:
return str2dur(ret)
class GroupDef():
"""
Define grouping by external text
Rules:
description after # in a line will be recognized as comment
line "[GROUP_NAME]" will change group to set
other lines add elements in the group set with GROUP_NAME line
"""
def __init__(self, fn, default_val = None):
self.gdict = {}
self.rgdict = {}
self.default = default_val
if fn is None or fn == "":
pass
else:
self.open_def(fn)
def open_def(self, fn):
group = None
with open(fn, 'r') as f:
for line in f:
# ignore after comment sygnal
line = line.strip().partition("#")[0]
if line == "":
continue
elif line[0] == "#":
continue
elif line[0] == "[" and line[-1] == "]":
group = line[1:].strip("[]")
else:
if group is None:
raise ValueError("no group definition before value")
val = line
self.gdict.setdefault(group, []).append(val)
self.rgdict.setdefault(val, []).append(group)
def setdefault(self, group):
self.default = group
def groups(self):
return self.gdict.keys()
def values(self):
return self.rgdict.keys()
def ingroup(self, group, val):
        if val in self.rgdict:
            return group in self.rgdict[val]
else:
return False
def get_group(self, val):
if self.rgdict.has_key(val):
return self.rgdict[val]
else:
return []
def get_value(self, group):
if self.gdict.has_key(group):
return self.gdict[group]
else:
return []
def iter_def(self):
for group, l_val in self.gdict.iteritems():
for val in l_val:
yield group, val
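# Illustrative group definition file for GroupDef (assumed layout, following the class docstring):
#
#   # comments after '#' are ignored
#   [group_a]
#   value1
#   value2
#   [group_b]
#   value3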
def str2dur(string):
"""
Note:
\d+s: \d seconds
\d+m: \d minutes
\d+h: \d hours
\d+d: \d days
\d+w: \d * 7 days
"""
if "s" in string:
num = int(string.partition("s")[0])
return datetime.timedelta(seconds = num)
elif "m" in string:
num = int(string.partition("m")[0])
return datetime.timedelta(minutes = num)
elif "h" in string:
num = int(string.partition("h")[0])
return datetime.timedelta(hours = num)
elif "d" in string:
num = int(string.partition("d")[0])
return datetime.timedelta(days = num)
elif "w" in string:
num = int(string.partition("w")[0])
return datetime.timedelta(days = num * 7)
else:
raise ValueError("Duration string invalid")
def open_config(fn = None):
conf = ExtendedConfigParser()
conf.read(DEFAULT_CONFIG_NAME)
if fn is not None:
conf2 = ExtendedConfigParser()
conf2.read(fn)
conf.update(conf2, warn = True)
if conf.has_section("general"):
if conf.has_option("general", "import"):
if not conf.get("general", "import") == "":
conf3 = ExtendedConfigParser()
conf3.read(conf.get("general", "import"))
conf.merge(conf3)
conf.filename = fn
return conf
# common objects for logging
def set_common_logging(conf, logger = None, l_logger_name = [],
lv = logging.INFO):
fn = conf.get("general", "info_log")
fmt = logging.Formatter(
fmt = "%(asctime)s %(levelname)s (%(processName)s) %(message)s",
datefmt = "%Y-%m-%d %H:%M:%S")
#lv = logging.INFO
if fn == "":
ch = logging.StreamHandler()
else:
ch = logging.FileHandler(fn)
ch.setFormatter(fmt)
ch.setLevel(lv)
if logger is not None:
logger.setLevel(lv)
logger.addHandler(ch)
for ln in l_logger_name:
temp = logging.getLogger(ln)
temp.setLevel(lv)
temp.addHandler(ch)
return ch
def release_common_logging(ch, logger = None, l_logger_name = None):
if logger is not None:
logger.removeHandler(ch)
if l_logger_name is not None:
for ln in l_logger_name:
temp = logging.getLogger(ln)
temp.removeHandler(ch)
def test_config(conf_name):
conf = open_config(conf_name)
for section in conf.sections():
print "[{0}]".format(section)
for option, value in conf.items(section):
print "{0} = {1}".format(option, value)
if __name__ == "__main__":
## import old config, merge it with new default config, and output
if len(sys.argv) < 2:
sys.exit("usage : {0} config".format(sys.argv[0]))
#default_conf = DEFAULT_CONFIG_NAME
conf = sys.argv[1]
test_config(conf)
#output = sys.argv[2]
#overwrite_config(default_conf, conf, output)
| 2.40625 | 2 |
Torrent Checker/Torrent_ip_checker.py | Utkarsh1308/FunUtils | 48 | 12797659 | #!/usr/bin/python
import Tkinter
import ctypes
import tkMessageBox
import socket
import sys
import urllib2
import httplib
from bs4 import BeautifulSoup as BS
class window(Tkinter.Tk):
Ip_text = ''
def __init__(self,parent):
Tkinter.Tk.__init__(self,parent)
self.parent = parent
self.iconbitmap(default='eye.ico')
self.geometry("500x100")
self.resizable(0, 0)
self.initialize()
def initialize(self):
L = Tkinter.Label(master=self,text="Targeted IP:")
L.pack()
self.E = Tkinter.Entry(master=self,width=50)
self.E.pack()
B= Tkinter.Button(master=self,text='Check IP',command=self.checkIp)
B.pack()
self.grid()
    def work(self, IP):
        # HTTPConnection expects only the host name; the path goes into request()
        conn = httplib.HTTPConnection("www.iknowwhatyoudownload.com")
        conn.request("GET", "/en/peer/?ip=" + IP)
        response = conn.getresponse()
        data = response.read()
        soup = BS(data, "html.parser")
        table = soup.find('tbody')
        rows = table.findAll('tr')
        RESULT = ""
        for tr in rows:
            cols = tr.findAll('td')
            Begin, End, Category, title, size = [c.text for c in cols]
            RESULT += "\n" + Begin + " " + Category + " " + title + " " + size
        toplevel = Tkinter.Toplevel()
        label = Tkinter.Label(toplevel, text=RESULT, height=0, width=100)
        label.pack()
        print RESULT
def checkIp(self):
IP=self.E.get()
try:
urllib2.urlopen('http://172.16.17.32',timeout=1)
print IP
self.work(IP)
except urllib2.URLError as err:
tkMessageBox.showerror('No Internet Connection','You have no Internet Connection')
if __name__ == "__main__":
app = window(None)
app.title('Torrent IP tracker - By <NAME> v0.1')
AppID= "Torrent Tracker By Houssem. Torrent IP Tracker.V0.1"
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(AppID)
app.mainloop()
| 2.90625 | 3 |
data_visualization/api_blueprint/views.py | danielrenes/car-data-visualization | 0 | 12797660 | <gh_stars>0
import json
from flask import g, url_for, jsonify, abort, request
from sqlalchemy.exc import IntegrityError
from . import api
from .. import db
from ..models import ViewWrapper, View, Subview
from ..queries import query_all_views, query_get_view_by_id, query_get_sensor_by_name
@api.route('/views', methods=['GET'])
def get_views():
return jsonify({'views': [view.to_dict() for view in query_all_views(g.current_user.id)]})
@api.route('/view/<id>', methods=['GET'])
def get_view(id):
view = query_get_view_by_id(id, g.current_user.id)
return jsonify(view.to_dict())
@api.route('/view', methods=['POST'])
def add_view():
view = ViewWrapper.from_dict(json.loads(request.data))
db.session.add(view)
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
abort(400)
resp = jsonify(view.to_dict())
resp.status_code = 201
resp.headers['Location'] = url_for('api.get_view', id=view.id)
return resp
@api.route('/view/<id>', methods=['PUT'])
def modify_view(id):
view = query_get_view_by_id(id, g.current_user.id)
request_data = json.loads(request.data)
if 'count' in request_data.iterkeys():
if not view.check_count(request_data['count']):
abort(400)
if 'refresh_time' in request_data.iterkeys():
if not view.check_refresh_time(request_data['refresh_time']):
abort(400)
for key, value in request_data.iteritems():
        if key != 'id':
setattr(view, key, value)
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
abort(400)
resp = jsonify(view.to_dict())
resp.status_code = 201
return resp
@api.route('/view/<id>', methods=['DELETE'])
def remove_view(id):
view = query_get_view_by_id(id, g.current_user.id)
db.session.delete(view)
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
abort(400)
return '', 204
@api.route('/view/dynamic/add', methods=['GET'])
def add_dynamic_view():
sensor_name = request.args.get('sensor_name')
sensor_id = query_get_sensor_by_name(sensor_name, g.current_user.id).id
view = View.from_dict({
'name': 'dynamic',
'count': 1,
'refresh_time': 10,
'user_id': g.current_user.id
})
db.session.add(view)
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
abort(400)
subview = Subview.from_dict({
'sensor_id': sensor_id,
'chartconfig_id': 1,
'view_id': view.id
})
db.session.add(subview)
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
abort(400)
resp = jsonify({'view_id': view.id})
resp.status_code = 201
return resp
@api.route('/view/dynamic/remove', methods=['GET'])
def remove_dynamic_view():
view_id = request.args.get('view_id')
db.session.delete(query_get_view_by_id(view_id, g.current_user.id))
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
abort(400)
return '', 204
| 2.28125 | 2 |
app.py | Dkadariya/SBN_pos_server | 0 | 12797661 | <reponame>Dkadariya/SBN_pos_server
from flask import Flask, jsonify, request
from flask_restful import Resource, Api
from flask_cors import CORS
from fileIO import commit_file
# importing methods from SQL handler module for CRUD operations
from sql_handler import get_item, insert_item, remove_item, sell_item, list_items
import json
app = Flask(__name__)
CORS(app)
api=Api(app)
# base route definition
class HelloWorld(Resource):
def get(self):
return"Welcome to SBN Point of Sale application endpoint. Please refer to API documentation for available API endpoints and methods."
# definition for Items
# allows getting item details by id and adding a new item to the DB
class Items(Resource):
# POST method
def post(self):
id = request.form['item_id']
# calling get_item method from database handler module
item_detail = get_item(id)
# checking database response for given ID and generating response
if item_detail!=[]:
resp={
"id":item_detail[0],
"name":item_detail[1],
"category":item_detail[2],
"total_count":item_detail[3],
"price":item_detail[4],
"date_created":item_detail[5]
}
else:
resp={"Error":"No matching record for given ID"}
return jsonify(resp)
# PUT method
def put(self):
# extract item details from request body form
param=request.form['details']
print (param)
# insert item details into the database
commit = insert_item(json.loads(param))
# return status keyword and description
return jsonify({"status":commit[0],"desc":commit[1]})
# DELETE by ID
def delete(self):
# get item id from request form
id=request.form['item_id']
# pass id to handler method to remove record
commit = remove_item (id)
# return commit status
return jsonify({"status":commit[0],"desc":commit[1]})
class Sale(Resource):
def post(self):
        # get list of items sold from the request form parameter
sold_items = json.loads(request.form['sold'])
# pass the list to update the items in database through handler
resp = sell_item (sold_items)
# return handler response
return jsonify(resp)
class get_all(Resource):
def get(self):
# get all records from the inventory table in database
resp = list_items()
# return item list
return jsonify(resp)
class log_sale(Resource):
def put(self):
# extract item details from request body form
data=request.form['order_details']
# insert item details into the database
response = commit_file(data)
return (response)
# API resources routing
api.add_resource(HelloWorld, '/')
api.add_resource(Items, '/item')
api.add_resource(Sale, '/update_item')
api.add_resource(get_all, '/get_items')
api.add_resource(log_sale, '/log_sale')
if __name__ == '__main__':
app.run(host='0.0.0.0',debug=True)
| 2.359375 | 2 |
bitbots_vision/src/bitbots_vision/dynamic_color_space.py | MosHumanoid/bitbots_vision | 3 | 12797662 | <filename>bitbots_vision/src/bitbots_vision/dynamic_color_space.py
#! /usr/bin/env python3
import cv2
import yaml
import rospy
import rospkg
import numpy as np
from cv_bridge import CvBridge
from collections import deque
from sensor_msgs.msg import Image
from bitbots_msgs.msg import ColorSpace, Config
from bitbots_vision.vision_modules import field_boundary, color, debug, evaluator
class DynamicColorSpace:
def __init__(self):
# type: () -> None
"""
        DynamicColorSpace is a ROS node that is used by the vision node to better recognize the field color.
DynamicColorSpace is able to calculate dynamically changing color spaces to accommodate e.g.
changing lighting conditions or to compensate for not optimized base color space files.
This node subscribes to an Image-message (default: image_raw) and to the 'vision_config'-message.
This node publishes ColorSpace-messages.
Initiating 'bitbots_dynamic_color_space' node.
:return: None
"""
# Init package
rospack = rospkg.RosPack()
self.package_path = rospack.get_path('bitbots_vision')
rospy.init_node('bitbots_dynamic_color_space')
rospy.loginfo('Initializing dynamic color-space...')
self.bridge = CvBridge()
# Init params
self.vision_config = {}
# Subscribe to 'vision_config'-message
# The message topic name MUST be the same as in the config publisher in vision.py
self.sub_vision_config_msg = rospy.Subscriber(
'vision_config',
Config,
self.vision_config_callback,
queue_size=1,
tcp_nodelay=True)
rospy.spin()
def vision_config_callback(self, msg):
# type: (Config) -> None
"""
This method is called by the 'vision_config'-message subscriber.
Load and update vision config.
Handle config changes.
This callback is delayed (about 30 seconds) after changes through dynamic reconfigure
:param Config msg: new 'vision_config'-message subscriber
:return: None
"""
# Load dict from string in yaml-format in msg.data
vision_config = yaml.load(msg.data)
self.debug_printer = debug.DebugPrinter(
debug_classes=debug.DebugPrinter.generate_debug_class_list_from_string(
vision_config['vision_debug_printer_classes']))
self.runtime_evaluator = evaluator.RuntimeEvaluator(None)
        # Print status of dynamic color space after toggling the 'dynamic_color_space_active' parameter
if 'dynamic_color_space_active' not in self.vision_config or \
vision_config['dynamic_color_space_active'] != self.vision_config['dynamic_color_space_active']:
if vision_config['dynamic_color_space_active']:
rospy.loginfo('Dynamic color space turned ON.')
else:
rospy.logwarn('Dynamic color space turned OFF.')
# Set publisher of ColorSpace-messages
if 'ROS_dynamic_color_space_msg_topic' not in self.vision_config or \
self.vision_config['ROS_dynamic_color_space_msg_topic'] != vision_config['ROS_dynamic_color_space_msg_topic']:
if hasattr(self, 'pub_color_space'):
self.pub_color_space.unregister()
self.pub_color_space = rospy.Publisher(
vision_config['ROS_dynamic_color_space_msg_topic'],
ColorSpace,
queue_size=1)
# Set Color- and FieldBoundaryDetector
self.color_detector = color.DynamicPixelListColorDetector(
self.debug_printer,
self.package_path,
vision_config)
self.field_boundary_detector = field_boundary.FieldBoundaryDetector(
self.color_detector,
vision_config,
self.debug_printer,
used_by_dyn_color_detector=True)
# Reset queue
if hasattr(self, 'color_value_queue'):
self.color_value_queue.clear()
# Set params
self.queue_max_size = vision_config['dynamic_color_space_queue_max_size']
self.color_value_queue = deque(maxlen=self.queue_max_size)
self.pointfinder = Pointfinder(
self.debug_printer,
vision_config['dynamic_color_space_threshold'],
vision_config['dynamic_color_space_kernel_radius'])
self.heuristic = Heuristic(self.debug_printer)
# Subscribe to Image-message
if 'ROS_img_msg_topic' not in self.vision_config or \
self.vision_config['ROS_img_msg_topic'] != vision_config['ROS_img_msg_topic']:
if hasattr(self, 'sub_image_msg'):
self.sub_image_msg.unregister()
self.sub_image_msg = rospy.Subscriber(
vision_config['ROS_img_msg_topic'],
Image,
self.image_callback,
queue_size=vision_config['ROS_img_queue_size'],
tcp_nodelay=True,
buff_size=60000000)
# https://github.com/ros/ros_comm/issues/536
self.vision_config = vision_config
def image_callback(self, image_msg):
# type: (Image) -> None
"""
This method is called by the Image-message subscriber.
        Old Image-messages are dropped.
        Sometimes the queue gets too large, even when its size is limited to 1.
        That is why we drop old images manually.
:param Image image_msg: new Image-message from Image-message subscriber
:return: None
"""
# Turn off dynamic color space, if parameter of config is false
if 'dynamic_color_space_active' not in self.vision_config or \
not self.vision_config['dynamic_color_space_active']:
return
# Drops old images
image_age = rospy.get_rostime() - image_msg.header.stamp
if image_age.to_sec() > 0.1:
self.debug_printer.info('Dynamic color space: Dropped Image-message', 'image')
return
self.handle_image(image_msg)
def handle_image(self, image_msg):
# type: (Image) -> None
"""
This method handles the processing of an Image-message.
New colors are calculated, appended to queue and published.
:param Image image_msg: Image-message
:return: None
"""
# Converting the ROS image message to CV2-image
image = self.bridge.imgmsg_to_cv2(image_msg, 'bgr8')
# Get new dynamic colors from image
colors = self.get_new_dynamic_colors(image)
# Add new colors to the queue
self.color_value_queue.append(colors)
# Publishes to 'ROS_dynamic_color_space_msg_topic'
self.publish(image_msg)
def get_unique_color_values(self, image, coordinate_list):
# type: (np.array, np.array) -> np.array
"""
Returns array of unique colors values from a given image at pixel coordinates from given list.
:param np.array image: image
:param np.array coordinate_list: list of pixel coordinates
:return np.array: array of unique color values from image at pixel coordinates
"""
# Create list of color values from image at given coordinates
colors = image[coordinate_list[0], coordinate_list[1]]
# np.unique requires list with at least one element
if colors.size > 0:
unique_colors = np.unique(colors, axis=0)
else:
unique_colors = colors
return unique_colors
def get_new_dynamic_colors(self, image):
# type: (np.array) -> np.array
"""
Returns array of new dynamically calculated color values.
Those values were filtered by the heuristic.
:param np.array image: image
:return np.array: array of new dynamic color values
"""
# Masks new image with current color space
mask_image = self.color_detector.mask_image(image)
# Get mask from field_boundary detector
self.field_boundary_detector.set_image(image)
mask = self.field_boundary_detector.get_mask()
if mask is not None:
# Get array of pixel coordinates of color candidates
pixel_coordinates = self.pointfinder.get_coordinates_of_color_candidates(mask_image)
# Get unique color values from the candidate pixels
color_candidates = self.get_unique_color_values(image, pixel_coordinates)
# Filters the colors using the heuristic.
colors = np.array(self.heuristic.run(color_candidates, image, mask), dtype=np.int32)
return colors
return np.array([[]])
def queue_to_color_space(self, queue):
# type: (deque) -> np.array
"""
Returns color space as array of all queue elements stacked, which contains all colors from the queue.
:param dequeue queue: queue of array of color values
:return np.array: color space
"""
# Initializes an empty color space
color_space = np.array([]).reshape(0,3)
# Stack every color space in the queue
for new_color_value_list in queue:
color_space = np.append(color_space, new_color_value_list[:,:], axis=0)
# Return a color space, which contains all colors from the queue
return color_space
def publish(self, image_msg):
# type: (Image) -> None
"""
Publishes the current color space via ColorSpace-message.
:param Image image_msg: 'image_raw'-message
:return: None
"""
# Get color space from queue
color_space = self.queue_to_color_space(self.color_value_queue)
# Create ColorSpace-message
color_space_msg = ColorSpace()
color_space_msg.header.frame_id = image_msg.header.frame_id
color_space_msg.header.stamp = image_msg.header.stamp
color_space_msg.blue = color_space[:,0].tolist()
color_space_msg.green = color_space[:,1].tolist()
color_space_msg.red = color_space[:,2].tolist()
# Publish ColorSpace-message
self.pub_color_space.publish(color_space_msg)
class Pointfinder():
def __init__(self, debug_printer, threshold, kernel_radius):
# type: (DebugPrinter, float, int) -> None
"""
        Pointfinder is used to find false-color pixels whose true-color / false-color ratio in their surroundings in the masked image exceeds the threshold.
:param DebugPrinter: debug-printer
:param float threshold: necessary amount of previously detected color in percentage
:param int kernel_radius: radius surrounding the center element of kernel matrix, defines relevant surrounding of pixel
:return: None
"""
# Init params
self.debug_printer = debug_printer
self.threshold = threshold
self.kernel_radius = kernel_radius
self.kernel_edge_size = 2 * self.kernel_radius + 1
# Defines kernel
# Init kernel as M x M matrix of ONEs
self.kernel = None
self.kernel = np.ones((self.kernel_edge_size, self.kernel_edge_size))
# Set value of the center element of the matrix to the negative of the count of the matrix elements
# In case the value of this pixel is 1, it's value in the sum_array would be 0
self.kernel[int(np.size(self.kernel, 0) / 2), int(np.size(self.kernel, 1) / 2)] = - self.kernel.size
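        # Worked example (illustration): with kernel_radius=1 the kernel is a 3x3 matrix of
        # ones whose center element is -9. For a pixel that is not yet masked (value 0) the
        # filter response equals the number of masked neighbors, while an already masked
        # pixel (value 1) gets -9 added and therefore never exceeds the threshold.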
def get_coordinates_of_color_candidates(self, masked_image):
# type (np.array) -> np.array
"""
Returns array of pixel coordinates of color candidates.
        Color candidates are false-color pixels whose true-color / false-color ratio in their surroundings in the masked image exceeds the threshold.
:param np.array masked_image: masked image
:return np.array: list of indices
"""
# Normalizes the masked image to values of 1 or 0
normalized_image = np.floor_divide(masked_image, 255, dtype=np.int16)
# Calculates the count of neighbors for each pixel
sum_array = cv2.filter2D(normalized_image, -1, self.kernel, borderType=0)
# Returns all pixels with a higher true-color / false-color ratio than the threshold
return np.array(np.where(sum_array > self.threshold * (self.kernel.size - 1)))
class Heuristic:
def __init__(self, debug_printer):
# type: (DebugPrinter) -> None
"""
Filters new color space colors according to their position relative to the field boundary.
Only colors that occur under the field boundary and have no occurrences over the field boundary get picked.
:param DebugPrinter debug_printer: Debug-printer
:return: None
"""
self.debug_printer = debug_printer
def run(self, color_list, image, mask):
# type: (np.array, np.array, np.array) -> np.array
"""
This method filters a given list of colors using the original image and a field-boundary-mask.
:param np.array color_list: list of color values, that need to be filtered
:param np.array image: raw vision image
:param np.array mask: binary field-boundary-mask
:return np.array: filtered list of colors
"""
# Simplifies the handling by merging the three color channels
color_list = self.serialize(color_list)
# Making a set and removing duplicated colors
color_set = set(color_list)
# Generates whitelist
whitelist = self.recalculate(image, mask)
# Takes only whitelisted values
color_set = color_set.intersection(whitelist)
# Restructures the color channels
return self.deserialize(np.array(list(color_set)))
def recalculate(self, image, mask):
# type: (np.array, np.array) -> set
"""
Generates a whitelist of allowed colors using the original image and the field-boundary-mask.
:param np.array image: image
:param np.array mask: field-boundary-mask
:return set: whitelist
"""
# Generates whitelist
colors_over_field_boundary, colors_under_field_boundary = self.unique_colors_for_partitions(image, mask)
return set(colors_under_field_boundary) - set(colors_over_field_boundary)
def unique_colors_for_partitions(self, image, mask):
# type: (np.array, np.array) -> (np.array, np.array)
"""
Masks picture and returns the unique colors that occurs in both mask partitions.
:param np.array image: image
:param np.array mask: field-boundary-mask
:return np.array: colors over the field boundary
:return np.array: colors under the field boundary
"""
# Calls a function to calculate the number of occurrences of all colors in the image
return (self.get_unique_colors(cv2.bitwise_and(image, image, mask=255 - mask)),
self.get_unique_colors(cv2.bitwise_and(image, image, mask=mask)))
def get_unique_colors(self, image):
# type: (np.array) -> np.array
"""
Calculates unique color for an input image.
:param np.array image: image
:return np.array: unique colors
"""
# Simplifies the handling by merging the 3 color channels
serialized_img = self.serialize(np.reshape(image, (1, int(image.size / 3), 3))[0])
# Returns unique colors in the image
return np.unique(serialized_img, axis=0)
def serialize(self, input_matrix):
# type: (np.array) -> np.array
"""
        Serializes the different color channels of a list of colors into a single channel (like an HTML color code).
:param np.array input_matrix: list of colors values with 3 channels
:return: list of serialized colors
"""
return np.array(
np.multiply(input_matrix[:, 0], 256 ** 2) \
+ np.multiply(input_matrix[:, 1], 256) \
+ input_matrix[:, 2], dtype=np.int32)
def deserialize(self, input_matrix):
# type: (np.array) -> np.array
"""
Resolves the serialization of colors into different channels. (Like a HTML color code)
:param np.array input_matrix: serialized colors
:return: original colors with 3 channels
"""
new_matrix = np.zeros((input_matrix.size, 3))
new_matrix[:, 0] = np.array(input_matrix // 256 ** 2, dtype=np.uint8)
new_matrix[:, 1] = np.array((input_matrix - (new_matrix[:, 0] * 256 ** 2)) / 256, dtype=np.uint8)
new_matrix[:, 2] = np.array((input_matrix - (new_matrix[:, 0] * 256 ** 2) - (new_matrix[:, 1] * 256)), dtype=np.uint8)
return new_matrix
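# Worked example (illustration only): serialize() packs a BGR triple into a single integer
# like a hex color code, e.g. [1, 2, 3] -> 1 * 256**2 + 2 * 256 + 3 = 66051, and
# deserialize() maps 66051 back to the three channels [1., 2., 3.].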
if __name__ == '__main__':
DynamicColorSpace()
| 2.5 | 2 |
setup.py | RuyiLi/pituo | 0 | 12797663 | <reponame>RuyiLi/pituo
from setuptools import setup
with open('README.md') as f:
long_description = f.read()
setup(
name='pituo',
packages=['pituo'],
version='0.0.2',
license='MIT',
description='Go-styled errors in Python.',
long_description=long_description,
long_description_content_type="text/markdown",
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/RuyiLi/pituo',
keywords=['go', 'error', 'exception'],
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 2',
'Operating System :: OS Independent',
],
)
| 1.453125 | 1 |
btk20_src/lib/pykaldiarkio.py | musiclvme/distant_speech_recognition | 136 | 12797664 | <reponame>musiclvme/distant_speech_recognition<filename>btk20_src/lib/pykaldiarkio.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# MIT License
#
# Copyright (c) 2018 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Basic classes to read/write a binary Kaldi ark file
"""
import struct, numpy
BFM_SYM = b'BFM '
BIV_SYM = b'B'
FEAT_BYTE_SIZE = b'\x04'
NULLC = b'\0'
WAV_SYM = b'RIFF'
class KaldiArkReader:
"""
    Base class for reading a Kaldi ark file
"""
def __init__(self, store_image=False):
"""
Constructor of KaldiArkReader
:params store_image: Every utterance data in the ark file will be kept in RAM if True
"""
self.arkfp = None
self.curr_arkfile = None
if store_image == True:# store all the utterance data into image
self.arkdata = {} # arkdata[ID] = {matrix|vector}
self.uttids = [] # remember the order of utterance IDs in an ark file
else:
self.arkdata = None
self.uttids = None
def __enter__(self):
return self
def __iter__(self):
"""
Read each utterance from the ark file and return it
:returns : Python dictionary that contains the utterance ID as a key and data as a value
"""
        raise NotImplementedError('Implement this')
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def accumulate(self, uttid, dataelem):
"""
Store all the utterance data into the RAM if this is constructed with store_image = True.
"""
if self.arkdata is None:
self.arkdata = {}
self.uttids = []
self.arkdata[uttid] = dataelem
self.uttids.append(uttid)
def open(self, arkfile):
"""
Set the ark file to be read later
"""
if self.arkfp is not None:
raise IOError('call close() first')
self.arkfp = open(arkfile, 'rb')
self.curr_arkfile = arkfile
def close(self):
"""
Close the file pointer if it is opened
"""
if self.arkfp is not None:
self.arkfp.close()
self.arkfp = None
self.curr_arkfile = None
if self.arkdata is not None:
self.arkdata = {}
self.uttids = []
def seek(self, position_in_bytes):
"""
Skip the file pointer. You can pick up the file position from .scp file
"""
if self.arkfp is not None:
self.arkfp.seek(position_in_bytes, 0)
class KaldiFeatArkReader(KaldiArkReader):
"""
Read a Kaldi .feat.ark file per utterance iteratively
"""
def __init__(self, store_image=False):
KaldiArkReader.__init__(self, store_image)
def __iter__(self):
"""
Return a tuple of an utterance ID and feature matrix
        where each row vector corresponds to a feature vector per frame
:returns: (string, numpy.matrix)
"""
uttid = b''
while True:
arkdata = self.arkfp.read(1)
if arkdata == b'':
                return  # end of feat ark file (PEP 479: do not raise StopIteration inside a generator)
c = struct.unpack('<s', arkdata)[0]
if c == b' ':
arkdata = self.arkfp.read(1) # skip '\0'
arkdata = self.arkfp.read(4) # read the end symbol 'BFM '
endSym = struct.unpack('<4s', arkdata)[0]
if endSym != BFM_SYM:
raise ValueError('ERROR: %s could not find BFM but %s' %(self.curr_arkfile, endSym))
arkdata = self.arkfp.read(1) # skip one byte data '\x04'
arkdata = self.arkfp.read(4) # read no. frames
frameN = struct.unpack( '<I', arkdata )[0]
arkdata = self.arkfp.read(1) # skip one byte data '\x04'
arkdata = self.arkfp.read(4) # read the dimension
featD = struct.unpack( '<I', arkdata )[0]
coeffN = frameN * featD
# read the coefficients
arkdata = self.arkfp.read(coeffN * 4)
feMat = numpy.reshape(struct.unpack('<%df' %(coeffN), arkdata), (frameN,featD))
uttid = uttid.decode()
if self.arkdata is not None:
self.accumulate(uttid, feMat)
uttid2data = {uttid:feMat}
uttid = b''
yield uttid2data
else:
uttid += c
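# Minimal usage sketch (illustrative only; the ark path below is hypothetical).
# Each reader yields {utterance_id: data} dictionaries, so a feature ark can be
# scanned like this:
def example_read_feat_ark(ark_path='/path/to/feats.ark'):
    """Illustrative helper, not part of the original module."""
    with KaldiFeatArkReader() as reader:
        reader.open(ark_path)
        for uttid2feat in reader:
            for uttid, feat_matrix in uttid2feat.items():
                print(uttid, feat_matrix.shape)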
class KaldiIntVectorArkReader(KaldiArkReader):
"""
Read a Kaldi integer-vector file per utterance iteratively
"""
def __init__(self, store_image=False):
KaldiArkReader.__init__(self, store_image)
def __iter__(self):
"""
Return a tuple of an utterance ID and vector
:returns: (string, numpy.vector)
"""
uttid = b''
while True:
arkdata = self.arkfp.read(1)
if arkdata == b'':
break
c = struct.unpack('<s', arkdata)[0]
if c == b' ':
arkdata = self.arkfp.read(1) # skip '\0'
arkdata = self.arkfp.read(1) # read the end symbol 'B'
endSym = struct.unpack('<s', arkdata)[0]
if endSym != BIV_SYM:
raise ValueError('ERROR: %s: Unmatched symbol %s!=%s' %(self.curr_arkfile, endSym, BIV_SYM))
arkdata = self.arkfp.read(1) # skip one byte data '\x04'
arkdata = self.arkfp.read(4) # read no. frames
frameN = struct.unpack('<i', arkdata)[0]
# read the coefficients
vals = []
for i in range(frameN):
arkdata = self.arkfp.read(1)
arkdata = self.arkfp.read(4)
vals.append(struct.unpack('<i', arkdata)[0])
intVec = numpy.array(vals)
uttid = uttid.decode()
if self.arkdata is not None:
self.accumulate(uttid, intVec)
uttid2data = {uttid:intVec}
uttid = b''
yield uttid2data
else:
uttid += c
class KaldiWavArkReader(KaldiArkReader):
"""
Read a Kaldi .wav.ark file per utterance iteratively
"""
def __init__(self, store_image=False):
KaldiArkReader.__init__(self, store_image)
self.riff_header = None
self.samplerate = None
self.num_channels = None
def get_riff_header(self):
return self.riff_header
def get_samplerate(self):
return self.samplerate
def get_num_channel(self):
return self.num_channels
def __iter__(self):
"""
Return a tuple of an utterance ID and audio samples as a 16-bit integer vector
:returns: (string, numpy.int16 vector)
"""
uttid = b''
while True:
arkdata = self.arkfp.read(1)
if arkdata == b'':
                return  # end of wav ark file (PEP 479: do not raise StopIteration inside a generator)
c = struct.unpack('<s', arkdata)[0]
if c == b' ':
# read the 44 Byte header block of the RIFF file
riff_header = self.arkfp.read(44) # skip '\0'
endSym = struct.unpack('<4s',riff_header[0:4])[0]
dataLength = struct.unpack('<L', riff_header[40:44])[0]
bitsPerSample = struct.unpack('<h', riff_header[34:36])[0]
# nsamps = int(dataLength / (bitsPerSample/8)) # divide 2 (Byte)
self.samplerate = struct.unpack('<L', riff_header[24:28])[0]
self.num_channels = struct.unpack('<h', riff_header[22:24])[0]
if endSym != WAV_SYM:
raise ValueError('ERROR: %s: could not find %s but %s' %(self.curr_arkfile, WAV_SYM, endSym))
if bitsPerSample != 16:
raise ValueError('ERROR: %s: expecting utterance with int16 format but %d bits per sample.' % (self.curr_arkfile, bitsPerSample))
uttBinary = self.arkfp.read(dataLength)
# expecting 16 bit per sample
uttInt = [struct.unpack('<h', uttBinary[i:i+2]) for i in numpy.arange(0,len(uttBinary), 2)]
samples = numpy.array(numpy.int16(numpy.resize(uttInt, (len(uttInt),))))
self.riff_header = riff_header
uttid = uttid.decode()
if self.arkdata is not None:
self.accumulate(uttid, samples)
uttid2data = {uttid:samples}
uttid = b''
yield uttid2data
else:
uttid += c
class KaldiArkWriter:
"""
Base class for writing a Kaldi ark file
"""
def __init__(self):
self.arkfp = None
    def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def open(self, arkfile):
if self.arkfp is not None:
raise IOError('call close() first')
self.arkfp = open(arkfile, 'wb')
def close(self):
if self.arkfp is not None:
self.arkfp.close()
self.arkfp = None
class KaldiFeatArkWriter(KaldiArkWriter):
"""
Write utterance data as a Kaldi .feat.ark file
"""
def __init__(self):
KaldiArkWriter.__init__(self)
def write(self, uttid2feats, uttids=None):
if uttids is None:
uttids = list(uttid2feats.keys())
for uttid in uttids:
feMat = uttid2feats[uttid]
frameN = len(feMat)
featD = len(feMat[0])
outData = b''
for c in uttid + ' ':
outData += struct.pack('<c', c.encode())
outData += struct.pack('<c', NULLC)
for c in BFM_SYM.decode():
outData +=struct.pack('<c', c.encode())
outData += struct.pack('<c', FEAT_BYTE_SIZE)
outData += struct.pack('<I', frameN)
outData += struct.pack('<c', FEAT_BYTE_SIZE)
outData += struct.pack('<I', featD)
self.arkfp.write(outData)
outData = b''
for frameX in range(frameN):
for coeff in feMat[frameX]:
outData += struct.pack('<f', coeff)
self.arkfp.write(outData)
self.arkfp.flush()
class KaldiIntVectorArkWriter(KaldiArkWriter):
"""
Write utterance data as a Kaldi int-vector ark file
"""
def __init__(self):
KaldiArkWriter.__init__(self)
def write(self, uttid2feats, uttids=None):
if uttids is None:
uttids = list(uttid2feats.keys())
for uttid in uttids:
intVec = uttid2feats[uttid]
# write data header
frameN = len(intVec)
outData = b''
for c in uttid + ' ':
outData += struct.pack('<c', c.encode())
outData += struct.pack('<c', NULLC)
for c in BIV_SYM.decode():
outData +=struct.pack('<c', c.encode())
outData += struct.pack('<c', FEAT_BYTE_SIZE)
outData += struct.pack('<I', frameN)
self.arkfp.write(outData)
# write actual vector data
outData = b''
for coeff in intVec:
outData += struct.pack('<c', FEAT_BYTE_SIZE)
outData += struct.pack('<i', coeff)
self.arkfp.write(outData)
self.arkfp.flush()
def correct_chunk_size(numSamples, riff_header):
"""
Correct the data length in header information; see http://soundfile.sapp.org/doc/WaveFormat/ for details
"""
bytesPerSample = struct.unpack( '<h', riff_header[34:36] )[0] // 8
dataLength = numSamples * bytesPerSample
totalChunkSize = 36 + dataLength
return (riff_header[0:4] + struct.pack('<L', totalChunkSize) + riff_header[8:40] + struct.pack('<L', dataLength) + riff_header[44:])
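# Example (illustrative): 16000 samples of 16-bit mono audio give a dataLength of
# 32000 bytes, so the RIFF chunk-size field is rewritten as 36 + 32000 = 32036.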
class KaldiWavArkWriter(KaldiArkWriter):
"""
Write utterance data as a Kaldi .wav.ark file
"""
def __init__(self):
KaldiArkWriter.__init__(self)
def write(self, uttid2feats, uttid2headers, uttids=None):
if uttids is None:
uttids = list(uttid2feats.keys())
for uttid in uttids:
outData = b''
for c in uttid + ' ':
outData += struct.pack('<c', c.encode())
self.arkfp.write(outData)
samples = uttid2feats[uttid]
# write the corrected header information
uttid2header = correct_chunk_size(len(samples), uttid2headers[uttid])
self.arkfp.write(uttid2header)
outData = b''
for samp in samples:
outData += struct.pack('<h', samp)
self.arkfp.write(outData)
self.arkfp.flush()
def dump_riff_file(self, riff_file, uttid):
"""
Dump the data in a RIFF file into the wav ark file
"""
outData = b''
for c in uttid + ' ':
outData += struct.pack('<c', c.encode())
self.arkfp.write(outData)
with open(riff_file, 'rb') as riffF:
self.arkfp.write(riffF.read())
self.arkfp.flush()
def test():
import argparse
def build_parser():
parser = argparse.ArgumentParser(description='List utterance IDs in the ark file')
parser.add_argument('-t', '--type', default='f', help='Ark file type (i/f/w)')
parser.add_argument('input_ark', help='input ark path')
parser.add_argument('output_ark', help='output ark path')
return parser
parser = build_parser()
args, argv = parser.parse_known_args()
if args.type == 'f':
reader = KaldiFeatArkReader()
writer = KaldiFeatArkWriter()
elif args.type == 'w':
reader = KaldiWavArkReader()
writer = KaldiWavArkWriter()
else:
reader = KaldiIntVectorArkReader()
writer = KaldiIntVectorArkWriter()
reader.open(args.input_ark)
writer.open(args.output_ark)
for uttid2data in reader:
print(('uttid: %s' %list(uttid2data.keys())[0]))
if args.type == 'w':
writer.write(uttid2data, {list(uttid2data.keys())[0]:reader.get_riff_header()})
else:
writer.write(uttid2data)
reader.close()
writer.close()
if __name__ == '__main__':
test()
| 1.820313 | 2 |
tests/lib/result_reporters/pyne_stat_summary_reporter_test.py | Avvir/pyne | 4 | 12797665 | <reponame>Avvir/pyne
from pynetest.expectations import expect
from pynetest.lib.result_reporters.printing_reporter import PrintingReporter
from pynetest.lib.result_reporters.pyne_result_reporters import PyneStatSummaryReporter
from pynetest.lib.pyne_test_blocks import ItBlock, DescribeBlock
from tests.test_helpers.fake_print import StubPrint, printed_text
def test__report_failure__increases_the_failure_count():
reporter = PyneStatSummaryReporter()
it_block = ItBlock(None, None, None)
reporter.report_failure(it_block, it_block, Exception("some exception"), 0)
reporter.report_failure(it_block, it_block, Exception("some exception"), 0)
expect(reporter.stats.failure_count).to_be(2)
def test__report_failure__increases_the_test_run_count():
reporter = PyneStatSummaryReporter()
it_block = ItBlock(None, None, None)
reporter.report_failure(it_block, it_block, Exception("some exception"), 0)
reporter.report_failure(it_block, it_block, Exception("some exception"), 0)
expect(reporter.stats.test_count).to_be(2)
def test__report_failure__sets_overall_failure():
reporter = PyneStatSummaryReporter()
it_block = ItBlock(None, None, None)
reporter.report_failure(it_block, it_block, Exception("some exception"), 0)
expect(reporter.stats.is_failure).to_be(True)
def test__report_failure__increases_the_total_timing():
reporter = PyneStatSummaryReporter()
it_block = ItBlock(None, None, None)
reporter.report_failure(it_block, it_block, Exception("some exception"), 1000)
reporter.report_failure(it_block, it_block, Exception("some exception"), 20)
expect(reporter.stats.total_timing_millis).to_be(1020)
def test__report_success__increases_the_test_run_count():
reporter = PyneStatSummaryReporter()
it_block = ItBlock(None, None, None)
reporter.report_success(it_block, 0)
reporter.report_success(it_block, 0)
expect(reporter.stats.test_count).to_be(2)
def test__report_success__increases_the_passes_count():
reporter = PyneStatSummaryReporter()
it_block = ItBlock(None, None, None)
reporter.report_success(it_block, 0)
reporter.report_success(it_block, 0)
expect(reporter.stats.pass_count).to_be(2)
def test__report_success__increases_the_total_timing():
reporter = PyneStatSummaryReporter()
it_block = ItBlock(None, None, None)
reporter.report_success(it_block, 10)
reporter.report_success(it_block, 300)
expect(reporter.stats.total_timing_millis).to_be(310)
def test__report_pending__increases_the_test_run_count():
reporter = PyneStatSummaryReporter()
it_block = ItBlock(None, None, None)
reporter.report_pending(it_block)
reporter.report_pending(it_block)
expect(reporter.stats.test_count).to_be(2)
def test__report_enter_context__increases_depth():
reporter = PyneStatSummaryReporter()
describe_block = DescribeBlock(None, None, None)
reporter.report_enter_context(describe_block)
expect(reporter.depth).to_be(1)
reporter.report_enter_context(describe_block)
expect(reporter.depth).to_be(2)
def test__report_exit_context__decreases_depth():
reporter = PyneStatSummaryReporter()
describe_block = DescribeBlock(None, None, None)
reporter.report_enter_context(describe_block)
reporter.report_enter_context(describe_block)
reporter.report_exit_context(describe_block)
expect(reporter.depth).to_be(1)
reporter.report_exit_context(describe_block)
expect(reporter.depth).to_be(0)
def test__report_end_result__when_a_test_has_failed__it_prints_stats():
with StubPrint():
reporter = PrintingReporter(PyneStatSummaryReporter())
it_block = ItBlock(None, None, None)
reporter.report_failure(it_block, it_block, Exception("some exception"), 1000)
reporter.report_success(it_block, 500)
reporter.report_success(it_block, 500)
printed_text.clear()
reporter.report_end_result()
expect(printed_text[0]).to_contain("1 failed, 2 passed in 2.00 seconds")
def test__report_end_result__when_all_tests_passed__it_prints_stats():
with StubPrint():
reporter = PrintingReporter(PyneStatSummaryReporter())
it_block = ItBlock(None, None, None)
reporter.report_success(it_block, 1000)
reporter.report_success(it_block, 500)
printed_text.clear()
reporter.report_end_result()
expect(printed_text[0]).to_contain("2 passed in 1.50 seconds")
def test__report_end_result__test_is_pending__reports_stats():
with StubPrint():
reporter = PrintingReporter(PyneStatSummaryReporter())
passing_it_block = ItBlock(None, None, None)
pending_it_block = ItBlock(None, None, None, pending=True)
reporter.report_success(passing_it_block, 1000)
reporter.report_pending(pending_it_block)
printed_text.clear()
reporter.report_end_result()
expect(printed_text[0]).to_contain("1 passed, 1 pending in 1.00 seconds")
def test__report_end_result__when_no_tests_run__reports_stats():
with StubPrint():
reporter = PrintingReporter(PyneStatSummaryReporter())
printed_text.clear()
reporter.report_end_result()
expect(printed_text[0]).to_contain("Ran 0 tests")
def test__reset__sets_stats_to_0():
describe_block = DescribeBlock(None, None, None)
it_block = ItBlock(None, None, None)
reporter = PyneStatSummaryReporter()
reporter.report_enter_context(describe_block)
reporter.report_enter_context(describe_block)
reporter.report_success(it_block, 1000)
reporter.report_failure(it_block, it_block, Exception("some exception"), 1000)
reporter.report_failure(it_block, it_block, Exception("some exception"), 1000)
reporter.reset()
expect(reporter.stats.pass_count).to_be(0)
expect(reporter.stats.is_failure).to_be(False)
expect(reporter.stats.total_timing_millis).to_be(0)
expect(reporter.stats.failure_count).to_be(0)
expect(reporter.stats.test_count).to_be(0)
expect(reporter.depth).to_be(0)
| 2.375 | 2 |
raypipe/core/rpipe/callback.py | billyyann/RayPipe | 0 | 12797666 | <reponame>billyyann/RayPipe<filename>raypipe/core/rpipe/callback.py
import os
from tensorflow.python.keras.callbacks import Callback, ModelCheckpoint
import ray.train as train
import zipfile
from raypipe import logger
from raypipe.core.distribution import object_store_impl
from raypipe.core.distribution.object_store import ObjectPath
from raypipe.core.rpipe.utils import zip_dir
class TrainReportCallback(Callback):
def on_epoch_end(self, epoch:int, logs=None):
train.report(**logs)
class DistModelSaveCallBack(ModelCheckpoint):
def __init__(self,save_freq:int,verbose:int):
self.FILE_PATH_template="/tmp/training/cp-{epoch:04d}.ckpt"
super(DistModelSaveCallBack, self).__init__(
filepath=self.FILE_PATH_template,
save_freq=save_freq,
verbose=verbose
)
@staticmethod
def _upload(file_path):
path_segments=file_path.split("/")
object_path=ObjectPath()
object_path.create("/".join(path_segments[:-1]),path_segments[-1])
logger.info("=========== Object Path %s =========== "%object_path.get_path())
zip_dir(file_path,file_path+".zip")
with open(file_path+".zip","rb") as f:
object_store_impl.upload(object_path,f.read())
def on_train_batch_end(self, batch, logs=None):
if self._should_save_on_batch(batch):
self._save_model(epoch=self._current_epoch, logs=logs)
file_path = self._get_file_path(self._current_epoch, logs)
#todo distributed saving
self._upload(file_path)
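# Minimal usage sketch (illustrative; the model and dataset names are placeholders,
# not part of raypipe). Inside a Ray Train worker, the callbacks above are passed
# straight to Keras:
#
#     model.fit(
#         train_dataset,
#         epochs=10,
#         callbacks=[TrainReportCallback(),
#                    DistModelSaveCallBack(save_freq=1000, verbose=1)],
#     )
#
# TrainReportCallback forwards per-epoch metrics to ray.train.report(), while
# DistModelSaveCallBack periodically checkpoints, zips and uploads the weights to
# the configured object store.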
| 2.140625 | 2 |
setup.py | jonspock/vdf-competition | 97 | 12797667 | <filename>setup.py
#!/usr/bin/env python
from setuptools import setup
from inkfish.version import version
setup(
name="inkfish",
version=version,
packages=[
"inkfish",
],
author="<NAME>, Inc.",
entry_points={
'console_scripts':
[
'pot = inkfish.cmds:pot',
]
},
author_email="<EMAIL>",
url="https://github.com/Chia-Network",
license="https://opensource.org/licenses/Apache-2.0",
description="Verifiable delay function (proof of time) developed by Chia" +
"Networks.",
classifiers=[
'Development Status :: 3 - Alpha',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'License :: OSI Approved :: Apache Software License',
'Topic :: Security :: Cryptography',
],)
| 1.25 | 1 |
tests/test_git_history.py | mthurman/git-history | 92 | 12797668 | from click.testing import CliRunner
from git_history.cli import cli
from git_history.utils import RESERVED
import itertools
import json
import pytest
import subprocess
import sqlite_utils
import textwrap
git_commit = [
"git",
"-c",
"user.name='Tests'",
"-c",
"user.email='<EMAIL>'",
"commit",
]
def make_repo(tmpdir):
repo_dir = tmpdir / "repo"
repo_dir.mkdir()
# This one is used for the README generated by Cog:
(repo_dir / "incidents.json").write_text(
json.dumps(
[
{
"IncidentID": "abc123",
"Location": "Corner of 4th and Vermont",
"Type": "fire",
},
{
"IncidentID": "cde448",
"Location": "555 West Example Drive",
"Type": "medical",
},
]
),
"utf-8",
)
(repo_dir / "items.json").write_text(
json.dumps(
[
{
"product_id": 1,
"name": "Gin",
},
{
"product_id": 2,
"name": "Tonic",
},
]
),
"utf-8",
)
(repo_dir / "items-with-reserved-columns.json").write_text(
json.dumps(
[
{
"_id": 1,
"_item": "Gin",
"_version": "v1",
"_commit": "commit1",
"rowid": 5,
},
{
"_id": 2,
"_item": "Tonic",
"_version": "v1",
"_commit": "commit1",
"rowid": 6,
},
]
),
"utf-8",
)
(repo_dir / "items-with-banned-columns.json").write_text(
json.dumps(
[
{
"_id_": 1,
"_version_": "Gin",
}
]
),
"utf-8",
)
(repo_dir / "trees.csv").write_text(
"TreeID,name\n1,Sophia\n2,Charlie",
"utf-8",
)
(repo_dir / "trees.tsv").write_text(
"TreeID\tname\n1\tSophia\n2\tCharlie",
"utf-8",
)
(repo_dir / "increment.txt").write_text("1", "utf-8")
subprocess.call(["git", "init"], cwd=str(repo_dir))
subprocess.call(
[
"git",
"add",
"incidents.json",
"items.json",
"items-with-reserved-columns.json",
"items-with-banned-columns.json",
"trees.csv",
"trees.tsv",
"increment.txt",
],
cwd=str(repo_dir),
)
subprocess.call(git_commit + ["-m", "first"], cwd=str(repo_dir))
subprocess.call(["git", "branch", "-m", "main"], cwd=str(repo_dir))
(repo_dir / "items.json").write_text(
json.dumps(
[
{
"product_id": 1,
"name": "Gin",
},
{
"product_id": 2,
"name": "Tonic 2",
},
{
"product_id": 3,
"name": "Rum",
},
]
),
"utf-8",
)
(repo_dir / "items-with-reserved-columns.json").write_text(
json.dumps(
[
{
"_id": 1,
"_item": "Gin",
"_version": "v1",
"_commit": "commit1",
"rowid": 5,
},
{
"_id": 2,
"_item": "Tonic 2",
"_version": "v1",
"_commit": "commit1",
"rowid": 6,
},
{
"_id": 3,
"_item": "Rum",
"_version": "v1",
"_commit": "commit1",
"rowid": 7,
},
]
),
"utf-8",
)
subprocess.call(git_commit + ["-m", "second", "-a"], cwd=str(repo_dir))
# Three more commits to test --skip
for i in range(2, 4):
(repo_dir / "increment.txt").write_text(str(i), "utf-8")
subprocess.call(
git_commit + ["-m", "increment {}".format(i), "-a"], cwd=str(repo_dir)
)
return repo_dir
@pytest.fixture
def repo(tmpdir):
return make_repo(tmpdir)
def expected_create_view(namespace):
return textwrap.dedent(
"""
CREATE VIEW {namespace}_version_detail AS select
commits.commit_at as _commit_at,
commits.hash as _commit_hash,
{namespace}_version.*,
(
select json_group_array(name) from columns
where id in (
select column from {namespace}_changed
where item_version = {namespace}_version._id
)
) as _changed_columns
from {namespace}_version
join commits on commits.id = {namespace}_version._commit;
""".format(
namespace=namespace
)
).strip()
@pytest.mark.parametrize("namespace", (None, "custom"))
def test_file_without_id(repo, tmpdir, namespace):
runner = CliRunner()
db_path = str(tmpdir / "db.db")
with runner.isolated_filesystem():
options = ["file", db_path, str(repo / "items.json"), "--repo", str(repo)]
if namespace:
options += ["--namespace", namespace]
result = runner.invoke(cli, options)
assert result.exit_code == 0
db = sqlite_utils.Database(db_path)
assert db.schema == (
"CREATE TABLE [namespaces] (\n"
" [id] INTEGER PRIMARY KEY,\n"
" [name] TEXT\n"
");\n"
"CREATE UNIQUE INDEX [idx_namespaces_name]\n"
" ON [namespaces] ([name]);\n"
"CREATE TABLE [commits] (\n"
" [id] INTEGER PRIMARY KEY,\n"
" [namespace] INTEGER REFERENCES [namespaces]([id]),\n"
" [hash] TEXT,\n"
" [commit_at] TEXT\n"
");\n"
"CREATE UNIQUE INDEX [idx_commits_namespace_hash]\n"
" ON [commits] ([namespace], [hash]);\n"
"CREATE TABLE [{}] (\n".format(namespace or "item")
+ " [product_id] INTEGER,\n"
" [name] TEXT\n"
");"
)
assert db["commits"].count == 2
# Should have some duplicates
assert [(r["product_id"], r["name"]) for r in db[namespace or "item"].rows] == [
(1, "Gin"),
(2, "Tonic"),
(1, "Gin"),
(2, "Tonic 2"),
(3, "Rum"),
]
@pytest.mark.parametrize("namespace", (None, "custom"))
def test_file_with_id(repo, tmpdir, namespace):
runner = CliRunner()
db_path = str(tmpdir / "db.db")
with runner.isolated_filesystem():
result = runner.invoke(
cli,
[
"file",
db_path,
str(repo / "items.json"),
"--repo",
str(repo),
"--id",
"product_id",
]
+ (["--namespace", namespace] if namespace else []),
)
assert result.exit_code == 0
db = sqlite_utils.Database(db_path)
item_table = namespace or "item"
version_table = "{}_version".format(item_table)
assert db.schema == """
CREATE TABLE [namespaces] (
[id] INTEGER PRIMARY KEY,
[name] TEXT
);
CREATE UNIQUE INDEX [idx_namespaces_name]
ON [namespaces] ([name]);
CREATE TABLE [commits] (
[id] INTEGER PRIMARY KEY,
[namespace] INTEGER REFERENCES [namespaces]([id]),
[hash] TEXT,
[commit_at] TEXT
);
CREATE UNIQUE INDEX [idx_commits_namespace_hash]
ON [commits] ([namespace], [hash]);
CREATE TABLE [{namespace}] (
[_id] INTEGER PRIMARY KEY,
[_item_id] TEXT
, [product_id] INTEGER, [name] TEXT, [_commit] INTEGER);
CREATE UNIQUE INDEX [idx_{namespace}__item_id]
ON [{namespace}] ([_item_id]);
CREATE TABLE [{namespace}_version] (
[_id] INTEGER PRIMARY KEY,
[_item] INTEGER REFERENCES [{namespace}]([_id]),
[_version] INTEGER,
[_commit] INTEGER REFERENCES [commits]([id]),
[product_id] INTEGER,
[name] TEXT,
[_item_full_hash] TEXT
);
CREATE TABLE [columns] (
[id] INTEGER PRIMARY KEY,
[namespace] INTEGER REFERENCES [namespaces]([id]),
[name] TEXT
);
CREATE UNIQUE INDEX [idx_columns_namespace_name]
ON [columns] ([namespace], [name]);
CREATE TABLE [{namespace}_changed] (
[item_version] INTEGER REFERENCES [{namespace}_version]([_id]),
[column] INTEGER REFERENCES [columns]([id]),
PRIMARY KEY ([item_version], [column])
);
{view}
CREATE INDEX [idx_{namespace}_version__item]
ON [{namespace}_version] ([_item]);
""".strip().format(
namespace=namespace or "item",
view=expected_create_view(namespace or "item"),
)
assert db["commits"].count == 2
# Should have no duplicates
item_version = [
r
for r in db.query(
"select product_id, _version, name from {}".format(version_table)
)
]
assert item_version == [
{"product_id": 1, "_version": 1, "name": "Gin"},
{"product_id": 2, "_version": 1, "name": "Tonic"},
# product_id is None because it did not change here
{"product_id": None, "_version": 2, "name": "Tonic 2"},
{"product_id": 3, "_version": 1, "name": "Rum"},
]
changed = list(
db.query(
"""
select
{namespace}.product_id,
{namespace}_version._version as version,
columns.name as column_name
from {namespace}_changed
join columns on {namespace}_changed.column = columns.id
join {namespace}_version on {namespace}_changed.item_version = {namespace}_version._id
join {namespace} on {namespace}._id = {namespace}_version._item
order by {namespace}.product_id, {namespace}_version._version, columns.name
""".format(
namespace=namespace or "item"
)
)
)
assert changed == [
{"product_id": 1, "version": 1, "column_name": "name"},
{"product_id": 1, "version": 1, "column_name": "product_id"},
{"product_id": 2, "version": 1, "column_name": "name"},
{"product_id": 2, "version": 1, "column_name": "product_id"},
{"product_id": 2, "version": 2, "column_name": "name"},
{"product_id": 3, "version": 1, "column_name": "name"},
{"product_id": 3, "version": 1, "column_name": "product_id"},
]
# Test the view
view_rows = list(
db.query(
"select _version, product_id, name, _changed_columns from {}_version_detail".format(
namespace or "item"
)
)
)
# Sort order of _changed_columns JSON is undefined, so fix that
for row in view_rows:
row["_changed_columns"] = list(sorted(json.loads(row["_changed_columns"])))
assert view_rows == [
{
"_version": 1,
"product_id": 1,
"name": "Gin",
"_changed_columns": ["name", "product_id"],
},
{
"_version": 1,
"product_id": 2,
"name": "Tonic",
"_changed_columns": ["name", "product_id"],
},
{
"_version": 2,
"product_id": None,
"name": "Tonic 2",
"_changed_columns": ["name"],
},
{
"_version": 1,
"product_id": 3,
"name": "Rum",
"_changed_columns": ["name", "product_id"],
},
]
@pytest.mark.parametrize("namespace", (None, "custom"))
def test_file_with_id_resume(repo, tmpdir, namespace):
runner = CliRunner()
db_path = str(tmpdir / "db.db")
result = runner.invoke(
cli,
[
"file",
db_path,
str(repo / "items.json"),
"--repo",
str(repo),
"--id",
"product_id",
]
+ (["--namespace", namespace] if namespace else []),
)
namespace = namespace or "item"
assert result.exit_code == 0
db = sqlite_utils.Database(db_path)
item_version = [
r
for r in db.query(
"select product_id, _version, name from {}_version".format(namespace)
)
]
assert item_version == [
{"product_id": 1, "_version": 1, "name": "Gin"},
{"product_id": 2, "_version": 1, "name": "Tonic"},
{"product_id": None, "_version": 2, "name": "Tonic 2"},
{"product_id": 3, "_version": 1, "name": "Rum"},
]
# Now we edit, commit and try again
(repo / "items.json").write_text(
json.dumps(
[
{"product_id": 1, "name": "Gin"},
{"product_id": 2, "name": "Tonic 2"},
# This line has changed from "Rum" to "Rum Pony":
{"product_id": 3, "name": "Rum Pony"},
]
),
"utf-8",
)
subprocess.call(git_commit + ["-a", "-m", "another"], cwd=str(repo))
result2 = runner.invoke(
cli,
[
"file",
db_path,
str(repo / "items.json"),
"--repo",
str(repo),
"--id",
"product_id",
]
+ (["--namespace", namespace] if namespace else []),
catch_exceptions=False,
)
assert result2.exit_code == 0
item_version2 = [
r
for r in db.query(
"select _item, product_id, _version, name from {}_version order by _item, _version".format(
namespace
)
)
]
assert item_version2 == [
{"_item": 1, "product_id": 1, "_version": 1, "name": "Gin"},
{"_item": 2, "product_id": 2, "_version": 1, "name": "Tonic"},
{"_item": 2, "product_id": None, "_version": 2, "name": "Tonic 2"},
{"_item": 3, "product_id": 3, "_version": 1, "name": "Rum"},
{"_item": 3, "product_id": None, "_version": 2, "name": "Rum Pony"},
]
def test_file_with_id_resume_two_namespaces(repo, tmpdir):
# https://github.com/simonw/git-history/issues/43
runner = CliRunner()
db_path = str(tmpdir / "db.db")
def run(namespace):
result = runner.invoke(
cli,
[
"file",
db_path,
str(repo / "items.json"),
"--repo",
str(repo),
"--id",
"product_id",
"--namespace",
namespace,
],
)
assert result.exit_code == 0
for namespace in ("one", "two"):
run(namespace)
# Now modify items.json, commit and run again
(repo / "items.json").write_text(
json.dumps(
[
{"product_id": 1, "name": "Gin"},
{"product_id": 2, "name": "Tonic 2"},
{"product_id": 3, "name": "Rum Pony"},
]
),
"utf-8",
)
subprocess.call(git_commit + ["-a", "-m", "another"], cwd=str(repo))
for namespace in ("one", "two"):
run(namespace)
db = sqlite_utils.Database(db_path)
assert set(db.table_names()) == {
"namespaces",
"commits",
"one",
"one_version",
"columns",
"one_changed",
"two",
"two_version",
"two_changed",
}
# Should be five versions: Gin, Tonic -> Tonic 2, Rum -> Rum Pony
assert db["one_version"].count == 5
assert db["two_version"].count == 5
@pytest.mark.parametrize("namespace", (None, "custom"))
def test_file_with_id_full_versions(repo, tmpdir, namespace):
runner = CliRunner()
db_path = str(tmpdir / "db.db")
with runner.isolated_filesystem():
result = runner.invoke(
cli,
[
"file",
db_path,
str(repo / "items.json"),
"--repo",
str(repo),
"--id",
"product_id",
"--full-versions",
]
+ (["--namespace", namespace] if namespace else []),
)
assert result.exit_code == 0
db = sqlite_utils.Database(db_path)
item_table = namespace or "item"
version_table = "{}_version".format(item_table)
assert db.schema == (
"CREATE TABLE [namespaces] (\n"
" [id] INTEGER PRIMARY KEY,\n"
" [name] TEXT\n"
");\n"
"CREATE UNIQUE INDEX [idx_namespaces_name]\n"
" ON [namespaces] ([name]);\n"
"CREATE TABLE [commits] (\n"
" [id] INTEGER PRIMARY KEY,\n"
" [namespace] INTEGER REFERENCES [namespaces]([id]),\n"
" [hash] TEXT,\n"
" [commit_at] TEXT\n"
");\n"
"CREATE UNIQUE INDEX [idx_commits_namespace_hash]\n"
" ON [commits] ([namespace], [hash]);\n"
"CREATE TABLE [{}] (\n".format(item_table) + " [_id] INTEGER PRIMARY KEY,\n"
" [_item_id] TEXT\n"
", [product_id] INTEGER, [name] TEXT, [_commit] INTEGER);\n"
"CREATE UNIQUE INDEX [idx_{}__item_id]\n".format(item_table)
+ " ON [{}] ([_item_id]);\n".format(item_table)
+ "CREATE TABLE [{}] (\n".format(version_table)
+ " [_id] INTEGER PRIMARY KEY,\n"
" [_item] INTEGER REFERENCES [{}]([_id]),\n".format(item_table)
+ " [_version] INTEGER,\n"
" [_commit] INTEGER REFERENCES [commits]([id]),\n"
" [product_id] INTEGER,\n"
" [name] TEXT\n"
");\n"
+ expected_create_view(namespace or "item")
+ "\nCREATE INDEX [idx_{}__item]\n".format(version_table)
+ " ON [{}] ([_item]);".format(version_table)
)
assert db["commits"].count == 2
# Should have no duplicates
item_version = [
r
for r in db.query(
"select product_id, _version, name from {}".format(version_table)
)
]
assert item_version == [
{"product_id": 1, "_version": 1, "name": "Gin"},
{"product_id": 2, "_version": 1, "name": "Tonic"},
{"product_id": 2, "_version": 2, "name": "Tonic 2"},
{"product_id": 3, "_version": 1, "name": "Rum"},
]
def test_file_with_reserved_columns(repo, tmpdir):
runner = CliRunner()
db_path = str(tmpdir / "reserved.db")
with runner.isolated_filesystem():
result = runner.invoke(
cli,
[
"file",
db_path,
str(repo / "items-with-reserved-columns.json"),
"--repo",
str(repo),
"--id",
"_id",
"--full-versions",
],
catch_exceptions=False,
)
assert result.exit_code == 0
db = sqlite_utils.Database(db_path)
expected_schema = (
textwrap.dedent(
"""
CREATE TABLE [namespaces] (
[id] INTEGER PRIMARY KEY,
[name] TEXT
);
CREATE UNIQUE INDEX [idx_namespaces_name]
ON [namespaces] ([name]);
CREATE TABLE [commits] (
[id] INTEGER PRIMARY KEY,
[namespace] INTEGER REFERENCES [namespaces]([id]),
[hash] TEXT,
[commit_at] TEXT
);
CREATE UNIQUE INDEX [idx_commits_namespace_hash]
ON [commits] ([namespace], [hash]);
CREATE TABLE [item] (
[_id] INTEGER PRIMARY KEY,
[_item_id] TEXT
, [_id_] INTEGER, [_item_] TEXT, [_version_] TEXT, [_commit_] TEXT, [rowid_] INTEGER, [_commit] INTEGER);
CREATE UNIQUE INDEX [idx_item__item_id]
ON [item] ([_item_id]);
CREATE TABLE [item_version] (
[_id] INTEGER PRIMARY KEY,
[_item] INTEGER REFERENCES [item]([_id]),
[_version] INTEGER,
[_commit] INTEGER REFERENCES [commits]([id]),
[_id_] INTEGER,
[_item_] TEXT,
[_version_] TEXT,
[_commit_] TEXT,
[rowid_] INTEGER
);"""
)
+ "\n"
+ expected_create_view("item")
+ "\nCREATE INDEX [idx_item_version__item]\n"
" ON [item_version] ([_item]);"
).strip()
assert db.schema == expected_schema
item_version = [
r
for r in db.query(
"select _id_, _item_, _version_, _commit_, rowid_ from item_version"
)
]
assert item_version == [
{
"_id_": 1,
"_item_": "Gin",
"_version_": "v1",
"_commit_": "commit1",
"rowid_": 5,
},
{
"_id_": 2,
"_item_": "Tonic",
"_version_": "v1",
"_commit_": "commit1",
"rowid_": 6,
},
{
"_id_": 2,
"_item_": "Tonic 2",
"_version_": "v1",
"_commit_": "commit1",
"rowid_": 6,
},
{
"_id_": 3,
"_item_": "Rum",
"_version_": "v1",
"_commit_": "commit1",
"rowid_": 7,
},
]
@pytest.mark.parametrize("file", ("trees.csv", "trees.tsv"))
def test_csv_tsv(repo, tmpdir, file):
runner = CliRunner()
db_path = str(tmpdir / "db.db")
with runner.isolated_filesystem():
result = runner.invoke(
cli,
[
"file",
db_path,
str(repo / file),
"--repo",
str(repo),
"--id",
"TreeID",
"--csv",
"--full-versions",
],
catch_exceptions=False,
)
assert result.exit_code == 0
db = sqlite_utils.Database(db_path)
assert (
db.schema
== textwrap.dedent(
"""
CREATE TABLE [namespaces] (
[id] INTEGER PRIMARY KEY,
[name] TEXT
);
CREATE UNIQUE INDEX [idx_namespaces_name]
ON [namespaces] ([name]);
CREATE TABLE [commits] (
[id] INTEGER PRIMARY KEY,
[namespace] INTEGER REFERENCES [namespaces]([id]),
[hash] TEXT,
[commit_at] TEXT
);
CREATE UNIQUE INDEX [idx_commits_namespace_hash]
ON [commits] ([namespace], [hash]);
CREATE TABLE [item] (
[_id] INTEGER PRIMARY KEY,
[_item_id] TEXT
, [TreeID] TEXT, [name] TEXT, [_commit] INTEGER);
CREATE UNIQUE INDEX [idx_item__item_id]
ON [item] ([_item_id]);
CREATE TABLE [item_version] (
[_id] INTEGER PRIMARY KEY,
[_item] INTEGER REFERENCES [item]([_id]),
[_version] INTEGER,
[_commit] INTEGER REFERENCES [commits]([id]),
[TreeID] TEXT,
[name] TEXT
);"""
).strip()
+ "\n"
+ expected_create_view("item")
+ "\nCREATE INDEX [idx_item_version__item]\n"
" ON [item_version] ([_item]);"
)
@pytest.mark.parametrize(
"dialect,expected_schema",
(
("excel", "CREATE TABLE [item] (\n [TreeID] TEXT,\n [name] TEXT\n)"),
("excel-tab", "CREATE TABLE [item] (\n [TreeID,name] TEXT\n)"),
),
)
def test_csv_dialect(repo, tmpdir, dialect, expected_schema):
runner = CliRunner()
db_path = str(tmpdir / "db.db")
with runner.isolated_filesystem():
result = runner.invoke(
cli,
[
"file",
db_path,
str(repo / "trees.csv"),
"--repo",
str(repo),
"--dialect",
dialect,
],
catch_exceptions=False,
)
assert result.exit_code == 0
db = sqlite_utils.Database(db_path)
assert db["item"].schema == expected_schema
@pytest.mark.parametrize(
"convert,expected_rows",
(
(
"json.loads(content.upper())",
[
{"PRODUCT_ID": 1, "NAME": "GIN"},
{"PRODUCT_ID": 2, "NAME": "TONIC"},
{"PRODUCT_ID": 1, "NAME": "GIN"},
{"PRODUCT_ID": 2, "NAME": "TONIC 2"},
{"PRODUCT_ID": 3, "NAME": "RUM"},
],
),
# Generator
(
(
"data = json.loads(content)\n"
"for item in data:\n"
' yield {"just_name": item["name"]}'
),
[
{"just_name": "Gin"},
{"just_name": "Tonic"},
{"just_name": "Gin"},
{"just_name": "<NAME>"},
{"just_name": "Rum"},
],
),
),
)
def test_convert(repo, tmpdir, convert, expected_rows):
runner = CliRunner()
db_path = str(tmpdir / "db.db")
with runner.isolated_filesystem():
result = runner.invoke(
cli,
[
"file",
db_path,
str(repo / "items.json"),
"--repo",
str(repo),
"--convert",
convert,
],
catch_exceptions=False,
)
assert result.exit_code == 0
db = sqlite_utils.Database(db_path)
rows = [{k: v for k, v in r.items() if k != "_commit"} for r in db["item"].rows]
assert rows == expected_rows
def test_convert_xml(repo, tmpdir):
runner = CliRunner()
(repo / "items.xml").write_text(
"""
<items>
<item name="one" value="1" />
<item name="two" value="2" />
</items>
""",
"utf-8",
)
subprocess.call(["git", "add", "items.xml"], cwd=str(repo))
subprocess.call(git_commit + ["-m", "items.xml"], cwd=str(repo))
db_path = str(tmpdir / "db.db")
result = runner.invoke(
cli,
[
"file",
db_path,
str(repo / "items.xml"),
"--repo",
str(repo),
"--convert",
textwrap.dedent(
"""
tree = xml.etree.ElementTree.fromstring(content)
return [el.attrib for el in tree.iter("item")]
"""
),
"--import",
"xml.etree.ElementTree",
],
catch_exceptions=False,
)
assert result.exit_code == 0
db = sqlite_utils.Database(db_path)
assert list(db["item"].rows) == [
{"name": "one", "value": "1"},
{"name": "two", "value": "2"},
]
@pytest.mark.parametrize(
"options,expected_texts",
(
([], ["1", "2", "3"]),
(["--skip", 0], ["2", "3"]),
(["--skip", 0, "--skip", 2], ["3"]),
(["--start-at", 2], ["2", "3"]),
(["--start-after", 2], ["3"]),
(["--start-at", 3], ["3"]),
(["--start-after", 3], []),
),
)
def test_skip_options(repo, tmpdir, options, expected_texts):
runner = CliRunner()
commits = list(
reversed(
subprocess.check_output(["git", "log", "--pretty=format:%H"], cwd=str(repo))
.decode("utf-8")
.split("\n")
)
)
assert len(commits) == 4
# Rewrite options to replace integers with the corresponding commit hash
options = [commits[item] if isinstance(item, int) else item for item in options]
db_path = str(tmpdir / "db.db")
result = runner.invoke(
cli,
[
"file",
db_path,
str(repo / "increment.txt"),
"--repo",
str(repo),
"--convert",
'[{"id": 1, "text": content.decode("utf-8")}]',
"--id",
"id",
]
+ options,
catch_exceptions=False,
)
assert result.exit_code == 0
db = sqlite_utils.Database(db_path)
actual_text = [r["text"] for r in db["item_version"].rows]
assert actual_text == expected_texts
def test_reserved_columns_are_reserved(tmpdir, repo):
runner = CliRunner()
db_path = str(tmpdir / "db.db")
runner.invoke(
cli,
[
"file",
db_path,
str(repo / "items.json"),
"--repo",
str(repo),
"--id",
"product_id",
],
)
# Find all columns with _ prefixes and no suffix
db = sqlite_utils.Database(db_path)
with_prefix = {"rowid"}
for table in itertools.chain(db.tables, db.views):
for column in table.columns_dict:
if column.startswith("_") and not column.endswith("_"):
with_prefix.add(column)
assert with_prefix == set(RESERVED)
@pytest.mark.parametrize("use_wal", (True, False))
def test_wal(repo, tmpdir, use_wal):
runner = CliRunner()
db_path = str(tmpdir / "db.db")
options = [
"file",
db_path,
str(repo / "items.json"),
"--repo",
str(repo),
]
if use_wal:
options.append("--wal")
result = runner.invoke(
cli,
options,
catch_exceptions=False,
)
assert result.exit_code == 0
db = sqlite_utils.Database(db_path)
expected_journal_mode = "wal" if use_wal else "delete"
assert db.journal_mode == expected_journal_mode
| 2.015625 | 2 |
Config/HEADERS.py | solider245/Requests_Html_Spider | 10 | 12797669 | <reponame>solider245/Requests_Html_Spider
#!/usr/bin/env python
# encoding: utf-8
"""
@version: v1.0
@author: W_H_J
@license: Apache Licence
@contact: <EMAIL>
@software: PyCharm
@file: HEADERS.py
@time: 2018/9/26 17:31
@describe: 请求头集合--爬虫请求头信息在此配置
'User-Agent': '%s' % UserAgent.pc_agent() 启用轮换浏览器请求头
"""
import os
import sys
sys.path.append(r'your_path')
sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/' + '..'))
sys.path.append("..")
from BaseFile.UserAgent import UserAgent
HEADERS = {
    # configuration example
"heasers": {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'Cookie': 'GA1.2.151205434.1528702564; user_trace_token=<PASSWORD>1<PASSWORD>; LGUID=20180611153613-1e11da71-6d4a-11e8-9446-5254005c3644; JSESSIONID=ABAAABAAAGFABEFA887FF2126C2345351E1CF33022A085A; _gid=GA1.2.295504001.1536894927; LGSID=20180914111529-6ee84ad5-b7cc-11e8-b939-5254005c3644; Hm_lvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1536894927; index_location_city=%E5%85%A8%E5%9B%BD; TG-TRACK-CODE=index_navigation; SEARCH_ID=f8b502632588469da5ea73ee9dd382a5; _gat=1; Hm_lpvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1536897145; LGRID=20180914115228-993585b5-b7d1-11e8-b939-5254005c3644',
'Host': 'www.lagou.com',
# 'Referer': 'https://www.lagou.com/zhaopin/Java/?labelWords=label',
'Upgrade-Insecure-Requests': '1',
'User-Agent': '%s' % UserAgent.pc_agent()},
    # Jianshu (jianshu.com)
"headersJianShun": {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'Host': 'www.jianshu.com',
'Upgrade-Insecure-Requests': '1',
'User-Agent': '%s' % UserAgent.pc_agent()},
}
if __name__ == '__main__':
    print(HEADERS['headers'])
| 1.84375 | 2 |
vcflat/tests/test_HeaderExtraction/test_pop_header.py | arontommi/VCFlat | 4 | 12797670 | <reponame>arontommi/VCFlat
import os
from vcflat.HeaderExtraction import VcfHeader, pop_header
def get_input():
test_data_dir = os.path.join(os.path.dirname(__file__), "..", "test_data")
i = os.path.join(test_data_dir, "test.snpeff.vcf")
return i
def get_no_chrom():
i = get_input()
vch = VcfHeader(i)
output = vch.get_raw_header()
output_no_chrom = pop_header(output)
return output_no_chrom
def test_1():
""" checks that output is list """
assert type(get_no_chrom()) is list
def test_2():
""" checks that output is not empty """
assert len(get_no_chrom()) != 0
def test_3():
""" checks that the header has been poped """
header = VcfHeader(get_input()).extract_header()
assert header not in get_no_chrom()
| 2.40625 | 2 |
learn_cedev/book/models.py | atthana/django_cedev | 0 | 12797671 | <gh_stars>0
from django.db import models
from django.utils.html import format_html
BOOK_LEVEL_CHOICE = (  # displays the label but stores the key, e.g. shows "Basic" but saves "B"
('B', 'Basic'),
('M', 'Medium'),
('A', 'Advance'),
)
class Category(models.Model):
name = models.CharField(max_length=100)
class Meta:
verbose_name_plural = "Category"
def __str__(self):
return self.name
class Author(models.Model):
name = models.CharField(max_length=100)
class Meta:
verbose_name_plural = "Author"
def __str__(self):
return self.name
class Book(models.Model):
code = models.CharField(max_length=10, unique=True)
slug = models.SlugField(max_length=200, unique=True)
name = models.CharField(max_length=100)
description = models.TextField(null=True, blank=True)
price = models.FloatField(default=0)
category = models.ForeignKey(Category, null=True, blank=True, on_delete=models.CASCADE)
author = models.ManyToManyField(Author, blank=True)
    level = models.CharField(max_length=5, null=True, blank=True, choices=BOOK_LEVEL_CHOICE)  # uses the choices defined above
    image = models.FileField(upload_to='upload', null=True, blank=True)  # images are stored in the "upload" folder automatically
published = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
class Meta:
        ordering = ['-code']  # a leading minus sorts from high to low (descending)
verbose_name_plural = "Book"
def __str__(self):
return self.name
def show_image(self):
if self.image:
return format_html('<img src="' + self.image.url + '" height="50px">')
        return ''  # show the image if there is one, otherwise return an empty string
    show_image.allow_tags = True  # allow tags so the HTML above is rendered instead of escaped
def get_comment_count(self):
return self.bookcomment_set.count()
class BookComment(models.Model):
book = models.ForeignKey(Book, on_delete=models.CASCADE)
comment = models.CharField(max_length=100)
rating = models.IntegerField(default=0)
class Meta:
        ordering = ['id']  # sort by id ascending (id is not declared in the model; Django adds it automatically)
verbose_name_plural = "Book Comment"
def __str__(self):
return self.comment
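# Minimal usage sketch (illustrative only; assumes a configured Django project).
def example_book_queries():
    """Illustrative helper, not part of the original app."""
    basics = Book.objects.filter(level='B', published=True)
    for book in basics.select_related('category').prefetch_related('author'):
        print(book.name, book.get_comment_count())
    return basics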
| 2.40625 | 2 |
generate_dataset.py | birnbaum/racist-comment-generator | 2 | 12797672 | import mysql.connector
import progressbar
import argparse
import yaml
import re
import collections

# `@profile` is injected by line_profiler's kernprof at runtime; fall back to a
# no-op decorator so the module can also be imported and run without it.
try:
    profile
except NameError:
    def profile(func):
        return func
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--out', type=str, default='data/racist/racist.txt',
help='text file where the data is written to')
args = parser.parse_args()
with open('config.yml', 'r') as c:
config = yaml.load(c)
generate_dataset(args, config)
def iter_row(cursor, size=1000):
while True:
rows = cursor.fetchmany(size)
if not rows:
break
for row in rows:
yield row
def generate_dataset(args, config):
cnx = mysql.connector.connect(user=config["database"]["user"],
password=config["database"]["password"],
host=config["database"]["host"],
database=config["database"]["db"])
cursor = cnx.cursor()
cursor.execute('SELECT count(*) FROM comment')
count = cursor.fetchone()[0]
bar = progressbar.ProgressBar(max_value=count)
# This query groups comments by posts and places subcomments after their parent comments to
# have as much context between the comments as possible. Everything is sorted ASC by date.
print('Executing SQL query...')
cursor.execute('''
# Parent comments
SELECT p.message,
user.name,
post.created_time as post_created_time,
p.created_time as comment_created_time,
Null as subcomment_created_time
FROM comment p
JOIN user ON user.id = p.user
JOIN post ON post.id = p.post
WHERE p.parent_comment IS NULL
UNION
# Child comments
SELECT c.message,
user.name,
post.created_time as post_created_time,
p.created_time as comment_created_time,
c.created_time as subcomment_created_time
FROM comment c
JOIN user ON user.id = c.user
JOIN comment p on p.id = c.parent_comment
JOIN post ON post.id = p.post
ORDER BY post_created_time ASC,
comment_created_time ASC,
subcomment_created_time ASC
LIMIT 300000
''')
print('Done')
ds = Dataset()
# As people tend to reference other people in subcomments, we collect the names of
# all subcomment authors to remove them from the result in the end.
authors = set()
comments = []
for (message, author, post_date, comment_date, subcomment_date) in bar(iter_row(cursor)):
if subcomment_date is None: # is parent
ds.push(comments, authors)
authors = {author}
comments = [message]
else: # is child
authors.add(author)
comments.append(message)
ds.write(args.out)
class Dataset:
def __init__(self):
self.batches = []
self.vocab_counter = collections.Counter()
def write(self, outfile):
"""Writes the dataset to a text file"""
output = self.create_output()
ending = outfile.split('.')[-1]
if ending == 'txt':
with open(outfile, "wb") as f:
f.write(output)
# TODO add bzip
else:
raise ValueError('outfile has to be a .txt file')
@profile
def push(self, comments, authors):
"""Adds a new bathch of comments to the dataset. The set of authors ist used to further clean the comments"""
lines = []
for comment in comments:
lines.extend(comment.replace('\r', '\n').split('\n'))
txt = ''
authors = [re.escape(author) for author in authors]
for line in lines:
line = self.remove_usernames(line, authors)
if 4 < len(line) < 500:
txt += '> {}\n'.format(line)
self.batches.append(txt)
self.vocab_counter.update(txt)
def remove_usernames(self, text, authors):
"""Removing user names that the crawler was not able to filter out because they were not returned in Graph API's message_tags"""
# First remove the old fashined @ tags
if len(text) == 0 or ('@' in text and len(text.split(' ')) <= 3):
return ''
if text[0] == '@':
text = re.sub('@ ?.*?((:|,|\.| {2})| .*?[:,. ])', '', text)
else:
text = re.sub('@', '', text)
# Then the names of all the authors from the comment and it's subcomments because they mainly reference each other
text = re.sub('({})'.format('|'.join(authors)), '', text)
return text.strip()
@profile
def create_output(self):
"""Generates one big cp1252 string"""
output = ''.join(self.batches)
#Remove all characters that appear in less than 0.002% of the cases
threshold = len(output) * 0.00002
chars_to_remove = []
for char, count in reversed(self.vocab_counter.most_common()):
if count < threshold:
chars_to_remove.append(char)
else:
break
output = re.sub('[' + re.escape(''.join(chars_to_remove)) + ']', '', output)
return output.encode("cp1252", errors="ignore")
def merge_lines(self, lines):
"""Cleans and selects qualifying lines and merges them to a string"""
txt = ''
for line in lines:
line = self.clean_tags(line)
if 4 < len(line) < 500:
txt += '> {}\n'.format(line)
return txt
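# Minimal usage sketch (illustrative only; the comments, authors and output path
# below are made up):
def example_dataset_usage():
    ds = Dataset()
    ds.push(["First comment in a thread", "A reply to it"], {"Some Author"})
    ds.push(["Another top-level comment"], {"Another Author"})
    ds.write("data/racist/example.txt")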
if __name__ == '__main__':
main() | 2.71875 | 3 |
backend/logger/migrations/0003_requestlogger_sha1.py | AstroMatt/esa-subjective-time-perception | 1 | 12797673 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-15 12:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('logger', '0002_auto_20161231_1740'),
]
operations = [
migrations.AddField(
model_name='requestlogger',
name='sha1',
field=models.CharField(blank=True, default=None, max_length=40, null=True, verbose_name='SHA1'),
),
]
| 1.65625 | 2 |
experiments/models/mnist_torch_models.py | VKCOM/TopicsDataset | 1 | 12797674 | <filename>experiments/models/mnist_torch_models.py
import torch
import torch.nn as nn
import torch.nn.functional as F
N_CLASSES = 10
mnist_input_shape = (28, 28, 1)
class MnistModel(nn.Module):
def __init__(self):
super().__init__()
self.conv_1 = nn.Conv2d(1, 32, 3, 1)
self.conv_2 = nn.Conv2d(32, 64, 3, 1)
self.dropout = nn.Dropout(p=0.5)
self.fc_1 = nn.Linear(9216, 128)
self.out = nn.Linear(128, N_CLASSES)
def forward(self, x):
x = F.relu(self.conv_1(x))
x = F.relu(self.conv_2(x))
x = F.max_pool2d(x, 2)
x = self.dropout(x)
x = torch.flatten(x, 1)
x = self.dropout(F.relu(self.fc_1(x)))
        out = F.log_softmax(self.out(x), dim=1)
return out
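# Minimal smoke-test sketch (illustrative only): push a dummy batch through the
# network and check the output shape.
def example_forward_pass():
    model = MnistModel()
    dummy = torch.randn(4, 1, 28, 28)  # (batch, channels, height, width)
    log_probs = model(dummy)
    assert log_probs.shape == (4, N_CLASSES)
    return log_probs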
| 3.328125 | 3 |
csv2xlsx.py | dorbarker/csv2xlsx | 4 | 12797675 | import os
import glob
import csv
import argparse
from xlsxwriter.workbook import Workbook
def arguments():
parser = argparse.ArgumentParser()
parser.add_argument('path', default = os.getcwd(), help = "Path to CSV files")
parser.add_argument('--outname', default = None, help = "Name of output XLSX file")
return parser.parse_args()
def wrap_csvs(csvpath, outname):
directory_path = os.path.abspath(csvpath)
if outname is None:
filename = os.path.basename(directory_path + ".xlsx")
else:
filename = outname
workbook_name = os.path.join(directory_path, filename)
workbook = Workbook(workbook_name)
for c in glob.glob(os.path.join(csvpath, "*.csv")):
sheetname = os.path.basename(c[:c.rfind(".")])
print("Adding {} to {}".format(c, workbook_name))
worksheet = workbook.add_worksheet(sheetname)
with open(c, 'r') as f:
reader = csv.reader(f)
for rindex, row in enumerate(reader):
for cindex, col in enumerate(row):
try:
worksheet.write(rindex,cindex, float(col))
except ValueError:
worksheet.write(rindex, cindex, col)
workbook.close()
def main():
args = arguments()
wrap_csvs(args.path, args.outname)
if __name__ == "__main__":
main()
| 3.265625 | 3 |
openstack_dashboard/dashboards/identity/signups/panel.py | sreenathmmenon/astttproject | 0 | 12797676 | from django.utils.translation import ugettext_lazy as _
import horizon
from openstack_dashboard.local.local_settings import SIGNUP_ROLES, OPENSTACK_API_VERSIONS
from openstack_dashboard.dashboards.identity.signups.common import get_admin_ksclient
class Signups(horizon.Panel):
name = _("Signups")
slug = 'signups'
def allowed(self, context):
# check if user has special signup role
user = context['request'].user.id
tenant = context['request'].user.tenant_id
keystone = get_admin_ksclient()
if OPENSTACK_API_VERSIONS['identity'] == 3:
roles = keystone.roles.list(user=user, project=tenant)
else:
roles = keystone.roles.roles_for_user(user, tenant)
for role in roles:
if role.name in SIGNUP_ROLES or role.id in SIGNUP_ROLES:
return True
return False
| 1.976563 | 2 |
lesson5/times_table_challenge.py | chrisglencross/python-lessons | 1 | 12797677 | <filename>lesson5/times_table_challenge.py
import random
import time
def input_number(prompt):
while True:
try:
print(prompt)
number=int(input("> "))
break
except ValueError:
print("Oops! That wasn't a valid number. Try again.")
return number
def ask_times_table(num1, num2):
answer=input_number("What is " + str(num1) + " x " + str(num2) + "?")
if answer==num1*num2:
print("Correct!")
return True
else:
print("Wrong, the answer is", num1*num2)
return False
def do_times_table_test(table):
questions=list(range(1, 13))
random.shuffle(questions)
correct=0
start_time=time.time()
for question in questions:
print()
if ask_times_table(table, question):
correct=correct+1
end_time=time.time()
duration=end_time-start_time
# Bonus score if you took less than 60 seconds
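    # e.g. finishing in 30 seconds gives 1 + 3*(60-30)/60 = 2.5x,
    # while 60 seconds or slower stays at 1x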
if duration<60:
score_multiplier=1+3*(60-duration)/60
else:
score_multiplier=1
score=correct*score_multiplier
print()
print("You got", correct, "sums correct in", int(duration), "seconds")
print("This means you scored", int(score), "points!")
print()
# Main game loop
while True:
table=input_number("Which times table do you want to try?")
do_times_table_test(table)
| 4.09375 | 4 |
reviews/mixins.py | moshthepitt/answers | 6 | 12797678 | <reponame>moshthepitt/answers
from django.shortcuts import redirect
from django.contrib import messages
from django.utils.translation import ugettext as _
from answers.models import Answer
class ReviewMixin(object):
"""
Control who can take a review
"""
def dispatch(self, *args, **kwargs):
review = self.get_object()
        # don't allow a non-reviewer to review
if (self.request.user.userprofile not in review.reviewers.all()) and (not review.public):
messages.add_message(
self.request, messages.WARNING, _('Sorry, you do not have access to that section'))
return redirect('dashboard')
        # no more than one review per user
if review.quiz.single_attempt:
if not self.request.user.is_authenticated():
messages.add_message(
self.request, messages.WARNING, _("You need to be logged in to review"))
return redirect('dashboard')
elif Answer.objects.filter(userprofile=self.request.user.userprofile).filter(review=review).exists():
messages.add_message(
self.request, messages.WARNING, _("You can only review once"))
return redirect('dashboard')
return super(ReviewMixin, self).dispatch(*args, **kwargs)
| 2.28125 | 2 |
gamecam/pyrcolate.py | SpooksMcGoose/gamecam | 0 | 12797679 | <reponame>SpooksMcGoose/gamecam<gh_stars>0
import json
import operator
import os
import shutil
import string
import sys
import time
import cv2
import piexif
import numpy as np
from sklearn.cluster import KMeans
from datetime import datetime as dt, timedelta as td
from matplotlib import pyplot as plt
from matplotlib.widgets import Slider, RectangleSelector
import matplotlib.gridspec as gridspec
# CONSTANTS
BIG_NUM = int(1e9)
NEG_NUM = -0.1
RESP_NUM = 3.5
def PARSE_DT(raw):
"""Default parser for EXIF "Image DateTime" tag.
"""
str_ = ''.join(x for x in str(raw) if x in string.digits)
return dt.strptime(str_, "%Y%m%d%H%M%S")
def SORT_BY_DT(row):
"""Default sort for construct_jpg_data().
"""
return row["datetime"]
DEFAULT_PARSE = [
(("0th", 306), "datetime", PARSE_DT)
]
DEFAULT_PLOT_PARAMS = {
"ceiling": 40000,
"resp_thresh": 20000,
"trans_thresh": 30,
"smooth_time": 1,
"night_mult": 0
}
DIRECTION_MULTS = {
"A": (-1, -1, 0, 0),
"B": (1, 1, 0, 0),
"R": (0, 0, 1, 1),
"L": (0, 0, -1, -1)
}
COL_ORDER = [
"old_name", "old_path", "filename", "filepath", "shape",
"datetime", "24hr", "is_night", "timedelta", "td_minutes",
"median", "med_diff", "count", "new_count",
"selected", "user_edit", "trans_edit"
]
RS_PARAMS = {
"drawtype": "box", "useblit": True, "button": [1, 3], "minspanx": 5,
"minspany": 5, "spancoords": "pixels", "interactive": True
}
# GENERAL FUNCTIONS
class Mem():
def __init__(self):
pass
def rint(n):
    return int(round(n))
def sort_cols(var_name):
if var_name in COL_ORDER:
return COL_ORDER.index(var_name)
else:
return BIG_NUM
def input_filename():
while True:
filename = input("FILEPATH > ")
if os.path.exists(filename):
print("File already exists, would you like to overwrite it?")
answer = input("Y / N > ")
if answer.lower() in ("y", "yes"):
return filename
elif filename:
return filename
def input_directory():
while True:
directory = input("DIRPATH > ")
if os.path.isdir(directory):
return directory
else:
print("Directory does not exist, would you like to make it?")
answer = input("Y / N > ")
if answer.lower() in ("y", "yes"):
os.makedirs(directory)
return directory
print("Please input a new directory path.")
def csv_safe(obj):
"""Puts quotes around strings with commas in them.
"""
string = str(obj)
return '"' + string + '"' if "," in string else string
def to_24_hour(datetime):
"""Converts datetime.datetime type to 24 hour float type.
"""
time = datetime.time()
return time.hour + time.minute / 60 + time.second / 3600
def extract_var(data, var):
"""Returns a list of values corresponding to a variable name.
>>> foo = [{"name": "Shane", "age": 22}, {"name": "Eve", "age": 7}]
>>> extract_var(data=foo, var="name")
["Shane", "Eve"]
"""
return [x[var] for x in data]
def stop_watch(i, timer):
progress = (i * 10) // timer[0]
elapsed = time.time() - timer[1]
total = elapsed / (progress / 100)
remain = strfdelta(td(seconds=total - elapsed),
"{days}:{hours}:{minutes}:{seconds}")
print(f"{progress}% done. {remain} left.")
def strfdelta(tdelta, fmt):
"""Formats a timedelta object as a string.
Parameters
----------
tdelta : datetime.timedelta
Timedelta object to format.
fmt : str
Contains format calls to days, hours, minutes, and seconds.
Returns
-------
str
Right justified 2 spaces, filled with zeros.
"""
d = {"days": tdelta.days}
d["hours"], rem = divmod(tdelta.seconds, 3600)
d["minutes"], d["seconds"] = divmod(rem, 60)
for k, v in d.items():
if k != "days":
d[k] = str(v).rjust(2, "0")
return fmt.format(**d)
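# Example (illustrative):
#   strfdelta(td(days=1, seconds=3725), "{days}:{hours}:{minutes}:{seconds}")
# returns "1:01:02:05" -- only hours, minutes and seconds are zero-padded.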
def resize_long_edge(im, size):
h, w, *_ = im.shape
if w > h:
scale = size / w
else:
scale = size / h
new_w = rint(w * scale)
new_h = rint(h * scale)
return cv2.resize(im, (new_w, new_h))
def generate_thumbnails(jpg_data, export_dir, long_edge=1024):
    """Creates resized thumbnail copies of images, preserving EXIF data.
Parameters
----------
jpg_data : list of dictionaries
Requires that "filepath" and "filename" is in dictionary keys, which is
easily provided by find_imgs() prior to this function.
export_dir : str
Directory path that is used for exporting thumbnails.
long_edge : int, optional
By default, images will be resized to 1024 pixels on the long edge of
the image. Smaller sizes speed up performance, but decrease acuity.
"""
if not os.path.exists(export_dir):
os.makedirs(export_dir)
timer = (len(jpg_data) // 10, time.time())
for i, jpg in enumerate(jpg_data):
        if timer[0] and i and not i % timer[0]:
stop_watch(i, timer)
from_path = jpg["filepath"]
to_path = os.path.join(export_dir, jpg["filename"])
im = cv2.imread(from_path)
resized = resize_long_edge(im, long_edge)
cv2.imwrite(to_path, resized)
piexif.transplant(from_path, to_path)
# SPECIFIC FUNCTIONS
def find_imgs(dirpath, img_type=(".jpg", ".jpeg")):
"""Walks directory path, finding all files ending in img_type.
Parameters
----------
dirpath : str
Path to an image-containing directory.
img_type : tuple, optional
By default, finds JPG image types, but can be changed if camera
exports a different filetype.
Returns
-------
list of dictionaries
Contains filenames and filepaths.
"""
output = []
for dir_, _, files in os.walk(dirpath):
if "_selected" not in dir_:
found = (
f for f in files
if f.lower().endswith(img_type)
)
for filename in found:
filepath = os.path.join(dir_, filename)
output.append({
"filename": filename,
"filepath": filepath
})
return output
def attach_exif(jpg_data, parse_tags=DEFAULT_PARSE):
"""Loops through jpg_data, reading filepaths and attaching EXIF data.
Parameters
----------
jpg_data : list of dictionaries
Requires that "filepath" is in dictionary keys, which is easily
provided by find_imgs() prior to this function.
parse_tags : list of tuples, optional
By default, only Image DateTime is retrieved from EXIF data using
DEFAULT_PARSE. Examine DEFAULT_PARSE as an example parameter to
pass to attach_exif(), if more data is desired from EXIF tags.
Returns
-------
list of dictionaries
Same as jpg_data, but now with desired EXIF data attached.
"""
output = []
for deep_row in jpg_data:
row = deep_row.copy()
tags = piexif.load(row["filepath"])
for (key, tag), var, anon in parse_tags:
row[var] = anon(tags[key][tag])
output.append(row)
return output
def hist_median(image):
"""Quickly finds the median tone of a grayscale image.
"""
px_count = image.shape[0] * image.shape[1]
hist = cv2.calcHist([image], [0], None, [256], [0, 256])
tally = 0
threshold = px_count / 2
for i, count in enumerate(hist):
tally += count
if tally > threshold:
return i
def generate_clone_tuples(clone_to, fill_from):
    """Builds a (clone_to, clone_from) pair of tuples for pixel cloning.
Useful for cloning out timestamps, aids in histogram equalization.
Parameters
----------
clone_to : tuple
Format is (y1, y2, x1, x2), just like slicing images as np.arrays.
fill_from : {"A", "B", "R", "L"}
Calls directional tuples from DIRECTION_MULTS to fill pixels within
the clone_to area. Neighboring pixels from "[A]bove," "[B]elow,"
"[R]ight," and "[L]eft" are used for filling the area.
Returns
-------
pair of tuples
Matches the format for the clone parameter in process_jpgs().
"""
clone_to = np.array(clone_to)
mults = np.array(DIRECTION_MULTS[fill_from[0].upper()])
a, b, c, d = clone_to
h, w = b - a, d - c
h_or_w = np.array([h, h, w, w])
clone_from = clone_to + (h_or_w * mults)
return (tuple(clone_to), tuple(clone_from))
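# Worked example (sketch): generate_clone_tuples((10, 20, 30, 60), "A")
# returns ((10, 20, 30, 60), (0, 10, 30, 60)), i.e. the region is filled
# with the pixels directly above it.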
# IMAGE PREPROCESSING
def CROPPER(image, crop):
"""Returns a cropped image.
Parameters
----------
image : numpy.ndarray
Image array, typically from cv2.imread().
crop : tuple
Format is (y1, y2, x1, x2), just like slicing images as np.arrays.
"""
y1, y2, x1, x2 = crop
return image[y1:y2, x1:x2]
def CROP_EQUALIZE(image, crop, clone=None):
"""Returns a cropped image with an equalized histogram.
Parameters
----------
image : numpy.ndarray
Image array, typically from cv2.imread().
crop : tuple
Format is (y1, y2, x1, x2), just like slicing images as np.arrays.
clone : pair of tuples, optional
Matches the format ((clone_to), (clone_from)). For simplicity,
use generate_clone_tuples() to generate this object.
"""
image = CROPPER(image, crop)
return cv2.equalizeHist(image)
def CROP_CLONE_EQUALIZE(image, crop, clone):
"""Returns a cropped image, with specified cloning and equalization.
Parameters
----------
image : numpy.ndarray
Image array, typically from cv2.imread().
crop : tuple
Format is (y1, y2, x1, x2), just like slicing images as np.arrays.
clone : pair of tuples
Matches the format ((clone_to), (clone_from)). For simplicity,
use generate_clone_tuples() to generate this object.
"""
(a, b, c, d), (e, f, g, h) = clone
image[a:b, c:d] = image[e:f, g:h]
image = CROPPER(image, crop)
return cv2.equalizeHist(image)
def crop_clone_preview(image):
"""Interactive viewer to quickly get crop and clone parameters.
Parameters
----------
image : numpy.ndarray
Image array, typically from cv2.imread().
Returns
-------
tuple
Format is (crop, clone_to, clone_directs). The crop_tuple
variable can be fed directly into process_jpgs(). Then, use
"generate_clone_tuples(clone_to, directs[0])" to get the clone
parameter, or any other direction in the directs list (all have
been checked for bounds, unlike generate_clone_tuples).
"""
mem = Mem()
mem.crop_tuple = False
mem.clone_tuple = False
mem.clone_directs = False
def update(event):
if crop_RS.active:
crop_RS.update()
if clone_RS.active:
clone_RS.update()
def safe_corners(geometry):
min_y = max(0, min(geometry[0, :]))
max_y = min(H, max(geometry[0, :]))
min_x = max(0, min(geometry[1, :]))
max_x = min(W, max(geometry[1, :]))
ignore = (min_y > H or max_y < 0 or min_x > W or max_x < 0)
return tuple(map(
lambda x: int(round(x)), (min_y, max_y, min_x, max_x))), ignore
def RS_event(eclick, erelease):
x1, y1 = eclick.xdata, eclick.ydata
x2, y2 = erelease.xdata, erelease.ydata
cr_corn, cr_ig = safe_corners(crop_RS.geometry)
cl_corn, cl_ig = safe_corners(clone_RS.geometry)
mod = image.copy()
if not cl_ig:
mem.clone_tuple = cl_corn
trials = []
for direct in ("A", "B", "R", "L"):
tups = generate_clone_tuples(cl_corn, direct)
a, b, c, d = tups[1]
if not (a < 0 or b > H or c < 0 or d > W):
trials.append((tups, direct))
choose = []
for (tups, direct) in trials:
test = image.copy()
(a, b, c, d), (e, f, g, h) = tups
test[a:b, c:d] = test[e:f, g:h]
diff = cv2.absdiff(test, image)
choose.append((sum(cv2.sumElems(diff)), direct, test))
choose.sort()
if choose:
_, directs, imgs = list(zip(*choose))
mem.clone_directs = directs
mod = imgs[0]
del choose
else:
mem.clone_tuple = False
mem.clone_directs = False
if cr_ig:
cr_corn = (0, H, 0, W)
mem.crop_tuple = False
else:
mem.crop_tuple = cr_corn
y1, y2, x1, x2 = cr_corn
mod = mod[y1:y2, x1:x2]
mod = cv2.cvtColor(mod, cv2.COLOR_BGR2RGB)
ax_mod.imshow(mod)
fig.canvas.draw_idle()
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
H, W, *_ = image.shape
scale = 10
lo_W, hi_W = (0 - W // scale), (W + W // scale)
lo_H, hi_H = (0 - H // scale), (H + H // scale)
gs = gridspec.GridSpec(2, 2, height_ratios=[1, 1.25])
fig = plt.figure()
fig.canvas.set_window_title("Crop and Clone Preview")
ax_crop = fig.add_subplot(gs[0, 0])
ax_clone = fig.add_subplot(gs[0, 1])
ax_mod = fig.add_subplot(gs[1, :])
crop_RS = RectangleSelector(
ax_crop, RS_event, **RS_PARAMS)
clone_RS = RectangleSelector(
ax_clone, RS_event, **RS_PARAMS)
ax_crop.set_title("Crop")
ax_clone.set_title("Clone")
ax_mod.set_title("Result")
for axe in (ax_crop, ax_clone):
axe.set_xticklabels([])
axe.set_yticklabels([])
axe.set_xlim(lo_W, hi_W)
axe.set_ylim(hi_H, lo_H)
ax_crop.imshow(rgb)
ax_clone.imshow(rgb)
ax_mod.imshow(rgb)
plt.connect("draw_event", update)
plt.show()
return mem.crop_tuple, mem.clone_tuple, mem.clone_directs
# FRAME DIFFERENCING METHODS
# Bare-bones. Take difference, threshold, and sum the mask.
def SIMPLE(curr, prev, threshold, ksize=None, min_area=None):
"""Most basic frame differencing method.
Takes two images, then finds their absolute difference. A simple
threshold is called, the resulting white pixels are counted toward
response (movement). Very noisy, but fast.
Parameters
----------
curr : numpy.ndarray
Image array, typically from cv2.imread(). One of the two images
for the absolute difference to be taken.
prev : numpy.ndarray
Like curr. The second image to be differenced.
threshold : int, in range(0, 256)
Parameter to be passed to the cv2.threshold() function.
ksize : int, unused
Used in the BLURRED() and CONTOURS() functions, but retained here to
shorten the process_jpgs() function.
min_area : int, unused
Only used in the CONTOURS() function, but retained here to shorten
the process_jpgs() function.
"""
difference = cv2.absdiff(curr, prev)
_, mask = cv2.threshold(
difference,
threshold, 255,
cv2.THRESH_BINARY)
return cv2.countNonZero(mask)
# Difference, blur (amount changes with ksize), mask and sum.
def BLURRED(curr, prev, threshold, ksize=11, min_area=None):
"""Useful, mid-grade frame differencing method.
Takes two images, then finds their absolute difference. Prior to
thresholding, the differenced image is blurred to reduce noise.
After thresholding, the resulting white pixels are counted toward
    response (movement). Works decently, a little faster than CONTOURS.
Parameters
----------
curr : numpy.ndarray
Image array, typically from cv2.imread(). One of the two images
for the absolute difference to be taken.
prev : numpy.ndarray
Like curr. The second image to be differenced.
threshold : int, in range(0, 256)
Parameter to be passed to the cv2.threshold() function.
ksize : int
Parameter to be passed to the cv2.medianBlur() function.
Default is 11. Must be positive, odd number.
min_area : int, unused
Only used in the CONTOURS() function, but retained here to shorten
the process_jpgs() function.
"""
difference = cv2.absdiff(curr, prev)
blurred = cv2.medianBlur(difference, ksize)
_, mask = cv2.threshold(
blurred,
threshold, 255,
cv2.THRESH_BINARY)
return cv2.countNonZero(mask)
# Like BLURRED, but only sums drawn contours over given limit.
def CONTOURS(curr, prev, threshold, ksize=11, min_area=100):
"""Slower, but powerful frame differencing method.
Takes two images, then finds their absolute difference. Prior to
thresholding, the differenced image is blurred to reduce noise.
After thresholding, contours are drawn around the resulting white pixels.
If the contours are above the min_area parameter, they are counted as a
response (movement). Works very well, little noise; slower than others.
Parameters
----------
curr : numpy.ndarray
Image array, typically from cv2.imread(). One of the two images
for the absolute difference to be taken.
prev : numpy.ndarray
Like curr. The second image to be differenced.
threshold : int, in range(0, 256)
Parameter to be passed to the cv2.threshold() function.
ksize : int
Parameter to be passed to the cv2.medianBlur() function.
Default is 11. Must be positive, odd number.
min_area : int
Minimum contour area to count as a response (movement). Default is
        an area of 100 pixels. Larger numbers decrease sensitivity.
"""
difference = cv2.absdiff(curr, prev)
blurred = cv2.medianBlur(difference, ksize)
_, mask = cv2.threshold(
blurred,
threshold, 255,
cv2.THRESH_BINARY)
contours, _ = cv2.findContours(
mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[-2:]
count = 0
for cnt in contours:
area = cv2.contourArea(cnt)
if area > min_area:
count += area
return count
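# Minimal usage sketch (the frame paths below are hypothetical):
# prev = cv2.imread("frames/0001.jpg", 0)
# curr = cv2.imread("frames/0002.jpg", 0)
# response = CONTOURS(curr, prev, threshold=30, ksize=11, min_area=100)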
# JPG PROCESSING
def process_jpgs(
jpg_data,
method=CONTOURS,
crop=False, clone=False,
threshold=False, ksize=11, min_area=100
):
"""Generates a response (movement) metric between images.
    Works hierarchically to perform image cropping, cloning, and histogram
equalization on images read from jpg_data filepaths before being passed
on to frame differencing methods that generate a response metric.
This is the last step before the jpg_data list can be fed into the
Cam() class for response filtering.
Parameters
----------
jpg_data : list of dictionaries
Requires that "filepath" is in dictionary keys, which is easily
provided by find_imgs() prior to this function.
    method : function, {SIMPLE, BLURRED, CONTOURS}
Determines the frame differencing method to use. Ordered from
left to right based on increasing accuracy, decreasing speed.
crop : tuple
Format is (y1, y2, x1, x2), just like slicing images as np.arrays.
clone : pair of tuples
Matches the format ((clone_to), (clone_from)). For simplicity,
use generate_clone_tuples() to generate this object.
threshold : int, in range(0, 256)
Parameter to be passed to the cv2.threshold() function.
ksize : int
Parameter to be passed to the cv2.medianBlur() function.
Default is 11. Must be positive, odd number.
min_area : int
Minimum contour area to count as a response (movement). Default is
        an area of 100 pixels. Larger numbers decrease sensitivity.
Returns
-------
list of dictionaries
Same as incoming jpg_data, but now with the median image tone and
        a count variable, which represents how many pixels have changed
        between a photo and the previous one, after preprocessing / thresholding.
"""
    thresh_init = bool(threshold)
if not clone:
preprocess = CROP_EQUALIZE
else:
preprocess = CROP_CLONE_EQUALIZE
output = []
timer = (len(jpg_data) // 10, time.time())
for i, deep_row in enumerate(jpg_data):
row = deep_row.copy()
if i == 0:
jpg = cv2.imread(row["filepath"], 0)
h, w = jpg.shape
if not crop:
crop = (0, h, 0, w)
prev = preprocess(jpg, crop, clone)
        elif timer[0] and i % timer[0] == 0:
stop_watch(i, timer)
jpg = cv2.imread(row["filepath"], 0)
curr = preprocess(jpg, crop, clone)
row["shape"] = jpg.shape
row["median"] = hist_median(jpg)
if not thresh_init:
threshold = row["median"]*1.05
try:
row["count"] = method(curr, prev, threshold, ksize, min_area)
except cv2.error as inst:
if "(-209:Sizes" in str(inst):
(a, b), (c, d) = curr.shape[:2], prev.shape[:2]
h, w = min(a, c), min(b, d)
tup = (0, h, 0, w)
print("FUNCTION ABORTED!\n"
"Not all images are of same size, "
"consider using the crop parameter.\n"
f"Try crop={tup}.")
return tup
else:
print(inst)
prev = curr
output.append(row)
return output
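# Usage sketch (the crop and threshold values below are hypothetical):
# processed = process_jpgs(jpg_data, method=BLURRED, crop=(0, 980, 0, 1280), threshold=25)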
def construct_jpg_data(
dirpath,
parse_tags=DEFAULT_PARSE,
sort_key=SORT_BY_DT,
process_options={}
):
"""Performs all necessary steps to make jpg_data feedable to Cam().
Parameters
----------
dirpath : str
Path to an image-containing directory.
parse_tags : list of tuples, optional
By default, only Image DateTime is retrieved from EXIF data using
DEFAULT_PARSE. Examine DEFAULT_PARSE as an example parameter to
pass to attach_exif(), if more data is desired from EXIF tags.
sort_key : function, optional
By default, dictionaries within the jpg_data list are sorted by
        their "Image DateTime" EXIF tag. This can be changed if images don't
have a datetime, but do have a variable for sequence.
process_options : dictionary, optional
Passes along paramters to the process_jpgs() function. By default,
no options are specified. Make sure that this parameter is mappable.
Returns
-------
list of dictionaries
Each row contains everything that is needed to feed a Cam() object
with its initial data.
"""
jpg_paths = find_imgs(dirpath)
print(f"{len(jpg_paths)} images found.")
jpg_data = attach_exif(jpg_paths, parse_tags)
jpg_data.sort(key=sort_key)
if process_options == {}:
print("Crop and clone image as desired.")
crop, clone_to, directs = crop_clone_preview(
cv2.imread(jpg_data[len(jpg_data) // 2]["filepath"]))
clone = (generate_clone_tuples(clone_to, directs[0]) if clone_to
else False)
process_options = {"crop": crop, "clone": clone}
print("Started processing...")
output = process_jpgs(jpg_data, **process_options)
print("Done!")
return output
# THE MEAT AND POTATOES
# Requires data from process_jpg(). Object essentially holds data
# used in exporting, parameters for graphing, and plot function.
class Cam():
"""
A class used to store, plot, filter, and export image data.
Contains the heart of this module, the interactive plot() method.
A simply initialization could be: Cam(construct_jpg_data()). After
filtering images with the plot, save() and export() should be called
so as not to lose any prior work.
Attributes
----------
jpg_data : list of dictionaries
Similar to what is inputted, but includes variables denoting
selection, edits, event, etc.
    plt_vals : dictionary
Parameters used in the plot() method. To reset these values, re-
assign DEFAULT_PLOT_PARAMS to this attribute.
Methods
-------
attach_diffs(var, new_var)
Finds the difference between the current and previous items.
export(directory)
Exports selected images and a .csv file.
load(filename)
Loads a .sav file.
mark_edits(i)
Marks which photos have been edited.
plot()
Interactive plot used to select images for export.
save(filename)
Dumps a JSON object as a .sav file.
update_counts()
Updates jpg_data attribute with new counts.
update_events()
Updates jpg_data attribute with new events.
"""
def __init__(self, jpg_data=False, resp_var="count", thumb_dir=None):
"""
Parameters
----------
jpg_data : list of dictionaries, optional
Typically requires the output of process_jpgs(). Can be omitted
            if an empty Cam() object is desired for a load() method call.
resp_var : str, optional
A key found in jpg_data, typically the "count" variable is used
as a response, but alternatives like "median" can be used to plot
jpgs without processing them first.
thumb_dir : str, optional
            If the interactive plot is laggy, use generate_thumbnails() on your
jpg_data list, and specify the export directory here. These smaller
images will be displayed in the Gallery tab.
"""
self.resp_var = resp_var
        self.plt_vals = dict(DEFAULT_PLOT_PARAMS)  # copy, so the module-level defaults stay untouched
self.lines = {}
self.sliders = {}
self.user_edits = {}
self.press = [None, None]
self.toggle = False
self.buffer = [[None] * 64, [None] * 64]
self.recent_folder = os.path.expanduser("~")
        # Cam objects don't need to have data to initialize.
# This way, someone can load() older, processed data.
if jpg_data:
self.jpg_data = list(jpg_data)
self.length = len(self.jpg_data)
self.dt_present = "datetime" in self.jpg_data[0].keys()
h, w = jpg_data[0]["shape"]
self.plt_vals["ceiling"] = h * w * 0.02
self.plt_vals["resp_thresh"] = self.plt_vals["ceiling"] / 2
self.attach_diffs("median", "med_diff")
for row in self.jpg_data:
if thumb_dir is not None:
row["thumbpath"] = os.path.join(thumb_dir, row["filename"])
else:
row["thumbpath"] = row["filepath"]
row["count"] = row[self.resp_var]
row["med_diff"] = abs(row["med_diff"])
row["selected"] = False
row["user_edit"] = False
row["trans_edit"] = False
if self.dt_present:
self.attach_diffs("datetime", "timedelta")
for row in self.jpg_data:
row["24hr"] = to_24_hour(row["datetime"])
row["td_minutes"] = round(
row["timedelta"].total_seconds() / 60, 2)
day_hr = extract_var(self.jpg_data, "24hr")
# meds = extract_var(self.jpg_data, "median")
X = np.array(day_hr).reshape(-1, 1)
kmeans = KMeans(n_clusters=3).fit(X)
night_indices = [np.argmin(kmeans.cluster_centers_),
np.argmax(kmeans.cluster_centers_)]
for n, index in enumerate(kmeans.labels_):
self.jpg_data[n]["is_night"] = index in night_indices
def save(self, filename):
"""Dumps a JSON object with jpg_data, plot_params, and user_edits.
"""
temp_data = [
{k: v for k, v in row.items()}
for row in self.jpg_data]
for row in temp_data:
if "datetime" in row.keys():
row["datetime"] = dt.strftime(
row["datetime"], "%Y-%m-%d %H:%M:%S")
if "timedelta" in row.keys():
row["timedelta"] = row["timedelta"].total_seconds()
if "selected" in row.keys():
row["selected"] = int(row["selected"])
with open(filename, "w") as f:
f.write(json.dumps(self.plt_vals) + "\n")
f.write(json.dumps(temp_data) + "\n")
f.write(json.dumps(self.user_edits))
def load(self, filename):
"""Loads a .sav file, the Cam() object is now identical to the last.
"""
with open(filename, "r") as f:
self.plt_vals = json.loads(next(f))
self.plt_vals["resp_thresh"] = (
self.plt_vals["resp_thresh"] ** (1 / RESP_NUM))
temp_data = json.loads(next(f))
temp_dict = json.loads(next(f))
for row in temp_data:
if "datetime" in row.keys():
row["datetime"] = dt.strptime(
row["datetime"], "%Y-%m-%d %H:%M:%S"
)
self.dt_present = True
else:
self.dt_present = False
if "timedelta" in row.keys():
try:
row["timedelta"] = td(seconds=row["timedelta"])
except AttributeError:
pass
if "selected" in row.keys():
row["selected"] = bool(row["selected"])
self.jpg_data = temp_data.copy()
self.length = len(self.jpg_data)
self.user_edits = {int(k): v for k, v in temp_dict.items()}
def export(self, directory):
"""Exports selected images and a .csv file to specified directory.
"""
if not os.path.exists(directory):
os.makedirs(directory)
write_data = []
for i, row in enumerate(self.jpg_data):
write_row = row.copy()
if row["selected"]:
if self.dt_present:
dt_ISO = dt.strftime(
row["datetime"], "%Y%m%dT%H%M%S"
)
else:
dt_ISO = str(i)
new_name = "_".join((dt_ISO, row["filename"]))
new_path = os.path.join(directory, new_name)
write_row["filename"] = new_name
write_row["filepath"] = new_path
write_row["old_name"] = row["filename"]
write_row["old_path"] = row["filepath"]
shutil.copy2(row["filepath"], new_path)
write_data.append(write_row)
if write_data:
with open(os.path.join(directory, "_export.csv"), "w") as f:
variables = sorted(write_data[0].keys(),
key=sort_cols)
for i, row in enumerate(write_data):
if i != 0:
f.write("\n")
else:
f.write(",".join(variables) + "\n")
f.write(",".join(csv_safe(row[v]) for v in variables))
else:
raise ValueError("No images selected for export.")
def attach_diffs(self, var, new_var):
"""Finds the difference between the current and previous variable.
Requires a list of dictionaries.
"""
prev = self.jpg_data[0][var]
for row in self.jpg_data:
curr = row[var]
row[new_var] = curr - prev
prev = curr
def mark_edits(self, i, kind):
"""Marks which photos to label as edited based on [i]ndex.
"""
if i == 0:
self.jpg_data[i][kind] = True
else:
self.jpg_data[i][kind] = True
self.jpg_data[i-1][kind] = True
def update_counts(self):
"""Updates jpg_data with new counts (response metric).
Variable new_count is attached to jpg_data. Day to night transitions
are filtered out based on slider. Counts taken at with nighttime are
multiplied based on slider. Manual user edits are applied.
"""
for i, row in enumerate(self.jpg_data):
row["user_edit"] = False
row["trans_edit"] = False
new_count = row["count"]
if row["med_diff"] > self.plt_vals["trans_thresh"]:
new_count = 0
self.mark_edits(i, "trans_edit")
if self.dt_present:
new_count *= 1 + (
self.plt_vals["night_mult"] * row["is_night"])
row["new_count"] = new_count
for i, shift in self.user_edits.items():
self.jpg_data[i]["new_count"] = shift
self.mark_edits(i, "user_edit")
def update_events(self):
"""Updates jpg_data with new events (runs of detection).
An event is a contiguous sequence of images. First, images are
identified as being selected or not. Then, events are lumped based
on time since last image (SMOOTH slider value).
"""
self.pano_i = 0
prev = self.jpg_data[0]
for i, curr in enumerate(self.jpg_data):
prev["selected"] = (
prev["new_count"] > self.plt_vals["resp_thresh"] or
curr["new_count"] > self.plt_vals["resp_thresh"])
if i == self.length-1:
curr["selected"] = (
curr["new_count"] > self.plt_vals["resp_thresh"])
prev = curr
if self.dt_present:
for move in (1, -1):
is_neg = move < 0
prev = self.jpg_data[-is_neg]
for i in range(-is_neg, (self.length * move) - is_neg, move):
curr = self.jpg_data[i]
boo = (not curr["selected"] and prev["selected"] and
not (prev["user_edit"] and curr["user_edit"]))
if boo:
if not is_neg:
lapse = curr["td_minutes"]
else:
lapse = prev["td_minutes"]
curr["selected"] = (
lapse <= self.plt_vals["smooth_time"])
prev = curr
else:
nudge = int(self.plt_vals["smooth_time"])
master_set = set()
for i, row in enumerate(self.jpg_data):
if row["selected"]:
for func in (operator.add, operator.sub):
for j in range(nudge+1):
ind = func(i, j)
try:
row = self.jpg_data[ind]
if row["new_count"] < 0:
if func == operator.sub:
master_set.add(ind)
break
else:
master_set.add(ind)
except IndexError:
pass
for i in master_set:
self.jpg_data[i]["selected"] = True
def plot(self):
"""Interactive plot used to select images for export.
QUICK GUIDE:
c - Hold to VIEW IMAGES in gallery.
x - Hold and release to INCREASE response value to Inf.
z - Hold and release to DECREASE response value to 0.
, - Hold and release to REMOVE EDITS.
. - Press to RESET ALL EDITS.
v - Press for EQUALIZED image (for dark images).
"""
try:
self.jpg_data
except AttributeError as inst:
raise inst
mod = self.plt_vals["resp_thresh"] ** (1 / RESP_NUM)
SLIDER_PARAMS = [
("RESP", 0.08, 0, 100, mod, "%.2e"),
("TRANS", 0.06, 0, 120, self.plt_vals["trans_thresh"], "%.1f"),
("SMOOTH", 0.04, 0, 10, self.plt_vals["smooth_time"], "%.1f"),
("NIGHT", 0.02, -1, 50, self.plt_vals["night_mult"], "%.1f")
]
def update():
self.update_counts()
self.update_events()
draw()
def draw():
for name in self.lines.keys():
self.lines[name].remove()
np_counts = np.array(extract_var(self.jpg_data, "new_count"))
self.lines["edited"] = ax.fill_between(
np.arange(0, self.length) + 0.5, -BIG_NUM, BIG_NUM,
where=[r["trans_edit"] or r["user_edit"]
for r in self.jpg_data],
facecolor="#D8BFAA", alpha=0.5)
self.lines["selected"] = ax.fill_between(
np.arange(0, self.length) + 0.5, -BIG_NUM, BIG_NUM,
where=extract_var(self.jpg_data, "selected"),
facecolor="#F00314")
self.lines["count"] = ax.fill_between(
range(self.length), 0, np_counts, facecolor="black")
self.lines["threshold"] = ax.axhline(
self.plt_vals["resp_thresh"], color="#14B37D")
fig.canvas.draw_idle()
def on_slide(val):
for key in self.plt_vals.keys():
if key != "ceiling":
slider_key = key[:key.find("_")].upper()
if key == "resp_thresh":
mod = self.sliders[slider_key].val ** RESP_NUM
self.plt_vals[key] = mod
self.sliders[slider_key].valtext.set_text("%.2e" % mod)
else:
self.plt_vals[key] = self.sliders[slider_key].val
update()
# Displays last two and next two images from response spike.
def image_pano(xdata):
i = int(round(xdata))
if i != self.pano_i:
self.pano_i = i
array = np.array(range(i - 2, i + 2))
while True:
if any(n < 0 for n in array):
array += 1
elif any(n >= self.length for n in array):
array -= 1
else:
break
stack = []
for n in array:
if n in self.buffer[0]:
ind = self.buffer[0].index(n)
img = self.buffer[1][ind]
self.buffer[0].append(self.buffer[0].pop(ind))
self.buffer[1].append(self.buffer[1].pop(ind))
else:
img = cv2.imread(self.jpg_data[n]["thumbpath"])
self.buffer[0] = self.buffer[0][1:] + [n]
self.buffer[1] = self.buffer[1][1:] + [img]
if self.toggle:
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                        img = cv2.equalizeHist(gray)
stack.append(img)
min_y = min(img.shape[0] for img in stack)
pano = np.hstack([img[:min_y, :] for img in stack])
h, w, *_ = pano.shape
cv2.namedWindow("Gallery", cv2.WINDOW_NORMAL)
cv2.imshow("Gallery", cv2.resize(pano, (w // 2, h // 2)))
cv2.waitKey(1)
def on_click(event):
if event.dblclick and event.xdata is not None:
image_pano(event.xdata)
def on_key(event):
if event.xdata is not None:
if self.press[0] is None:
i = int(round(event.xdata))
if event.key == "z":
self.press = [NEG_NUM, i]
elif event.key == "x":
self.press = [BIG_NUM, i]
elif event.key == ",":
self.press = [0, i]
if event.key in "zxc,":
image_pano(event.xdata)
elif event.key == "v":
self.toggle = not self.toggle
image_pano(event.xdata)
elif event.key == ".":
self.user_edits = {}
def off_key(event):
try:
if event.xdata is not None and event.key in "zx,":
i = int(round(event.xdata))
low, high = sorted((self.press[1], i))
lo_to_hi = range(max(0, low), min(self.length, high+1))
if event.key in "zx":
new_edits = {i: self.press[0] for i in lo_to_hi}
self.user_edits = {**self.user_edits, **new_edits}
elif event.key == ",":
for i in lo_to_hi:
self.user_edits.pop(i, None)
self.press = [None, None]
update()
except TypeError:
pass
plt.rc("font", **{"size": 8})
plt.rcParams["keymap.back"] = "left, backspace"
fig = plt.figure()
ax = fig.add_subplot(111)
fig.canvas.set_window_title("Response Filtering")
fig.subplots_adjust(0.07, 0.18, 0.97, 0.97)
ax.grid(alpha=0.4)
ax.set_axisbelow(True)
ax.set_ylim([0, self.plt_vals["ceiling"]])
ax.set_xlim([0, min(500, self.length)])
ax.set_xlabel("Frame")
ax.set_ylabel("Response")
plt.yticks(rotation=45)
for name in ("count", "threshold", "selected", "edited"):
self.lines[name] = ax.axhline()
for name, pos, minv, maxv, init, fmt in SLIDER_PARAMS:
slider_ax = fig.add_axes([0.125, pos, 0.8, 0.02])
self.sliders[name] = Slider(
slider_ax, name, minv, maxv,
valinit=init, valfmt=fmt, color="#003459", alpha=0.5)
self.sliders[name].on_changed(on_slide)
fig.canvas.mpl_connect("key_press_event", on_key)
fig.canvas.mpl_connect("key_release_event", off_key)
fig.canvas.mpl_connect("button_press_event", on_click)
ax.fill_between(
np.arange(0, self.length) + 0.5, -BIG_NUM, 0,
facecolor="white", alpha=0.75, zorder=2)
try:
ax.fill_between(
np.arange(0, self.length) + 0.5, -BIG_NUM, BIG_NUM,
where=extract_var(self.jpg_data, "is_night"),
facecolor="#003459", alpha=0.5)
except KeyError:
pass
on_slide(1)
plt.show()
cv2.destroyAllWindows()
if __name__ == "__main__":
print("→ Please input a directory path with camera-trapping images.")
jpg_paths = find_imgs(input_directory())
jpg_data = attach_exif(jpg_paths)
jpg_data.sort(key=lambda x: x["datetime"])
print("→ Crop and clone out any timestamps from images.")
crop, clone_to, directs = crop_clone_preview(
cv2.imread(jpg_data[len(jpg_data) // 2]["filepath"]))
clone = generate_clone_tuples(clone_to, directs[0]) if clone_to else False
print("→ Images are being processed.")
processed_data = process_jpgs(jpg_data, crop=crop, clone=clone)
if type(processed_data) is not tuple:
cam = Cam(processed_data)
print("→ Please input a file path for an initial save.")
cam.save(input_filename())
print("→ Use the interactive plot to select images for export.")
help(Cam.plot)
cam.plot()
print("→ Save once again, so changes are recorded.")
cam.save(input_filename())
print("→ Finally, choose a location for export.")
cam.export(input_directory())
| 2.203125 | 2 |
SCODE-R/mrr.py | rizwan09/REDCODER | 22 | 12797680 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# Adapted from https://github.com/microsoft/CodeXGLUE/blob/main/Text-Code/NL-code-search-Adv/evaluator/evaluator.py
import logging
import sys, json
import numpy as np
def read_answers(filename):
answers = {}
with open(filename) as f:
for idx, line in enumerate(f):
line = line.strip()
js = json.loads(line)
answers[str(idx)] = str(idx)
return answers
def read_predictions(filename):
predictions = {}
with open(filename) as f:
for idx, line in enumerate(f):
line = line.strip()
js = json.loads(line)
predictions[str(idx)] = js['answers']
return predictions
def calculate_scores(answers, predictions):
scores = []
for key in answers:
# import ipdb
# ipdb.set_trace()
if key not in predictions:
logging.error("Missing prediction for url {}.".format(key))
sys.exit()
flag = False
for rank, idx in enumerate(predictions[key]):
if idx == answers[key]:
scores.append(1 / (rank + 1))
flag = True
break
if flag is False:
scores.append(0)
result = {}
result['MRR'] = round(np.mean(scores), 4)
return result
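# Worked example (sketch): with answers {"0": "0", "1": "1"} and
# predictions {"0": ["0"], "1": ["5", "1"]}, the reciprocal ranks are
# 1.0 and 0.5, so calculate_scores() returns {'MRR': 0.75}.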
def main():
import argparse
parser = argparse.ArgumentParser(description='Evaluate leaderboard predictions for POJ-104 dataset.')
parser.add_argument('--answers', '-a', help="filename of the labels, in txt format.")
parser.add_argument('--predictions', '-p', help="filename of the leaderboard predictions, in txt format.")
args = parser.parse_args()
print("reading gold answers")
answers = read_answers(args.answers)
    print("reading predicted answers")
predictions = read_predictions(args.predictions)
print("computing scores")
scores = calculate_scores(answers, predictions)
print(scores)
if __name__ == '__main__':
main()
# python mrr.py -a /home/wasiahmad/workspace/projects/NeuralKpGen/data/scikp/kp20k_separated/KP20k.test.jsonl -p /home/rizwan/DPR/predictions_KP20k.jsonl
| 2.765625 | 3 |
src/py_profiler/long_adder.py | andy1xx8/py-profiler | 3 | 12797681 | import threading
#
# @author andy
#
class LongAdder:
def __init__(self):
self._lock = threading.Lock()
self._value = 0
def get_value(self):
return self._value
def increment(self):
with self._lock:
self._value += 1
def decrement(self):
with self._lock:
self._value -= 1
    def set(self, amount: int):
        # Note: despite its name, this adds `amount` to the current value.
        with self._lock:
            self._value += amount
def reset(self):
with self._lock:
self._value = 0
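# Usage sketch:
# adder = LongAdder()
# adder.increment()
# adder.increment()
# adder.decrement()
# adder.get_value()  # -> 1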
| 3.09375 | 3 |
app/__init__.py | bjrnwnklr/random-poetry | 0 | 12797682 | <filename>app/__init__.py
from flask import Flask
from config import Config
from randompoetry import PoemFormRegistry, CorpusRegistry
app = Flask(__name__)
app.config.from_object(Config)
pfr = PoemFormRegistry.from_json('poemforms.json')
cr = CorpusRegistry()
# this has to be at the bottom to avoid circular references
from app import routes
| 1.992188 | 2 |
gazebo/0810_robot.py | shmpwk/robot_assembler | 0 | 12797683 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import rospy
import actionlib
import math
from trajectory_msgs.msg import JointTrajectory
from trajectory_msgs.msg import JointTrajectoryPoint
from control_msgs.msg import FollowJointTrajectoryAction, FollowJointTrajectoryGoal
from sensor_msgs.msg import LaserScan
i=0
def callback(msg):
rospy.loginfo('min %f -(%f)-> max %f'%(msg.angle_min, msg.angle_increment, msg.angle_max))
    # Define the ActionLib client using the topic name and message type
client = actionlib.SimpleActionClient('/fullbody_controller/follow_joint_trajectory', FollowJointTrajectoryAction)
    client.wait_for_server()  # Make sure the connection to the ActionLib server is established
# gen msg
traj_msg = FollowJointTrajectoryGoal()
traj_msg.trajectory = JointTrajectory()
traj_msg.trajectory.header.stamp = rospy.Time.now() + rospy.Duration(0.2)
traj_msg.trajectory.joint_names = ['JOINT0', 'JOINT1', 'JOINT2', 'JOINT3', 'JOINT4', 'JOINT5', 'JOINT6', 'JOINT7', 'JOINT8', 'JOINT9', 'JOINT10', 'JOINT11', 'JOINT12', 'JOINT13', 'JOINT14', 'JOINT15', 'JOINT16', 'JOINT17', 'JOINT18', 'JOINT19', 'JOINT20', 'JOINT21', 'JOINT22', 'JOINT23', 'JOINT24', 'JOINT25', 'JOINT26', 'JOINT27', 'JOINT28', 'JOINT29']
global i
val = int(-msg.angle_min / msg.angle_increment)
if (msg.ranges[val] < 100):
i += 1
    traj_msg.trajectory.points.append(JointTrajectoryPoint(positions=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, i, 0, i, 0, 0, 0, 0, i, 0, i, 0], time_from_start = rospy.Duration(1.0 + i)))  # padded to 30 values to match joint_names
    # Send the target posture as the goal
client.send_goal(traj_msg)
rospy.loginfo("wait for goal ...")
    client.wait_for_result()  # Wait until the robot finishes moving
rospy.loginfo("done")
if __name__ == '__main__':
rospy.init_node('range_listener', anonymous=True)
rospy.Subscriber("/range_sensor", LaserScan, callback)
rospy.spin()
| 2.109375 | 2 |
lcof/33-er-cha-sou-suo-shu-de-hou-xu-bian-li-xu-lie-lcof.py | yuenliou/leetcode | 0 | 12797684 | #!/usr/local/bin/python3.7
# -*- coding: utf-8 -*-
from typing import List
class Solution:
    def verifyPostorder(self, postorder: List[int]) -> bool:
        """Jian Zhi Offer (Coding Interviews) approach: the check is a bit tedious."""
length = len(postorder)
if length <= 0: return True
root = postorder[length - 1]
        # Left subtree: elements smaller than the root (guaranteed by how lIndex is found below)
lIndex = 0
for i in range(length - 1):
if postorder[i] > root:
break
lIndex += 1
        # Right subtree: elements must all be greater than the root (still to be verified)
rIndex = 0
for i in range(lIndex, length - 1):
if postorder[i] < root:
return False
rIndex += 1
left = True
if lIndex: left = self.verifyPostorder(postorder[:lIndex])
right = True
if rIndex: right = self.verifyPostorder(postorder[lIndex:lIndex+rIndex])
return left and right
    def verifyPostorder1(self, postorder: List[int]) -> bool:
        """Recursive divide and conquer: https://leetcode-cn.com/problems/er-cha-sou-suo-shu-de-hou-xu-bian-li-xu-lie-lcof/solution/mian-shi-ti-33-er-cha-sou-suo-shu-de-hou-xu-bian-6/"""
def recur(i, j):
            # Termination: when i >= j the subtree has at most one node, so it is trivially valid; return True
if i >= j: return True
            # Partition and verify
            p = i
            while postorder[p] < postorder[j]: p += 1  # locate the left interval: elements smaller than the root
            m = p
            while postorder[p] > postorder[j]: p += 1  # check the right interval: all elements must be greater
            # Left subtree interval [i, m - 1], right subtree interval [m, j - 1], root index j
return p == j and recur(i, m - 1) and recur(m, j - 1)
return recur(0, len(postorder) - 1)
def main():
param = [1,6,3,2,5]
param = [1,3,2,6,5]
solution = Solution()
ret = solution.verifyPostorder(param)
print(ret)
ret = solution.verifyPostorder1(param)
print(ret)
'''Jian Zhi Offer 33. Postorder traversal sequence of a binary search tree
Given an integer array, determine whether it is the postorder traversal result of some binary search tree.
Return true if it is, otherwise return false. Assume that any two numbers in the input array are distinct.
Reference binary search tree:
     5
    / \
   2   6
  / \
 1   3
Example 1:
Input: [1,6,3,2,5]
Output: false
Example 2:
Input: [1,3,2,6,5]
Output: true
Constraints:
Array length <= 1000
Source: LeetCode (力扣)
Link: https://leetcode-cn.com/problems/er-cha-sou-suo-shu-de-hou-xu-bian-li-xu-lie-lcof
Copyright belongs to LeetCode China (领扣网络). For commercial reprints please contact them for authorization; for non-commercial reprints please cite the source.
'''
if __name__ == '__main__':
main()
| 3.671875 | 4 |
tests/steps.py | Django-Stack-Backend/Django-backend-React-frontend | 1 | 12797685 | import json
from datetime import datetime
from django.db.models import Model
from jsonpath_ng import parse
from rdflib import Graph, URIRef
from safetydance import step_data
from safetydance_django.steps import ( # noqa: F401
http_client,
http_response,
json_values_match,
)
from safetydance_django.test import Http # noqa: F401
from safetydance_test.step_extension import step_extension
from scrud_django.models import Resource, ResourceType
__all__ = [
'an_instance_named',
'is_valid_resource',
'is_valid_resource_type',
'an_registration_data',
'check_registration_results',
'json_path_expressions',
'json_path_match',
'json_path_matches',
'ld_context_states_sub_class_of',
'named_instances',
'resource_json_is',
'response_json_matches',
]
named_instances = step_data(dict, initializer=dict)
json_path_expressions = step_data(dict, initializer=dict)
json_path_match = step_data(object)
sub_class_of = URIRef("http://www.w3.org/2000/01/rdf-schema#subClassOf")
@step_extension
def an_instance_named(name: str, instance: Model):
named_instances[name] = instance
@step_extension
def is_valid_resource(name: str):
instance = named_instances[name]
assert isinstance(instance, Resource)
assert isinstance(instance.content, dict)
assert isinstance(instance.modified_at, datetime)
assert isinstance(instance.etag, str)
assert len(instance.etag) == 32
@step_extension
def is_valid_resource_type(name: str):
instance = named_instances[name]
assert isinstance(instance, ResourceType)
assert isinstance(instance.type_uri, str)
assert len(instance.type_uri) > 0
@step_extension
def ld_context_states_sub_class_of(subclass_uri, superclass_uri):
data = http_response.json()
g = Graph().parse(data=json.dumps(data), format='json-ld')
superclasses = list(g.transitive_objects(subclass_uri, sub_class_of))
assert len(superclasses) > 0, superclasses
assert superclass_uri in superclasses, superclasses
# register
named_registration_data = step_data(dict, initializer=dict)
@step_extension
def an_registration_data(name: str, registration_data: dict):
named_registration_data[name] = registration_data
@step_extension
def check_registration_results(name: str, result):
expected = named_registration_data[name]
# result = register_resource_type(**expected)
resource_type_name = list(expected.keys())[0]
expected = expected[resource_type_name]
assert result.type_uri == expected['rdf_type_uri']
assert result.slug == resource_type_name
assert result.context_uri == expected['json_ld_context_url']
assert result.schema_uri == expected['json_schema_url']
def get_compiled_expression(expr, compiled_expressions):
compiled_expr = compiled_expressions.get(expr, None)
if compiled_expr is None:
compiled_expr = parse(expr)
compiled_expressions[expr] = compiled_expr
return compiled_expr
@step_extension
def json_path_matches(expr, condition):
data = http_response.json()
compiled_expr = get_compiled_expression(expr, json_path_expressions)
json_path_match = compiled_expr.find(data)
assert len(json_path_match) > 0, data
match = json_path_match[0].value
assert condition(
json_path_match
), f"expr: {expr} \ncondition: {condition}, \n match: {match}"
def resource_envelope_json_is(resource_data: dict):
assert 'href' in resource_data
assert isinstance(resource_data['href'], str)
assert 'last_modified' in resource_data
assert isinstance(resource_data['last_modified'], str)
assert 'etag' in resource_data
assert isinstance(resource_data['etag'], str)
assert 'content' in resource_data
assert isinstance(resource_data['content'], dict)
response = http_response.json()
assert response["content"] == resource_data["content"]
def resource_json_is(resource_data):
response = http_response.json()
assert response == resource_data, response
def response_json_matches(expected):
observed = http_response.json()
assert json_values_match(expected, observed), http_response
resource_envelope_json_is = step_extension(
f=resource_envelope_json_is, target_type=Http
)
resource_json_is = step_extension(f=resource_json_is, target_type=Http)
response_json_matches = step_extension(f=response_json_matches, target_type=Http)
| 2.125 | 2 |
app/entity/service.py | conatel-i-d/flask-blueprint | 0 | 12797686 | <gh_stars>0
from app.utils.base_service import BaseService
from .model import Entity
from .interfaces import EntityInterfaces
class EntityService(BaseService):
model = Entity
interfaces = EntityInterfaces
| 1.460938 | 1 |
apps/paypal/urls.py | youssriaboelseod/pyerp | 115 | 12797687 | # Django Library
from django.urls import path
# Localfolder Library
from .views.paypal_config import UpdatePaypalConfigView
# http://www.secnot.com/django-shop-paypal-rest-1.html
app_name = 'paypal'
urlpatterns = [
path('paypal-config/<int:pk>', UpdatePaypalConfigView.as_view(), name='paypal-config'),
]
| 1.8125 | 2 |
uscisstatus/__init__.py | meauxt/uscisstatus | 2 | 12797688 | <filename>uscisstatus/__init__.py
from lxml import html
import re
import requests
from datetime import datetime
CASE_DATE_PATTERN = r"[(A-Za-z)]*\s[\d]*,\s[\d]*"
URL = "https://egov.uscis.gov/casestatus/mycasestatus.do"
APP_RECEIPT_NUM = "appReceiptNum"
INIT_CASE_SEARCH = "initCaseSearch"
CASE_STATUS = "CHECK STATUS"
UPDATE_TEXT_XPATH = "/html/body/div[2]/form/div/div[1]/div/div/div[2]/div[3]/p/text()"
MISSING_URL_PATTERN = "','|', '"
TEXT_FILTER_PATTERN = r"['\[\]]"
def get_case_status(case_id):
data = {APP_RECEIPT_NUM: case_id,
INIT_CASE_SEARCH: CASE_STATUS}
r = requests.post(URL, data=data)
content = html.fromstring(r.content)
text = str(content.xpath(UPDATE_TEXT_XPATH))
if len(text) < 2:
raise ValueError("Please make sure you case id is valid")
    text = str(re.sub(MISSING_URL_PATTERN, 'USCIS website', text))
    status_message = re.sub(TEXT_FILTER_PATTERN, ' ', text)
p = re.search(CASE_DATE_PATTERN, status_message)
if p is not None:
match = p.group(0)
last_update_date = datetime.strptime(str(match), "%B %d, %Y")
last_update_date = last_update_date.strftime('%m/%d/%Y')
return {'status': status_message, 'date': last_update_date}
else:
raise ValueError("Please make sure you case id is valid")
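# Usage sketch (the receipt number below is made up):
# get_case_status("ABC1234567890")
# -> {'status': <status text scraped from the USCIS page>, 'date': 'MM/DD/YYYY'}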
| 2.875 | 3 |
src/Scene.py | rookies/dmx | 0 | 12797689 | <reponame>rookies/dmx
#!/usr/bin/python3
import logging
import xml.etree.ElementTree as ET
from Exceptions import SceneFileException
import SceneCommands
class Scene(object):
metaData = {}
commands = {}
def fromFile(self, f):
## Parse XML ##
logging.info("Reading from scene file `%s'.", f)
tree = ET.parse(f)
root = tree.getroot()
## Check root element (scene) ##
if root.tag != "scene":
raise SceneFileException("Root tag must be named scene.")
## Get children ##
for child in root:
## Check element name ##
if child.tag == "meta":
if "key" in child.attrib:
key = child.attrib["key"]
if key in self.metaData:
raise SceneFileException("MetaData key already exists: %s." % key)
else:
self.metaData[key] = child.text
else:
raise SceneFileException("Meta tag must have key attribute.")
elif child.tag == "command":
if ("start" in child.attrib) and ("value" in child.attrib):
key = int(child.attrib["start"])
if not key in self.commands:
self.commands[key] = []
self.commands[key].append(SceneCommands.Helpers.get(child.attrib["value"]))
else:
raise SceneFileException("Command tag must have start and value attributes.")
else:
                raise SceneFileException("Child tag must be named meta or command, not %s." % child.tag)
def getDuration(self):
dur = 0
for l in self.commands.items():
for c in l[1]:
tmp = l[0] + c.getDuration()
if tmp > dur:
dur = tmp
return dur
| 2.6875 | 3 |
snakemake/report/__init__.py | CIMAC-CIDC/cidc-snakemake | 0 | 12797690 | __author__ = "<NAME>"
__copyright__ = "Copyright 2015, <NAME>"
__email__ = "<EMAIL>"
__license__ = "MIT"
import os
import sys
import mimetypes
import base64
import textwrap
import datetime
import io
import uuid
import json
import time
import shutil
import subprocess as sp
import itertools
from collections import namedtuple, defaultdict
import requests
from docutils.parsers.rst.directives.images import Image, Figure
from docutils.parsers.rst import directives
from docutils.core import publish_file, publish_parts
from snakemake.utils import format
from snakemake.logging import logger
from snakemake.io import is_flagged, get_flag_value
from snakemake.exceptions import WorkflowError
from snakemake.script import Snakemake
from snakemake import __version__
class EmbeddedMixin(object):
"""
Replaces the URI of a directive with a base64-encoded version.
Useful for embedding images/figures in reports.
"""
def run(self):
"""
        Image.run() handles most of the work; we then replace the image URI with a base64 data URI.
"""
result = Image.run(self)
reference = directives.uri(self.arguments[0])
self.options['uri'] = data_uri_from_file(reference)[0]
return result
# Create (and register) new image:: and figure:: directives that use a base64
# data URI instead of pointing to a filename.
class EmbeddedImage(Image, EmbeddedMixin):
pass
directives.register_directive('embeddedimage', EmbeddedImage)
class EmbeddedFigure(Figure, EmbeddedMixin):
pass
directives.register_directive('embeddedfigure', EmbeddedFigure)
def data_uri(data, filename, encoding="utf8", mime="text/plain"):
"""Craft a base64 data URI from file with proper encoding and mimetype."""
data = base64.b64encode(data)
uri = ("data:{mime};charset={charset};filename={filename};base64,{data}"
"".format(filename=filename,
mime=mime,
charset=encoding,
data=data.decode("utf-8")))
return uri
def data_uri_from_file(file, defaultenc="utf8"):
"""Craft a base64 data URI from file with proper encoding and mimetype."""
mime, encoding = mimetypes.guess_type(file)
if mime is None:
mime = "text/plain"
logger.info("Could not detect mimetype for {}, assuming "
"text/plain.".format(file))
if encoding is None:
encoding = defaultenc
with open(file, "rb") as f:
return data_uri(f.read(), os.path.basename(file), encoding, mime), mime
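# Usage sketch (the path below is hypothetical):
# uri, mime = data_uri_from_file("results/plot.png")
# # uri -> "data:image/png;charset=utf8;filename=plot.png;base64,...", mime -> "image/png"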
def report(text, path,
stylesheet=os.path.join(os.path.dirname(__file__), "report.css"),
defaultenc="utf8",
template=None,
metadata=None, **files):
outmime, _ = mimetypes.guess_type(path)
if outmime != "text/html":
raise ValueError("Path to report output has to be an HTML file.")
definitions = textwrap.dedent("""
.. role:: raw-html(raw)
:format: html
""")
metadata = textwrap.dedent("""
.. container::
:name: metadata
{metadata}{date}
""").format(metadata=metadata + " | " if metadata else "",
date=datetime.date.today().isoformat())
text = format(textwrap.dedent(text), stepout=3)
attachments = []
if files:
attachments = [textwrap.dedent("""
.. container::
:name: attachments
""")]
for name, _files in sorted(files.items()):
if not isinstance(_files, list):
_files = [_files]
links = []
for file in sorted(_files):
data, _ = data_uri_from_file(file)
links.append(':raw-html:`<a href="{data}" download="{filename}" draggable="true">{filename}</a>`'.format(
data=data, filename=os.path.basename(file)))
links = "\n\n ".join(links)
attachments.append('''
.. container::
:name: {name}
{name}:
{links}
'''.format(name=name,
links=links))
text = definitions + text + "\n\n" + "\n\n".join(attachments) + metadata
overrides = dict()
if template is not None:
overrides["template"] = template
if stylesheet is not None:
overrides["stylesheet_path"] = stylesheet
html = open(path, "w")
publish_file(source=io.StringIO(text),
destination=html,
writer_name="html",
settings_overrides=overrides)
class RuleRecord:
def __init__(self, job, job_rec):
import yaml
self.name = job_rec.rule
self.singularity_img_url = job_rec.singularity_img_url
self.conda_env = None
self._conda_env_raw = None
if job_rec.conda_env:
self._conda_env_raw = base64.b64decode(job_rec.conda_env).decode()
self.conda_env = yaml.load(self._conda_env_raw)
self.n_jobs = 1
self.output = list(job_rec.output)
self.id = uuid.uuid4()
def add(self, job_rec):
self.n_jobs += 1
self.output.extend(job_rec.output)
def __eq__(self, other):
return (self.name == other.name and
self.conda_env == other.conda_env and
self.singularity_img_url == other.singularity_img_url)
class JobRecord:
def __init__(self):
self.rule = None
self.starttime = sys.maxsize
self.endtime = 0
self.output = []
self.conda_env_file = None
self.singularity_img_url = None
class FileRecord:
def __init__(self, path, job, caption):
self.path = path
logger.info("Adding {}.".format(self.name))
self.raw_caption = caption
self.data_uri, self.mime = data_uri_from_file(path)
self.id = uuid.uuid4()
self.job = job
self.png_uri = None
if self.is_img:
convert = shutil.which("convert")
if convert is not None:
try:
png = sp.check_output(["convert", "-density", "100", self.path, "png:-"],
stderr=sp.PIPE)
uri = data_uri(png, os.path.basename(self.path) + ".png",
mime="image/png")
self.png_uri = uri
except sp.CalledProcessError as e:
logger.warning("Failed to convert image to png with "
"imagemagick convert: {}".format(e.stderr))
else:
logger.warning("Command convert not in $PATH. Install "
"imagemagick in order to have embedded "
"images and pdfs in the report.")
def render(self, env, rst_links, results):
if self.raw_caption is not None:
try:
from jinja2 import Template
except ImportError as e:
raise WorkflowError("Python package jinja2 must be installed to create reports.")
job = self.job
snakemake = Snakemake(job.input, job.output, job.params, job.wildcards,
job.threads, job.resources, job.log,
job.dag.workflow.config, job.rule.name, None)
try:
caption = open(self.raw_caption).read() + rst_links
caption = env.from_string(caption).render(snakemake=snakemake,
results=results)
self.caption = publish_parts(caption, writer_name="html")["body"]
except Exception as e:
raise WorkflowError("Error loading caption file of output "
"marked for report.", e)
@property
def is_img(self):
web_safe = {"image/gif", "image/jpeg", "image/png", "image/svg+xml", "application/pdf"}
return self.mime in web_safe
@property
def is_text(self):
text = {"text/csv", "text/plain", "text/tab-separated-values"}
return self.mime in text
@property
def icon(self):
if self.is_img:
return "image"
elif self.is_text:
return "file-text"
else:
return "file"
@property
def name(self):
return os.path.basename(self.path)
@property
def size(self):
"""Return size in Bytes."""
return os.path.getsize(self.path)
def rulegraph_d3_spec(dag):
try:
import networkx as nx
from networkx.drawing.nx_agraph import graphviz_layout
from networkx.readwrite import json_graph
except ImportError as e:
raise WorkflowError("Python packages networkx and pygraphviz must be "
"installed to create reports.", e)
g = nx.DiGraph()
g.add_nodes_from(sorted(job.rule.name for job in dag.jobs))
for job in dag.jobs:
target = job.rule.name
for dep in dag.dependencies[job]:
source = dep.rule.name
g.add_edge(source, target)
pos = graphviz_layout(g, "dot", args="-Grankdir=BT")
xmax = max(x for x, y in pos.values()) + 100 # add offset to account for labels
ymax = max(y for x, y in pos.values())
def encode_node(node):
x, y = pos[node]
return {"rule": node, "fx": x, "fy": y}
nodes = list(map(encode_node, g.nodes))
idx = {node: i for i, node in enumerate(g.nodes)}
links = [{"target": idx[u], "source": idx[v], "value": 1}
for u, v in g.edges]
return {"nodes": nodes, "links": links}, xmax, ymax
def get_resource_as_string(url):
r = requests.get(url)
if r.status_code == requests.codes.ok:
return r.text
raise WorkflowError("Failed to download resource needed for "
"report: {}".format(url))
def auto_report(dag, path):
try:
from jinja2 import Template, Environment, PackageLoader
except ImportError as e:
raise WorkflowError("Python package jinja2 must be installed to create reports.")
if not path.endswith(".html"):
raise WorkflowError("Report file does not end with .html")
logger.info("Creating report...")
persistence = dag.workflow.persistence
results = defaultdict(list)
records = defaultdict(JobRecord)
recorded_files = set()
for job in dag.jobs:
for f in itertools.chain(job.expanded_output, job.input):
if is_flagged(f, "report") and f not in recorded_files:
if not f.exists:
raise WorkflowError("File {} marked for report but does "
"not exist.".format(f))
if os.path.isfile(f):
report_obj = get_flag_value(f, "report")
category = report_obj.category or " "
results[category].append(
FileRecord(f, job, report_obj.caption))
recorded_files.add(f)
for f in job.expanded_output:
meta = persistence.metadata(f)
if not meta:
logger.warning("Missing metadata for file {}".format(f))
continue
try:
job_hash = meta["job_hash"]
rule = meta["rule"]
rec = records[(job_hash, rule)]
rec.rule = rule
rec.starttime = min(rec.starttime, meta["starttime"])
rec.endtime = max(rec.endtime, meta["endtime"])
rec.conda_env_file = None
rec.conda_env = meta["conda_env"]
rec.singularity_img_url = meta["singularity_img_url"]
rec.output.append(f)
except KeyError as e:
print(e)
logger.warning("Metadata for file {} was created with a too "
"old Snakemake version.".format(f))
for catresults in results.values():
catresults.sort(key=lambda res: res.name)
# prepare runtimes
runtimes = [{"rule": rec.rule, "runtime": rec.endtime - rec.starttime}
for rec in sorted(records.values(), key=lambda rec: rec.rule)]
# prepare end times
timeline = [{"rule": rec.rule,
"starttime": datetime.datetime.fromtimestamp(rec.starttime).isoformat(),
"endtime": datetime.datetime.fromtimestamp(rec.endtime).isoformat()}
for rec in sorted(records.values(), key=lambda rec: rec.rule)]
# prepare per-rule information
rules = defaultdict(list)
for rec in records.values():
rule = RuleRecord(job, rec)
if rec.rule not in rules:
rules[rec.rule].append(rule)
else:
merged = False
for other in rules[rec.rule]:
if rule == other:
other.add(rec)
merged = True
break
if not merged:
rules[rec.rule].append(rule)
# rulegraph
rulegraph, xmax, ymax = rulegraph_d3_spec(dag)
env = Environment(loader=PackageLoader("snakemake", "report"),
trim_blocks=True,
lstrip_blocks=True)
env.filters["get_resource_as_string"] = get_resource_as_string
rst_links = textwrap.dedent("""
.. _Results: #results
.. _Rules: #rules
.. _Statistics: #stats
{% for cat, catresults in results|dictsort %}
.. _{{ cat }}: #results-{{ cat|replace(" ", "_") }}
{% for res in catresults %}
.. _{{ res.name }}: #{{ res.id }}
{% endfor %}
{% endfor %}
.. _
""")
for cat, catresults in results.items():
for res in catresults:
res.render(env, rst_links, results)
# global description
text = ""
if dag.workflow.report_text:
with open(dag.workflow.report_text) as f:
class Snakemake:
config = dag.workflow.config
text = f.read() + rst_links
text = publish_parts(env.from_string(text).render(
snakemake=Snakemake, results=results),
writer_name="html")["body"]
# record time
now = "{} {}".format(datetime.datetime.now().ctime(), time.tzname[0])
results_size = sum(res.size for cat in results.values() for res in cat)
# render HTML
template = env.get_template("report.html")
with open(path, "w", encoding="utf-8") as out:
out.write(template.render(results=results,
results_size=results_size,
text=text,
rulegraph_nodes=rulegraph["nodes"],
rulegraph_links=rulegraph["links"],
rulegraph_width=xmax + 20,
rulegraph_height=ymax + 20,
runtimes=runtimes,
timeline=timeline,
rules=[rec for recs in rules.values() for rec in recs],
version=__version__,
now=now))
logger.info("Report created.")
| 2.1875 | 2 |
main.py | bjtuer-math/GSMBCD | 0 | 12797691 | <gh_stars>0
import pickle
with open('argfile', 'rb') as f:
data=pickle.load(f)
#data[0].u_rules=['stochastic_lb','Lb','stochastic_lb_double']
data[0].u_rules=['Lb-NN_refined_sto_double','Lb-NN_refined','Lb-NN_refined_sto']
data[0].s_rules=['GS','Random','all']
#data[0].u_rules=['Lb-NN_refined_sto']
#data[0].s_rules=['all']
data[0].loss_names=['lsl1nn_r']
#data[0].L1=[1e-2,1e-5,1e-9]
#data[0].L1=[0.01,0.00001,0.000000001]
data[0].dataset_names=['log1p.E2006.train']
data[0].L1=[1e2]
data[0].blockList=100
data[0].plot_names=['Lb-NN_refined_sto_double:GSMBCD2','Lb-NN_refined_sto:GSMBCD','Lb:GBCD']
data[0].stdout_freq=1
data[0].time_cost=300 #18
data[0].fixed_step=0.0001
data[0].stop_criterion='time'
#data[0].L2=10
#data[0].u_rules=['Lb-NN_refined_sto','Lb-NN_refined_sto_double','Lb-NN_refined']
| 1.984375 | 2 |
utils.py | i0Ek3/libloss | 0 | 12797692 | <filename>utils.py
#!/usr/bin/env python
# coding=utf-8
import numpy as np
def validate(x, y):
if np.shape(x) != np.shape(y):
return False
else:
        if np.array_equal(x, y):
print("----")
return True
else:
print("++++")
return False
| 2.953125 | 3 |
upload_app.py | nprapps/inauguration | 0 | 12797693 | #!/usr/bin/env python
import os
import re
from flask import Flask, redirect
from tumblpy import Tumblpy
import app_config
app = Flask(app_config.PROJECT_NAME)
app.config['PROPAGATE_EXCEPTIONS'] = True
@app.route('/dear-mr-president/', methods=['POST'])
def _post_to_tumblr():
"""
Handles the POST to Tumblr.
"""
def clean(string):
"""
Formats a string all pretty.
"""
return string.replace('-', ' ').replace("id ", "I'd ").replace("didnt", "didn't").replace('i ', 'I ')
# Request is a global. Import it down here where we need it.
from flask import request
def strip_html(value):
"""
Strips HTML from a string.
"""
return re.compile(r'</?\S([^=]*=(\s*"[^"]*"|\s*\'[^\']*\'|\S*)|[^>])*?>', re.IGNORECASE).sub('', value)
def strip_breaks(value):
"""
Converts newlines, returns and other breaks to <br/>.
"""
value = re.sub(r'\r\n|\r|\n', '\n', value)
return value.replace('\n', '<br />')
caption = u"<p class='intro'>Dear Mr. President,</p><p class='voted' data-vote-type='%s'>%s.</p><p class='message'>%s</p><p class='signature-name'>Signed,<br/>%s from %s</p><p class='footnote'>What do <em>you</em> want President Obama to remember in his second term? Share your message at <a href='http://inauguration2013.tumblr.com/'>NPR's Dear Mr. President</a>.</p>" % (
request.form['voted'],
clean(request.form['voted']),
strip_breaks(strip_html(request.form['message'])),
strip_html(request.form['signed_name']),
strip_html(request.form['location'])
)
t = Tumblpy(
app_key=app_config.TUMBLR_KEY,
app_secret=os.environ['TUMBLR_APP_SECRET'],
oauth_token=os.environ['TUMBLR_OAUTH_TOKEN'],
oauth_token_secret=os.environ['TUMBLR_OAUTH_TOKEN_SECRET'])
try:
tumblr_post = t.post('post', blog_url=app_config.TUMBLR_URL, params={
'type': 'photo',
'caption': caption,
'tags': u"%s" % request.form['voted'].replace('-', ''),
'data': request.files['image']
})
    except Exception:
return 'Sorry, we\'re probably over capacity. Please try again later.'
return redirect(u"http://%s/%s#posts" % (app_config.TUMBLR_URL, tumblr_post['id']), code=301)
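# A rough sketch (not part of the original app) of exercising this endpoint with
# Flask's test client; the form keys mirror the fields read above, and the image
# bytes and Tumblr credentials are placeholders.
#
# from io import BytesIO
# client = app.test_client()
# response = client.post('/dear-mr-president/', data={
#     'voted': 'i-voted-for-obama',
#     'message': 'Please remember the arts.',
#     'signed_name': 'Jane',
#     'location': 'Ohio',
#     'image': (BytesIO(b'fake image bytes'), 'photo.jpg'),
# }, content_type='multipart/form-data')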
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8000, debug=app_config.DEBUG)
| 3.203125 | 3 |
src/Echo_Chamber/main.py | kms70847/Animation | 17 | 12797694 | from math import sqrt
#Courtesy of Aran-Fey https://chat.stackoverflow.com/transcript/message/47258396#47258396
def circle_octant(r):
r2 = r ** 2
y = 0
while y <= r:
x = sqrt(r2 - y**2)
if x-int(x) >= 0.5:
x += 1
else:
# If we moved left, find out which adjacent pixel
# the circle moved through - either the one to the
# left or the one above (or neither?).
# To do this, we calculate the x coordinate in the
# center between the current pixel and the one below.
x2 = sqrt(r2 - (y-0.5)**2)
if x2 < int(x)+0.5:
yield int(x), y-1
else:
yield int(x)+1, y
x = int(x)
yield x, y
if x <= r//2:
break
y += 1
#Courtesy of Aran-Fey https://chat.stackoverflow.com/transcript/message/47258396#47258396
def circle_coords(x, y, r):
for h, v in circle_octant(r):
yield x+h, y+v
yield x-h, y+v
yield x+h, y-v
yield x-h, y-v
yield x+v, y+h
yield x-v, y+h
yield x+v, y-h
yield x-v, y-h
def brush(g, r):
"""
draws over an iterable of coordinates using a square "brush" of side length 1+2*r
"""
for x,y in g:
for dx in range(-r, r+1):
for dy in range(-r, r+1):
yield dx+x, dy+y
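# e.g. set(brush([(0, 0)], 1)) covers the 3x3 block of pixels centred on (0, 0).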
from PIL import Image, ImageDraw
SIZE = 500
def render_echo_chamber(r):
def screen_circle(center, r, **style):
x,y = center
draw.chord((x-r,y-r, x+r,y+r), 0, 359, **style)
def logical_circle(center, r, **style):
logical_x, logical_y = center
screen_x = (logical_x + 0.5) * SIZE
screen_y = (logical_y + 0.5) * SIZE
screen_r = r * SIZE
screen_circle((screen_x, screen_y), screen_r, **style)
img = Image.new("RGB", (SIZE, SIZE), "white")
draw = ImageDraw.Draw(img)
if r <= 0.5:
logical_circle((0, 0), r, outline="black")
else:
tiles = set(brush(circle_coords(0,0,r), 2))
for x,y in tiles:
logical_circle((x,y), r, outline="black")
return img
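# Quick single-frame preview (not in the original script): render one radius and
# save it with PIL before committing to the full animation loop below, e.g.
# render_echo_chamber(2.5).save("echo_chamber_preview.png")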
def frange(start, stop, steps):
for i in range(steps+1):
f = i / float(steps)
yield start+(f * (stop-start))
import animation
max_radius = 8
frames_per_radius = 64
frames = []
for f in frange(0, max_radius, frames_per_radius*max_radius):
print(f)
frames.append(render_echo_chamber(f))
animation.make_gif(frames, delay=8, delete_temp_files=True) | 3.90625 | 4 |
examples/python/cpu/scalars/scalar_cast_01.py | kant/ocean-tensor-package | 27 | 12797695 | import pyOcean_cpu as ocean
s = ocean.cdouble(3+4j)
print(s)
print(s.asPython())
print(s.imag.asPython())
print(s.real.asPython())
print(int(s.real))
print(float(s.real))
| 2.296875 | 2 |
app/__init__.py | thomasbhatia/kigata | 1 | 12797696 | from . import factory
def create_app(settings_override=None):
    # Return the configured API application instance
app = factory.create_app(__name__, __path__, settings_override)
return app
| 1.8125 | 2 |
example/hello_fastapi.py | ifplusor/ahserver | 1 | 12797697 | <filename>example/hello_fastapi.py
# encoding=utf-8
if __name__ == "__main__":
import asyncio
import os
import uvloop
from ahserver.application.asgi import server
    # install uvloop as the asyncio event loop policy
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
    # start the server
server(
__file__ + ":app",
port=8080,
# port=8443,
worker_nums=1,
# enable_https=True,
certfile=os.path.join(os.path.dirname(__file__), "test.crt"),
keyfile=os.path.join(os.path.dirname(__file__), "test.key"),
)
else:
def create_application():
from typing import Optional
from fastapi import FastAPI, Body
app = FastAPI()
@app.get("/")
def read_root():
return {"Hello": "World"}
@app.get("/items/{item_id}")
def read_item(item_id: int, q: Optional[str] = None):
return {"item_id": item_id, "q": q}
@app.post("/post")
def read_body(body=Body(...)):
return body
return app
app = create_application()
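    # Illustrative check once a server is running on the port configured above
    # (assuming the default 8080):
    #   curl "http://127.0.0.1:8080/items/5?q=hello"   ->  {"item_id":5,"q":"hello"}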
| 2.65625 | 3 |
aoc_wim/aoc2018/q19.py | wimglenn/advent-of-code-wim | 20 | 12797698 | <filename>aoc_wim/aoc2018/q19.py
"""
--- Day 19: Go With The Flow ---
https://adventofcode.com/2018/day/19
"""
import math
from aocd import data
from aoc_wim.aoc2018.q16 import all_ops
def parsed(data):
lines = data.splitlines()
ip = int(lines.pop(0).split()[1])
lines = [s.split() for s in lines]
lines = [[a, int(b), int(c), int(d)] for a, b, c, d in lines]
return ip, lines
funcs = all_ops()
class Comp:
def __init__(self, ip, d, r0, hacked):
self.hacked = hacked
self.ip = ip
self.i = 0
self.d = d
self.r = [0] * 6
self.r[0] = r0
def step(self):
opname, a, b, c = self.d[self.i]
op = funcs[opname]
self.r[self.ip] = self.i
op(self.r, a, b, c)
self.r[self.ip] += 1
self.i = self.r[self.ip]
        if self.hacked and self.i == 1 and max(self.r) > 5:
            # the emulated program's quadratic inner loop just sums the divisors
            # of its large target register, so compute that directly...
            self.r[0] = sum(divisors(max(self.r)))
            # ...and raise IndexError on purpose so compute() stops stepping
            [][0]
def compute(data, r0=0, hack=False):
ip, lines = parsed(data)
comp = Comp(ip, lines, r0=r0, hacked=hack)
while True:
try:
comp.step()
except IndexError:
break
return comp.r[0]
def divisors(n):
divs = {1, n}
for i in range(2, int(math.sqrt(n)) + 1):
if not n % i:
divs |= {i, n // i}
return divs
print("part a:", compute(data, r0=0, hack=True))
print("part b:", compute(data, r0=1, hack=True))
| 3.21875 | 3 |
game/migrations/0001_initial.py | vinsmokemau/ProyectoLoteria | 0 | 12797699 | # Generated by Django 2.2.4 on 2019-09-11 03:40
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('deck', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Board',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('player', models.CharField(max_length=50, verbose_name='jugador')),
],
options={
'verbose_name': 'Tablero',
'verbose_name_plural': 'Tableros',
},
),
migrations.CreateModel(
name='Game',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
options={
'verbose_name': 'Juego',
'verbose_name_plural': 'Juegos',
},
),
migrations.CreateModel(
name='GameCard',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('position', models.IntegerField(verbose_name='posicion')),
('board', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='cards', to='game.Board')),
('card', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='deck.Card')),
],
options={
'verbose_name': 'Carta de Juego',
'verbose_name_plural': 'Carta de Juegos',
},
),
migrations.AddField(
model_name='board',
name='game',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='game.Game'),
),
]
| 1.882813 | 2 |
BasicRead.py | Seeed-Studio/seeed-ardupy-si114x | 0 | 12797700 | <filename>BasicRead.py<gh_stars>0
from arduino import grove_si114x
import time
SI1145 = grove_si114x()
while True:
print('Visible %03d UV %.2f IR %03d' % (SI1145.Visible , SI1145.UV/100 , SI1145.IR),end=" ")
print('\r', end='')
time.sleep(0.5)
| 2.578125 | 3 |
tests/integ_time.py | mp-extras/vl53l5cx | 2 | 12797701 | <gh_stars>1-10
# MicroPython
from machine import lightsleep, Pin
from time import sleep, ticks_ms
from esp32 import wake_on_ext1, WAKEUP_ALL_LOW
from sensor import make_sensor
from vl53l5cx import DATA_TARGET_STATUS, DATA_DISTANCE_MM, POWER_MODE_WAKEUP
from vl53l5cx import RESOLUTION_4X4, RANGING_MODE_AUTONOMOUS, POWER_MODE_SLEEP
def main():
wake_on_ext1([Pin(13, Pin.IN)], WAKEUP_ALL_LOW)
trigger = Pin(2, Pin.OUT, value=0)
bl = Pin(4, Pin.OUT, value=0)
button = Pin(0)
# while button.value():
# pass
trigger.value(1)
tof = make_sensor()
tof.reset()
tof.init()
tof.resolution = RESOLUTION_4X4
tof.ranging_freq = 15
tof.ranging_mode = RANGING_MODE_AUTONOMOUS
tof.integration_time_ms = 10
tof.start_ranging((DATA_DISTANCE_MM, DATA_TARGET_STATUS))
for _ in range(10):
lightsleep()
results = tof.get_ranging_data()
# print(len(results.distance_mm))
tof.integration_time_ms = 25
for _ in range(10):
lightsleep()
results = tof.get_ranging_data()
# print(len(results.distance_mm))
tof.stop_ranging()
# print(ticks_ms())
lightsleep(100)
tof.power_mode = POWER_MODE_SLEEP
lightsleep(1000)
# print(ticks_ms())
tof.power_mode = POWER_MODE_WAKEUP
lightsleep(1000)
# print(ticks_ms())
trigger.value(0)
# print("done:", tof.is_alive())
main()
| 2.578125 | 3 |
glucosetracker/settings/production.py | arhanair/glucose-tracker-monitor | 134 | 12797702 | from .base import *
DEBUG = False
TEMPLATE_DEBUG = DEBUG
# Use the cached template loader so template is compiled once and read from
# memory instead of reading from disk on each load.
TEMPLATE_LOADERS = (
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
)
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['.glucosetracker.net']
# Make this unique, and don't share it with anybody.
SECRET_KEY = os.environ['DJANGO_SECRET_KEY']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'glucosetracker',
'USER': os.environ['DATABASE_USER'],
'PASSWORD': os.environ['DATABASE_PASSWORD'],
'HOST': 'localhost',
'PORT': '',
}
}
# 3rd-party apps tracking IDs.
INTERCOM_APP_ID = 'a6d0326564469dfd7f7d9b1bfc909ee3815a85a8'
GOOGLE_ANALYTICS_TRACKING_ID = 'UA-45698014-1'
ADDTHIS_PUBLISHER_ID = 'ra-52fffdf9456ec7d2'
# The 'From:' header of admin-related emails.
DEFAULT_FROM_EMAIL = '<EMAIL>'
ADMINS = (
('Local Admin', '<EMAIL>'),
)
MANAGERS = ADMINS
CONTACTS = {
'support_email': 'GlucoseTracker.net <<EMAIL>>',
'admin_email': '<EMAIL>',
'info_email': 'GlucoseTracker.net <<EMAIL>>',
}
# Subscribers app settings
SEND_SUBSCRIBERS_EMAIL_CONFIRMATION = True
# Django-storages settings
DEFAULT_FILE_STORAGE = 'core.s3utils.MediaRootS3BotoStorage'
AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']
AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
AWS_STORAGE_BUCKET_NAME = 'glucosetracker-assets'
AWS_QUERYSTRING_AUTH = False
MEDIA_URL = '//%s.s3.amazonaws.com/%s/' % (AWS_STORAGE_BUCKET_NAME, MEDIA_ROOT) | 1.703125 | 2 |
scripts/test_vlm_1.py | Xero64/pyvlm | 2 | 12797703 | #%% Load Dependencies
from math import pi
from IPython.display import display
from pyvlm import LatticeResult, LatticeOptimum
from pyvlm import latticesystem_from_json
from pyvlm.tools import elliptical_lift_force_distribution
#%% Create Lattice System
jsonfilepath = '../files/Straight_Wing_Cosine_100.json'
lsys = latticesystem_from_json(jsonfilepath)
display(lsys)
#%% Original Case
alpha = 3.0 # degrees
beta = 0.0 # degrees
lres_org = LatticeResult('Baseline', lsys)
lres_org.set_state(alpha=alpha, beta=beta)
display(lres_org)
#%% Equivalent Elliptical Lift
L = lres_org.nfres.CL*lres_org.qfs*lsys.sref
lell = elliptical_lift_force_distribution(lsys.srfcs[0].strpy, lsys.bref, L)
lopt_ell = LatticeOptimum('Equivalent Elliptical', lsys)
lopt_ell.set_state(alpha=alpha)
lopt_ell.set_target_lift_force_distribution(lell, rho=1.0, speed=1.0)
display(lopt_ell)
#%% Calculate Unconstrained Optimum
# Constrain Only Lift and the resulting lift distribution is Elliptical.
# Note: You have to consider root moment for both sides of the lifting surface.
lopt1 = LatticeOptimum('Unconstrained Optimised', lsys)#, sym=False)
lopt1.set_state()
lopt1.add_constraint('L', L)
lopt1.add_record('l', strplst=lsys.lstrpi)
lopt1.add_record('l', strplst=lsys.mstrpi)
phi1, lam1 = lopt1.optimum_lift_force_distribution()
display(lopt1)
#%% Calculate Constrained Optimum
# Constrain Root Bending Moment (Rolling Moment) to 75% of the Elliptical Optimimum.
# Note: You have to consider both sides of the lifting surface.
lspec = lopt1.record[0].value*0.75
mspec = lopt1.record[1].value*0.75
lopt2 = LatticeOptimum('Constrained Optimised', lsys)#, sym=False)
lopt2.set_state()
lopt2.add_constraint('L', L)
lopt2.add_constraint('l', lspec, strplst=lsys.lstrpi)
lopt2.add_constraint('l', mspec, strplst=lsys.mstrpi)
phi2, lam2 = lopt2.optimum_lift_force_distribution()
display(lopt2)
#%% Plot Distributions
axl = None
axl = lres_org.plot_trefftz_lift_force_distribution(ax=axl)
axl = lopt_ell.plot_trefftz_lift_force_distribution(ax=axl)
axl = lopt1.plot_trefftz_lift_force_distribution(ax=axl)
axl = lopt2.plot_trefftz_lift_force_distribution(ax=axl)
_ = axl.set_ylabel('Lift Distribution')
_ = axl.set_xlabel('Span Position')
#%% Print Results
CDi_org_theory = lres_org.trres.CL**2/pi/lsys.ar/lres_org.trres.e
CDi_ell_theory = lres_org.trres.CL**2/pi/lsys.ar
print(f'CL_org = {lres_org.trres.CL:.3f}')
print(f'CL_ell = {lopt_ell.trres.CL:.3f}')
print(f'CL_opt1 = {lopt1.trres.CL:.3f}')
print(f'CL_opt2 = {lopt2.trres.CL:.3f}')
print('')
print(f'CDi_org_theory = {CDi_org_theory:.7f}')
print(f'CDi_org = {lres_org.trres.CDi:.7f}')
print(f'CDi_ell_theory = {CDi_ell_theory:.7f}')
print(f'CDi_ell = {lopt_ell.trres.CDi:.7f}')
print(f'CDi_opt1 = {lopt1.trres.CDi:.7f}')
print(f'CDi_opt2 = {lopt2.trres.CDi:.7f}')
print('')
print(f'Efficiency Improvement = {100.0*(1.0-lopt1.trres.CDi/lres_org.trres.CDi):.2f}%')
| 2.515625 | 3 |
avaloq/avaloq/urls.py | Tankiolegend/AvaloqCodingWebsite | 0 | 12797704 | from django.contrib import admin
from django.urls import path, include
from avaloq_app import views
urlpatterns = [
path('', views.review, name='review'),
path('avaloq/', include('avaloq_app.urls')),
path('admin/', admin.site.urls),
path('accounts/', include('registration.backends.default.urls')),
]
handler404 = 'avaloq_app.views.page_not_found'
handler500='avaloq_app.views.server_error'
handler400='avaloq_app.views.bad_request'
handler403='avaloq_app.views.permission_denied'
| 1.625 | 2 |
main.py | Manoj-Paramsetti/auto-meeting-launcher | 4 | 12797705 | import os
import re
import sys
import webbrowser
import yaml
import pyautogui
import tkinter
file = __file__[:-7]  # directory of this script (drops the trailing "main.py")
meetings = {"meetings": []}
def more_option(opt):
os.system("cls || clear")
# Add
if opt == 1:
print(" Give alias for your meeting")
alias = input(" [User] >>> ")
print(" Now paste your meeting invite link")
link = input(" [User] >>> ")
meetings["meetings"].append(dict({alias: link}))
if validate(link):
with open(file + "meeting.yml", "w") as f:
yaml.dump(meetings, f)
print(" Data is added")
input("\n [ENTER] >>> ")
print()
else:
more_option(1)
# Remove
elif opt == 2:
i = 1
print("\n Id Alias")
for meeting in meetings["meetings"]:
x = meeting.keys()
print(" " + str(i) + "." + ((5 - len(str(i))) * " "), list(x)[0])
i += 1
print(" Type the id number to delete")
alias = int(input(" [User] >>> "))
meetings["meetings"].pop(alias - 1)
with open(file + "meeting.yml", "w") as f:
yaml.dump(meetings, f)
print(" removed")
input("\n [ENTER] >>> ")
print()
# Show
elif opt == 3:
i = 1
print("\n Id Alias")
for meeting in meetings["meetings"]:
x = meeting.keys()
print(" " + str(i) + "." + ((5 - len(str(i))) * " "), list(x)[0])
i += 1
input("\n [ENTER] >>> ")
print()
# Attend
elif opt == 4:
i = 1
print("\n Id Alias")
for meeting in meetings["meetings"]:
x = meeting.keys()
print(" " + str(i) + "." + ((5 - len(str(i))) * " "), list(x)[0])
i += 1
print(" Select the meeting number to start")
alias = int(input(" [User] >>> "))
print()
open_key_link(meetings["meetings"][alias - 1])
# Help
elif opt == 5:
display_help()
input("\n [ENTER] >>> ")
print()
# Exit
elif opt == 6:
exit()
else:
print(" Incorrect option")
start()
# Displaying Help
def display_help():
print("\n =====================================================")
print(" To open your meeting from command line")
print("\n\tCheck this following syntax")
print("\tpython3 main.py [meeting_number]")
print(" Example: python3 main.py 1")
print(" =====================================================")
print(" To open your meeting from gui")
print("\n\tuse this command")
print("\tpython3 main.py --gui")
print(" =====================================================")
print(" To add shortcut in windows")
print("\n\t- Right click the opengui.bat")
print("\t- Select Properties")
print("\t- Click the Shortcut tab")
print(
"""\t- Click in the Shortcut key box and
press a letter. For example, if
you press the P key,the key combination to
run this shortcut is `CTRL+ALT+P`"""
)
print(" =====================================================\n")
# menu
def start():
os.system("cls || clear")
print(""" __ __ __ __ __ __ _ _ ___ _ _ ____ ____
( \\/ )___( ) /__\\ ( )( )( \\( )/ __)( )_( )( ___)( _ \\
) ((___))(__ /(__)\\ )(__)( ) (( (__ ) _ ( )__) ) /
(_/\\/\\_) (____)(__)(__)(______)(_)\\_)\\___)(_) (_)(____)(_)\\_)
""")
print("==================")
print("1. Add meeting")
print("2. Remove meeting")
print("3. Show meeting")
print("4. Attend meeting")
print("5. Shortcut")
print("6. Exit")
print("==================")
    try:
        opt = int(input("[User] >>> "))
    except ValueError:
        print("Incorrect option")
        start()
        return
    more_option(opt)
# GUI
def gui():
window_main = tkinter.Tk(className=" Meeting Launcher")
frame = tkinter.Frame(window_main)
frame.pack(expand=True, padx=10, pady=20)
tkinter.Label(frame, text="Select your meeting to open", font=("Arial", 12)).pack(
pady=10
)
for meeting in meetings["meetings"]:
x = meeting.keys()
tkinter.Button(
frame,
text=list(x)[0],
height="2",
width="40",
bg="white",
command=lambda link=meeting[list(x)[0]]: find_whom_to_call(link),
).pack(padx=10)
window_main.mainloop()
# link opener
def open_link(link):
    webbrowser.open(link)
    print(" Waiting for browser to open", end="")
# banana peeler :) (gives link from yaml)
def open_key_link(_alias):
key = list(_alias)[0]
print()
link = _alias[key]
find_whom_to_call(link)
# LAUNCHER
def launch_zoom(loading_symbol):
try:
x, y = pyautogui.locateCenterOnScreen(
file + "Zoom-Launch-Meeting-Button.png", confidence=0.8
)
pyautogui.click(x, y)
except TypeError:
if loading_symbol == "\\":
print("\r Waiting for browser to open [" + loading_symbol + "]", end="")
launch_zoom("/")
else:
print("\r Waiting for browser to open [" + loading_symbol + "]", end="")
launch_zoom("\\")
def launch_gmeet(loading_symbol):
try:
x, y = pyautogui.locateCenterOnScreen(
file + "google_meet_join.png", confidence=0.8
)
print(x, y)
pyautogui.click(x, y)
except TypeError:
if loading_symbol == "\\":
print("\r Waiting for browser to open [" + loading_symbol + "]", end="")
launch_gmeet("/")
else:
print("\r Waiting for browser to open [" + loading_symbol + "]", end="")
launch_gmeet("\\")
# def launch_teams(loading_symbol):
# try:
# x, y = pyautogui.locateCenterOnScreen(
# file+"Zoom-Launch-Meeting-Button.png", confidence=0.8
# )
# pyautogui.click(x, y)
#
# except TypeError:
# if loading_symbol == "\\":
# print("\r Waiting for browser to open [" + loading_symbol + "]", end="")
# launch_teams("/")
# else:
# print("\r Waiting for browser to open [" + loading_symbol + "]", end="")
# launch_teams("\\")
def find_whom_to_call(link):
if re.search("meet.google.com", link):
open_link(link)
launch_gmeet("\\")
elif re.search(".zoom.us", link):
open_link(link)
launch_zoom("\\")
elif re.search("teams.live", link):
print(" Chori....")
print(" I'm can't launch microsoft teams but I can open it for you")
input(" [ENTER] >>> ")
open_link(link)
# launch_teams("\\")
else:
print("Wrong link")
start()
def validate(link):
if re.search("meet.google.com", link):
return True
elif re.search("zoom", link):
return True
elif re.search("teams.live", link):
print(" Chori....")
print(" I'm can't launch microsoft teams but I can open it for you")
input(" [ENTER] >>> ")
return True
# launch_teams("\\")
else:
print(" Wrong link")
print(" Try again")
input(" [ENTER] >>> ")
return False
try:
if __name__ == "__main__":
try:
with open(file + "meeting.yml", "r") as f:
meetings = yaml.load(f, Loader=yaml.FullLoader)
except FileNotFoundError:
print("Give alias for your meeting")
alias = input("[User] >>> ")
print("Now paste your meeting invite link")
link = input("[User] >>> ")
meetings["meetings"].append(dict({alias: link}))
with open(file + "meeting.yml", "w") as f:
yaml.dump(meetings, f)
# open_link(link)
find_whom_to_call(link)
if len(sys.argv) == 1:
start()
else:
arg1 = sys.argv[1]
if re.match(r"[0-9]*", arg1).group():
i = int(arg1)
print()
try:
open_key_link(meetings["meetings"][i - 1])
except IndexError:
print("There is no data on that number")
print("Press ENTER to get the menu. To exit press CTRL+C")
start()
elif arg1.lower() == "--gui":
print("Opening gui")
gui()
elif arg1.lower() == "--version" or arg1.lower() == "-v":
print("Version: 1.0.1")
else:
display_help()
except KeyboardInterrupt:
os.system("cls || clear")
print(""" __ __ __ __ __ __ _ _ ___ _ _ ____ ____
( \\/ )___( ) /__\\ ( )( )( \\( )/ __)( )_( )( ___)( _ \\
) ((___))(__ /(__)\\ )(__)( ) (( (__ ) _ ( )__) ) /
(_/\\/\\_) (____)(__)(__)(______)(_)\\_)\\___)(_) (_)(____)(_)\\_)
""")
print("\n\rExited Successfully")
| 3.0625 | 3 |
morph_net/framework/output_non_passthrough_op_handler.py | MohammadChalaki/morph-net | 1,061 | 12797706 | <gh_stars>1000+
"""OpHandler for OutputNonPassthrough ops.
OutputNonPassthrough ops take their regularizer from the output and do not
passthrough the regularizer to their input. This is the default OpHandler for
ops like Conv2D and MatMul when L1-gamma regularization is used.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from morph_net.framework import op_handler
from morph_net.framework import op_handler_util
class OutputNonPassthroughOpHandler(op_handler.OpHandler):
"""OpHandler implementation for OutputNonPassthrough operations.
These ops take their regularizer from the output and do not
passthrough the regularizer to their input.
"""
@property
def is_source_op(self):
return False
@property
def is_passthrough(self):
return False
def assign_grouping(self, op, op_reg_manager):
"""Assign grouping to the given op and updates the manager.
Args:
op: tf.Operation to assign grouping to.
op_reg_manager: OpRegularizerManager to keep track of the grouping.
"""
# Check if all input ops have groups, or tell the manager to process them.
input_ops = op_handler_util.get_input_ops(op, op_reg_manager)
input_ops_without_group = op_handler_util.get_ops_without_groups(
input_ops, op_reg_manager)
# Check if all output ops have groups, or tell the manager to process them.
output_ops = op_handler_util.get_output_ops(op, op_reg_manager)
output_ops_without_group = op_handler_util.get_ops_without_groups(
output_ops, op_reg_manager)
# Remove non-passthrough ops from outputs ops to group with.
output_ops = op_handler_util.remove_non_passthrough_ops(
output_ops, op_reg_manager)
# Only group with ops that have the same size. Process the ops that have
# mismatched size.
output_ops_to_group, output_ops_to_process = (
op_handler_util.separate_same_size_ops(op, output_ops))
# Also process ungrouped ops.
input_ops_to_process = input_ops_without_group
output_ops_to_process.extend(output_ops_without_group)
# Align op slice sizes if needed.
op_slices = op_reg_manager.get_op_slices(op)
output_op_slices = op_handler_util.get_op_slices(
output_ops_to_group, op_reg_manager)
aligned_op_slice_sizes = op_handler_util.get_aligned_op_slice_sizes(
op_slices, [], output_op_slices)
op_handler_util.reslice_ops([op] + output_ops_to_group,
aligned_op_slice_sizes, op_reg_manager)
# TODO(a1): Consider refactoring this method.
# Repopulate OpSlice data, as ops may have been resliced.
output_op_slices = self._get_output_op_slices(
output_ops_to_group, op_reg_manager)
# Group with inputs and outputs.
op_handler_util.group_op_with_inputs_and_outputs(
op, [], output_op_slices, aligned_op_slice_sizes,
op_reg_manager)
# Reprocess ops.
op_reg_manager.process_ops(output_ops_to_process + input_ops_to_process)
def _group_with_output_slices(
self, op, output_op_slices, op_slices, op_reg_manager):
"""Groups OpSlice of current op with output ops.
Assuming OpSlice of op have been aligned with output, groups the
corresponding OpSlice.
Args:
op: tf.Operation to determine grouping for.
output_op_slices: List of list of OpSlice, with a list per output op.
op_slices: List of OpSlice for current op.
op_reg_manager: OpRegularizerManager to keep track of grouping.
Raises:
ValueError: If sizes for current and output op slices are not the same.
"""
# Assert that op slices for output and current op are aligned.
output_op_slices_sizes = op_handler_util.get_op_slice_sizes(
output_op_slices)
op_slice_sizes = op_handler_util.get_op_slice_sizes([op_slices])
if op_slice_sizes != output_op_slices_sizes:
raise ValueError('Current op and output op have differing slice '
'sizes: {}, {}'.format(
op_slice_sizes, output_op_slices_sizes))
op_handler_util.group_op_with_inputs_and_outputs(
op, [], output_op_slices, op_slice_sizes, op_reg_manager)
def _get_output_op_slices(self, output_ops, op_reg_manager):
"""Returns op slices for outputs.
Args:
output_ops: List of tf.Operation.
op_reg_manager: OpRegularizerManager to keep track of the grouping.
Returns:
A list of list of OpSlice with a list per output op.
"""
return op_handler_util.get_op_slices(output_ops, op_reg_manager)
def create_regularizer(self, _):
raise NotImplementedError('Not a source op.')
| 2.296875 | 2 |
cogs/chess.py | adgj-1/cs221bot | 0 | 12797707 | <reponame>adgj-1/cs221bot<gh_stars>0
from discord.ext import commands
POS = {'a': 0, 'b': 1, 'c': 2, 'd': 3, 'e': 4, 'f': 5, 'g': 6, 'h': 7}
class Chess(commands.Cog, name="chess test"):
board = []
w = 8
h = 8
turn = False
def __init__(self, bot):
self.bot = bot
@commands.command(name="chessreset")
@commands.cooldown(1, 5, commands.BucketType.default)
async def chessreset(self, ctx):
self.board = [[0 for x in range(self.w)] for y in range(self.h)]
for y in range(0, 2):
for x in range(y % 2, 8, 2):
self.board[x][y] = 2
for y in range(6, 8):
for x in range(y % 2, 8, 2):
self.board[x][y] = 1
msg = self.print_board()
await ctx.send(msg)
@commands.command(name="chess")
@commands.cooldown(1, 5, commands.BucketType.default)
async def chess(self, ctx, arg1, *args):
try:
left = POS[arg1[0]]
top = 7 - (int(arg1[1]) - 1)
except KeyError as e:
await ctx.send("incorrect input")
return
for i in range(len(args)):
try:
left_d = POS[args[i][0]]
top_d = 7-(int(args[i][1])-1)
            except (KeyError, ValueError, IndexError):
await ctx.send("incorrect input")
return
await ctx.send(str(left) + " " + str(top) + " to " + str(left_d) + " " + str(top_d))
self.move(left, top, left_d, top_d)
left = left_d
top = top_d
self.turn = not self.turn
msg = self.print_board()
await ctx.send(msg)
def move(self, x1, y1, x2, y2):
if self.turn:
color = 2
opponent_color = 1
sign = 1
else:
color = 1
opponent_color = 2
sign = -1
        if x1 < 0 or y1 < 0 or x1 >= self.w or y1 >= self.h or x2 < 0 or y2 < 0 or x2 >= self.w or y2 >= self.h:
return
if self.board[x1][y1] != color or self.board[x2][y2] != 0:
return
if (y2 - y1) * sign == 2:
if x2 - x1 == 2:
if self.board[(x1 + 1)][y1+sign] == opponent_color:
self.board[x1][y1] = 0
self.board[x2][y2] = color
self.board[(x1 + 1)][y1 + sign] = 0
return
else:
return
elif x2 - x1 == -2:
if self.board[(x1 - 1)][y1+sign] == opponent_color:
self.board[x1][y1] = 0
self.board[x2][y2] = color
self.board[(x1 - 1)][y1 + sign] = 0
return
else:
return
elif (y2 - y1) * sign == 1:
self.board[x1][y1] = 0
self.board[x2][y2] = color
return
def print_board(self):
res = "```"
for y in range(0, 8):
res = res + str(8-y) + "| "
for x in range(0, 8):
res = res + str(self.board[x][y]) + " "
res += "\n"
res = res + " _______________\n"
res = res + " a b c d e f g h\n"
res += "```"
return res
def setup(bot):
bot.add_cog(Chess(bot))
| 3.09375 | 3 |
tests/test_components_eta_hms.py | Robpol86/etaprogress | 13 | 12797708 | <filename>tests/test_components_eta_hms.py
from etaprogress.components.eta_conversions import eta_hms
def test():
assert '00' == eta_hms(0)
assert '09' == eta_hms(9)
assert '59' == eta_hms(59)
assert '01:00' == eta_hms(60)
assert '01:01' == eta_hms(61)
assert '59:59' == eta_hms(3599)
assert '1:00:00' == eta_hms(3600)
assert '1:00:01' == eta_hms(3601)
assert '1:01:01' == eta_hms(3661)
assert '167:59:59' == eta_hms(604799)
assert '168:00:00' == eta_hms(604800)
assert '168:00:01' == eta_hms(604801)
def test_always_show_minutes():
assert '00:00' == eta_hms(0, always_show_minutes=True)
assert '00:09' == eta_hms(9, always_show_minutes=True)
assert '00:59' == eta_hms(59, always_show_minutes=True)
assert '01:00' == eta_hms(60, always_show_minutes=True)
assert '01:01' == eta_hms(61, always_show_minutes=True)
assert '59:59' == eta_hms(3599, always_show_minutes=True)
assert '1:00:00' == eta_hms(3600, always_show_minutes=True)
assert '1:00:01' == eta_hms(3601, always_show_minutes=True)
assert '1:01:01' == eta_hms(3661, always_show_minutes=True)
assert '167:59:59' == eta_hms(604799, always_show_minutes=True)
assert '168:00:00' == eta_hms(604800, always_show_minutes=True)
assert '168:00:01' == eta_hms(604801, always_show_minutes=True)
def test_always_show_hours():
assert '0:00:00' == eta_hms(0, always_show_hours=True)
assert '0:00:09' == eta_hms(9, always_show_hours=True)
assert '0:00:59' == eta_hms(59, always_show_hours=True)
assert '0:01:00' == eta_hms(60, always_show_hours=True)
assert '0:01:01' == eta_hms(61, always_show_hours=True)
assert '0:59:59' == eta_hms(3599, always_show_hours=True)
assert '1:00:00' == eta_hms(3600, always_show_hours=True)
assert '1:00:01' == eta_hms(3601, always_show_hours=True)
assert '1:01:01' == eta_hms(3661, always_show_hours=True)
assert '167:59:59' == eta_hms(604799, always_show_hours=True)
assert '168:00:00' == eta_hms(604800, always_show_hours=True)
assert '168:00:01' == eta_hms(604801, always_show_hours=True)
# The rest is mostly copy/pasted. ############################################
def test_hours_leading_zero():
assert '00' == eta_hms(0, hours_leading_zero=True)
assert '09' == eta_hms(9, hours_leading_zero=True)
assert '59' == eta_hms(59, hours_leading_zero=True)
assert '01:00' == eta_hms(60, hours_leading_zero=True)
assert '01:01' == eta_hms(61, hours_leading_zero=True)
assert '59:59' == eta_hms(3599, hours_leading_zero=True)
assert '01:00:00' == eta_hms(3600, hours_leading_zero=True)
assert '01:00:01' == eta_hms(3601, hours_leading_zero=True)
assert '01:01:01' == eta_hms(3661, hours_leading_zero=True)
assert '167:59:59' == eta_hms(604799, hours_leading_zero=True)
assert '168:00:00' == eta_hms(604800, hours_leading_zero=True)
assert '168:00:01' == eta_hms(604801, hours_leading_zero=True)
def test_always_show_minutes_hours_leading_zero():
assert '00:00' == eta_hms(0, hours_leading_zero=True, always_show_minutes=True)
assert '00:09' == eta_hms(9, hours_leading_zero=True, always_show_minutes=True)
assert '00:59' == eta_hms(59, hours_leading_zero=True, always_show_minutes=True)
assert '01:00' == eta_hms(60, hours_leading_zero=True, always_show_minutes=True)
assert '01:01' == eta_hms(61, hours_leading_zero=True, always_show_minutes=True)
assert '59:59' == eta_hms(3599, hours_leading_zero=True, always_show_minutes=True)
assert '01:00:00' == eta_hms(3600, hours_leading_zero=True, always_show_minutes=True)
assert '01:00:01' == eta_hms(3601, hours_leading_zero=True, always_show_minutes=True)
assert '01:01:01' == eta_hms(3661, hours_leading_zero=True, always_show_minutes=True)
assert '167:59:59' == eta_hms(604799, hours_leading_zero=True, always_show_minutes=True)
assert '168:00:00' == eta_hms(604800, hours_leading_zero=True, always_show_minutes=True)
assert '168:00:01' == eta_hms(604801, hours_leading_zero=True, always_show_minutes=True)
def test_always_show_hours_hours_leading_zero():
assert '00:00:00' == eta_hms(0, hours_leading_zero=True, always_show_hours=True)
assert '00:00:09' == eta_hms(9, hours_leading_zero=True, always_show_hours=True)
assert '00:00:59' == eta_hms(59, hours_leading_zero=True, always_show_hours=True)
assert '00:01:00' == eta_hms(60, hours_leading_zero=True, always_show_hours=True)
assert '00:01:01' == eta_hms(61, hours_leading_zero=True, always_show_hours=True)
assert '00:59:59' == eta_hms(3599, hours_leading_zero=True, always_show_hours=True)
assert '01:00:00' == eta_hms(3600, hours_leading_zero=True, always_show_hours=True)
assert '01:00:01' == eta_hms(3601, hours_leading_zero=True, always_show_hours=True)
assert '01:01:01' == eta_hms(3661, hours_leading_zero=True, always_show_hours=True)
assert '167:59:59' == eta_hms(604799, hours_leading_zero=True, always_show_hours=True)
assert '168:00:00' == eta_hms(604800, hours_leading_zero=True, always_show_hours=True)
assert '168:00:01' == eta_hms(604801, hours_leading_zero=True, always_show_hours=True)
| 2.171875 | 2 |
code/visualization/display_metric_vs_epochs_plot.py | mrbarbasa/kaggle-spooky-author | 1 | 12797709 | import matplotlib.pyplot as plt
def display_metric_vs_epochs_plot(scores, metric, nth_iter, nth_fold):
"""Display a metric vs. epochs plot.
Both the training and validation scores will be plotted for the
chosen metric.
Parameters
----------
scores : pandas.DataFrame
Scores containing the `epoch` column and metric columns such as
`acc`, `loss`, `val_acc`, and `val_loss`.
metric : string
The metric to display, such as 'loss' or 'acc' (note that `val_`
is also appended automatically and should not be provided).
nth_iter : int
The current random search iteration.
nth_fold : int
The current k-fold fold.
Returns
-------
None
"""
metric_fold_scores = scores[metric].values.tolist()
epochs = range(1, len(metric_fold_scores) + 1)
plt.figure(figsize=(6,4))
plt.plot(epochs, metric_fold_scores, 'bo', label=f'Training {metric}')
plt.plot(epochs,
scores[f'val_{metric}'].values.tolist(),
'b',
label=f'Validation {metric}')
plt.title(f'Training and Validation {metric.title()} for '
f'Iteration {nth_iter} Fold {nth_fold}')
plt.legend()
plt.ylabel(metric.title())
plt.xlabel('Number of Epochs')
plt.show()
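# Illustrative call (not part of the original module); `history_df` is a
# hypothetical DataFrame with `loss`, `val_loss`, `acc` and `val_acc` columns,
# e.g. built from a Keras History object via pd.DataFrame(history.history):
#
# display_metric_vs_epochs_plot(history_df, 'loss', nth_iter=3, nth_fold=1)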
| 3.703125 | 4 |
core/src/trezor/messages/StellarPaymentOp.py | Kayuii/trezor-crypto | 0 | 12797710 | # Automatically generated by pb2py
# fmt: off
import protobuf as p
from .StellarAssetType import StellarAssetType
class StellarPaymentOp(p.MessageType):
MESSAGE_WIRE_TYPE = 211
def __init__(
self,
source_account: str = None,
destination_account: str = None,
asset: StellarAssetType = None,
amount: int = None,
) -> None:
self.source_account = source_account
self.destination_account = destination_account
self.asset = asset
self.amount = amount
@classmethod
def get_fields(cls):
return {
1: ('source_account', p.UnicodeType, 0),
2: ('destination_account', p.UnicodeType, 0),
3: ('asset', StellarAssetType, 0),
4: ('amount', p.SVarintType, 0),
}
| 2.09375 | 2 |
1101-1200/1125-Smallest Sufficient Team/1125-Smallest Sufficient Team.py | jiadaizhao/LeetCode | 49 | 12797711 | <gh_stars>10-100
class Solution:
def smallestSufficientTeam(self, req_skills: List[str], people: List[List[str]]) -> List[int]:
        # one bit per required skill
        table = {skill: (1 << i) for i, skill in enumerate(req_skills)}
        # dp: skill bitmask -> smallest team (list of people indices) covering it
        dp = {0: []}
for i, p in enumerate(people):
curr = 0
for s in p:
if s in table:
curr |= table[s]
for skills, team in list(dp.items()):
newSkills = skills | curr
if newSkills != skills:
if newSkills not in dp or len(dp[newSkills]) > len(team) + 1:
dp[newSkills] = team + [i]
return dp[(1 << len(req_skills)) - 1]
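# Quick sanity check (example 1 from the problem statement):
#   Solution().smallestSufficientTeam(
#       ["java", "nodejs", "reactjs"],
#       [["java"], ["nodejs"], ["nodejs", "reactjs"]])
# returns [0, 2]: person 0 covers "java", person 2 covers "nodejs" and "reactjs".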
| 2.84375 | 3 |
2016/starter.py | iKevinY/advent | 11 | 12797712 | import os # NOQA
import sys # NOQA
import re # NOQA
import math # NOQA
import fileinput
from collections import Counter, deque, namedtuple # NOQA
from itertools import count, product, permutations, combinations, combinations_with_replacement # NOQA
from utils import parse_line, mul, factors, memoize, primes, new_table, Point # NOQA
# Itertools Functions:
# product('ABCD', repeat=2) AA AB AC AD BA BB BC BD CA CB CC CD DA DB DC DD
# permutations('ABCD', 2) AB AC AD BA BC BD CA CB CD DA DB DC
# combinations('ABCD', 2) AB AC AD BC BD CD
# combinations_with_replacement('ABCD', 2) AA AB AC AD BB BC BD CC CD DD
total = 0
result = []
table = new_table(None, width=2, height=4)
for i, line in enumerate(fileinput.input()):
line = line.strip()
# data = [x for x in line.split(', ')]
# data = [x for x in line]
# data = [int(x) for x in line.split()]
# data = re.findall(r'(\w+)', line)
data = parse_line(r'', line)
if i == 0:
print(data)
| 3.046875 | 3 |
STN.py | newrotik/pySTN | 4 | 12797713 | <gh_stars>1-10
__author__ = 'robin'
"""
Implementation of a scheduling system based on the STN model. The data of the problem is
in the __init__() method, and can be changed. Additional constraints can be included as functions
that return cvx.Constraints, and added in the STN.construct_nominal_model() method.
We pack everything in a class to avoid having to implement functions (now the class methods)
with exceedingly long signatures.
"""
import numpy as np
import cvxpy as cvx
from plotting import *
class STN(object):
def __init__(self):
# Data
# ----
# Definition of the STN (order in the list matters!)
self.units = ['Heater', 'Reactor 1', 'Reactor 2', 'Column']
self.tasks = ['Heat', 'Rea. 1', 'Rea. 2', 'Rea. 3', 'Sep.']
self.states = ['Feed A', 'Feed B', 'Feed C', 'Hot A', 'Intermediate AB',
'Intermediate BC', 'Impure E', 'Product 1', 'Product 2']
self.input_states = [0, 1, 2] # indices of the input states
self.horizon = 11 # horizon in number of of steps
# Aliases
self.I = self.units.__len__()
self.J = self.tasks.__len__()
self.S = self.states.__len__()
self.T = self.horizon
# Units capability (what tasks can be processed on which unit)
# row = unit, column = task
self.J_i = np.array([[1, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 1]])
# Recipes and timing
# fractions of input state s (column) required to execute task j (row)
self.rho_in = np.array([[1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0.5, 0.5, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0.4, 0, 0.6, 0, 0, 0],
[0, 0, 0.2, 0, 0.8, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0]])
# fractions of output state s (column) produced by task j (row)
self.rho_out = np.array([[0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0.6, 0, 0, 0.4, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0.1, 0, 0, 0, 0.9]])
# time (in # of steps) required to produce output state s (column) from task j (row)
self.P = np.array([[0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 2, 0, 0, 0],
[0, 0, 0, 0, 2, 0, 0, 2, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 2, 0, 0, 0, 1]])
# total execution time of task j (row-wise max of P)
self.P_j = np.amax(self.P, axis=1)
# Capacities
# max capacity of unit i (row) to process task j (column), in e.g. kg
self.V_max = np.array([[100, 100, 100, 100, 100],
[80, 80, 80, 80, 80],
[50, 50, 50, 50, 50],
[200, 200, 200, 200, 200]])
self.V_min = np.array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
# storage capacity for state s
self.C_max = np.array([np.infty, np.infty, np.infty, 100, 200, 150, 100, np.infty, np.infty])
self.C_min = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])
# objective to maximize (revenue from the states)
self.c = np.array([0, 0, 0, -1, -1, -1, -1, 10, 10])
# Optimization problem structure (cvx.Problem type)
self.model = 0
# Optimization Variables
# ----------------------
self.x_ijt = {} # allocation variable (bool)
self.y_ijt = {} # batch size [kg]
self.y_s = {} # state quantity [kg]
for i in range(self.I):
for j in range(self.J):
for t in range(self.T):
self.x_ijt[i,j,t] = cvx.Bool()
# we separate creation of x_ijt and y_ijt in two loops, so that the optimization variables
# in the standard model do not get mixed up
for i in range(self.I):
for j in range(self.J):
for t in range(self.T):
self.y_ijt[i,j,t] = cvx.Variable()
# state equations are eliminated to allow for the robust counterpart. We only store
# the initial state y_s(t=0), which we name y_s
for s in range(self.S):
self.y_s[s] = cvx.Variable()
# auxiliary expressions used in the state equations
self.y_st_inflow = {}
self.y_st_outflow = {}
# Attributes
# ----------
# to store results
self.X_ijt = np.zeros((self.I,self.J,self.T))
self.Y_ijt = np.zeros((self.I,self.J,self.T))
self.Y_st = np.zeros((self.S,self.T))
self.Y_st_inflow = np.zeros((self.S,self.T))
self.Y_st_outflow = np.zeros((self.S,self.T))
# to store the standard model
# min c_x'*x + c_y'*y
# s.t. A_eq*x + B_eq*y = b_eq
# A_ineq*x + B_ineq*y <= b_ineq
# x \in {0,1}
self.c_x = 0
self.c_y = 0
self.A_eq = 0
self.A_ineq = 0
self.B_eq = 0
self.B_ineq = 0
self.b_eq = 0
self.b_ineq = 0
self.bool_ix = 0
self.cont_ix = 0
self.m_eq = 0
self.m_ineq = 0
def construct_allocation_constraint(self):
""" construct the allocation constraints:
1) each unit i is processing at most one task j at each t
2) units can only perform tasks that are compatible with self.J_i
:return: list of cvx.Constraint
"""
I = self.units.__len__()
J = self.tasks.__len__()
S = self.states.__len__()
T = self.T
# TODO BIG-M for allocation
BIG_M = 40
constraint_allocation = []
# 1) each unit i is processing at most one task j at each t
for i in range(I):
for t in range(T):
constraint_allocation.append( sum( [self.x_ijt[(i,j,t)] for j in range(J)] ) <= 1 )
for j in range(J):
for t in range(T-self.P_j[j]+1):
constraint_allocation.append(
sum ( sum( [[self.x_ijt[(i,jj,tt)] for jj in range(J)] for tt in range(t,t+self.P_j[j])], [] ) )
<=
self.P_j[j]*BIG_M*(1 - self.x_ijt[(i,j,t)]) + 1
)
# 2) units can only perform tasks that are compatible with self.J_i, and tasks should be started
# early enough such that they are completed within T, i.e., no tasks can start after T-P_j
for i in range(I):
for j in range(J):
for t in range(T):
constraint_allocation.append( self.x_ijt[i,j,t]*(1-self.J_i[i,j]) == 0 )
if t >= T-self.P_j[j]:
constraint_allocation.append( self.x_ijt[i,j,t] == 0 )
return constraint_allocation
def construct_box_constraint(self):
""" Construct box constraints on x_ijt and y_ijt; useful for testing with continuous
variables instead of bools.
:return: list of cvx.Constraint
"""
I = self.units.__len__()
J = self.tasks.__len__()
T = self.T
constraint_box = []
for i in range(I):
for j in range(J):
for t in range(T):
constraint_box.append(self.x_ijt[i,j,t] >= 0)
constraint_box.append(self.x_ijt[i,j,t] <= 1)
constraint_box.append(self.y_ijt[i,j,t] >= 0)
constraint_box.append(self.y_ijt[i,j,t] <= 1)
return constraint_box
def construct_units_capacity_constraint(self):
""" Ensure maximum and minimum sizes of the batches to be processed are within
unit constraints.
:return: list of cvx.Constraint
"""
constraint_capacity = []
for i in range(self.I):
for j in range(self.J):
for t in range(self.T):
constraint_capacity.append( self.x_ijt[i,j,t]*self.V_min[i,j] <= self.y_ijt[i,j,t] )
constraint_capacity.append( self.y_ijt[i,j,t] <= self.x_ijt[i,j,t]*self.V_max[i,j] )
return constraint_capacity
def construct_state_equations_and_storage_constraint(self):
""" Implementation of state equations, and states capacities (storages)
:return: list of cvx.Constraint
"""
constraint_state_eq = []
# 1) every intermediate / output state starts with a quantity of 0 kg
for s in range(self.S):
if s not in self.input_states:
constraint_state_eq.append( self.y_s[s] == 0 )
# 2) state equations and storage capacities
for t in range(self.T):
for s in range(self.S):
self.y_st_inflow[s,t] = cvx.Constant(0)
self.y_st_outflow[s,t] = cvx.Constant(0)
for i in range(self.I):
for j in range(self.J):
if self.J_i[i,j]:
# set inflows
if (t-self.P[j,s] >= 0):
self.y_st_inflow[s,t] += self.rho_out[j,s]*self.y_ijt[i,j,t-self.P[j,s]]
# set outflows
self.y_st_outflow[s,t] += self.rho_in[j,s]*self.y_ijt[i,j,t]
constraint_state_eq.append( self.C_min[s] <= self.y_s[s]
+ sum( [self.y_st_inflow[(s,tt)] for tt in range(t+1)] )
- sum( [self.y_st_outflow[(s,tt)] for tt in range(t+1)] ))
constraint_state_eq.append( self.C_max[s] >= self.y_s[s]
+ sum( [self.y_st_inflow[(s,tt)] for tt in range(t+1)] )
- sum( [self.y_st_outflow[(s,tt)] for tt in range(t+1)] ))
return constraint_state_eq
def construct_konidili_solution_enforce(self):
""" The nominal model with the data of Kondili's paper has several optimizers. The following
constraints force the exact same solution as in the paper (the objective is unaffected)
:return: list of cvx.Constraint
"""
constraint_kondili = []
        constraint_kondili.append(self.y_ijt[0,0,1] == 52)
        constraint_kondili.append(self.y_ijt[1,1,0] == 80)
return constraint_kondili
def construct_objective(self):
""" Objective encodes c'*(y_s(t=end)-y_s(t=0)), i.e., value of the final products minus
cost of the input feeds.
:return: cvx.Objective
"""
return - sum( [self.c[s]*( sum([self.y_st_inflow[s,t] for t in range(self.T)])
- sum([self.y_st_outflow[s,t] for t in range(self.T)]))
for s in range(self.S)] )
def construct_nominal_model(self):
""" Constructs the nominal STN model, and saves it in the class attribute self.model as
a cvx.Problem type. Constraints can be added/removed here.
:return: None
"""
# Constraints
# -----------
constraints = []
constraints.append(self.construct_allocation_constraint())
constraints.append(self.construct_units_capacity_constraint())
constraints.append(self.construct_state_equations_and_storage_constraint())
# can force Kondili's solution with
# constraints.append(self.construct_konidili_solution_enforce())
constraints = sum(constraints, [])
# Objective
# ---------
objective = cvx.Minimize(self.construct_objective())
# Model
# -----
self.model = cvx.Problem(objective, constraints)
# Also store the matrices of te standard model:
data = self.model.get_problem_data('ECOS_BB')
self.retrieve_standard_model(data)
def retrieve_standard_model(self, data):
""" Here we store the problem matrices (A_eq, B_eq, b_eq, etc) with ordered columns.
:param data: dictionary, as returned by cvx.Problem.get_problem_data('ECOS_BB')
:return: None
"""
n = data['c'].shape[0]
self.bool_ix = data['bool_vars_idx']
self.cont_ix = list(set(range(n)) - set(data['bool_vars_idx']))
self.n_x = self.bool_ix.__len__()
self.n_y = self.cont_ix.__len__()
range_bool_ix = range(self.n_x)
range_cont_ix = range(self.n_x, self.n_x+self.n_y)
self.c_x = data['c'][range_bool_ix]
self.c_y = data['c'][range_cont_ix]
self.A_eq = data['A'][:,range_bool_ix]
self.B_eq = data['A'][:,range_cont_ix]
self.b_eq = data['b']
self.A_ineq = data['G'][:,range_bool_ix]
self.B_ineq = data['G'][:,range_cont_ix]
self.b_ineq = data['h']
self.b_ineq = data['h']
self.m_eq = self.A_eq.shape[0]
self.m_ineq = self.A_ineq.shape[0]
def solve(self):
""" Constructs and solved the nominal STN model. The solution is stored in the np.arrays
- STN.X_ijt (assignments, bool)
- STN.Y_ijt (batch sizes, float)
- STN.Y_st (material quantities, float)
- STN.Y_st_inflow and Y_st_outflow (material flows, float)
:return: optimal value (float)
"""
        print('Constructing nominal model...')
self.construct_nominal_model()
        print('Solving...')
self.model.solve(verbose=True, solver='GUROBI')
self.unpack_results()
return self.model.value
def unpack_results(self):
""" Once model is solved, transform the solution dictionaries (self.x_ijt, self.y_ijt) into
np.arrays for easier inspection/plotting. The np.arrays are saved within the instance attributes
- STN.X_ijt (assignments, bool)
- STN.Y_ijt (batch sizes, float)
- STN.Y_st (stored material quantities, float)
- STN.Y_st_inflow and STN.Y_st_outflow (material flows, float),
and can be accessed from there once the method is executed.
:return: None
"""
for t in range(self.T):
for j in range(self.J):
for i in range(self.I):
self.X_ijt[i,j,t] = self.x_ijt[i,j,t].value
self.Y_ijt[i,j,t] = self.y_ijt[i,j,t].value
for t in range(self.T):
for s in range(self.S):
self.Y_st_inflow[s,t] = self.y_st_inflow[s,t].value
self.Y_st_outflow[s,t] = self.y_st_outflow[s,t].value
self.Y_st[s,t] = self.y_s[s].value + (sum([self.y_st_inflow[s,tt].value for tt in range(t+1)])
- sum([self.y_st_outflow[s,tt].value for tt in range(t+1)]))
def plot_schedule(self, color='red', style='ggplot'):
""" Plot the nominal schedule.
:return: None
"""
plot_stn_schedule(self, self.Y_ijt, color=color, style=style) # imported with plotting
if __name__ == '__main__':
# example usage of the class
model = STN()
model.solve()
model.plot_schedule()
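# Sketch of a user-defined constraint, per the module docstring (not part of the
# original model): write a method that returns a list of cvx constraints and
# append its result inside STN.construct_nominal_model(), e.g.
#
#     def construct_no_tasks_at_t0(self):
#         """Forbid starting any task on the first unit at the first time step."""
#         return [self.x_ijt[0, j, 0] == 0 for j in range(self.J)]
#
#     # inside construct_nominal_model():
#     # constraints.append(self.construct_no_tasks_at_t0())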
| 2.609375 | 3 |
ex036.py | AleLucasG/Estudos-Python-I | 0 | 12797714 | <reponame>AleLucasG/Estudos-Python-I
"""APROVANDO EMPRESTIMO PARA COMPRA DE UMA CASA.
- VALOR DO EMPRESTIMO: 80000.00;
- SALARIO EMPREGADO: 1.600;
- ANOS DE FINANCIAMENTO: 7 ANOS;
-
"""
| 1.476563 | 1 |
6th_semester/NumMethods/course_project/cp.py | mehakun/Labs | 3 | 12797715 | <reponame>mehakun/Labs
import numpy as np
import math
from scipy.stats import truncnorm
from time import time
from tabulate import tabulate
def simpson(interval, f, h):
a, b = interval
n = int((b - a) / h + 1)
first_part = 4 * np.sum([f(a + i * h) for i in range(1, n, 2)])
second_part = 2 * np.sum([f(a + i * h) for i in range(2, n - 1, 2)])
integral = h * (f(a) + first_part + second_part + f(b)) / 3
return integral
def normal_rand_generator(lower, upper, size):
res = [truncnorm.rvs(a, b) for a, b in zip(lower, upper)]
return res
def monte_carlo(intervals, f, experiments_amount, random_generator=np.random.uniform):
size = len(intervals)
lower, upper = zip(*intervals)
volume = np.prod([b - a for a, b in intervals])
integral = volume * np.sum([f(random_generator(lower, upper, size)) for _ in range(experiments_amount)]) / experiments_amount
return integral
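# Rough self-check (not in the original script): both estimators should agree on
# the integral of x**2 over [0, 1], which is 1/3.
#   simpson((0.0, 1.0), lambda x: x ** 2, 0.01)            -> ~0.33333
#   monte_carlo([(0.0, 1.0)], lambda v: v[0] ** 2, 10000)  -> ~0.333 (noisy)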
def calc_int(f, interval):
a, b = interval
return f(b) - f(a)
def bench_methods(funcs, monte_funcs, anal_funcs, intervals, h, experiments_amount):
quad_times = []
quad_res = []
monte_times = []
monte_res = []
norm_times = []
norm_res = []
anal_res = []
for i in range(len(intervals)):
j = i + 1
start = time()
quad_ints = np.prod([simpson(interval, func, h) for func, interval in zip(funcs[:j], intervals[:j])])
end = time()
quad_res.append("%.4f" % quad_ints)
quad_times.append("%.4f" % (end - start))
start = time()
monte_int = monte_carlo(intervals[:j], monte_funcs[i], experiments_amount)
end = time()
monte_res.append("%.4f" % monte_int)
monte_times.append("%.4f" % (end - start))
start = time()
monte_int = monte_carlo(intervals[:j], monte_funcs[i], experiments_amount, normal_rand_generator)
end = time()
norm_res.append("%.4f" % monte_int)
norm_times.append("%.4f" % (end - start))
anal_res.append("%.4f" % np.prod([calc_int(func, interval) for func, interval in zip(anal_funcs[:j], intervals[:j])]))
headers = ("Arity", "Monte-Carlo with uniform", "Monte-Carlo with normal", "Simpson", "Analytical")
content = zip(list(range(1, len(intervals) + 1)), list(zip(monte_res, monte_times)), list(zip(norm_res, norm_times)), list(zip(quad_res, quad_times)), anal_res)
table = tabulate(list(content), headers, tablefmt="fancy_grid")
print(f'Intervals = {intervals}')
print(f'Monte-Carlo experiments {experiments_amount}')
print(f'Step size = {h}')
print(table)
def main():
funcs = [lambda x: 1 / (x + np.cbrt(x)), \
lambda x: 1 / math.sqrt(x ** 2 + 3.22), \
lambda x: math.sin(x) ** 3, \
lambda x: 1 / (1 + math.sin(x)), \
lambda x: math.log(x + 2) / x, \
lambda x: math.log(x) / (x ** 2)
]
monte_funcs = [funcs[0], \
lambda x: funcs[0](x[0]) * funcs[1](x[1]), \
lambda x: funcs[0](x[0]) * funcs[1](x[1]) * funcs[2](x[2]), \
lambda x: funcs[0](x[0]) * funcs[1](x[1]) * funcs[2](x[2]) * funcs[3](x[3]), \
lambda x: funcs[0](x[0]) * funcs[1](x[1]) * funcs[2](x[2]) * funcs[3](x[3]) * funcs[4](x[4]), \
lambda x: funcs[0](x[0]) * funcs[1](x[1]) * funcs[2](x[2]) * funcs[3](x[3]) * funcs[4](x[4]) * funcs[5](x[5])
]
anal_funcs = [lambda x: 3 * math.log(np.cbrt(x ** 2) + 1) / 2,\
lambda x: math.log(np.abs(x + math.sqrt(x ** 2 + 3.22))),\
lambda x: (math.cos(3 * x) - 9 *math.cos(x)) / 12,\
lambda x: -2 / (math.tan(x / 2) + 1),
lambda x: 2 * 0.648437 if x == 2 else 0.648437,
lambda x: -(math.log(x) + 1) / x
]
intervals = [(-3, -1), (1, 3), (3, 5), (5, 7), (1.2, 2), (14, 88)]
print("Format in methods is (value, time in seconds)")
for h, experiments_amount in [(0.1, 100), (0.01, 1000)]:
bench_methods(funcs, monte_funcs, anal_funcs, intervals, h, experiments_amount)
main() | 2.5 | 2 |
tests/test_hash_writer.py | terlar/ion-hash-python | 9 | 12797716 | <filename>tests/test_hash_writer.py<gh_stars>1-10
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from io import BytesIO
from amazon.ion.writer import blocking_writer
from amazon.ion.writer_binary import binary_writer
from amazon.ion.writer_text import raw_writer
from ionhash.hasher import hash_writer
from .util import binary_reader_over
from .util import consume
from .util import hash_function_provider
def test_hash_writer():
ion_str = '[1, 2, {a: 3, b: (4 {c: 5} 6) }, 7]'
algorithm = "md5"
# generate events to be used to write the data
events = consume(binary_reader_over(ion_str))
_run_test(_writer_provider("binary"), events, algorithm)
_run_test(_writer_provider("text"), events, algorithm)
def _run_test(writer_provider, events, algorithm):
# capture behavior of an ion-python writer
expected_bytes = BytesIO()
w = blocking_writer(writer_provider(), expected_bytes)
expected_write_event_types = _write_to(w, events)
hw_bytes = BytesIO()
hw = hash_writer(blocking_writer(writer_provider(), hw_bytes), hash_function_provider(algorithm))
hw_write_event_types = _write_to(hw, events)
# assert writer/hash_writer response behavior is identical
assert hw_write_event_types == expected_write_event_types
# assert writer/hash_writer produced the same output
assert hw_bytes.getvalue() == expected_bytes.getvalue()
def _write_to(w, events):
write_event_types = []
for event in events:
write_event_types.append(w.send(event))
return write_event_types
def _writer_provider(type):
def _f():
if type == "binary":
return binary_writer()
elif type == "text":
return raw_writer()
return _f
| 2.515625 | 3 |
pretraining_advCL.py | LijieFan/AdvCL | 22 | 12797717 | from __future__ import print_function
import argparse
import numpy as np
import os, csv
from dataset import CIFAR10IndexPseudoLabelEnsemble
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
import torch.utils.data as Data
import torch.backends.cudnn as cudnn
from utils import progress_bar, TwoCropTransformAdv
from losses import SupConLoss
import tensorboard_logger as tb_logger
from models.resnet_cifar_multibn_ensembleFC import resnet18 as ResNet18
import random
from fr_util import generate_high
from utils import adjust_learning_rate, warmup_learning_rate
import apex
# ================================================================== #
# Inputs and Pre-definition #
# ================================================================== #
# Arguments
parser = argparse.ArgumentParser()
parser.add_argument('--name', type=str, default='advcl_cifar10',
help='name of the run')
parser.add_argument('--cname', type=str, default='imagenet_clPretrain',
help='')
parser.add_argument('--batch-size', type=int, default=512,
help='batch size')
parser.add_argument('--epoch', type=int, default=1000,
help='total epochs')
parser.add_argument('--save-epoch', type=int, default=100,
help='save epochs')
parser.add_argument('--epsilon', type=float, default=8,
help='The upper bound change of L-inf norm on input pixels')
parser.add_argument('--iter', type=int, default=5,
help='The number of iterations for iterative attacks')
parser.add_argument('--radius', type=int, default=8,
help='radius of low freq images')
parser.add_argument('--ce_weight', type=float, default=0.2,
help='cross entp weight')
# contrastive related
parser.add_argument('-t', '--nce_t', default=0.5, type=float,
help='temperature')
parser.add_argument('--seed', default=0, type=float,
help='random seed')
parser.add_argument('--dataset', type=str, default='cifar10', help='dataset')
parser.add_argument('--cosine', action='store_true',
help='using cosine annealing')
parser.add_argument('--warm', action='store_true',
help='warm-up for large batch training')
parser.add_argument('--learning_rate', type=float, default=0.5,
help='learning rate')
parser.add_argument('--lr_decay_epochs', type=str, default='700,800,900',
help='where to decay lr, can be a list')
parser.add_argument('--lr_decay_rate', type=float, default=0.1,
help='decay rate for learning rate')
parser.add_argument('--weight_decay', type=float, default=1e-4,
help='weight decay')
parser.add_argument('--momentum', type=float, default=0.9,
help='momentum')
args = parser.parse_args()
args.epochs = args.epoch
args.decay = args.weight_decay
args.cosine = True
import math
if args.batch_size > 256:
args.warm = True
if args.warm:
args.warmup_from = 0.01
args.warm_epochs = 10
if args.cosine:
eta_min = args.learning_rate * (args.lr_decay_rate ** 3)
args.warmup_to = eta_min + (args.learning_rate - eta_min) * (
1 + math.cos(math.pi * args.warm_epochs / args.epochs)) / 2
else:
args.warmup_to = args.learning_rate
start_epoch = 0 # start from epoch 0 or last checkpoint epoch
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
args.name = 'AdvCL_Cifar10'
config = {
'epsilon': args.epsilon / 255.,
'num_steps': args.iter,
'step_size': 2.0 / 255,
'random_start': True,
'loss_func': 'xent',
}
# ================================================================== #
# Data and Pre-processing #
# ================================================================== #
print('=====> Preparing data...')
# Multi-cuda
if torch.cuda.is_available():
n_gpu = torch.cuda.device_count()
batch_size = args.batch_size
transform_train = transforms.Compose([
transforms.RandomResizedCrop(size=32, scale=(0.2, 1.)),
transforms.RandomHorizontalFlip(),
transforms.RandomApply([
transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)
], p=0.8),
transforms.RandomGrayscale(p=0.2),
transforms.ToTensor(),
])
train_transform_org = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
])
transform_train = TwoCropTransformAdv(transform_train, train_transform_org)
transform_test = transforms.Compose([
transforms.ToTensor(),
])
label_pseudo_train_list = []
num_classes_list = [2, 10, 50, 100, 500]
dict_name = 'data/{}_pseudo_labels.pkl'.format(args.cname)
f = open(dict_name, 'rb')  # open the pickled pseudo-label dictionary
feat_label_dict = pickle.load(f)  # load the feature/label dict from f
f.close()
for i in range(5):
class_num = num_classes_list[i]
key_train = 'pseudo_train_{}'.format(class_num)
label_pseudo_train = feat_label_dict[key_train]
label_pseudo_train_list.append(label_pseudo_train)
train_dataset = CIFAR10IndexPseudoLabelEnsemble(root='data',
transform=transform_train,
pseudoLabel_002=label_pseudo_train_list[0],
pseudoLabel_010=label_pseudo_train_list[1],
pseudoLabel_050=label_pseudo_train_list[2],
pseudoLabel_100=label_pseudo_train_list[3],
pseudoLabel_500=label_pseudo_train_list[4],
download=True)
# Data Loader
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=n_gpu*4)
# ================================================================== #
# Model, Loss and Optimizer #
# ================================================================== #
# PGD attack model
class AttackPGD(nn.Module):
def __init__(self, model, config):
super(AttackPGD, self).__init__()
self.model = model
self.rand = config['random_start']
self.step_size = config['step_size']
self.epsilon = config['epsilon']
self.num_steps = config['num_steps']
assert config['loss_func'] == 'xent', 'Plz use xent for loss function.'
def forward(self, images_t1, images_t2, images_org, targets, criterion):
x1 = images_t1.clone().detach()
x2 = images_t2.clone().detach()
x_cl = images_org.clone().detach()
x_ce = images_org.clone().detach()
images_org_high = generate_high(x_cl.clone(), r=args.radius)
x_HFC = images_org_high.clone().detach()
if self.rand:
x_cl = x_cl + torch.zeros_like(x1).uniform_(-self.epsilon, self.epsilon)
x_ce = x_ce + torch.zeros_like(x1).uniform_(-self.epsilon, self.epsilon)
for i in range(self.num_steps):
x_cl.requires_grad_()
x_ce.requires_grad_()
with torch.enable_grad():
f_proj, f_pred = self.model(x_cl, bn_name='pgd', contrast=True)
fce_proj, fce_pred, logits_ce = self.model(x_ce, bn_name='pgd_ce', contrast=True, CF=True, return_logits=True, nonlinear=False)
f1_proj, f1_pred = self.model(x1, bn_name='normal', contrast=True)
f2_proj, f2_pred = self.model(x2, bn_name='normal', contrast=True)
f_high_proj, f_high_pred = self.model(x_HFC, bn_name='normal', contrast=True)
features = torch.cat([f_proj.unsqueeze(1), f1_proj.unsqueeze(1), f2_proj.unsqueeze(1), f_high_proj.unsqueeze(1)], dim=1)
loss_contrast = criterion(features)
loss_ce = 0
for label_idx in range(5):
tgt = targets[label_idx].long()
lgt = logits_ce[label_idx]
loss_ce += F.cross_entropy(lgt, tgt, size_average=False, ignore_index=-1) / 5.
loss = loss_contrast + loss_ce * args.ce_weight
grad_x_cl, grad_x_ce = torch.autograd.grad(loss, [x_cl, x_ce])
x_cl = x_cl.detach() + self.step_size * torch.sign(grad_x_cl.detach())
x_cl = torch.min(torch.max(x_cl, images_org - self.epsilon), images_org + self.epsilon)
x_cl = torch.clamp(x_cl, 0, 1)
x_ce = x_ce.detach() + self.step_size * torch.sign(grad_x_ce.detach())
x_ce = torch.min(torch.max(x_ce, images_org - self.epsilon), images_org + self.epsilon)
x_ce = torch.clamp(x_ce, 0, 1)
return x1, x2, x_cl, x_ce, x_HFC
print('=====> Building model...')
bn_names = ['normal', 'pgd', 'pgd_ce']
model = ResNet18(bn_names=bn_names)
model = model.cuda()
# tb_logger
if not os.path.exists('logger'):
os.makedirs('logger')
logname = ('logger/pretrain_{}'.format(args.name))
logger = tb_logger.Logger(logdir=logname, flush_secs=2)
if torch.cuda.device_count() > 1:
print("=====> Let's use", torch.cuda.device_count(), "GPUs!")
model = apex.parallel.convert_syncbn_model(model)
model = nn.DataParallel(model)
model = model.cuda()
cudnn.benchmark = True
else:
print('single gpu version is not supported, please use multiple GPUs!')
raise NotImplementedError
net = AttackPGD(model, config)
# Loss and optimizer
ce_criterion = nn.CrossEntropyLoss(ignore_index=-1)
contrast_criterion = SupConLoss(temperature=args.nce_t)
optimizer = torch.optim.SGD(net.parameters(), lr=args.learning_rate, momentum=0.9, weight_decay=args.decay)
# optimizer = torch.optim.Adam(net.parameters(), lr=args.lr)
# ================================================================== #
# Train and Test #
# ================================================================== #
def train(epoch):
print('\nEpoch: %d' % epoch)
net.train()
train_loss = 0
correct = 0
total = 0
for batch_idx, (inputs, _, targets, ind) in enumerate(train_loader):
tt = []
for tt_ in targets:
tt.append(tt_.to(device).long())
targets = tt
image_t1, image_t2, image_org = inputs
image_t1 = image_t1.cuda(non_blocking=True)
image_t2 = image_t2.cuda(non_blocking=True)
image_org = image_org.cuda(non_blocking=True)
warmup_learning_rate(args, epoch+1, batch_idx, len(train_loader), optimizer)
# attack contrast
optimizer.zero_grad()
x1, x2, x_cl, x_ce, x_HFC = net(image_t1, image_t2, image_org, targets, contrast_criterion)
f_proj, f_pred = model(x_cl, bn_name='pgd', contrast=True)
fce_proj, fce_pred, logits_ce = model(x_ce, bn_name='pgd_ce', contrast=True, CF=True, return_logits=True, nonlinear=False)
f1_proj, f1_pred = model(x1, bn_name='normal', contrast=True)
f2_proj, f2_pred = model(x2, bn_name='normal', contrast=True)
f_high_proj, f_high_pred = model(x_HFC, bn_name='normal', contrast=True)
features = torch.cat(
[f_proj.unsqueeze(1), f1_proj.unsqueeze(1), f2_proj.unsqueeze(1), f_high_proj.unsqueeze(1)], dim=1)
contrast_loss = contrast_criterion(features)
ce_loss = 0
for label_idx in range(5):
tgt = targets[label_idx].long()
lgt = logits_ce[label_idx]
ce_loss += ce_criterion(lgt, tgt) / 5.
loss = contrast_loss + ce_loss * args.ce_weight
loss.backward()
optimizer.step()
train_loss += loss.item()
total += targets[0].size(0)
progress_bar(batch_idx, len(train_loader),
'Loss: %.3f (%d/%d)'
% (train_loss/(batch_idx+1), correct, total))
    return train_loss/(batch_idx+1), 0.
# ================================================================== #
# Checkpoint #
# ================================================================== #
# Save checkpoint
def checkpoint(epoch):
print('=====> Saving checkpoint...')
state = {
'model': model.state_dict(),
'epoch': epoch,
'rng_state': torch.get_rng_state()
}
save_dir = './checkpoint/{}'.format(args.name)
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
torch.save(state, '{}/epoch_{}.ckpt'.format(save_dir, epoch))
# ================================================================== #
# Run the model #
# ================================================================== #
np.random.seed(args.seed)
random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
for epoch in range(start_epoch, args.epoch+2):
adjust_learning_rate(args, optimizer, epoch+1)
train_loss, train_acc = train(epoch)
logger.log_value('train_loss', train_loss, epoch)
logger.log_value('learning_rate', optimizer.param_groups[0]['lr'], epoch)
if epoch % args.save_epoch == 0:
checkpoint(epoch)
| 2.125 | 2 |
Utils/saveFigure.py | LeonardoFagundesJr/iAuRoRA | 0 | 12797718 | <gh_stars>0
#---
# Save Figure:
def saveFig_old(fig,nameFig):
# ------- the name must contain the figure format:
# e.g., nameFig = 'Position.png' or nameFig = 'linearVelocity.eps'
import os
nameFig = os.path.join("/content/Figures/",nameFig)
fig.savefig(nameFig, dpi=300, bbox_inches='tight')
#---
# Save Figure:
def saveFig(fig,nameFig):
# ------- the name must contain the figure format:
# e.g., nameFig = 'Position.png' or nameFig = 'linearVelocity.eps'
import os
if not(os.path.isdir('../content/Figures/')):
os.mkdir('../content/Figures/')
nameFig = os.path.join("../content/Figures/",nameFig)
fig.savefig(nameFig, dpi=600, bbox_inches='tight')
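# --- Hedged usage sketch (added for illustration, not in the original module):
# shows how saveFig is expected to be called; the demo figure below is made up.
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    demo_fig, demo_ax = plt.subplots()
    demo_ax.plot([0, 1, 2], [0.0, 1.0, 4.0])
    demo_ax.set(xlabel='t [s]', ylabel='x [m]')
    # Writes ../content/Figures/demoPosition.png at 600 dpi
    saveFig(demo_fig, 'demoPosition.png')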
| 2.53125 | 3 |
mkMeteoF/libera5.py | YannChemin/LINGRA_RS | 0 | 12797719 | <gh_stars>0
import netCDF4
import cdsapi
import numpy as np
import matplotlib.pyplot as plt
def download_ERA5_bbox(i_year, out_dir, prefix, north, west, south, east):
c = cdsapi.Client()
c.retrieve(
'reanalysis-era5-single-levels',
{
'product_type': 'reanalysis',
'variable': [
'surface_solar_radiation_downwards', '10m_u_component_of_wind',
'10m_v_component_of_wind', '2m_temperature', 'surface_pressure',
'total_precipitation', '2m_dewpoint_temperature'
# 'evaporation', 'potential_evaporation',
# 'snow_density', 'snow_depth'
# '10m_u_component_of_wind', '10m_v_component_of_wind', '2m_dewpoint_temperature',
# '2m_temperature', 'angle_of_sub_gridscale_orography',
# 'anisotropy_of_sub_gridscale_orography',
# 'evaporation', 'land_sea_mask',
# 'maximum_total_precipitation_rate_since_previous_post_processing',
# 'mean_evaporation_rate', 'mean_potential_evaporation_rate',
# 'minimum_total_precipitation_rate_since_previous_post_processing',
# 'orography', 'potential_evaporation', 'precipitation_type',
# 'snow_density', 'snow_depth', 'soil_temperature_level_1',
# 'soil_temperature_level_2', 'soil_temperature_level_3', 'soil_temperature_level_4',
# 'soil_type', 'surface_pressure', 'surface_solar_radiation_downwards',
# 'total_column_snow_water', 'total_precipitation', 'volumetric_soil_water_layer_1',
# 'volumetric_soil_water_layer_2', 'volumetric_soil_water_layer_3',
# 'volumetric_soil_water_layer_4',
],
'area': [
north, west, south, east,
], # North, West, South, East. Default: global
'year': str(i_year),
'month': [
'01', '02', '03',
'04', '05', '06',
'07', '08', '09',
'10', '11', '12',
],
'day': [
'01', '02', '03',
'04', '05', '06',
'07', '08', '09',
'10', '11', '12',
'13', '14', '15',
'16', '17', '18',
'19', '20', '21',
'22', '23', '24',
'25', '26', '27',
'28', '29', '30',
'31',
],
'time': [
'00:00', '01:00', '02:00',
'03:00', '04:00', '05:00',
'06:00', '07:00', '08:00',
'09:00', '10:00', '11:00',
'12:00', '13:00', '14:00',
'15:00', '16:00', '17:00',
'18:00', '19:00', '20:00',
'21:00', '22:00', '23:00',
],
'format': 'netcdf',
},
out_dir + '/ERA5_' + prefix + '_' + str(i_year) + '.nc')
def getTimeNetcdf(netcdffile):
"""
Function to extract the time component of a netCDF file
"""
# Get the time array from NETCDF file and convert to Python datetime
ncfile = netCDF4.Dataset(netcdffile, 'r')
tim = ncfile.variables['time'] # do not cast to numpy array yet
time_convert = netCDF4.num2date(tim[:], tim.units, tim.calendar, only_use_cftime_datetimes=False)
return time_convert
def getPixelVals(netcdffile, layer, longitude, latitude):
"""
Function to query pixels values for a given layer at a given Lat/long
"""
nc = netCDF4.Dataset(netcdffile)
dict_nc = nc[layer].__dict__
# Extract the offset and scale from netcdf file
offset = float(dict_nc['add_offset'])
scale = float(dict_nc['scale_factor'])
# Extract NoData Value from layer
nodata = int(dict_nc['missing_value'])
fillva = int(dict_nc['_FillValue'])
# print(offset, scale, nodata, fillva)
lat, lon = nc.variables['latitude'], nc.variables['longitude']
# extract lat/lon values (in degrees) to numpy arrays
latvals = lat[:]
lonvals = lon[:]
#print(lonvals.shape)
#print(latvals.shape)
xl = np.linspace(lonvals.min(), lonvals.max(), lonvals.shape[0])
yl = np.linspace(latvals.min(), latvals.max(), latvals.shape[0])
xx, yy = np.meshgrid(xl, yl, sparse=True)
def distance_2d(longitude, latitude, xgrid, ygrid):
#print(longitude, latitude)
#print(xgrid, xgrid.dtype)
#print(ygrid, ygrid.dtype)
        return np.hypot(np.subtract(xgrid, float(longitude)), np.subtract(ygrid, float(latitude)))
dist_grid = distance_2d(longitude, latitude, xx, yy)
minval = dist_grid.min()
for x in range(dist_grid.shape[1]):
for y in range(dist_grid.shape[0]):
if dist_grid[y][x] == minval:
ix_min = x
iy_min = y
# Select all of the temporal instances of the pixel
arr = nc.variables[layer][:, 0, iy_min, ix_min]
if layer == 'ssrd':
arr[arr == 0] = np.nan
# Replace nodata (often -32767) with NAN
arr[arr == nodata] = np.nan
arr[arr == -nodata] = np.nan
# Fill in value is sometimes -32766, but not documented...
arr[arr == 32766] = np.nan
# Fill values to NAN
arr[arr == fillva] = np.nan
arr[arr == -fillva] = np.nan
if layer == 'ssrd':
return arr
# Rescale the data
array = offset + arr * scale
# Return the array
return array
def plotLayer(layer, time_convert, array):
"""
Function to plot the array data with time
"""
fig, ax = plt.subplots()
ax.plot_date(time_convert, array, 'g-')
if (layer.strip() == "ssrd"):
ylab = 'surface downwelling shortwave flux in air (J m-2)'
elif (layer.strip() == "u10"):
ylab = 'windspeed u component (m s-1)'
elif (layer.strip() == "v10"):
ylab = 'windspeed v component (m s-1)'
elif (layer.strip() == "t2m"):
ylab = 'T2m (K)'
elif (layer.strip() == "d2m"):
ylab = 'Dewpoint T2m (K)'
#elif (layer.strip() == "sp"):
# ylab = 'Surface air pressure (Pa)'
elif (layer.strip() == "tp"):
ylab = 'Total precipitation (m)'
else:
ylab = 'undefined product'
ax.set(xlabel='time', ylabel=ylab, title='ERA5 ' + ylab)
ax.grid()
# fig.savefig("test.png")
plt.show()
def d2m2eact(d2m):
"""
    Converts dew point temperature (in degrees Celsius) to actual vapour
    pressure in kPa via the Tetens formula (ERA5 d2m is in Kelvin: subtract 273.15 first)
"""
return (0.6108 * np.exp((17.27 * d2m) / (d2m + 237.3)))
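# --- Hedged usage sketch (added for illustration, not in the original module):
# ERA5 stores 2 m dewpoint (d2m) in Kelvin, so convert to degrees Celsius
# before applying the Tetens formula above. The file name and coordinates
# below are placeholders, not real data.
if __name__ == "__main__":
    example_nc = "ERA5_demo_2020.nc"               # placeholder file path
    d2m_kelvin = getPixelVals(example_nc, "d2m", 5.5, 52.0)
    e_act = d2m2eact(d2m_kelvin - 273.15)          # actual vapour pressure (kPa)
    print(e_act)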
| 2.15625 | 2 |
currency_calculator/__init__.py | jie17/PyCurrency | 1 | 12797720 | from .PyCurrency import get, convert | 1.085938 | 1 |
text_classification/train.py | Ronalmoo/nlp_with_torch | 0 | 12797721 | <reponame>Ronalmoo/nlp_with_torch
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from pathlib import Path
from tqdm import tqdm
from torch.nn.utils import clip_grad_norm_
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.tensorboard import SummaryWriter
from data_loader import DataLoader
from model.ops import RNNClassifier
| 1.242188 | 1 |
07-handy-haversacks/solution.py | johntelforduk/advent-of-code-2020 | 1 | 12797722 | <filename>07-handy-haversacks/solution.py
# Solution to day 7 of AOC 2020, <NAME>.
# https://adventofcode.com/2020/day/7
import sys
VERBOSE = ('-v' in sys.argv)
filename = sys.argv[1]
def search_for_shiny_gold(root: str) -> int:
"""Search a tree of bags. Return the number of shiny gold bags it contains."""
if VERBOSE:
print('root, rule_dict[root]:', root, rule_dict[root])
if len(rule_dict[root]) == 0: # If value is empty list...
return 0 # ... there are no shiny gold bags in this tree.
shiny_gold_inside = 0
for each_inside, num_inside in rule_dict[root]:
if each_inside == 'shiny gold':
shiny_gold_inside = num_inside
else:
shiny_gold_inside += search_for_shiny_gold(each_inside)
return shiny_gold_inside
def search_inside(root: str) -> int:
"""Search a tree of bags. Return the total number of bags it contains."""
if VERBOSE:
print('root, rule_dict[root]:', root, rule_dict[root])
if len(rule_dict[root]) == 0: # If value is empty list...
return 0 # ... there are no bags this tree.
total_inside = 0
for each_inside, num_inside in rule_dict[root]:
total_inside += num_inside + num_inside * search_inside(each_inside)
return total_inside
f = open(filename)
whole_text = (f.read())
f.close()
rules = whole_text.split('\n')          # One puzzle rule per line of the input file.
if VERBOSE:
print('rules:', rules)
# Dictionary contains k: v pairs, one pair per rule.
# k = bag that the rule is about, eg. "light red".
# v = Empty list if bag contains no other bags. Otherwise, contains list of tuples (b, n), where b is bag inside key
# bag, and v is number of those bags.
#
# For example, "light red bags contain 1 bright white bag, 2 muted yellow bags."
# {'light red': [('bright white', 1), ('muted yellow', 2)}
#
# Another example,
# "dotted black bags contain no other bags."
# {'dotted black': []}
rule_dict = {}
for each_rule in rules:
words = each_rule.split()
# First 2 words are the name of the bag that the rules is about.
k = words[0] + ' ' + words[1]
if VERBOSE:
print('each_rule, words, k:', each_rule, words, k)
v = []
if each_rule[-22:] != 'contain no other bags.':
four_words = []
for i in words[4:]:
four_words.append(i)
# if VERBOSE:
# print('each_rule, i, four_words:', each_rule, i, four_words)
if len(four_words) == 4:
v.append((four_words[1] + ' ' + four_words[2], int(four_words[0])))
four_words = []
rule_dict[k] = v
if VERBOSE:
print('rule_dict, len(rule_dict)', rule_dict, len(rule_dict))
shiny_gold = 0
for k in rule_dict:
if search_for_shiny_gold(k) > 0:
shiny_gold += 1
print('Part 1:', shiny_gold)
print('Part 2:', search_inside(root='shiny gold'))
| 3.765625 | 4 |
diventi/tooltips/__init__.py | flavoi/diven | 2 | 12797723 | default_app_config = 'diventi.tooltips.apps.TooltipsConfig' | 1.054688 | 1 |
free/urls.py | ParkHyeonChae/Re-Born-Web | 23 | 12797724 | from django.conf import settings
from django.conf.urls.static import static
from django.urls import path
from . import views
from django.contrib import messages
from django.shortcuts import redirect
app_name = 'free'
def protected_file(request, path, document_root=None):
    messages.error(request, "접근 불가")  # "Access denied"
return redirect('/')
urlpatterns = [
path('', views.AllListView.as_view(), name='all_list'),
path('free/', views.FreeListView.as_view(), name='free_list'),
path('question/', views.QuestionListView.as_view(), name='question_list'),
path('information/', views.InformationListView.as_view(), name='information_list'),
path('write/', views.free_write_view, name='free_write'),
path('<int:pk>/', views.free_detail_view, name='free_detail'),
path('<int:pk>/edit/', views.free_edit_view, name='free_edit'),
path('<int:pk>/delete/', views.free_delete_view, name='free_delete'),
path('download/<int:pk>', views.free_download_view, name="free_download"),
path('<int:pk>/comment/write/', views.comment_write_view, name='comment_write'),
path('<int:pk>/comment/delete/', views.comment_delete_view, name='comment_delete'),
]
urlpatterns += static(settings.MEDIA_URL, protected_file, document_root=settings.MEDIA_ROOT) | 1.96875 | 2 |
school/lecture1/isi_cv_03_sol.py | kubekbreha/ML-Python-Algorithms | 0 | 12797725 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 19 21:04:18 2017
@author: pd
"""
#from IPython import get_ipython
#get_ipython().magic('reset -sf')
import numpy as np
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in newer scikit-learn
n_features=200
X, y = datasets.make_classification(750, n_features=n_features, n_informative=5, random_state=29)
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=0.25, random_state=0)
accuracies = []
for x in np.arange(1, n_features+1,5):
dt = DecisionTreeClassifier(max_depth=x)
dt.fit(X_train, y_train)
preds = dt.predict(X_test)
accuracies.append((preds == y_test).mean())
f, ax = plt.subplots(figsize=(7, 5))
ax.plot(range(1, n_features+1,5), accuracies, 'ko')
#ax.plot(range(1, n_features+1)[:12], accuracies[:12], color='k')
ax.set_title("Decision Tree Accuracy")
ax.set_ylabel("% Correct")
ax.set_xlabel("Max Depth")
plt.show() | 2.96875 | 3 |
scripts/stock_price/findSimilarLines.py | zettsu-t/cPlusPlusFriend | 9 | 12797726 | #!/usr/bin/python
# coding: utf-8
import sys
import Levenshtein
import numpy as np
assert len(sys.argv) > 1
with open(sys.argv[1], 'r', encoding='utf-8') as file:
lines = file.readlines()
n_lines = len(lines)
distances = np.zeros((n_lines, n_lines), dtype=int)
messages = []
for x in range(n_lines):
for y in range(x + 1, n_lines):
if x != y:
value = Levenshtein.distance(lines[x], lines[y])
distances[x,y] = value
if value < 5:
message = "lines {} and {} look similar\n{}{}\n".format(x, y, lines[x], lines[y])
messages.append(message)
for message in messages:
print(message)
| 3.125 | 3 |
mock_api/mock_gh_api.py | equinor/ms-continuus | 0 | 12797727 | import json
from copy import deepcopy
from random import randrange
from typing import List
import uvicorn
from fastapi import FastAPI, HTTPException, status
from pydantic import BaseModel
from starlette.responses import FileResponse
app = FastAPI()
class Repos(BaseModel):
repositories: List[str]
with open("mock_data.json") as file:
test_data = json.load(file)
@app.get("/orgs/{org}/repos")
def repos():
return test_data["repos"]
@app.get("/orgs/{org}/migrations")
def list_migrations():
return test_data["migrations"]
@app.post("/orgs/{org}/migrations")
def start_migration(repos: Repos):
if repos.repositories[0] == "Repo4":
raise HTTPException(status_code=status.HTTP_502_BAD_GATEWAY)
print(repos.repositories[0])
return test_data["startedMigration"][repos.repositories[0]]
@app.get("/orgs/{org}/migrations/{id}")
def migration_status(id: str):
status = deepcopy(test_data["status"][id])
# Add some delay to the export (not the failed ones)
if status["state"] != "failed" and randrange(10) > 6:
status["state"] = "exported"
print(status)
return status
@app.get("/orgs/{org}/migrations/{id}/archive")
async def download_archive(id: str):
# Can be created with `head -c 5M </dev/urandom > archive1`
return FileResponse("archive1")
if __name__ == "__main__":
uvicorn.run("mock_gh_api:app", host="0.0.0.0", port=5000, reload=True)
| 2.484375 | 2 |
backend/stock/migrations/0024_auto_20210218_2347.py | fengxia41103/stock | 1 | 12797728 | <reponame>fengxia41103/stock<filename>backend/stock/migrations/0024_auto_20210218_2347.py
# Generated by Django 3.1.6 on 2021-02-18 23:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('stock', '0023_auto_20210216_1552'),
]
operations = [
migrations.RenameField(
model_name='balancesheet',
old_name='ac',
new_name='ar',
),
migrations.AlterField(
model_name='incomestatement',
name='total_revenue',
field=models.FloatField(blank=True, default=0, null=True, verbose_name='Sales'),
),
migrations.AlterField(
model_name='mystrategyvalue',
name='method',
field=models.IntegerField(choices=[(1, 'daily return'), (2, 'overnight return'), (3, 'night day consistency'), (4, 'two daily trend'), (5, 'night day compounded return')], default=1),
),
]
| 1.65625 | 2 |
error_propagation/__init__.py | davidreissmello/error_propagation | 4 | 12797729 | <filename>error_propagation/__init__.py
from error_propagation.core import arrays_to_complex
from error_propagation.core import Complex
__all__ = ["Complex", "arrays_to_complex"]
| 1.046875 | 1 |
normalization.py | iskandr/attention-in-neural-networks | 5 | 12797730 | substitutions = {
"'’‘": " <SINGLE-QUOTE> ",
'"«»': " <DOUBLE-QUOTE> ",
"/": " <SLASH> ",
"\\": " <BACK-SLASH> ",
"”": " <RIGHT-QUOTE> ",
"“": " <LEFT-QUOTE> ",
"-": " <HYPHEN> ",
"[": " <LEFT-BRACKET> ",
"]": " <RIGHT-BRACKET> ",
"?": " <QUESTION> ",
"!": " <EXCLAMATION> ",
",": " <COMMA> ",
":": " <COLON> ",
";": " <SEMI-COLON> ",
"+": "<PLUS> ",
"(": " <LEFT-PAREN> ",
")": " <RIGHT-PAREN> ",
"&": " <AND>",
}
deletions = {"¿", "¡"}
def normalize(s):
s = s.lower().replace(".", "").replace("\n", "")
while "--" in s:
s = s.replace("--", "-")
for (ks, v) in substitutions.items():
for k in ks:
if k in s:
s = s.replace(k, v)
for k in deletions:
if k in s:
s = s.replace(k, "")
s = "<SOS> " + s.strip() + " <EOS>"
while " " in s:
s = s.replace(" ", " ")
return s
def tokenize(s):
s = normalize(s)
tokens = []
for word in s.split(" "):
if len(word) == 0:
continue
elif word.isdigit():
tokens.append("<NUMBER>")
else:
tokens.append(word)
return tokens
def denormalize_string(s):
for (ks, v) in substitutions.items():
if v in s:
s = s.replace(v, ks[0])
return s
def detokenize(tokens):
tokens = [token for token in tokens if token not in ["<EOS>", "<SOS>"]]
s = " ".join(tokens)
return denormalize_string(s)
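# --- Hedged usage sketch (added for illustration, not in the original module):
# a tokenize/detokenize round trip on a made-up sentence, showing the
# placeholder tokens produced by the substitution table above.
if __name__ == "__main__":
    example = "Hello, world! It costs 5 dollars (roughly)."
    toks = tokenize(example)
    # e.g. ['<SOS>', 'hello', '<COMMA>', 'world', '<EXCLAMATION>', 'it', ...]
    print(toks)
    print(detokenize(toks))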
| 3.25 | 3 |
meta/DesignDataPackage/iFAB/validate/validate.py | lefevre-fraser/openmeta-mms | 0 | 12797731 | """
validate.py
Validate XML file against XSD schema using minixsd library.
<NAME> (<EMAIL>)
The Pennsylvania State University
Applied Research Laboratory
2012
"""
import argparse
from collections import namedtuple
from minixsv.pyxsval import parseAndValidate
from genxmlif import GenXmlIfError
from minixsv.xsvalErrorHandler import XsvalError
ValidationResult = namedtuple("Result", "passed message")
def get_args():
description = "Validate XML file against XSD schema using "\
"the minixsd Python library."
parser = argparse.ArgumentParser(description=description)
parser.add_argument("xml", help="XML file to validate")
parser.add_argument("xsd", help="XSD schema to validate against")
return parser.parse_args()
def validate(xml, xsd):
try:
parseAndValidate(xml, xsdFile=xsd)
except IOError, errstr:
return ValidationResult(False, errstr)
except GenXmlIfError, errstr:
return ValidationResult(False, errstr)
except XsvalError, errstr:
return ValidationResult(False, errstr)
return ValidationResult(True,
"{0} complies to {1}".format(xml, xsd))
def cli():
args = get_args()
result = validate(args.xml, args.xsd)
print result.message
if __name__ == "__main__":
cli()
# vim:ts=4:sw=4:expandtab:fdm=indent:wrap lbr:ai
| 2.75 | 3 |
src/resources/face_train.py | RoliKhanna/SafeRide | 0 | 12797732 | <filename>src/resources/face_train.py
from math import sqrt
from sklearn import neighbors
from os import listdir
from os.path import isdir, join, isfile, splitext
import pickle
import face_recognition
from face_recognition import face_locations
from face_recognition.cli import image_files_in_folder
import cv2
def train(train_dir, n_neighbors = None, knn_algo = 'ball_tree', verbose=False):
X = []
y = []
for class_dir in listdir(train_dir):
if not isdir(join(train_dir, class_dir)):
continue
for img_path in image_files_in_folder(join(train_dir, class_dir)):
image = face_recognition.load_image_file(img_path)
faces_bboxes = face_locations(image)
if len(faces_bboxes) != 1:
if verbose:
print("image {} not fit for training: {}".format(img_path, "didn't find a face" if len(faces_bboxes) < 1 else "found more than one face"))
continue
X.append(face_recognition.face_encodings(image, known_face_locations=faces_bboxes)[0])
y.append(class_dir)
if n_neighbors is None:
n_neighbors = int(round(sqrt(len(X))))
if verbose:
print("Chose n_neighbors automatically as:", n_neighbors)
knn_clf = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors, algorithm=knn_algo, weights='distance')
knn_clf.fit(X, y)
    # Dump the trained KNN classifier with Pickle
model_save_path = '/home/pi/Desktop/model_classifier.pkl'
# Open the file to save as pkl file
with open(model_save_path, 'wb') as model_pkl:
pickle.dump(knn_clf, model_pkl)
# Close the pickle instance
model_pkl.close()
print("Model created successfully.")
#return knn_clf
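# --- Hedged sketch (added for illustration, not part of the original script):
# loads the pickled KNN classifier saved by train() and predicts who appears
# in a single image. The default model path reuses the one above; the 0.6
# distance threshold is an assumption, not a value from the original code.
def predict(image_path, model_path='/home/pi/Desktop/model_classifier.pkl', threshold=0.6):
    with open(model_path, 'rb') as model_pkl:
        knn_clf = pickle.load(model_pkl)
    image = face_recognition.load_image_file(image_path)
    boxes = face_locations(image)
    if len(boxes) == 0:
        return []
    encodings = face_recognition.face_encodings(image, known_face_locations=boxes)
    distances, _ = knn_clf.kneighbors(encodings, n_neighbors=1)
    names = knn_clf.predict(encodings)
    # Keep only matches whose nearest training example is close enough
    return [name if dist[0] <= threshold else "unknown"
            for name, dist in zip(names, distances)]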
train("TrainData") | 3.171875 | 3 |
models/chat.py | Akkihi/Custom-Commands-Bot | 1 | 12797733 | from peewee import *
from .base_model import BaseModel
class Chat(BaseModel):
telegram_id = BigIntegerField(unique=True)
title = TextField(null=True)
link = TextField(null=True)
| 2.109375 | 2 |
student_dashboard_prototype.py | gregorygsimon/gregorygsimon.github.io | 2 | 12797734 | <filename>student_dashboard_prototype.py<gh_stars>1-10
import matplotlib.pyplot as plt
from matplotlib import rcParams
import matplotlib.dates as mdates
import pandas as pd
import numpy as np
import requests, datetime, webbrowser
## This code will not run without MindEdge access / password
if 'un' not in vars():
un = 'wgudb'
pw = ''
if pw=='':
pw = input("Enter Mindedge password:")
base_url = 'https://wguapps.mindedgeonline.com/services/api/'
domain_id = 55
def get_token(password):
r = requests.post(base_url+'auth',
json = {'username':un,'password':password})
try:
return r.json()[0]['token']
except Exception as e:
print('status: ',r.status_code,':',r.reason)
print(e)
print("Token not returned. Check password is correct")
def get_me_sid(email,token):
"""get MindEdge Student ID"""
r = requests.post(base_url+'students',
json={'token':token,'dId':55,'emailAddress':email})
return r.json()[0].get('sId')
def get_completion_perc(sid,token):
"""returns dataframe of percent completion by module"""
r3 = requests.post(base_url+'studentCompleteAssignments',
json={'token':token,'dId':55,'cId':612,'sId':sid})
read_pages = [t['title'] for t in r3.json()] # titles of pages read
# select only pages with learning content:
pages = [title for title in read_pages if (
'.' in title and
'Feedback' not in title and
'Flashcards' not in title and
'Review' not in title and
'Problem Set' not in title
)]
idx = [i for i in range(1,8)]
# all_pages gives the total possible pages indexed by module
# counted manually in Sep 2020
all_pages = pd.Series([23, 22, 30, 17, 8, 12, 16],index=idx)
completion=100*pd.Series([int(p[0]) for p in pages])\
.value_counts().reindex(idx,fill_value=0)/all_pages
return completion
def get_quiz_results(sid,token):
r4 = requests.post(base_url+'studentTestData',
json={'token':token,'dId':55,'cId':612,'sId':sid})
# parsing the text names of the quizzes to give:
# the ch number, quiz number, percent, date, and # of Q's answered
data_quiz = [
(*test['title'].split('Module ')[1].split('Problem Set '),
result['final_score'],
result['dateCompleted'],
result['totalQuestions']
)
for test in r4.json() for result in test['resultData']
]
quiz_df = pd.DataFrame(
data_quiz,
columns=['Chapter','Problem Set','Percent','Date','Completed']
)
quiz_df['Percent'] = pd.to_numeric(quiz_df['Percent'])
quiz_df['days ago'] = pd.to_datetime(quiz_df['Date'])\
.apply(lambda x: (datetime.datetime.now()-x).days)
quiz_df['# correct'] = quiz_df['Percent']/100 * quiz_df['Completed']
quiz_df['y_value'] = pd.to_numeric(quiz_df['Chapter'])
return quiz_df
def get_logins(sid,token):
"""given studentID and active API token,
returns dataframe of login dates and login duration"""
r5 = requests.post(base_url+'studentLogins',
json={'token':token,'dId':55,'cId':612,'sId':sid})
studytimes = pd.DataFrame(
# first 10 characters of 'first_accessed' gives the date
[(x['first_accessed'][:10],x['duration']/3600) for x in r5.json()]
)
studytimes.columns = ['date','duration']
studytimes = studytimes.groupby('date').sum()
# to trim outliers/glitches (e.g. 300 hours spent in one session)
# we make 10 the maximum reported hours possible in one session
studytimes[studytimes>10] = 10
studytimes.index = pd.to_datetime(studytimes.index)
return studytimes
def mindedge_dashboard(sid,token,filename='dashboard.png'):
"""input: studentID, active API token, and image output filename.
Gathers all student data and creates dashboard locally"""
studytimes = get_logins(sid,token)
completion = get_completion_perc(sid,token)
quiz_df = get_quiz_results(sid,token)
# reindex studytimes to ensure that at least one month is visible
    # so min of date_range is the lesser of 1 mo ago and the min date
month_ago = (datetime.datetime.now()+datetime.timedelta(days=-30)).date()
all_dates = pd.date_range(
min(month_ago, studytimes.index.min()),
datetime.datetime.now()
)
studytimes_reidx = studytimes.reindex(all_dates,fill_value=0)
# WGU colors taken from https://www.wgu.edu/about/brand.html
blue = '#003057'
lblue = '#4986AD'
green = '#509E2F'
# change font
rcParams['font.sans-serif'] = "Futura Lt BT"
rcParams['font.family'] = "sans-serif"
rcParams['font.size'] = 14
fig, axs = plt.subplots(2, 2, figsize=(12, 8),sharey='row')
fig.patch.set_facecolor('white')
plt.subplots_adjust(hspace = 0.3) #more horizontal space between plots
#top left axis - completion horiz bar chart
ax = plt.subplot(2,2,1)
colors = [lblue]*7
for i in range(7):
if completion.iloc[i]==100:
colors[i]=blue
plt.barh(completion.index,completion.values,
height=1,color=colors,edgecolor='black')
plt.ylim(0.5,7.5)
plt.xlim(0,100)
ax.invert_yaxis()
ax.set_ylabel('Chapter')
ax.set_title('Completion of Learning Resource (%)')
ticks_set = [0,20,40,60,80,100]
plt.xticks(ticks_set,[str(val)+'%' for val in ticks_set])
plt.yticks(range(1,8))
# top right axis - circle plot for quiz results
ax = plt.subplot(2,2,2);
sc = plt.scatter(
quiz_df['Percent'].apply(lambda x: x+np.random.normal(0,0.1)),
quiz_df['y_value'],
s=700,alpha=0.8,c=quiz_df['days ago'],cmap=plt.cm.viridis_r,
vmin=0,vmax=max(14,quiz_df['days ago'].max())
)
cbar = plt.colorbar(sc)
cbar.set_label("Days Since Quiz")
ax.set_yticks(np.arange(0.5,7.5,1), minor=True)
plt.ylim(0.5,7.5)
plt.gca().invert_yaxis() # want Chapter 1 on top, Ch.7 on bottom
plt.xlim(0,105)
ax.set_xticks(np.arange(0,105,10))
ax.tick_params(length=0)
ax.yaxis.grid(True, which='minor')
ax.set_ylabel('Chapter')
ax.set_title('Quiz Results')
plt.yticks(range(1,8))
# bottom axis - study times vertical bar chart
ax = plt.subplot(2,1,2)
plt.bar(studytimes_reidx.index, studytimes_reidx['duration'])
ax.xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))
datemin = studytimes_reidx.index.min()
datemax = datetime.datetime.today()
ax.set_xlim(datemin, datemax)
ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=1))
plt.xticks(rotation=45);
ax.set_ylabel('hours studied');
ax.set_title('Hours engaged in Learning Resource');
plt.savefig(filename,dpi=300,bbox_inches='tight')
#webbrowser.open(filename)
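# --- Hedged usage sketch (added for illustration, not in the original script):
# how the helpers above are expected to compose. The e-mail address is a
# placeholder and the calls need valid MindEdge credentials to succeed.
if __name__ == '__main__':
    demo_token = get_token(pw)
    demo_sid = get_me_sid('student@example.edu', demo_token)   # placeholder e-mail
    mindedge_dashboard(demo_sid, demo_token, filename='demo_dashboard.png')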
| 2.90625 | 3 |
examples/crypto/rsaEncrypt.py | aaronize/pooryam | 0 | 12797735 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import rsa
def do_encrypt(plain_data, pub_key):
    # RSA-encrypt a bytes payload with the given public key
    return rsa.encrypt(plain_data, pub_key)
def do_decrypt(cipher_data, priv_key):
    # RSA-decrypt a bytes payload with the given private key
    return rsa.decrypt(cipher_data, priv_key)
if __name__ == "__main__":
    message = "hello world"
    # Generate a throw-away demo keypair; real code would load persisted keys
    pub_key, priv_key = rsa.newkeys(512)
    # Encrypt, then decrypt to verify the round trip
    cipher_message = do_encrypt(message.encode(), pub_key)
    print(cipher_message)
    print(do_decrypt(cipher_message, priv_key).decode())
| 3.21875 | 3 |
sacorg/algorithms/blitzstein_diaconis/blitzstein_diaconis.py | abdcelikkanat/sacorg | 0 | 12797736 | <gh_stars>0
from sacorg.utils import *
from sacorg.algorithms.isgraphical import *
def s(deg_seq):
"""
Generates a sample graph for a given degree sequence d
:param deg_seq: Given degree sequence
:return E, p, c: edges with vertex labels starting from 1,
probability of the generated graph and
the number of edge combinations
"""
# Copy the given degree sequence to use it as residual sequence
r = deg_seq.copy()
p = 1.0 # probability of the generated graph
c = 1 # the number of edge combinations for the same graph that can be generated by the algorithm
E = [] # list of edges
N = len(r) # length of the sequence
adjacentVertices = [[] for _ in range(N)] # stores the vertices which are adjacent
# run until residual sequence completely becomes 0 vector
while np.any(r != 0):
# Get the index of vertex having minimum degree greater than 0
i = np.where(r == np.amin(r[r > 0]))[0][-1]
c *= factorial(r[i])
while r[i] != 0:
J = np.asarray([], dtype=np.int) # Construct candidate list J
possibleVertices = [o for o in np.arange(N) if (r[o] > 0 and o != i and (o not in adjacentVertices[i]))]
for j in possibleVertices:
# Decrease degrees by one
(r[i], r[j]) = (r[i] - 1, r[j] - 1)
# add the the vertex j to candidate list J, if residual sequence is graphical
if is_graphical(r):
J = np.append(J, j)
# Increase degrees by one
(r[i], r[j]) = (r[i] + 1, r[j] + 1)
# Pick a vertex j in the candidate list J with probability proportional to its degree d_j
degrees = np.asarray([r[u] for u in J])
prob = degrees / float(np.sum(degrees))
j = np.random.choice(J, p=prob, size=1)[0]
# Add the found edge to the edge lists
if i < j:
E.append([i + 1, j + 1]) # indices start from 1
else:
E.append([j + 1, i + 1]) # indices start from 1
# Add the chosen vertex to the list in order to not choose it again
adjacentVertices[i].append(j)
# Decrease degrees by 1
(r[i], r[j]) = (r[i] - 1, r[j] - 1)
p *= prob[J == j][0]
# Sort the edge sequences
E.sort()
return E, p, c
def get_sample(deg_seq, num_of_samples, verbose=False):
"""
Generates graphs realizing the degree sequence 'deg_seq' with vertex labels {1,...,len(deg_seq}}
:param deg_seq: Degree sequence
:param num_of_samples: Number of samples which will be generated
:return:
"""
# Get the initial time
time_start = time.clock()
# If the sequence is empty or is not graphical
if len(deg_seq) == 0 or is_graphical(deg_seq) is False:
if verbose is True:
# Get the total computation time
time_elapsed = (time.clock() - time_start)
print "Total computation time : " + str(time_elapsed)
return []
edges = []
for _ in range(num_of_samples):
# Call the s function
e, p, c = s(deg_seq)
# Append the edges
edges.append(e)
# Get the total computation time
time_elapsed = (time.clock() - time_start)
if verbose is True:
print "Total computation time : " + str(time_elapsed)
# Return the edges
return edges
def count(deg_seq, num_of_samples=1000, verbose=False):
"""
Estimates the number of graphs satisfying the degree sequence
:param deq_seq: Degree sequence
:param num_of_samples: number of samples used in estimation
:return estimation, std: Estimation for the number of graphs satisfying the given degree sequence d
and standard deviation
"""
estimate = 0.0
# Get initial time
time_start = time.clock()
# If the sequence is empty or is not graphical
if len(deg_seq) == 0 or is_graphical(deg_seq) is False:
if verbose is True:
# Get the total computation time
time_elapsed = (time.clock() - time_start)
print "Total computation time : " + str(time_elapsed)
return 0.0, 0.0
weights = np.zeros(num_of_samples, dtype=float)
for i in range(num_of_samples):
(edges, p, c) = s(deg_seq)
weights[i] = 1.0 / float(c * p)
estimate = (1.0 / float(num_of_samples)) * np.sum(weights)
std = np.std(weights, ddof=1)
# Get the total computation time
time_elapsed = (time.clock() - time_start)
if verbose is True:
print "Total computation time : " + str(time_elapsed)
return estimate, std | 3.015625 | 3 |
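# --- Hedged usage sketch (added for illustration, not in the original module):
# sample a few graphs realising a small graphical degree sequence and estimate
# how many labelled graphs realise it. The sequence below is made up.
if __name__ == "__main__":
    demo_seq = np.array([2, 2, 2, 2])   # e.g. realised by a 4-cycle
    demo_edges = get_sample(demo_seq, num_of_samples=3)
    print("sampled edge lists: " + str(demo_edges))
    demo_estimate, demo_std = count(demo_seq, num_of_samples=200)
    print("estimated number of graphs: " + str(demo_estimate) + " +/- " + str(demo_std))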
native_client/src/trusted/validator_ragel/check_dis_section.py | zipated/src | 2,151 | 12797737 | <reponame>zipated/src
#!/usr/bin/python
# Copyright (c) 2013 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
import tempfile
import objdump_parser
import test_format
class RdfaTestRunner(test_format.TestRunner):
SECTION_NAME = 'dis'
def CommandLineOptions(self, parser):
parser.add_option('--objdump',
help='Path to objdump')
def GetSectionContent(self, options, sections):
arch = {32: '-Mi386', 64: '-Mx86-64'}[options.bits]
data = ''.join(test_format.ParseHex(sections['hex']))
# TODO(shcherbina): get rid of custom prefix once
# https://code.google.com/p/nativeclient/issues/detail?id=3631
# is actually fixed.
tmp = tempfile.NamedTemporaryFile(
prefix='tmprdfa_', mode='wb', delete=False)
try:
tmp.write(data)
tmp.close()
objdump_proc = subprocess.Popen(
[options.objdump,
'-mi386', arch, '--target=binary',
'--disassemble-all', '--disassemble-zeroes',
'--insn-width=15',
tmp.name],
stdout=subprocess.PIPE,
# On Windows, builds of binutils based on Cygwin end lines with
# \n while builds of binutils based on MinGW end lines with \r\n.
# The 'universal_newlines' feature makes this work with either one.
universal_newlines=True)
result = ''.join(objdump_parser.SkipHeader(objdump_proc.stdout))
return_code = objdump_proc.wait()
assert return_code == 0, 'error running objdump'
finally:
tmp.close()
os.remove(tmp.name)
return result
def main(argv):
RdfaTestRunner().Run(argv)
if __name__ == '__main__':
main(sys.argv[1:])
| 1.78125 | 2 |
books/learn-python-the-hard-way/ex29.py | phiratio/lpthw | 1 | 12797738 | <gh_stars>1-10
people = 20
cats = 30
dogs = 15
if people < cats:
print(f'Too many Cats! The world is doomed!')
if people > cats:
print(f'Not many cats! The world is saved')
if people > dogs:
print(f'The world is drooled on')
if people < dogs:
print(f'The world is dry')
dogs += 5
if people >= dogs:
print(f'people are greater than or equal to dogs')
if people <= dogs:
print(f'people are less than or equal to dogs')
if people == dogs:
print(f'people are dogs.')
| 4.09375 | 4 |
easyai/train_task.py | lpj0822/image_point_cloud_det | 1 | 12797739 | <reponame>lpj0822/image_point_cloud_det<gh_stars>1-10
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author:
from easyai.helper.arguments_parse import TaskArgumentsParse
from easyai.tasks.cls.classify_train import ClassifyTrain
from easyai.tasks.det2d.detect2d_train import Detection2dTrain
from easyai.tasks.seg.segment_train import SegmentionTrain
from easyai.tasks.pc_cls.pc_classify_train import PointCloudClassifyTrain
from easyai.tools.model_to_onnx import ModelConverter
from easyai.base_name.task_name import TaskName
class TrainTask():
def __init__(self, train_path, val_path, pretrain_model_path, is_convert=False):
self.train_path = train_path
self.val_path = val_path
self.pretrain_model_path = pretrain_model_path
self.is_convert = is_convert
def classify_train(self, cfg_path, gpu_id, config_path):
cls_train_task = ClassifyTrain(cfg_path, gpu_id, config_path)
cls_train_task.load_pretrain_model(self.pretrain_model_path)
cls_train_task.train(self.train_path, self.val_path)
self.image_model_convert(cls_train_task, cfg_path)
def detect2d_train(self, cfg_path, gpu_id, config_path):
det2d_train = Detection2dTrain(cfg_path, gpu_id, config_path)
det2d_train.load_pretrain_model(self.pretrain_model_path)
det2d_train.train(self.train_path, self.val_path)
self.image_model_convert(det2d_train, cfg_path)
def segment_train(self, cfg_path, gpu_id, config_path):
seg_train = SegmentionTrain(cfg_path, gpu_id, config_path)
seg_train.load_pretrain_model(self.pretrain_model_path)
seg_train.train(self.train_path, self.val_path)
self.image_model_convert(seg_train, cfg_path)
def pc_classify_train(self, cfg_path, gpu_id, config_path):
pc_cls_train_task = PointCloudClassifyTrain(cfg_path, gpu_id, config_path)
pc_cls_train_task.load_pretrain_model(self.pretrain_model_path)
pc_cls_train_task.train(self.train_path, self.val_path)
def image_model_convert(self, train_task, cfg_path):
if self.is_convert:
converter = ModelConverter(train_task.train_task_config.image_size)
converter.model_convert(cfg_path, train_task.train_task_config.best_weights_file,
train_task.train_task_config.snapshot_path)
def main():
print("process start...")
options = TaskArgumentsParse.train_input_parse()
train_task = TrainTask(options.trainPath, options.valPath, options.pretrainModel)
if options.task_name == TaskName.Classify_Task:
train_task.classify_train(options.model, 0, options.config_path)
elif options.task_name == TaskName.Detect2d_Task:
train_task.detect2d_train(options.model, 0, options.config_path)
elif options.task_name == TaskName.Segment_Task:
train_task.segment_train(options.model, 0, options.config_path)
elif options.task_name == TaskName.PC_Classify_Task:
train_task.pc_classify_train(options.model, 0, options.config_path)
print("process end!")
if __name__ == '__main__':
main()
| 1.953125 | 2 |
src/rayoptics/qtgui/ipyconsole.py | wuffi/ray-optics | 106 | 12797740 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright © 2018 <NAME>
""" Support creation of an iPython console, with rayoptics environment
.. Created on Wed Nov 21 21:48:02 2018
.. codeauthor: <NAME>
"""
from PyQt5.QtGui import QColor
from qtconsole.rich_jupyter_widget import RichJupyterWidget
from qtconsole.inprocess import QtInProcessKernelManager
from qtconsole import styles
from IPython.lib import guisupport
import qdarkstyle
from rayoptics.gui.appmanager import ModelInfo
from rayoptics.util import colors
default_template = '''\
QPlainTextEdit, QTextEdit {
background-color: %(bgcolor)s;
background-clip: padding;
color: %(fgcolor)s;
selection-background-color: %(select)s;
}
.inverted {
background-color: %(fgcolor)s;
color: %(bgcolor)s;
}
.error { color: red; }
.in-prompt { color: %(i_color)s; }
.in-prompt-number { font-weight: bold; }
.out-prompt { color: %(o_color)s; }
.out-prompt-number { font-weight: bold; }
'''
def create_ipython_console(gui_parent, opt_model, title, view_width, view_ht):
""" create a iPython console with a rayoptics environment """
def create_light_or_dark_callback(ipy_console):
# if not hasattr(ipy_console, 'background'):
# ipy_console.background = ipy_console.background()
def l_or_d(is_dark):
accent = colors.accent_colors(is_dark)
prompt_style = {
'i_color': accent['cyan'],
'o_color': accent['orange'],
}
if is_dark:
ipy_console.setStyleSheet(
qdarkstyle.load_stylesheet(qt_api='pyqt5'))
# color_defs = {**styles.get_colors('solarized-dark'),
# **prompt_style }
else:
ipy_console.setStyleSheet('')
# color_defs = {**styles.get_colors('solarized-light'),
# **prompt_style }
# ipy_console.style_sheet = default_template%color_defs
# ipy_console._style_sheet_changed()
return l_or_d
if opt_model:
ro_env = {
'gui_parent': gui_parent,
'app': gui_parent.app_manager,
'opm': opt_model,
'sm': opt_model.seq_model,
'osp': opt_model.optical_spec,
'pm': opt_model.parax_model,
'em': opt_model.ele_model,
'pt': opt_model.part_tree,
}
else:
ro_env = {
'gui_parent': gui_parent,
'app': gui_parent.app_manager,
}
ro_setup = 'from rayoptics.environment import *'
# construct the top level widget
ipy_console = ConsoleWidget()
# load the environment
ipy_console.execute_command(ro_setup)
ipy_console.push_vars(ro_env)
mi = ModelInfo(opt_model)
sub_window = gui_parent.add_subwindow(ipy_console, mi)
sub_window.setWindowTitle(title)
sub_window.sync_light_or_dark = create_light_or_dark_callback(ipy_console)
orig_x, orig_y = gui_parent.initial_window_offset()
sub_window.setGeometry(orig_x, orig_y, view_width, view_ht)
sub_window.show()
class ConsoleWidget(RichJupyterWidget):
def __init__(self, customBanner=None, *args, **kwargs):
super().__init__(*args, **kwargs)
if customBanner is not None:
self.banner = customBanner
self.font_size = 6
self.kernel_manager = kernel_manager = QtInProcessKernelManager()
kernel_manager.start_kernel(show_banner=False)
kernel_manager.kernel.gui = 'qt'
self.kernel_client = kernel_client = self.kernel_manager.client()
kernel_client.start_channels()
def stop():
kernel_client.stop_channels()
kernel_manager.shutdown_kernel()
guisupport.get_app_qt().exit()
self.exit_requested.connect(stop)
def push_vars(self, variableDict):
"""
Given a dictionary containing name / value pairs, push those variables
to the Jupyter console widget
"""
self.kernel_manager.kernel.shell.push(variableDict)
def clear(self):
"""
Clears the terminal
"""
self._control.clear()
# self.kernel_manager
def print_text(self, text):
"""
Prints some plain text to the console
"""
self._append_plain_text(text)
def execute_command(self, command):
"""
Execute a command in the frame of the console widget
"""
self._execute(command, False)
| 2.375 | 2 |
009_web_data/inet_data.py | RicardoEscobar/learning-python | 0 | 12797741 | #
# Example file for retrieving data from the internet
#
import urllib.request
def main():
# open a connection to a URL using urllib2
web_url = urllib.request.urlopen("http://www.google.com")
# get the result code and print it
print ("result code: " + str(web_url.getcode()))
# read the data from the URL and print it
data = web_url.read()
print (data)
if __name__ == "__main__":
main() | 3.875 | 4 |
pygw2/api/daily.py | Natsku123/pygw2 | 1 | 12797742 | <filename>pygw2/api/daily.py
from ..core.models.general import DailyCrafting, DailyMapChest, DailyWorldBoss
from ..utils import endpoint, object_parse
class DailyApi:
_instances = {}
def __new__(cls, *args, api_key: str = "", **kwargs):
if api_key not in cls._instances:
cls._instances[api_key] = super().__new__(cls, *args, **kwargs)
return cls._instances[api_key]
def __init__(self, *, api_key: str = ""):
self.api_key: str = api_key
@endpoint("/v2/dailycrafting", has_ids=True)
async def crafting(self, *, data, ids: list = None):
"""
Fetch daily craftable items by ID(s).
None returns all time-gated crafting items.
:param data:
:param ids:
:return:
"""
if ids is None:
return data
return object_parse(data, DailyCrafting)
@endpoint("/v2/mapchests", has_ids=True)
async def mapchests(self, *, data, ids: list = None):
"""
Fetch daily hero's choice chests by ID(s).
None returns all time-gated hero's choice chests.
:param data:
:param ids:
:return:
"""
if ids is None:
return data
return object_parse(data, DailyMapChest)
@endpoint("/v2/worldbosses", has_ids=True)
async def worldbosses(self, *, data, ids: list = None):
"""
Fetch daily world bosses by ID(s).
None returns all world bosses.
:param data:
:param ids:
:return:
"""
if ids is None:
return data
return object_parse(data, DailyWorldBoss)
| 2.4375 | 2 |
src/DictionaryVector.py | Dorijan-Cirkveni/AutoSolvingMatrix | 0 | 12797743 | <reponame>Dorijan-Cirkveni/AutoSolvingMatrix
from src.VectorAbstract import VectorAbstract
class DictionaryVector(VectorAbstract):
def __init__(self, values=None):
if values is None:
values = dict()
self.values: dict = dict()
for e in values:
v = values[e]
if v != 0:
self.values[e] = v
return
def __repr__(self):
        return repr(self.values)
def __str__(self):
return str(self.values)
def FindSolvables(self, potentials):
potentials: set
return potentials.intersection(set(self.values.keys()))
def SolveWith(self, other):
other: (int, DictionaryVector)
if other[0] not in self.values:
return
factor = self.values.pop(other[0])
otv: DictionaryVector = other[1]
for e in otv.values:
if e not in self.values:
self.values[e] = 0
newvalue = self.values[e] - otv.values[e] * factor
if newvalue == 0:
self.values.pop(e)
else:
self.values[e] = newvalue
return
def ConvertToCanon(self):
if len(self.values)==0:
print("Colinearity found!")
return None
minvalue = min(self.values.keys())
factor = self.values.pop(minvalue)
for e in self.values:
self.values[e] /= factor
return minvalue
if __name__ == "__main__":
A = DictionaryVector({3: 2})
B = DictionaryVector({1:2, 2: 2, 3: 2})
B.SolveWith((1,A))
print("Done!")
| 3.046875 | 3 |
backend/ArrowSelection/Application.py | Ezetowers/tp-final-fallas-I | 0 | 12797744 | import sys, logging, time, random
import web
import json
from intellect.Intellect import Intellect
from MyIntellect import MyIntellect
from Question import Question
from Arrow_Model import Arrow_Model
from Model import Model
class Application(object):
def __init__(self):
# Load the rules
self._myIntellect = MyIntellect()
self._myIntellect.learn(
self._myIntellect.local_file_uri('./rulesets/secondRuleSet.policy'))
def GET(self):
        # Enable cross-domain (CORS) requests from the browser
web.header('Access-Control-Allow-Origin', '*')
web.header('Access-Control-Allow-Credentials', 'true')
# Receive the data from the browser and create
# the objects used by the policies
user_data = web.input()
self._model = Model()
self._myIntellect.initialize()
self.set_length( user_data )
self.set_height( user_data )
self.set_poundage( user_data )
self.set_temperature( user_data )
self.set_target_distance( user_data )
self._question = Question()
self._arrow_model = Arrow_Model()
self._myIntellect.learn( self._model )
self._myIntellect.reason()
self._question.number = self._model.question
self._arrow_model.value = self._model.arrow_model
# Send the results to the browser on a json
json_map = { 'question' : self._question.number,
'model' : self._arrow_model.get_model_name() }
return json.dumps( json_map )
def set_length(self, user_data):
try:
self._model.arm_length = int(user_data.longitud)
except AttributeError:
            logging.getLogger( 'ArrowSelection' ).error( '[APPLICATION] Arm_Length does not exist' )
def set_height(self, user_data):
try:
self._model.height = int(user_data.altura)
except AttributeError:
            logging.getLogger( 'ArrowSelection' ).error( '[APPLICATION] Height does not exist' )
def set_poundage(self, user_data):
try:
self._model.poundage = int(user_data.libraje)
except AttributeError:
            logging.getLogger( 'ArrowSelection' ).error( '[APPLICATION] Poundage does not exist' )
def set_temperature(self, user_data):
try:
self._model.temperature = int(user_data.temperatura)
except AttributeError:
            logging.getLogger( 'ArrowSelection' ).error( '[APPLICATION] Temperature does not exist' )
def set_target_distance(self, user_data):
try:
self._model.target_distance = int(user_data.distancia)
except AttributeError:
            logging.getLogger( 'ArrowSelection' ).error( '[APPLICATION] Target Distance does not exist' )
| 2.21875 | 2 |
Co-Simulation/Sumo/sumo-1.7.0/tools/personGenerator.py | uruzahe/carla | 4 | 12797745 | #!/usr/bin/env python
# Eclipse SUMO, Simulation of Urban MObility; see https://eclipse.org/sumo
# Copyright (C) 2019-2020 German Aerospace Center (DLR) and others.
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License 2.0 which is available at
# https://www.eclipse.org/legal/epl-2.0/
# This Source Code may also be made available under the following Secondary
# Licenses when the conditions for such availability set forth in the Eclipse
# Public License 2.0 are satisfied: GNU General Public License, version 2
# or later which is available at
# https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html
# SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-or-later
# @file personGenerator.py
# @author <NAME>
# @date 2019-03-22
"""
This tool generates flows of persons for a SUMO simulation, which is currently not possible in SUMO route files.
It does so by converting an xml file (usually having the ``.pflow.xml`` extension) to a sumo route file
containing the generated <person> elements.
Here is an example ``.pflow.xml`` :
.. code-block:: xml
<routes>
<personRoute id="route-1">
<walk from="e1" busStop="1" />
<probability>
<probabilityItem probability="0.5">
<ride busStop="2" modes="public" />
<probability>
<probabilityItem probability="0.5">
<stop busStop="2" duration="10" />
</probabilityItem>
<probabilityItem probability="0.5" />
</probability>
</probabilityItem>
<probabilityItem probability="0.5">
<ride busStop="3" modes="public">
</probabilityItem>
</probability>
</personRoute>
<personFlow id="forward" begin="0" end="3600" number="7" perWave="10" departPos="0" route="forward" />
<personFlow id="backward" begin="0" end="3600" period="600" perWave="10" departPos="0">
<walk from="e3" busStop="3" />
<ride busStop="1" modes="public"/>
<stop busStop="1" duration="50"/>
</personFlow>
</routes>
The example above generates two flows of persons:
- The first flow consists of persons taking a bus from stop 1 to either stop 2 or stop 3
(with a 50% chance for each). The persons of this flow are spawned in 7 waves (equally
separated in time) and each wave consists of 10 persons. For the persons going to bus
stop 2, there's a 50% chance they'll stay there during 10 ticks. The route followed by
the persons of this flow is defined separately in a ``<personRoute>`` element and
referenced by its ID.
- The second flow consists of persons taking a bus from stop 3 to stop 1 and then
stopping there for 50 ticks. The persons of this flow are spawned in periodic waves
with 10 persons per wave. The route followed by the persons is defined directly under
the ``<personFlow>``
How to Use
----------
Via Command Line
~~~~~~~~~~~~~~~~
This script can be run directly from the command line by passing an input ``.pflow.xml`` file path
and an output ``.rou.xml`` file path.
.. code-block:: bash
python personGenerator.py pedestrians.pflow.xml pedestrians.rou.xml
Note that the output file is overwritten without asking for permission.
In your script
~~~~~~~~~~~~~~
You can import the classes and methods in this module and use them in your own Python script.
See the documentation below for more details.
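For example, a minimal driver script could look like the following sketch (it assumes this module is importable as ``personGenerator``; adjust the import to your project layout):
.. code-block:: python
    from personGenerator import generate_persons
    # convert the person flow description into plain <person> elements
    generate_persons("pedestrians.pflow.xml", "pedestrians.rou.xml")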
"""
from lxml import etree
import argparse
import random
class PersonGenerationElement(object):
"""
This class serves as a base for person generation elements
"""
def __init__(self, xml_element):
self.xml_element = xml_element
if self.xml_element.tag != self.get_xml_tag():
raise Exception("Bad tag")
@classmethod
def get_xml_tag(cls):
"""
This class method is meant to be implemented by subclasses
It should return the xml tag for elements described by the current class
"""
raise NotImplementedError
def generate(self):
"""
This method is meant to be implemented by subclasses
It should return a list of elements generated by the element
"""
raise NotImplementedError
@classmethod
def wrap_elements(cls, elements, *args, **kwargs):
"""
Replaces xml elements with the appropriate tag (the one defined in get_xml_tag)
with an object of the current class.
        The given list is modified in place, so be careful
:param elements: a list of xml elements
:type elements: list
"""
for i in range(len(elements)):
if not isinstance(elements[i], PersonGenerationElement) and elements[i].tag == cls.get_xml_tag():
elements[i] = cls(elements[i], *args, **kwargs)
@staticmethod
def generate_multiple(elements):
"""
Loops over a list containing xml elements and PersonGenerationElement objects.
The PersonGenerationElement objects are replaced by elements generated from them
The given list is not modified
:param elements: A list containing xml elements and PersonGenerationElement objects.
:type elements: list
:return: a list of resulting xml elements
        :rtype: list
"""
result = list()
for element in elements:
if isinstance(element, PersonGenerationElement):
result.extend(element.generate())
else:
result.append(element.__copy__())
return result
class ProbabilityElement(PersonGenerationElement):
"""
This class describes probability elements that are used to generate alternatives with given probabilities.
In XML it looks like:
.. code-block:: xml
<probability>
<probabilityItem probability="0.5">fist alternative</probabilityItem>
<probabilityItem probability="0.5">second alternative</probabilityItem>
</probability>
Each time the element is asked to generate,
it returns the children of one of its alternatives according to the probabilities.
Probability elements can be nested, so you can have:
.. code-block:: xml
<probability>
<probabilityItem probability="0.5">
<probability>
...
</probability>
...Possibly other stuff
</probabilityItem>
<probabilityItem probability="0.5">
second alternative
</probabilityItem>
</probability>
This allows you to define conditional probabilities.
Note that the nested <probability> element should be a direct child of <probabilityItem>
"""
def __init__(self, xml_element):
"""
:param xml_element: The source xml element
"""
super().__init__(xml_element)
self.possibilities = []
for sub_element in list(self.xml_element):
if sub_element.tag != "probabilityItem":
raise Exception("Only probabilityItem elements are allowed inside probability")
try:
                # a missing probability attribute raises KeyError, which is handled below
                proba = float(sub_element.attrib["probability"])
if proba < 0 or proba > 1:
raise ValueError("")
possibility = (proba, list(sub_element))
ProbabilityElement.wrap_elements(possibility[1])
self.possibilities.append(possibility)
except (KeyError, ValueError):
raise ValueError("probabilityItem element requires attribute probability between 0 and 1")
        # compare with a small tolerance to avoid spurious floating point mismatches
        if abs(sum(child[0] for child in self.possibilities) - 1) > 1e-9:
            raise ValueError("Probabilities not summing up to 1 at line : " + str(self.xml_element.sourceline))
@classmethod
def get_xml_tag(cls):
"""
        :return: The tag of the xml element corresponding to this class (probability)
"""
return "probability"
def generate(self):
"""
:return: One of the alternatives according to the given probabilities
"""
result = []
cumulated_probability = 0
p = random.random()
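        # walk the alternatives and pick the first one whose cumulative probability reaches p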
for possibility in self.possibilities:
cumulated_probability += float(possibility[0])
if p <= cumulated_probability:
result.extend(self.generate_multiple(possibility[1]))
break
return result
class PersonRouteElement(PersonGenerationElement):
"""
This class describes xml elements that are used to define person routes separately.
.. code-block:: xml
<personRoute id="route">
<walk />
<stop />
<ride />
</personRoute>
The content of the route is then copied to each person using it.
You can use probabilities inside the **personRoute** element to have different alternatives.
Basically, you can have:
.. code-block:: xml
<personRoute id="route">
<walk from="edge1" busStop="1">
<probability>
<probabilityItem probability="0.5">
<ride busStop="2" modes="public" />
</probabilityItem>
<probabilityItem probability="0.5">
<ride busStop="3" modes="public" />
</probabilityItem>
</probability>
</personRoute>
"""
def __init__(self, xml_element):
super().__init__(xml_element)
self.id = self.xml_element.get("id")
self.children = list(self.xml_element)
ProbabilityElement.wrap_elements(self.children)
@classmethod
def get_xml_tag(cls):
"""
:return: The tag of the xml elements corresponding to this class (personRoute)
"""
return "personRoute"
@staticmethod
def get_route_by_id(routes, route_id):
"""
:param routes:
:type routes: collections.Iterable
:param route_id:
:type route_id: str
:return: The PersonRouteElement object having the given id from the given iterable. None if not found
"""
for route in routes:
if isinstance(route, PersonRouteElement) and route.id == route_id:
return route
return None
def generate(self):
"""
:return: A copy of the sub elements of the original personRoute element
probability elements are taken into account & used to generate an alternative
"""
return self.generate_multiple(self.children)
class PersonFlowElement(PersonGenerationElement):
"""
    This class describes xml elements that are used to generate flows of persons, as is already possible for vehicles.
For example, this xml code:
.. code-block:: xml
<personFlow id="flow" begin="0" end="3600" number="7" perWave="10">
<walk />
<ride />
<stop />
</personFlow>
will generate person elements having the same children (walk, ride, stop).
The generated persons will be in 7 waves each containing 10 persons.
These waves will be equally separated in time between 0 and 3600
The complete attributes list is:
- id
- begin : the time at which the flow starts
- end : the time at which the flow ends. Not mandatory, default is 3600.
- period : The time (in seconds) between two consecutive waves.
Not mandatory, if not given, number will be used
- number : the number of waves. Only meaningful when period is not specified
- perWave : the number of persons in each wave. Not mandatory, default is 1
- route : the id of the route that the persons will follow
Not mandatory, if not given, uses the children of the <personFlow> element
The id of generated persons will be `<id>_<person_index>` where `<person_index>` is the index
of the person in the flow (starting from 0)
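    For illustration (the attribute values here are made up), a flow that reuses a separately defined
    route and spawns a wave of 5 persons every 600 seconds could be declared as:
    .. code-block:: xml
        <personFlow id="commuters" begin="0" end="3600" period="600" perWave="5" route="route-1" />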
"""
default_end = 3600
id_attribute_key = "id"
begin_attribute_key = "begin"
end_attribute_key = "end"
period_attribute_key = "period"
number_attribute_key = "number"
per_wave_attribute_key = "perWave"
route_attribute_key = "route"
def __init__(self, xml_element, routes):
"""
:param xml_element: The xml element
:param routes: An iterable where to look for routes
:type routes: collections.Iterable
"""
super().__init__(xml_element)
self.routes = routes
self.attributes = {item[0]: item[1] for item in self.xml_element.items()}
self.children = list(self.xml_element)
ProbabilityElement.wrap_elements(self.children)
self.id = None
self.begin = None
self.period = None
self.route = None
# We check for the attributes that concern us & we leave the others
try:
self.id = self.attributes.pop(self.id_attribute_key)
except KeyError:
print("No id attribute in personFlow, quitting")
exit(-1)
try:
self.begin = int(self.attributes.pop(self.begin_attribute_key))
except KeyError:
            print("No begin in personFlow " + str(self.id) + ", quitting")
exit(-1)
try:
self.end = int(self.attributes.pop(self.end_attribute_key))
except KeyError:
self.end = self.default_end
try:
self.period = int(self.attributes.pop(self.period_attribute_key))
except KeyError:
try:
self.number = int(self.attributes.pop(self.number_attribute_key))
if self.number == 1:
self.period = (self.end - self.begin) * 2 + 1
else:
self.period = (self.end - self.begin) / (self.number - 1)
except KeyError:
                print("Neither period nor number given for personFlow " + str(self.id) + ", quitting")
exit(-1)
try:
self.per_wave = int(self.attributes.pop(self.per_wave_attribute_key))
except KeyError:
self.per_wave = 1
try:
route_id = self.attributes.pop(self.route_attribute_key)
self.route = PersonRouteElement.get_route_by_id(routes, route_id)
if self.route is None:
raise Exception("Route with id " + route_id + " not found at line " + str(self.xml_element.sourceline))
except KeyError:
pass
@classmethod
def get_xml_tag(cls):
"""
:return: The tag of the xml elements corresponding to the current class (personFlow)
"""
return "personFlow"
def generate(self):
"""
:return: The persons of the flow
"""
begin = self.begin
p_id = 0
elements = list()
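        # emit one wave of persons every self.period seconds until self.end is reached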
while begin <= self.end:
for i in range(self.per_wave):
element = etree.Element("person", self.attributes)
element.set("depart", str(int(begin)))
element.set("id", self.id + "_" + str(p_id))
if self.route is not None:
element.extend(self.route.generate())
else:
element.extend(self.generate_multiple(self.children))
elements.append(element)
p_id += 1
begin += self.period
return elements
def generate_persons(input_file, output_file):
"""
Core method of the script, parses <personFlow> tags in an XML file and generates <person> elements.
The generated <person> elements are sorted by their depart time.
The original file is not modified and the result is written in another file.
The resulting file will not contain the <personFlow> elements.
    Note that the output file is overwritten if it already exists
:param input_file: The path of the input file
:param output_file: The path of the output file
"""
# Parse the input file
tree = etree.parse(input_file)
routes = tree.getroot()
children = list(routes)
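    # detach all children; flows and routes are expanded below and the generated persons are re-attached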
for child in children:
routes.remove(child)
PersonRouteElement.wrap_elements(children)
person_routes = [child for child in children if isinstance(child, PersonRouteElement)]
PersonFlowElement.wrap_elements(children, routes=person_routes)
for person_route in person_routes:
children.remove(person_route)
person_elements = PersonGenerationElement.generate_multiple(children)
person_elements.sort(key=lambda e: int(e.get('depart')))
routes.extend(person_elements)
    with open(output_file, "w") as f:
        f.write(etree.tostring(routes).decode())
if __name__ == "__main__":
# Parses the command line arguments
parser = argparse.ArgumentParser()
parser.add_argument("source")
parser.add_argument("destination")
    args = parser.parse_args()
    generate_persons(args.source, args.destination)
| 2.390625 | 2 |
4_SC_project/students/urls.py | abdullah1107/Django-for-Begineers | 0 | 12797746 | <filename>4_SC_project/students/urls.py
from django.urls import path,include
from rest_framework import routers
from . import views
from students.views import StudentsView, StudentViewusingID
router = routers.DefaultRouter()
router.register('',views.StudentsView)
urlpatterns = [
#path('<int:pk>/', StudentViewusingID.as_view()),
path('', include(router.urls)),
]
| 2.015625 | 2 |
docs/02.AI_ML/code-1805/Day05all/cc1.py | mheanng/PythonNote | 2 | 12797747 | <reponame>mheanng/PythonNote<filename>docs/02.AI_ML/code-1805/Day05all/cc1.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import functools
def f1(x):
return x + 3
def f2(x):
return x * 6
def f3(x):
return x - 9
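# fc composes the given functions right to left: fc([f3, f2, f1])(x) == f3(f2(f1(x)))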
def fc(*fs):
return functools.reduce(
lambda fa, fb: lambda x: fa(fb(x)), *fs)
a = 1
b = f3(f2(f1(a)))
print(b)
c = functools.reduce(
lambda fa, fb: lambda x: fa(fb(x)), [f3, f2, f1])(a)
print(c)
d = fc([f3, f2, f1])(a)
print(d)
| 2.609375 | 3 |
Code/predictTCGAData.py | HalforcNull/Research_PatternRecognition | 0 | 12797748 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 25 17:14:19 2017
@author: Student
"""
from sklearn.naive_bayes import GaussianNB
import numpy as np
import csv
import pickle
TrainingData = []
TrainingResult = []
TestSampleData = []
TestActualResult = []
def __loadData(dataFile):
data = []
i = 0
with open(dataFile, 'rt') as csvfile:
datas = csv.reader(csvfile, delimiter = ',')
for row in datas:
if row is None or len(row) == 0:
continue
i = i + 1
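            # only rows after the first 5000 are kept for prediction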
if i <= 5000:
continue
data.append(row)
return data
log = open('TCGA-predict-log.txt', 'w')
TestData = __loadData('tcga_data.csv')
model = pickle.load( open( "gtex_TrainingResult.pkl", "rb" ) )
# np.float was removed from recent NumPy releases; the builtin float is equivalent here
me = np.array(TestData).astype(float)
del(TestData)
# model.predict returns a numpy array; convert it to a string before writing
log.write(str(model.predict(me)))
log.write('\n')
log.close()
| 2.515625 | 3 |
sources/migrations/0024_rename_owned_by_to_exportable_by.py | industrydive/sourcedive | 0 | 12797749 | <reponame>industrydive/sourcedive<gh_stars>0
# Generated by Django 2.2.10 on 2020-02-24 21:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sources', '0023_add_person_owned_by_field_for_dive_relationship'),
]
operations = [
migrations.RemoveField(
model_name='person',
name='owned_by',
),
migrations.AddField(
model_name='person',
name='exportable_by',
field=models.ManyToManyField(blank=True, help_text='These are the dives that can export this source.', related_name='dive_owner', to='sources.Dive'),
),
]
| 1.71875 | 2 |
scraper/reset_data.py | Ignis-Altum/scraper | 22 | 12797750 | <reponame>Ignis-Altum/scraper
import logging
from scraper import Filemanager
def reset():
print("Resetting data...")
logging.getLogger(__name__).info("Resetting data")
data = Filemanager.get_record_data()
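    # keep the record structure but clear each website's scraped info and datapoints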
for category in data.values():
for product in category.values():
for website in product.values():
website["info"] = {"id": "", "url": "", "currency": ""}
website["datapoints"] = []
Filemanager.save_record_data(data)
def hard_reset():
print("Hard resetting data...")
logging.getLogger(__name__).info("Hard resetting data")
data = {}
Filemanager.save_record_data(data)
Filemanager.clear_product_csv()
| 2.640625 | 3 |