Column schema for the rows below (⌀ marks columns that contain null values):

| column | dtype | range / classes | nulls |
|---|---|---|---|
| hexsha | string | lengths 40-40 | |
| size | int64 | 5 - 2.06M | |
| ext | string | 10 classes | |
| lang | string | 1 class | |
| max_stars_repo_path | string | lengths 3-248 | |
| max_stars_repo_name | string | lengths 5-125 | |
| max_stars_repo_head_hexsha | string | lengths 40-78 | |
| max_stars_repo_licenses | list | lengths 1-10 | |
| max_stars_count | int64 | 1 - 191k | ⌀ |
| max_stars_repo_stars_event_min_datetime | string | lengths 24-24 | ⌀ |
| max_stars_repo_stars_event_max_datetime | string | lengths 24-24 | ⌀ |
| max_issues_repo_path | string | lengths 3-248 | |
| max_issues_repo_name | string | lengths 5-125 | |
| max_issues_repo_head_hexsha | string | lengths 40-78 | |
| max_issues_repo_licenses | list | lengths 1-10 | |
| max_issues_count | int64 | 1 - 67k | ⌀ |
| max_issues_repo_issues_event_min_datetime | string | lengths 24-24 | ⌀ |
| max_issues_repo_issues_event_max_datetime | string | lengths 24-24 | ⌀ |
| max_forks_repo_path | string | lengths 3-248 | |
| max_forks_repo_name | string | lengths 5-125 | |
| max_forks_repo_head_hexsha | string | lengths 40-78 | |
| max_forks_repo_licenses | list | lengths 1-10 | |
| max_forks_count | int64 | 1 - 105k | ⌀ |
| max_forks_repo_forks_event_min_datetime | string | lengths 24-24 | ⌀ |
| max_forks_repo_forks_event_max_datetime | string | lengths 24-24 | ⌀ |
| content | string | lengths 5 - 2.06M | |
| avg_line_length | float64 | 1 - 1.02M | |
| max_line_length | int64 | 3 - 1.03M | |
| alphanum_fraction | float64 | 0 - 1 | |
| count_classes | int64 | 0 - 1.6M | |
| score_classes | float64 | 0 - 1 | |
| count_generators | int64 | 0 - 651k | |
| score_generators | float64 | 0 - 1 | |
| count_decorators | int64 | 0 - 990k | |
| score_decorators | float64 | 0 - 1 | |
| count_async_functions | int64 | 0 - 235k | |
| score_async_functions | float64 | 0 - 1 | |
| count_documentation | int64 | 0 - 1.04M | |
| score_documentation | float64 | 0 - 1 | |
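For orientation only, here is a minimal sketch of how rows with this schema could be inspected once a shard is loaded into pandas; the file name `sample_shard.parquet` and the chosen columns are placeholders, not part of the dump.

```python
import pandas as pd

# Hypothetical local shard with the schema listed above.
df = pd.read_parquet("sample_shard.parquet")

# Each row is one source file: its content plus repository metadata and heuristic scores.
cols = ["max_stars_repo_name", "max_stars_repo_path", "size", "score_documentation"]
print(df[cols].head())

# Example filter: well-documented files from repositories that have a star count.
well_documented = df[(df["score_documentation"] > 0.25) & (df["max_stars_count"].notna())]
print(len(well_documented))
```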
hexsha: 9b84b07bfeb0b1498473bf71e8cf00668429868a | size: 1,138 | ext: py | lang: Python
max_stars: path datastruct/TreeNode.py | repo cocobear/LeetCode-in-Python | head b4ecd5cb7122467ee479f38497faaabb17e6025e | licenses ["MIT"] | count null | event_min null | event_max null
max_issues: path datastruct/TreeNode.py | repo cocobear/LeetCode-in-Python | head b4ecd5cb7122467ee479f38497faaabb17e6025e | licenses ["MIT"] | count null | event_min null | event_max null
max_forks: path datastruct/TreeNode.py | repo cocobear/LeetCode-in-Python | head b4ecd5cb7122467ee479f38497faaabb17e6025e | licenses ["MIT"] | count null | event_min null | event_max null
content:
class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

    def __str__(self, depth=0):
        ret = ''
        if self.right is not None:
            ret += self.right.__str__(depth + 1)
        if self.val is None:
            ret += '\n'
        else:
            ret += '\n' + (' ' * depth) + str(self.val)
        if self.left is not None:
            ret += self.left.__str__(depth + 1)
        return ret

    @staticmethod
    def constructTree(levelOrder):
        # Build a binary tree from the given level-order traversal list.
        # The list is assumed to describe a full binary tree, with None for missing nodes.
        def conTree(root, levelOrder, i):
            if i >= len(levelOrder):
                return
            if levelOrder[i] is None:
                return
            tmp = TreeNode(levelOrder[i])
            root = tmp
            root.left = conTree(root.left, levelOrder, 2 * i + 1)
            root.right = conTree(root.right, levelOrder, 2 * i + 2)
            return root

        root = None
        return conTree(root, levelOrder, 0)


if __name__ == "__main__":
    print(TreeNode.constructTree([3, 9, 20, None, None, 15, 7]))
    print(TreeNode.constructTree([2, 3, 4]))
avg_line_length: 27.756098 | max_line_length: 63 | alphanum_fraction: 0.51406 | count_classes: 1,066 | score_classes: 0.889816 | count_generators: 0 | score_generators: 0 | count_decorators: 599 | score_decorators: 0.5 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 120 | score_documentation: 0.100167
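`constructTree` relies on the usual level-order indexing, with the children of index `i` at `2*i + 1` and `2*i + 2`. A minimal usage sketch, assuming the class above is importable; the `inorder` helper is illustrative and not part of the original file.

```python
# Assumes TreeNode from datastruct/TreeNode.py is on the import path.
def inorder(node):
    """Illustrative in-order traversal of the tree built above."""
    if node is None:
        return []
    return inorder(node.left) + [node.val] + inorder(node.right)

root = TreeNode.constructTree([3, 9, 20, None, None, 15, 7])
print(root)            # sideways, depth-indented rendering from __str__
print(inorder(root))   # [9, 3, 15, 20, 7]
```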
hexsha: 9b88125439c00c982850039874ca9e2e40963ded | size: 17,936 | ext: py | lang: Python
max_stars: path src/test_quality_measures.py | repo hackalog/dimension_reduction | head 18c54256f4b1f1fbfe0b99e86b6701e708b7c85c | licenses ["MIT"] | count 1 | event_min 2018-10-22T11:45:45.000Z | event_max 2018-10-22T11:45:45.000Z
max_issues: path src/test_quality_measures.py | repo hackalog/dimension_reduction | head 18c54256f4b1f1fbfe0b99e86b6701e708b7c85c | licenses ["MIT"] | count null | event_min null | event_max null
max_forks: path src/test_quality_measures.py | repo hackalog/dimension_reduction | head 18c54256f4b1f1fbfe0b99e86b6701e708b7c85c | licenses ["MIT"] | count null | event_min null | event_max null
content:
import hypothesis.strategies as st
from hypothesis.extra.numpy import arrays
from hypothesis import given
import unittest
import numpy as np
from sklearn.base import BaseEstimator
import inspect
import src.quality_measures as qm
from .logging import logger
# old functions to test against while refactoring
def old_centering_matrix(N):
'''
Returns the N x N centering matrix.
'''
I_N = np.identity(N)
one_N = np.matrix(np.ones(N)).transpose()
J = I_N - one_N * one_N.transpose()/N
return J
def old_doubly_center_matrix(matrix):
'''
Doubly center the matrix. That is, -J * matrix * J.
Note that this matrix must be square.
'''
m, n = matrix.shape
assert m == n, "Matrix must be square"
J = old_centering_matrix(m)
new_matrix = -J * matrix * J
return new_matrix / 2
def old_strain(high_distances, low_distances):
B = qm.doubly_center_matrix(qm.square_matrix_entries(high_distances))
top = qm.square_matrix_entries(B - qm.square_matrix_entries(low_distances))
result = np.sqrt(np.sum(top)/np.sum(qm.square_matrix_entries(B)))
return result
def old_point_strain(high_distances, low_distances):
B = qm.doubly_center_matrix(qm.square_matrix_entries(high_distances))
top = qm.square_matrix_entries(B - qm.square_matrix_entries(low_distances))
result = np.sum(top, axis=1)/np.sum(qm.square_matrix_entries(B))
return result
def knn_to_point_untrustworthiness(high_knn, low_knn, n_neighbors=None,
high_rank=None):
'''
Given the n_neighbors nearest neighbors in high space and low space,
together with the rank matrix, compute the value of
"untrustworthiness" of a point (this is the factor that a point
contributes negatively to trustworthiness).
'''
if n_neighbors is None or high_rank is None:
raise ValueError("n_neighbors and high_rank are required")
point_scores = []
N = high_knn.shape[0]
G_K = qm._trustworthiness_normalizating_factor(n_neighbors, N)
for i, low in enumerate(low_knn):
trust_indices = set(low).difference(set(high_knn[i]))
score = 0
for j in trust_indices:
score += (high_rank[i, j] - n_neighbors) * 2 / G_K
point_scores.append(score)
return np.array(point_scores)
def old_point_untrustworthiness(high_distances=None, low_distances=None,
high_data=None, low_data=None,
metric='euclidean', n_neighbors=None):
'''
Given high/low distances or data, compute the value of
"untrustworthiness" of a point (this is the factor that a point
contributes negatively to trustworthiness).
'''
hd, ld, _ = qm.pairwise_distance_differences(high_distances=high_distances,
low_distances=low_distances,
high_data=high_data,
low_data=low_data,
metric=metric)
if n_neighbors is None:
raise ValueError("n_neighbors is required")
high_rank = qm.rank_matrix(hd)
low_rank = qm.rank_matrix(ld)
high_knn = qm.rank_to_knn(high_rank, n_neighbors=n_neighbors)
low_knn = qm.rank_to_knn(low_rank, n_neighbors=n_neighbors)
point_scores = knn_to_point_untrustworthiness(high_knn, low_knn,
n_neighbors=n_neighbors,
high_rank=high_rank)
return point_scores
class test_estimator(BaseEstimator):
def fit(self, X):
self._return_value = X
def transform(self, X):
return self._return_value
# Start of tests
@given(arrays(np.float, (3, 3), elements=st.floats(min_value=-100,
max_value=100)))
def test_square_matrix_entries(array):
matrix = np.matrix(array)
s_array = array**2
assert (qm.square_matrix_entries(matrix) == s_array).all()
@given(st.integers(min_value=1, max_value=100))
def test_old_new_centering_matrix(N):
assert (qm.centering_matrix(N) == old_centering_matrix(N)).all()
@given(st.integers(min_value=1, max_value=100))
def test_centering_matrix_output(N):
matrix = qm.centering_matrix(N)
assert matrix.shape == (N, N)
@given(arrays(np.float, (3, 3), elements=st.floats(min_value=-100,
max_value=100)))
def test_old_new_doubly_center_matrix(matrix):
assert (qm.doubly_center_matrix(matrix) ==
old_doubly_center_matrix(matrix)).all()
@given(arrays(np.float, (3, 3), elements=st.floats(min_value=-100,
max_value=100)),
arrays(np.float, (3, 2), elements=st.floats(min_value=-100,
max_value=100)))
def test_pairwise_distance_differences_data(high_data, low_data):
hd, ld, dd = qm.pairwise_distance_differences(high_data=high_data,
low_data=low_data)
n_pts = high_data.shape[0]
assert hd.shape == (n_pts, n_pts)
assert ld.shape == (n_pts, n_pts)
assert dd.shape == (n_pts, n_pts)
@given(arrays(np.float, (3, 3), elements=st.floats(min_value=-100,
max_value=100)),
arrays(np.float, (3, 3), elements=st.floats(min_value=-100,
max_value=100)))
def test_pairwise_distance_differences_dist(high_dist, low_dist):
hd, ld, dd = qm.pairwise_distance_differences(high_distances=high_dist,
low_distances=low_dist)
n_pts = high_dist.shape[0]
assert hd.shape == (n_pts, n_pts)
assert ld.shape == (n_pts, n_pts)
assert dd.shape == (n_pts, n_pts)
assert (hd == high_dist).all()
assert (ld == low_dist).all()
assert (dd == (high_dist-low_dist)).all()
@given(arrays(np.float, (3, 3), elements=st.floats(min_value=-100,
max_value=100)),
arrays(np.float, (3, 2), elements=st.floats(min_value=-100,
max_value=100)))
def test_stress_data(high_data, low_data):
stress = qm.stress(high_data=high_data, low_data=low_data)
assert stress.dtype == 'float64'
@given(arrays(np.float, (3, 3), elements=st.floats(min_value=-100,
max_value=100)),
arrays(np.float, (3, 3), elements=st.floats(min_value=-100,
max_value=100)))
def test_stress_distances(high_distances, low_distances):
stress = qm.stress(high_distances=high_distances,
low_distances=low_distances)
assert stress.dtype == 'float64'
@given(arrays(np.float, (3, 3), elements=st.floats(min_value=-100,
max_value=100)),
arrays(np.float, (3, 2), elements=st.floats(min_value=-100,
max_value=100)))
def test_point_stress_data(high_data, low_data):
pstress = qm.point_stress(high_data=high_data, low_data=low_data)
n_pts = high_data.shape[0]
assert pstress.shape == (n_pts, )
@given(arrays(np.float, (3, 3), elements=st.floats(min_value=-100,
max_value=100)),
arrays(np.float, (3, 3), elements=st.floats(min_value=-100,
max_value=100)))
def test_point_stress_distances(high_distances, low_distances):
pstress = qm.point_stress(high_distances=high_distances,
low_distances=low_distances)
n_pts = high_distances.shape[0]
assert pstress.shape == (n_pts, )
@given(arrays(np.float, (3, 3), elements=st.floats(min_value=-100,
max_value=100)),
arrays(np.float, (3, 3), elements=st.floats(min_value=-100,
max_value=100)))
def test_old_new_strain(high_distances, low_distances):
# all zeros raises an error. tested later.
if not (high_distances == 0).all():
assert (qm.strain(high_distances, low_distances) ==
old_strain(high_distances, low_distances)).all()
@given(arrays(np.float, (3, 3), elements=st.floats(min_value=-100,
max_value=100)),
arrays(np.float, (3, 3), elements=st.floats(min_value=-100,
max_value=100)))
def test_old_new_point_strain(high_distances, low_distances):
# all zeros raises an error. tested later.
if not (high_distances == 0).all():
assert (qm.point_strain(high_distances, low_distances) ==
old_point_strain(high_distances, low_distances)).all()
# TODO: Test various input styles.
@given(arrays(np.float, (3, 3), elements=st.floats(min_value=-100,
max_value=100)),
arrays(np.float, (3, 3), elements=st.floats(min_value=-100,
max_value=100)),
st.integers(min_value=1, max_value=3))
def test_old_new_point_untrustworthiness(high_distances, low_distances,
n_neighbors):
old = old_point_untrustworthiness(high_distances=high_distances,
low_distances=low_distances,
n_neighbors=n_neighbors)
new = qm.point_untrustworthiness(high_distances=high_distances,
low_distances=low_distances,
n_neighbors=n_neighbors)
assert all(old == new)
@given(arrays(np.float, (3, 3), elements=st.floats(min_value=-100,
max_value=100)),
arrays(np.float, (3, 3), elements=st.floats(min_value=-100,
max_value=100)),
st.integers(min_value=1, max_value=3))
def test_trustworthiness_distances(high_distances, low_distances,
n_neighbors):
new = qm.trustworthiness(high_distances=high_distances,
low_distances=low_distances,
n_neighbors=n_neighbors)
old_point = old_point_untrustworthiness(high_distances=high_distances,
low_distances=low_distances,
n_neighbors=n_neighbors)
assert new == (1-sum(old_point))
assert new >= 0.0
assert new <= 1.0
@given(arrays(np.float, (3, 3), elements=st.floats(min_value=-100,
max_value=100)),
arrays(np.float, (3, 3), elements=st.floats(min_value=-100,
max_value=100)),
st.integers(min_value=1, max_value=3))
def test_trustworthiness_data(high_data, low_data, n_neighbors):
new = qm.trustworthiness(high_data=high_data,
low_data=low_data,
n_neighbors=n_neighbors)
old_point = old_point_untrustworthiness(high_data=high_data,
low_data=low_data,
n_neighbors=n_neighbors)
assert new == (1-sum(old_point))
assert new >= 0.0
assert new <= 1.0
@given(arrays(np.float, (3, 3), elements=st.floats(min_value=-100,
max_value=100)),
arrays(np.float, (3, 3), elements=st.floats(min_value=-100,
max_value=100)),
st.integers(min_value=1, max_value=3))
def test_trustworthiness_point_scores(high_distances, low_distances,
n_neighbors):
old_point = old_point_untrustworthiness(high_distances=high_distances,
low_distances=low_distances,
n_neighbors=n_neighbors)
new = qm.trustworthiness(point_scores=old_point)
assert new == (1-sum(old_point))
assert new >= 0.0
assert new <= 1.0
@given(arrays(np.float, (3, 3), elements=st.floats(min_value=-100,
max_value=100)),
arrays(np.float, (3, 3), elements=st.floats(min_value=-100,
max_value=100)),
st.integers(min_value=1, max_value=3))
def test_continuity_distances(high_distances, low_distances,
n_neighbors):
new = qm.continuity(high_distances=high_distances,
low_distances=low_distances,
n_neighbors=n_neighbors)
assert new >= 0.0
assert new <= 1.0
@given(arrays(np.float, (3, 3), elements=st.floats(min_value=-100,
max_value=100)),
arrays(np.float, (3, 3), elements=st.floats(min_value=-100,
max_value=100)),
st.integers(min_value=1, max_value=3))
def test_continuity_data(high_data, low_data, n_neighbors):
new = qm.continuity(high_data=high_data,
low_data=low_data,
n_neighbors=n_neighbors)
assert new >= 0.0
assert new <= 1.0
@given(arrays(np.float, (3, 3), elements=st.floats(min_value=-100,
max_value=100)),
arrays(np.float, (3, 3), elements=st.floats(min_value=-100,
max_value=100)),
st.integers(min_value=1, max_value=3))
def test_continuity_point_scores(high_distances, low_distances,
n_neighbors):
point = qm.point_discontinuity(high_distances=high_distances,
low_distances=low_distances,
n_neighbors=n_neighbors)
new = qm.continuity(point_scores=point)
assert new == (1-sum(point))
assert new >= 0.0
assert new <= 1.0
@given(arrays(np.float, (3, 3), elements=st.floats(min_value=-100,
max_value=100),
unique=True),
arrays(np.float, (3, 3), elements=st.floats(min_value=-100,
max_value=100),
unique=True),
arrays(np.bool, (3, 1)),
st.integers(min_value=1, max_value=3))
def test_scorers(hd, ld, target, n_neighbors):
key_l = qm.available_quality_measures().keys()
high_low_l = ["continuity", "stress", "strain", "trustworthiness"]
greater_is_better = ["continuity", "trustworthiness"]
estimator = test_estimator()
estimator.fit(ld)
for key in key_l:
if key in greater_is_better:
val = 1.0
else:
val = -1.0
logger.debug(key)
measure = qm.available_quality_measures()[key]
scorer = qm.available_scorers()[key]
if key in high_low_l:
if 'n_neighbors' in inspect.getfullargspec(measure).args:
m = measure(high_data=hd, low_data=ld, n_neighbors=n_neighbors)
s = scorer(estimator, hd, n_neighbors=n_neighbors)
else:
m = measure(high_data=hd, low_data=ld)
s = scorer(estimator, hd)
elif key == '1nn-error':
m = measure(data=ld, classes=target)
s = scorer(estimator, hd, y=target)
else:
            logger.debug(f"Untested measure: {key}. Add me to test_scorers")
assert False
logger.debug(f"measure:{m}, scorer:{s}")
if m != 0 and s!=0:
assert np.isclose(m/s, val)
else:
assert s == m
@given(arrays(np.float, (3, 3), elements=st.floats(min_value=-100,
max_value=100)))
def test_rank_matrix_compatibility(matrix):
assert (qm.slower_rank_matrix(matrix) == qm.rank_matrix(matrix)).all()
class TestEncoding(unittest.TestCase):
@given(arrays(np.float, (3, 2), elements=st.floats(min_value=-100,
max_value=100)))
def test_doubly_center_matrix_input(self, matrix):
with self.assertRaises(ValueError):
qm.doubly_center_matrix(matrix)
@given(arrays(np.float, (3, 3), elements=st.floats(min_value=-100,
max_value=100)))
def test_pairwise_distance_differences_input(self, matrix):
with self.assertRaises(ValueError):
qm.pairwise_distance_differences(high_data=matrix)
with self.assertRaises(ValueError):
qm.pairwise_distance_differences(high_distances=matrix)
with self.assertRaises(ValueError):
qm.pairwise_distance_differences(low_data=matrix)
with self.assertRaises(ValueError):
qm.pairwise_distance_differences(low_distances=matrix)
def test_point_untrustworthiness_input(self):
with self.assertRaises(ValueError):
qm.point_untrustworthiness()
@given(st.integers(min_value=1, max_value=100))
def test_zero_input_strain(self, N):
matrix = np.zeros((N, N))
with self.assertRaises(ValueError):
qm.strain(high_distances=matrix, low_distances=matrix)
@given(st.integers(min_value=1, max_value=100))
def test_zero_input_point_strain(self, N):
matrix = np.zeros((N, N))
with self.assertRaises(ValueError):
qm.point_strain(high_distances=matrix, low_distances=matrix)
if __name__ == '__main__':
unittest.main()
avg_line_length: 42.301887 | max_line_length: 79 | alphanum_fraction: 0.575992 | count_classes: 1,747 | score_classes: 0.097402 | count_generators: 0 | score_generators: 0 | count_decorators: 13,815 | score_decorators: 0.770239 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,085 | score_documentation: 0.060493
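The file above is built around Hypothesis property-based tests: `@given(arrays(...))` generates bounded random matrices and the test body asserts an invariant over them. A self-contained sketch of the same pattern, deliberately not tied to `src.quality_measures`, assuming only `numpy` and `hypothesis` are installed:

```python
import numpy as np
import hypothesis.strategies as st
from hypothesis import given
from hypothesis.extra.numpy import arrays

# Property: squaring every entry of a matrix equals elementwise self-multiplication,
# for any bounded 3x3 float array Hypothesis generates.
@given(arrays(np.float64, (3, 3),
              elements=st.floats(min_value=-100, max_value=100)))
def test_elementwise_square(matrix):
    assert np.allclose(matrix ** 2, matrix * matrix)
```

Run under pytest, Hypothesis calls the body repeatedly with generated arrays and shrinks any failing example to a minimal counterexample.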
hexsha: 9b884dfe98e3224d03e56b0ff9073cf479be11aa | size: 797 | ext: py | lang: Python
max_stars: path addons/odoo_elasticsearch/models/trend_search_mapping.py | repo marionumza/vocal_v12 | head 480990e919c9410903e06e7813ee92800bd6a569 | licenses ["Unlicense"] | count null | event_min null | event_max null
max_issues: path addons/odoo_elasticsearch/models/trend_search_mapping.py | repo marionumza/vocal_v12 | head 480990e919c9410903e06e7813ee92800bd6a569 | licenses ["Unlicense"] | count null | event_min null | event_max null
max_forks: path addons/odoo_elasticsearch/models/trend_search_mapping.py | repo marionumza/vocal_v12 | head 480990e919c9410903e06e7813ee92800bd6a569 | licenses ["Unlicense"] | count 1 | event_min 2021-05-05T07:59:08.000Z | event_max 2021-05-05T07:59:08.000Z
content:
import logging
_logger = logging.getLogger(__name__)
from odoo import api, fields, models
class TrendSearchMapping(models.Model):
_name = 'trend.search.mapping'
_order = "sequence"
name = fields.Char(string="Keywords", required=True, help="Name of product")
trend_search_mapping_id = fields.Many2one('elastic.search.config',string='Field Index Mapping')
sequence = fields.Integer(default=5)
@api.model
def trend_search_map_set_default(self):
config_id = self.env['elastic.search.config'].search([],limit=1)
config_id.trending_state = 'enable'
self.create({
'name':'imac',
'trend_search_mapping_id':config_id.id
})
self.create({
'name':'ipad',
'trend_search_mapping_id':config_id.id
})
avg_line_length: 30.653846 | max_line_length: 99 | alphanum_fraction: 0.667503 | count_classes: 705 | score_classes: 0.884567 | count_generators: 0 | score_generators: 0 | count_decorators: 378 | score_decorators: 0.474279 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 208 | score_documentation: 0.260979
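For readers unfamiliar with the Odoo ORM used above: records of a model such as `trend.search.mapping` are created through `self.env[...]`. A hedged sketch of that pattern follows; the wrapper model name and method are purely illustrative and not part of the addon.

```python
from odoo import models

class TrendKeywordLoader(models.TransientModel):
    # Illustrative helper model, not part of the odoo_elasticsearch addon.
    _name = 'trend.keyword.loader'
    _description = 'Seeds trend.search.mapping rows'

    def add_trend_keywords(self, keywords):
        # Assumes at least one elastic.search.config record exists,
        # as trend_search_map_set_default() above also assumes.
        config = self.env['elastic.search.config'].search([], limit=1)
        mapping_model = self.env['trend.search.mapping']
        for keyword in keywords:
            mapping_model.create({
                'name': keyword,
                'trend_search_mapping_id': config.id,
            })
```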
hexsha: 9b897b3e2e2a162da4ba6ef2e1e00007c3d0d7d3 | size: 25,363 | ext: py | lang: Python
max_stars: path src/python/twitter/common/app/application.py | repo wfarner/commons | head 42988a7a49f012665174538cca53604c7846ee86 | licenses ["Apache-2.0"] | count 1 | event_min 2019-12-20T14:13:27.000Z | event_max 2019-12-20T14:13:27.000Z
max_issues: path src/python/twitter/common/app/application.py | repo wfarner/commons | head 42988a7a49f012665174538cca53604c7846ee86 | licenses ["Apache-2.0"] | count null | event_min null | event_max null
max_forks: path src/python/twitter/common/app/application.py | repo wfarner/commons | head 42988a7a49f012665174538cca53604c7846ee86 | licenses ["Apache-2.0"] | count 1 | event_min 2019-12-20T14:13:29.000Z | event_max 2019-12-20T14:13:29.000Z
content:
# ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
from __future__ import print_function
__author__ = 'Dave Buchfuhrer, Brian Wickman'
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
import atexit
import copy
import inspect
import optparse
import os
import shlex
import sys
import threading
from collections import defaultdict, deque
from functools import partial
from twitter.common import options
from twitter.common.app.module import AppModule
from twitter.common.app.inspection import Inspection
from twitter.common.lang import Compatibility
from twitter.common.process import daemonize
from twitter.common.util import topological_sort
class Application(object):
class Error(Exception): pass
# enforce a quasi-singleton interface (for resettable applications in test)
_Global = None
@staticmethod
def reset():
"""Reset the global application. Only useful for testing."""
Application._Global = Application()
@staticmethod
def active():
"""Return the current resident application object."""
return Application._Global
HELP_OPTIONS = [
options.Option("-h", "--help", "--short-help",
action="callback",
callback=lambda *args, **kwargs: Application.active()._short_help(*args, **kwargs),
help="show this help message and exit."),
options.Option("--long-help",
action="callback",
callback=lambda *args, **kwargs: Application.active()._long_help(*args, **kwargs),
help="show options from all registered modules, not just the __main__ module.")
]
IGNORE_RC_FLAG = '--app_ignore_rc_file'
APP_OPTIONS = {
'daemonize':
options.Option('--app_daemonize',
action='store_true',
default=False,
dest='twitter_common_app_daemonize',
help="Daemonize this application."),
'daemon_stdout':
options.Option('--app_daemon_stdout',
default='/dev/null',
dest='twitter_common_app_daemon_stdout',
help="Direct this app's stdout to this file if daemonized."),
'daemon_stderr':
options.Option('--app_daemon_stderr',
default='/dev/null',
dest='twitter_common_app_daemon_stderr',
help="Direct this app's stderr to this file if daemonized."),
'pidfile':
options.Option('--app_pidfile',
default=None,
dest='twitter_common_app_pidfile',
help="The pidfile to use if --app_daemonize is specified."),
'debug':
options.Option('--app_debug',
action='store_true',
default=False,
dest='twitter_common_app_debug',
help="Print extra debugging information during application initialization."),
'profiling':
options.Option('--app_profiling',
action='store_true',
default=False,
dest='twitter_common_app_profiling',
help="Run profiler on the code while it runs. Note this can cause slowdowns."),
'profile_output':
options.Option('--app_profile_output',
default=None,
metavar='FILENAME',
dest='twitter_common_app_profile_output',
help="Dump the profiling output to a binary profiling format."),
'rc_filename':
options.Option('--app_rc_filename',
action='store_true',
default=False,
dest='twitter_common_app_rc_filename',
help="Print the filename for the rc file and quit."),
'ignore_rc_file':
options.Option(IGNORE_RC_FLAG,
action='store_true',
default=False,
dest='twitter_common_app_ignore_rc_file',
help="Ignore default arguments from the rc file."),
}
NO_COMMAND = 'DEFAULT'
OPTIONS = 'options'
OPTIONS_ATTR = '__options__'
def __init__(self):
self._name = None
self._registered_modules = []
self._init_modules = []
self._option_targets = defaultdict(dict)
self._global_options = {}
self._interspersed_args = False
self._main_options = Application.HELP_OPTIONS[:]
self._usage = ""
self._profiler = None
self._commands = {}
self._reset()
for opt in Application.APP_OPTIONS.values():
self.add_option(opt)
self._configure_options(None, Application.APP_OPTIONS)
def _raise_if_initialized(self, msg="Cannot perform operation after initialization!"):
if self.initialized:
raise Application.Error(msg)
def _raise_if_uninitialized(self, msg="Cannot perform operation before initialization!"):
if not self.initialized:
raise Application.Error(msg)
def _reset(self):
"""
Resets the state set up by init() so that init() may be called again.
"""
self.initialized = False
self._option_values = options.Values()
self._argv = []
def interspersed_args(self, value):
self._interspersed_args = bool(value)
def _configure_options(self, module, option_dict):
for opt_name, opt in option_dict.items():
self._option_targets[module][opt_name] = opt.dest
def configure(self, module=None, **kw):
"""
Configure the application object or its activated modules.
Typically application modules export flags that can be defined on the
command-line. In order to allow the application to override defaults,
these modules may export named parameters to be overridden. For example,
the Application object itself exports named variables such as "debug" or
"profiling", which can be enabled via:
app.configure(debug=True)
and
app.configure(profiling=True)
respectively. They can also be enabled with their command-line argument
counterpart, e.g.
./my_application --app_debug --app_profiling
Some modules export named options, e.g. twitter.common.app.modules.http exports
'enable', 'host', 'port'. The command-line arguments still take precedence and
will override any defaults set by the application in app.configure. To activate
these options, just pass along the module name:
app.configure(module='twitter.common.app.modules.http', enable=True)
"""
if module not in self._option_targets:
if not self._import_module(module):
raise Application.Error('Unknown module to configure: %s' % module)
def configure_option(name, value):
if name not in self._option_targets[module]:
raise Application.Error('Module %s has no option %s' % (module, name))
self.set_option(self._option_targets[module][name], value)
for option_name, option_value in kw.items():
configure_option(option_name, option_value)
def _main_parser(self):
return (options.parser()
.interspersed_arguments(self._interspersed_args)
.options(self._main_options)
.usage(self._usage))
def command_parser(self, command):
assert command in self._commands
values_copy = copy.deepcopy(self._option_values)
parser = self._main_parser()
command_group = options.new_group(('For %s only' % command) if command else 'Default')
for option in getattr(self._commands[command], Application.OPTIONS_ATTR):
op = copy.deepcopy(option)
if not hasattr(values_copy, op.dest):
setattr(values_copy, op.dest, op.default if op.default != optparse.NO_DEFAULT else None)
Application.rewrite_help(op)
op.default = optparse.NO_DEFAULT
command_group.add_option(op)
parser = parser.groups([command_group]).values(values_copy)
usage = self._commands[command].__doc__
if usage:
parser = parser.usage(usage)
return parser
def _construct_partial_parser(self):
"""
Construct an options parser containing only options added by __main__
or global help options registered by the application.
"""
if hasattr(self._commands.get(self._command), Application.OPTIONS_ATTR):
return self.command_parser(self._command)
else:
return self._main_parser().values(copy.deepcopy(self._option_values))
def _construct_full_parser(self):
"""
Construct an options parser containing both local and global (module-level) options.
"""
return self._construct_partial_parser().groups(self._global_options.values())
def _rc_filename(self):
rc_short_filename = '~/.%src' % self.name()
return os.path.expanduser(rc_short_filename)
def _add_default_options(self, argv):
"""
Return an argument list with options from the rc file prepended.
"""
rc_filename = self._rc_filename()
options = argv
if Application.IGNORE_RC_FLAG not in argv and os.path.exists(rc_filename):
command = self._command or Application.NO_COMMAND
rc_config = ConfigParser.SafeConfigParser()
rc_config.read(rc_filename)
if rc_config.has_option(command, Application.OPTIONS):
default_options_str = rc_config.get(command, Application.OPTIONS)
default_options = shlex.split(default_options_str, True)
options = default_options + options
return options
def _parse_options(self, force_args=None):
"""
Parse options and set self.option_values and self.argv to the values to be passed into
the application's main() method.
"""
argv = sys.argv[1:] if force_args is None else force_args
if argv and argv[0] in self._commands:
self._command = argv.pop(0)
else:
self._command = None
parser = self._construct_full_parser()
self._option_values, self._argv = parser.parse(self._add_default_options(argv))
def _short_help(self, option, opt, value, parser):
self._construct_partial_parser().print_help()
sys.exit(1)
def _long_help(self, option, opt, value, parser):
self._construct_full_parser().print_help()
sys.exit(1)
def _setup_modules(self):
"""
Setup all initialized modules.
"""
module_registry = AppModule.module_registry()
for bundle in topological_sort(AppModule.module_dependencies()):
for module_label in bundle:
assert module_label in module_registry
module = module_registry[module_label]
self._debug_log('Initializing: %s (%s)' % (module.label(), module.description()))
try:
module.setup_function()
except AppModule.Unimplemented:
pass
self._init_modules.append(module.label())
def _teardown_modules(self):
"""
Teardown initialized module in reverse initialization order.
"""
module_registry = AppModule.module_registry()
for module_label in reversed(self._init_modules):
assert module_label in module_registry
module = module_registry[module_label]
self._debug_log('Running exit function for %s (%s)' % (module_label, module.description()))
try:
module.teardown_function()
except AppModule.Unimplemented:
pass
def _maybe_daemonize(self):
if self._option_values.twitter_common_app_daemonize:
daemonize(pidfile=self._option_values.twitter_common_app_pidfile,
stdout=self._option_values.twitter_common_app_daemon_stdout,
stderr=self._option_values.twitter_common_app_daemon_stderr)
# ------- public exported methods -------
def init(self, force_args=None):
"""
Initialize the state necessary to run the application's main() function but
without actually invoking main. Mostly useful for testing. If force_args
specified, use those arguments instead of sys.argv[1:].
"""
self._raise_if_initialized("init cannot be called twice. Use reinit if necessary.")
self._parse_options(force_args)
self._maybe_daemonize()
self._setup_modules()
self.initialized = True
def reinit(self, force_args=None):
"""
Reinitialize the application. This clears the stateful parts of the application
framework and reruns init(). Mostly useful for testing.
"""
self._reset()
self.init(force_args)
def argv(self):
self._raise_if_uninitialized("Must call app.init() before you may access argv.")
return self._argv
def add_module_path(self, name, path):
"""
Add all app.Modules defined by name at path.
Typical usage (e.g. from the __init__.py of something containing many
app modules):
app.add_module_path(__name__, __path__)
"""
import pkgutil
for _, mod, ispkg in pkgutil.iter_modules(path):
if ispkg: continue
fq_module = '.'.join([name, mod])
__import__(fq_module)
for (kls_name, kls) in inspect.getmembers(sys.modules[fq_module], inspect.isclass):
if issubclass(kls, AppModule):
self.register_module(kls())
def register_module(self, module):
"""
Register an app.Module and all its options.
"""
if not isinstance(module, AppModule):
raise TypeError('register_module should be called with a subclass of AppModule')
if module.label() in self._registered_modules:
# Do not reregister.
return
if hasattr(module, 'OPTIONS'):
if not isinstance(module.OPTIONS, dict):
raise Application.Error('Registered app.Module %s has invalid OPTIONS.' % module.__module__)
for opt in module.OPTIONS.values():
self._add_option(module.__module__, opt)
self._configure_options(module.label(), module.OPTIONS)
self._registered_modules.append(module.label())
@staticmethod
def _get_module_key(module):
return 'From module %s' % module
def _add_main_option(self, option):
self._main_options.append(option)
def _add_module_option(self, module, option):
calling_module = Application._get_module_key(module)
if calling_module not in self._global_options:
self._global_options[calling_module] = options.new_group(calling_module)
self._global_options[calling_module].add_option(option)
@staticmethod
def rewrite_help(op):
if hasattr(op, 'help') and isinstance(op.help, Compatibility.string):
if op.help.find('%default') != -1 and op.default != optparse.NO_DEFAULT:
op.help = op.help.replace('%default', str(op.default))
else:
op.help = op.help + ((' [default: %s]' % str(op.default))
if op.default != optparse.NO_DEFAULT else '')
def _add_option(self, calling_module, option):
op = copy.deepcopy(option)
if op.dest and hasattr(op, 'default'):
self.set_option(op.dest, op.default if op.default != optparse.NO_DEFAULT else None,
force=False)
Application.rewrite_help(op)
op.default = optparse.NO_DEFAULT
if calling_module == '__main__':
self._add_main_option(op)
else:
self._add_module_option(calling_module, op)
def _get_option_from_args(self, args, kwargs):
if len(args) == 1 and kwargs == {} and isinstance(args[0], options.Option):
return args[0]
else:
return options.TwitterOption(*args, **kwargs)
def add_option(self, *args, **kwargs):
"""
Add an option to the application.
You may pass either an Option object from the optparse/options module, or
pass the *args/**kwargs necessary to construct an Option.
"""
self._raise_if_initialized("Cannot call add_option() after main()!")
calling_module = Inspection.find_calling_module()
added_option = self._get_option_from_args(args, kwargs)
self._add_option(calling_module, added_option)
def command(self, function=None, name=None):
"""
Decorator to turn a function into an application command.
To add a command foo, the following patterns will both work:
@app.command
def foo(args, options):
...
@app.command(name='foo')
def bar(args, options):
...
"""
if name is None:
return self._register_command(function)
else:
return partial(self._register_command, command_name=name)
def _register_command(self, function, command_name=None):
"""
Registers function as the handler for command_name. Uses function.__name__ if command_name
is None.
"""
if Inspection.find_calling_module() == '__main__':
if command_name is None:
command_name = function.__name__
if command_name in self._commands:
raise Application.Error('Found two definitions for command %s' % command_name)
self._commands[command_name] = function
return function
def default_command(self, function):
"""
Decorator to make a command default.
"""
if Inspection.find_calling_module() == '__main__':
if None in self._commands:
defaults = (self._commands[None].__name__, function.__name__)
raise Application.Error('Found two default commands: %s and %s' % defaults)
self._commands[None] = function
return function
def command_option(self, *args, **kwargs):
"""
Decorator to add an option only for a specific command.
"""
def register_option(function):
added_option = self._get_option_from_args(args, kwargs)
if not hasattr(function, Application.OPTIONS_ATTR):
setattr(function, Application.OPTIONS_ATTR, deque())
getattr(function, Application.OPTIONS_ATTR).appendleft(added_option)
return function
return register_option
def copy_command_options(self, command_function):
"""
Decorator to copy command options from another command.
"""
def register_options(function):
if hasattr(command_function, Application.OPTIONS_ATTR):
if not hasattr(function, Application.OPTIONS_ATTR):
setattr(function, Application.OPTIONS_ATTR, deque())
command_options = getattr(command_function, Application.OPTIONS_ATTR)
getattr(function, Application.OPTIONS_ATTR).extendleft(command_options)
return function
return register_options
def add_command_options(self, command_function):
"""
Function to add all options from a command
"""
module = inspect.getmodule(command_function).__name__
for option in getattr(command_function, Application.OPTIONS_ATTR, []):
self._add_option(module, option)
def _debug_log(self, msg):
if hasattr(self._option_values, 'twitter_common_app_debug') and (
self._option_values.twitter_common_app_debug):
print('twitter.common.app debug: %s' % msg, file=sys.stderr)
def set_option(self, dest, value, force=True):
"""
Set a global option value either pre- or post-initialization.
If force=False, do not set the default if already overridden by a manual call to
set_option.
"""
if hasattr(self._option_values, dest) and not force:
return
setattr(self._option_values, dest, value)
def get_options(self):
"""
Return all application options, both registered by __main__ and all imported modules.
"""
return self._option_values
def get_commands(self):
"""
Return all valid commands registered by __main__
"""
return filter(None, self._commands.keys())
def get_commands_and_docstrings(self):
"""
Generate all valid commands together with their docstrings
"""
for command, function in self._commands.items():
if command is not None:
yield command, function.__doc__
def get_local_options(self):
"""
Return the options only defined by __main__.
"""
new_values = options.Values()
for opt in self._main_options:
if opt.dest:
setattr(new_values, opt.dest, getattr(self._option_values, opt.dest))
return new_values
def set_usage(self, usage):
"""
Set the usage message should the user call --help or invalidly specify options.
"""
self._usage = usage
def error(self, message):
"""
Print the application help message, an error message, then exit.
"""
self._construct_partial_parser().error(message)
def help(self):
"""
Print the application help message and exit.
"""
self._short_help(*(None,)*4)
def set_name(self, application_name):
"""
Set the application name. (Autodetect otherwise.)
"""
self._raise_if_initialized("Cannot set application name.")
self._name = application_name
def name(self):
"""
Return the name of the application. If set_name was never explicitly called,
the application framework will attempt to autodetect the name of the application
based upon the location of __main__.
"""
if self._name is not None:
return self._name
else:
try:
return Inspection.find_application_name()
except:
return 'unknown'
def quit(self, rc, exit_function=sys.exit):
self._debug_log('Shutting application down.')
self._teardown_modules()
self._debug_log('Finishing up module teardown.')
nondaemons = 0
self.dump_profile()
for thr in threading.enumerate():
self._debug_log(' Active thread%s: %s' % (' (daemon)' if thr.isDaemon() else '', thr))
if thr is not threading.current_thread() and not thr.isDaemon():
nondaemons += 1
if nondaemons:
self._debug_log('More than one active non-daemon thread, your application may hang!')
else:
self._debug_log('Exiting cleanly.')
exit_function(rc)
def profiler(self):
if self._option_values.twitter_common_app_profiling:
if self._profiler is None:
try:
import cProfile as profile
except ImportError:
import profile
self._profiler = profile.Profile()
return self._profiler
else:
return None
def dump_profile(self):
if self._option_values.twitter_common_app_profiling:
if self._option_values.twitter_common_app_profile_output:
self.profiler().dump_stats(self._option_values.twitter_common_app_profile_output)
else:
self.profiler().print_stats(sort='time')
def _run_main(self, main_method, *args, **kwargs):
try:
if self.profiler():
rc = self.profiler().runcall(main_method, *args, **kwargs)
else:
rc = main_method(*args, **kwargs)
except SystemExit as e:
rc = e.code
self._debug_log('main_method exited with return code = %s' % repr(rc))
except KeyboardInterrupt as e:
rc = None
self._debug_log('main_method exited with ^C')
return rc
def _import_module(self, name):
"""
Import the module, return True on success, False if the import failed.
"""
try:
__import__(name)
return True
except ImportError:
return False
def main(self):
"""
If called from __main__ module, run script's main() method with arguments passed
and global options parsed.
The following patterns are acceptable for the main method:
main()
main(args)
main(args, options)
"""
main_module = Inspection.find_calling_module()
if main_module != '__main__':
# only support if __name__ == '__main__'
return
# Pull in modules in twitter.common.app.modules
if not self._import_module('twitter.common.app.modules'):
print('Unable to import twitter app modules!', file=sys.stderr)
sys.exit(1)
# defer init as long as possible.
self.init()
if self._option_values.twitter_common_app_rc_filename:
print('RC filename: %s' % self._rc_filename())
return
try:
caller_main = Inspection.find_main_from_caller()
except Inspection.InternalError:
caller_main = None
if None in self._commands:
assert caller_main is None, "Error: Cannot define both main and a default command."
else:
self._commands[None] = caller_main
main_method = self._commands[self._command]
if main_method is None:
commands = sorted(self.get_commands())
if commands:
print('Must supply one of the following commands:', ', '.join(commands), file=sys.stderr)
else:
print('No main() or command defined! Application must define one of these.', file=sys.stderr)
sys.exit(1)
try:
argspec = inspect.getargspec(main_method)
except TypeError as e:
print('Malformed main(): %s' % e, file=sys.stderr)
sys.exit(1)
if len(argspec.args) == 1:
args = [self._argv]
elif len(argspec.args) == 2:
args = [self._argv, self._option_values]
else:
if len(self._argv) != 0:
print('main() takes no arguments but got leftover arguments: %s!' %
' '.join(self._argv), file=sys.stderr)
sys.exit(1)
args = []
rc = self._run_main(main_method, *args)
self.quit(rc)
avg_line_length: 34.791495 | max_line_length: 101 | alphanum_fraction: 0.674644 | count_classes: 23,829 | score_classes: 0.939518 | count_generators: 242 | score_generators: 0.009541 | count_decorators: 720 | score_decorators: 0.028388 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 8,667 | score_documentation: 0.341718
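Pulling the docstrings together, this is roughly how a script consumes the Application class: register options and commands from `__main__`, then hand control to `main()`. The sketch below assumes the package exposes the singleton as `twitter.common.app`, which the docstrings' `app.configure(...)` examples suggest; the option and command themselves are invented for illustration.

```python
# my_application.py -- hedged usage sketch, not part of the module above.
from twitter.common import app

app.add_option('--greeting', dest='greeting', default='hello',
               help='Greeting to print [default: %default]')

@app.command
def greet(args, options):
    """Print a greeting for every name given on the command line."""
    for name in args:
        print('%s, %s' % (options.greeting, name))

# Dispatches to a registered command, e.g.:  ./my_application.py greet Alice --greeting=hi
app.main()
```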
hexsha: 9b89b21d0ec82cd3eb4f531c62145aac5544814d | size: 3,838 | ext: py | lang: Python
max_stars: path nas/gui/end_registration_window.py | repo RolandZitny/BC-NAS | head df2b1c643e5dce3b48c72026b4f83f895f33b822 | licenses ["MIT"] | count null | event_min null | event_max null
max_issues: path nas/gui/end_registration_window.py | repo RolandZitny/BC-NAS | head df2b1c643e5dce3b48c72026b4f83f895f33b822 | licenses ["MIT"] | count null | event_min null | event_max null
max_forks: path nas/gui/end_registration_window.py | repo RolandZitny/BC-NAS | head df2b1c643e5dce3b48c72026b4f83f895f33b822 | licenses ["MIT"] | count null | event_min null | event_max null
content:
import base64
import os
import matplotlib.pyplot as plt
import cv2
import numpy as np
from PyQt5 import uic
from PyQt5 import QtWidgets
from PyQt5 import QtMultimedia
from PyQt5 import QtMultimediaWidgets
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import QDesktopWidget
from nas.gui.login_stimulation_window import LoginStimulationPresentation
import config
directory_path = os.path.dirname(os.path.abspath(__file__))
ui_path = os.path.join(directory_path, "designs" + os.sep + "end_registration_window.ui")
Ui_RegWindow, QtBaseClass = uic.loadUiType(ui_path) # Load .ui file.
class EndRegistrationWindow(QtWidgets.QMainWindow, Ui_RegWindow):
"""
Class to display the final window that confirms the registration.
The user can perform a test login from this window.
:param reg_user: The object of the registered user.
:type reg_user: ``user``
"""
def __init__(self, reg_user):
QtWidgets.QMainWindow.__init__(self)
Ui_RegWindow.__init__(self)
self.setupUi(self)
self.reg_user = reg_user
self.login_window = None
self.set_up_window()
self.get_reaction_plot()
self.set_end_figure()
def set_up_window(self):
"""
        Applies the remaining window setup, such as centering the window and connecting buttons.
"""
# Center window to screen.
qt_rectangle = self.frameGeometry()
center_point = QDesktopWidget().availableGeometry().center()
qt_rectangle.moveCenter(center_point)
self.move(qt_rectangle.topLeft())
qt_rectangle.moveCenter(center_point)
self.move(qt_rectangle.topLeft())
self.end_name.setText(self.end_name.text() + " " + self.reg_user.get_name())
self.end_surname.setText(self.end_surname.text() + " " + self.reg_user.get_surname())
self.end_loginId.setText(self.end_loginId.text() + " " + self.reg_user.get_id())
self.TestLogin.clicked.connect(self.testing_log_in)
def get_reaction_plot(self):
"""
Creates a graph of responses to self-face and non-self-face stimuli.
This graph is stored at `TMP_END_FIGURE`.
"""
reactions, react_types = self.reg_user.get_reg_data()
self_face_reaction = None
non_self_face_reaction = None
for i in range(len(react_types)):
if react_types[i] == 1:
self_face_reaction = reactions[i]
non_self_face_reaction = reactions[i + 1]
break
fig, axs = plt.subplots(2)
fig.suptitle('Self-face & Non-self-face reakcia')
axs[0].plot(self_face_reaction[0])
axs[0].set_title('Self-face')
axs[1].plot(non_self_face_reaction[0])
axs[1].set_title('Non-self-face')
plt.setp(axs[0], ylabel='mV')
plt.setp(axs[1], ylabel='mV')
fig.tight_layout()
plt.savefig(config.TMP_END_FIGURE)
plt.clf()
def set_end_figure(self):
"""
Draw a graph of the reaction in the window.
"""
pixmap = QPixmap(config.TMP_END_FIGURE)
self.ReactionLabel.setPixmap(QPixmap(pixmap.scaledToHeight(500)))
self.clean_tmp()
@staticmethod
def clean_tmp():
"""
Cleans up the temporary files folder.
"""
if os.path.exists(config.TMP_END_FIGURE):
os.remove(config.TMP_END_FIGURE)
if os.path.exists(config.TMP_PHOTO):
os.remove(config.TMP_PHOTO)
if os.path.exists(config.TMP_PROC_PHOTO):
os.remove(config.TMP_PROC_PHOTO)
def testing_log_in(self):
"""
Performs a test login.
"""
self.login_window = LoginStimulationPresentation(self.reg_user)
self.login_window.showMaximized()
self.hide()
avg_line_length: 32.252101 | max_line_length: 94 | alphanum_fraction: 0.647473 | count_classes: 3,238 | score_classes: 0.843669 | count_generators: 0 | score_generators: 0 | count_decorators: 384 | score_decorators: 0.100052 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 824 | score_documentation: 0.214695
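The window above is created and shown by other parts of the application, so it needs a running Qt event loop and a registered-user object. As context, a minimal self-contained sketch of the same center-on-screen idiom used in `set_up_window()`:

```python
import sys
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QDesktopWidget

app = QtWidgets.QApplication(sys.argv)
window = QtWidgets.QMainWindow()
window.resize(400, 300)

# Same centering idiom as set_up_window(): move the frame so its centre
# matches the centre of the available screen geometry.
frame = window.frameGeometry()
frame.moveCenter(QDesktopWidget().availableGeometry().center())
window.move(frame.topLeft())

window.show()
sys.exit(app.exec_())
```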
hexsha: 9b8af184786b7b838926fd6c07d47b9fd8a6c329 | size: 445 | ext: py | lang: Python
max_stars: path testing/matplotlib_test.py | repo deranderemark/CigarTracer | head 3f1172683c57dc7f28dd7517132014b23adfff90 | licenses ["Apache-2.0"] | count null | event_min null | event_max null
max_issues: path testing/matplotlib_test.py | repo deranderemark/CigarTracer | head 3f1172683c57dc7f28dd7517132014b23adfff90 | licenses ["Apache-2.0"] | count 1 | event_min 2022-02-06T15:50:07.000Z | event_max 2022-02-06T15:50:07.000Z
max_forks: path testing/matplotlib_test.py | repo deranderemark/CigarTracer | head 3f1172683c57dc7f28dd7517132014b23adfff90 | licenses ["Apache-2.0"] | count null | event_min null | event_max null
content:
import matplotlib.pyplot as plt

# Define the figure and axes
fig, ax = plt.subplots()

# Build the values for the table
table_data = [
    ["1", 30, 34],
    ["2", 20, 223],
    ["3", 33, 2354],
    ["4", 25, 234],
    ["5", 12, 929]
]

# Create the table
table = ax.table(cellText=table_data, loc='center', colLabels=['SD', 'ID', 'Score'])

# Adjust the table
table.set_fontsize(14)
table.scale(1, 4)
ax.axis('off')

# Show the table
plt.show()
avg_line_length: 17.8 | max_line_length: 84 | alphanum_fraction: 0.633708 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 157 | score_documentation: 0.35123
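If the rendered table is wanted as a file rather than an interactive window, the same script can save the figure instead of showing it; a small sketch assuming a headless Agg backend is acceptable:

```python
import matplotlib
matplotlib.use("Agg")  # headless backend, no display required
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
table_data = [["1", 30, 34], ["2", 20, 223], ["3", 33, 2354]]
table = ax.table(cellText=table_data, loc='center', colLabels=['SD', 'ID', 'Score'])
table.set_fontsize(14)
table.scale(1, 4)
ax.axis('off')

# Write the table to disk instead of calling plt.show().
fig.savefig("table.png", dpi=150, bbox_inches="tight")
```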
hexsha: 9b8bb3b48e86a641ba4d24045654d0c3bccfafdb | size: 5,242 | ext: py | lang: Python
max_stars: path venv/Lib/site-packages/func_timeout/StoppableThread.py | repo lijj0812/UIAutoDemo | head 3e13380adeb6cf92410676ff7c125dbee598427f | licenses ["Apache-2.0"] | count 1 | event_min 2021-01-12T14:39:01.000Z | event_max 2021-01-12T14:39:01.000Z
max_issues: path venv/Lib/site-packages/func_timeout/StoppableThread.py | repo lijj0812/UIAutoDemo | head 3e13380adeb6cf92410676ff7c125dbee598427f | licenses ["Apache-2.0"] | count 2 | event_min 2021-06-16T19:56:35.000Z | event_max 2021-06-16T19:57:49.000Z
max_forks: path venv/Lib/site-packages/func_timeout/StoppableThread.py | repo lijj0812/UIAutoDemo | head 3e13380adeb6cf92410676ff7c125dbee598427f | licenses ["Apache-2.0"] | count 1 | event_min 2020-09-17T07:56:53.000Z | event_max 2020-09-17T07:56:53.000Z
content:
'''
Copyright (c) 2016, 2017, 2019 Timothy Savannah All Rights Reserved.
    Licensed under the Lesser GNU Public License Version 3, LGPLv3. You should have received a copy of this with the source distribution as
LICENSE, otherwise it is available at https://github.com/kata198/func_timeout/LICENSE
'''
import os
import ctypes
import threading
__all__ = ('StoppableThread', 'JoinThread')
class StoppableThread(threading.Thread):
'''
StoppableThread - A thread that can be stopped by forcing an exception in the execution context.
This works both to interrupt code that is in C or in python code, at either the next call to a python function,
or the next line in python code.
It is recommended that if you call stop ( @see StoppableThread.stop ) that you use an exception that inherits BaseException, to ensure it likely isn't caught.
Also, beware unmarked exception handlers in your code. Code like this:
while True:
try:
doSomething()
except:
continue
will never be able to abort, because the exception you raise is immediately caught.
        The exception is raised over and over, with a specified delay (default 2.0 seconds)
'''
def _stopThread(self, exception, raiseEvery=2.0):
'''
_stopThread - @see StoppableThread.stop
'''
if self.is_alive() is False:
return True
self._stderr = open(os.devnull, 'w')
# Create "joining" thread which will raise the provided exception
# on a repeat, until the thread stops.
joinThread = JoinThread(self, exception, repeatEvery=raiseEvery)
        # Try to prevent spurious prints
joinThread._stderr = self._stderr
joinThread.start()
joinThread._stderr = self._stderr
def stop(self, exception, raiseEvery=2.0):
'''
Stops the thread by raising a given exception.
@param exception <Exception type> - Exception to throw. Likely, you want to use something
that inherits from BaseException (so except Exception as e: continue; isn't a problem)
This should be a class/type, NOT an instance, i.e. MyExceptionType not MyExceptionType()
@param raiseEvery <float> Default 2.0 - We will keep raising this exception every #raiseEvery seconds,
until the thread terminates.
If your code traps a specific exception type, this will allow you #raiseEvery seconds to cleanup before exit.
If you're calling third-party code you can't control, which catches BaseException, set this to a low number
to break out of their exception handler.
@return <None>
'''
return self._stopThread(exception, raiseEvery)
class JoinThread(threading.Thread):
'''
        JoinThread - The workhorse that stops the StoppableThread.
Takes an exception, and upon being started immediately raises that exception in the current context
of the thread's execution (so next line of python gets it, or next call to a python api function in C code ).
@see StoppableThread for more details
'''
def __init__(self, otherThread, exception, repeatEvery=2.0):
'''
__init__ - Create a JoinThread (don't forget to call .start() ! )
@param otherThread <threading.Thread> - A thread
@param exception <BaseException> - An exception. Should be a BaseException, to prevent "catch Exception as e: continue" type code
from never being terminated. If such code is unavoidable, you can try setting #repeatEvery to a very low number, like .00001,
and it will hopefully raise within the context of the catch, and be able to break free.
@param repeatEvery <float> Default 2.0 - After starting, the given exception is immediately raised. Then, every #repeatEvery seconds,
it is raised again, until the thread terminates.
'''
threading.Thread.__init__(self)
self.otherThread = otherThread
self.exception = exception
self.repeatEvery = repeatEvery
self.daemon = True
def run(self):
'''
run - The thread main. Will attempt to stop and join the attached thread.
'''
# Try to silence default exception printing.
self.otherThread._Thread__stderr = self._stderr
if hasattr(self.otherThread, '_Thread__stop'):
# If py2, call this first to start thread termination cleanly.
# Python3 does not need such ( nor does it provide.. )
self.otherThread._Thread__stop()
while self.otherThread.is_alive():
            # Keep raising the exception in case it gets caught, hoping this breaks the thread out.
ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(self.otherThread.ident), ctypes.py_object(self.exception))
self.otherThread.join(self.repeatEvery)
try:
self._stderr.close()
except:
pass
# vim: set ts=4 sw=4 expandtab :
avg_line_length: 39.119403 | max_line_length: 166 | alphanum_fraction: 0.647844 | count_classes: 4,804 | score_classes: 0.916444 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 3,819 | score_documentation: 0.728539
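A hedged usage sketch of the class above: run work in a `StoppableThread` subclass and interrupt it with a `BaseException`-derived type, as the docstring recommends. The worker loop and the exception name are illustrative; only `StoppableThread.stop()` comes from the module.

```python
import time
from func_timeout import StoppableThread  # the module above is normally exposed here

class StopWork(BaseException):
    """Raised asynchronously inside the worker to force it to unwind."""

class Worker(StoppableThread):
    def run(self):
        while True:           # stand-in for long-running work
            time.sleep(0.1)

worker = Worker()
worker.start()
time.sleep(0.5)
worker.stop(StopWork, raiseEvery=0.5)  # keep re-raising until the thread dies
worker.join(2.0)
print("still alive:", worker.is_alive())
```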
hexsha: 9b8fd2bf80fa07a3bd7f3ddd5254592ae0988fc9 | size: 190 | ext: py | lang: Python
max_stars: path pics/admin.py | repo Joseph-Odhiambo/Gallary | head f8dfab1149f11de94519afe597fe87f4ed28b9a5 | licenses ["MIT"] | count 1 | event_min 2021-05-19T12:58:15.000Z | event_max 2021-05-19T12:58:15.000Z
max_issues: path pics/admin.py | repo HASSAN1A/Gallery | head a73bd93bcecbb830b4d676c9e9dd306880cac6f2 | licenses ["MIT"] | count null | event_min null | event_max null
max_forks: path pics/admin.py | repo HASSAN1A/Gallery | head a73bd93bcecbb830b4d676c9e9dd306880cac6f2 | licenses ["MIT"] | count null | event_min null | event_max null
content:
from django.contrib import admin
from .models import Image,Areas,Category
# Register your models here.
admin.site.register(Image)
admin.site.register(Areas)
admin.site.register(Category)
avg_line_length: 19 | max_line_length: 40 | alphanum_fraction: 0.805263 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 28 | score_documentation: 0.147368
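The three `register()` calls above give the default admin pages. If a customised change list were wanted, the decorator form with a `ModelAdmin` is the usual next step; the field names below are assumptions about the `Image` model, not taken from the project.

```python
from django.contrib import admin
from .models import Image, Areas, Category

# Alternative, customised registration (illustrative field names).
@admin.register(Image)
class ImageAdmin(admin.ModelAdmin):
    list_display = ('id', 'category', 'area')
    list_filter = ('category',)

admin.site.register(Areas)
admin.site.register(Category)
```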
hexsha: 9b902f255bd9d45e07a2e966eeb8f841dbe8fc88 | size: 1,985 | ext: py | lang: Python
max_stars: path tests/lamvery/cli_test.py | repo rdtr/lamvery | head e9334a0d258c63e6426c7cd320f691b9f21044c1 | licenses ["MIT"] | count 101 | event_min 2015-11-12T11:29:20.000Z | event_max 2020-05-24T19:26:37.000Z
max_issues: path tests/lamvery/cli_test.py | repo rdtr/lamvery | head e9334a0d258c63e6426c7cd320f691b9f21044c1 | licenses ["MIT"] | count 45 | event_min 2015-11-13T05:43:18.000Z | event_max 2017-04-27T18:14:49.000Z
max_forks: path tests/lamvery/cli_test.py | repo marcy-terui/lamvery | head e9334a0d258c63e6426c7cd320f691b9f21044c1 | licenses ["MIT"] | count 22 | event_min 2016-01-26T00:12:57.000Z | event_max 2019-12-13T09:06:43.000Z
content:
# -*- coding: utf-8 -*-
from unittest import TestCase
from mock import Mock, patch
from lamvery.cli import (
main,
init,
build,
configure,
deploy,
encrypt,
encrypt_file,
events,
decrypt,
set_alias,
invoke,
rollback,
api,
generate
)
class FunctionsTestCase(TestCase):
def test_init(self):
with patch('lamvery.cli.InitAction'):
init(Mock())
def test_archive(self):
with patch('lamvery.cli.BuildAction'):
build(Mock())
def test_configure(self):
with patch('lamvery.cli.ConfigureAction'):
configure(Mock())
def test_deploy(self):
with patch('lamvery.cli.DeployAction'):
deploy(Mock())
def test_encrypt(self):
with patch('lamvery.cli.EncryptAction'):
encrypt(Mock())
def test_encrypt_file(self):
with patch('lamvery.cli.EncryptFileAction'):
encrypt_file(Mock())
def test_events(self):
with patch('lamvery.cli.EventsAction'):
events(Mock())
def test_decrypt(self):
with patch('lamvery.cli.DecryptAction'):
decrypt(Mock())
def test_set_alias(self):
with patch('lamvery.cli.SetAliasAction'):
set_alias(Mock())
    def test_invoke(self):
        with patch('lamvery.cli.InvokeAction'):
            invoke(Mock())

    def test_rollback(self):
        with patch('lamvery.cli.RollbackAction'):
            rollback(Mock())

    def test_api(self):
        with patch('lamvery.cli.ApiAction'):
            api(Mock())

    def test_generate(self):
        with patch('lamvery.cli.GenerateAction'):
            generate(Mock())

    # @patch decorators inject mocks bottom-up: sys.exit first, then ArgumentParser.
    @patch('argparse.ArgumentParser')
    @patch('sys.exit')
    def test_main(self, ex, argp):
        main()

    @patch('argparse.ArgumentParser')
    @patch('sys.exit')
    def test_main_error(self, ex, argp):
        argp.parse_args = Mock(side_effect=Exception)
        main()
avg_line_length: 22.816092 | max_line_length: 53 | alphanum_fraction: 0.594458 | count_classes: 1,693 | score_classes: 0.852897 | count_generators: 0 | score_generators: 0 | count_decorators: 272 | score_decorators: 0.137028 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 441 | score_documentation: 0.222166
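The tests above only check that each CLI entry point runs with its action class patched out. A slightly stronger sketch using the mock handle that `patch` returns; whether a single construction of `InitAction` is really lamvery's contract is an assumption.

```python
from unittest import TestCase
from mock import Mock, patch

from lamvery.cli import init

class InitCallTestCase(TestCase):
    def test_init_constructs_action(self):
        with patch('lamvery.cli.InitAction') as action_cls:
            init(Mock())
            # Assumes init() instantiates the patched InitAction.
            self.assertTrue(action_cls.called)
```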
hexsha: 9b906d0ca190514b42e0b5edafba1d709df76a02 | size: 569 | ext: py | lang: Python
max_stars: path db-test.py | repo alexolotl/apt-hunt | head 25d2a2d565b0b694a8f5e3442ba429ae99688e54 | licenses ["MIT"] | count null | event_min null | event_max null
max_issues: path db-test.py | repo alexolotl/apt-hunt | head 25d2a2d565b0b694a8f5e3442ba429ae99688e54 | licenses ["MIT"] | count null | event_min null | event_max null
max_forks: path db-test.py | repo alexolotl/apt-hunt | head 25d2a2d565b0b694a8f5e3442ba429ae99688e54 | licenses ["MIT"] | count null | event_min null | event_max null
content:
import sqlite3
from util import post_listing_to_slack
from slackclient import SlackClient
import settings
sc = SlackClient(settings.SLACK_TOKEN)
con = sqlite3.connect('listings.db')
# with con:
# con.row_factory = sqlite3.Row
# cur = con.cursor()
# # print("SQLite version: %s" % data)
# cur.execute("SELECT * FROM Listings")
# rows = cur.fetchone()
# for row in rows:
# print(row)
#
# post_listing_to_slack(sc, rows)
# deletes entire table !!!
# with con:
# cur = con.cursor()
# cur.execute("DROP TABLE IF EXISTS Listings")
avg_line_length: 22.76 | max_line_length: 50 | alphanum_fraction: 0.669596 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 382 | score_documentation: 0.671353
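The commented-out block above sketches the intended read path. Here is a self-contained version of that pattern against an in-memory database, so nothing touches `listings.db`; the table layout is illustrative, not the real listings schema.

```python
import sqlite3

con = sqlite3.connect(':memory:')
with con:  # the connection context manager commits the transaction
    con.execute("CREATE TABLE Listings (id INTEGER PRIMARY KEY, name TEXT, price INTEGER)")
    con.execute("INSERT INTO Listings (name, price) VALUES (?, ?)",
                ("1BR sunny walk-up", 2400))

con.row_factory = sqlite3.Row
cur = con.cursor()
cur.execute("SELECT * FROM Listings")
row = cur.fetchone()
for value in row:                      # sqlite3.Row iterates like the original loop
    print(value)
print({key: row[key] for key in row.keys()})  # column-name access is also available
con.close()
```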
hexsha: 9b9103f77dd912b77e605c95f3235abd9dcb9a29 | size: 15,630 | ext: py | lang: Python
max_stars: path src/utils.py | repo asrashley/dash-live | head 1ffbc57896e4e46855a42af6ef79a1865ebfce55 | licenses ["Apache-2.0"] | count 2 | event_min 2019-11-02T06:26:29.000Z | event_max 2020-05-15T16:54:20.000Z
max_issues: path src/utils.py | repo asrashley/dash-live | head 1ffbc57896e4e46855a42af6ef79a1865ebfce55 | licenses ["Apache-2.0"] | count 1 | event_min 2020-01-20T17:20:54.000Z | event_max 2020-01-21T08:38:30.000Z
max_forks: path src/utils.py | repo asrashley/dash-live | head 1ffbc57896e4e46855a42af6ef79a1865ebfce55 | licenses ["Apache-2.0"] | count null | event_min null | event_max null
content:
#############################################################################
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#############################################################################
#
# Project Name : Simulated MPEG DASH service
#
# Author : Alex Ashley
#
#############################################################################
import base64
import datetime
import decimal
import io
import json
import math
import os
import re
import time
class Buffer(object):
def __init__(self, pos, data):
self.pos = pos
self.buf = data
self.data = memoryview(self.buf)
self.size = len(data)
self.timestamp = time.time()
@property
def end(self):
return self.pos + self.size
class BufferedReader(io.RawIOBase):
def __init__(self, reader, buffersize=16384, data=None, offset=0, size=None,
max_buffers=30):
super(BufferedReader, self).__init__()
#print('BufferedReader', reader, buffersize, offset, size)
self.reader = reader
self.buffers = {}
self.buffersize = buffersize
self.pos = 0
self.offset = offset
self.size = size
self.max_buffers = max_buffers
self.num_buffers = 0
if data is not None:
self.size = len(data)
self.buffersize = self.size
self.buffers[0] = Buffer(self.pos, data)
self.num_buffers = 1
self.max_buffers = self.num_buffers+1
def readable(self):
return not self.closed
def seek(self, offset, whence=io.SEEK_SET):
#print('seek', offset, whence)
if whence == io.SEEK_SET:
self.pos = offset
elif whence == io.SEEK_CUR:
self.pos += offset
elif whence == io.SEEK_END:
if self.size is None:
self.reader.seek(0, io.SEEK_END)
self.size = self.reader.tell() - self.offset
self.pos = self.size + offset
self.pos = max(0, self.pos)
if self.size is not None:
self.pos = min(self.pos, self.size)
return self.pos
def tell(self):
return self.pos
def seekable(self):
return not self.closed
def peek(self, size):
#print('peek', self.pos, size)
assert size > 0
if self.size is not None:
size = min(size, self.size - self.pos)
if size <= 0:
return r''
bucket = self.pos // self.buffersize
end = (self.pos + size) // self.buffersize
bucket *= self.buffersize
end *= self.buffersize
offset = self.pos - bucket
rv = []
todo = size
while todo:
self.cache(bucket)
sz = min(todo, self.buffersize-offset)
rv.append(self.buffers[bucket].data[offset:].tobytes())
bucket += self.buffersize
offset = 0
todo -= sz
if len(rv)==1:
return rv[0]
return r''.join(rv)
def cache(self, bucket):
#print('cache', bucket)
if self.buffers.has_key(bucket):
return
if self.num_buffers == self.max_buffers:
remove = None
oldest = None
for k,v in self.buffers.iteritems():
if remove is None or v.timestamp < oldest:
remove = k
oldest = v.timestamp
if remove is not None:
del self.buffers[remove]
self.num_buffers -= 1
if self.reader.tell() != bucket+self.offset:
self.reader.seek(bucket+self.offset, io.SEEK_SET)
b = Buffer(bucket, self.reader.read(self.buffersize))
if self.size is None and b.size < self.buffersize:
self.size = bucket + b.size
self.buffers[bucket] = b
self.num_buffers += 1
assert self.num_buffers <= self.max_buffers
def read(self, n=-1):
#print('read', self.pos, n)
if n==-1:
return self.readall()
if self.size is not None:
n = min(n, self.size - self.pos)
if n <= 0:
return r''
b = self.peek(n)
self.pos += n
return b[:n]
def readall(self):
self.reader.seek(self.pos)
rv = self.reader.read()
self.pos += len(rv)
return rv
# A UTC class, see https://docs.python.org/2.7/library/datetime.html#datetime.tzinfo
class UTC(datetime.tzinfo):
"""UTC"""
ZERO = datetime.timedelta(0)
def utcoffset(self, dt):
return self.ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return self.ZERO
def dateTimeToUnixEpoch(dt):
""" Convert a dateTime to number of seconds since the Unix epoch.
"""
epoch = datetime.datetime(year=1970, month=1, day=1, tzinfo=UTC())
return (dt - epoch).total_seconds()
def toIsoDateTime(value):
""" Convert a datetime to an ISO8601 formatted dateTime string.
:param value: the dateTime to convert
:returns: an ISO8601 formatted string version of the dateTime
"""
rv = value.isoformat()
if value.tzinfo is None:
rv += 'Z'
else:
# replace +00:00 timezone with Z
rv = re.sub('[+-]00:00$','Z',rv)
return rv
def toIsoDuration(secs):
""" Convert a time (in seconds) to an ISO8601 formatted duration string.
:param secs: the duration to convert, in seconds
:returns: an ISO8601 formatted string version of the duration
"""
if isinstance(secs,basestring):
secs = float(secs)
elif isinstance(secs, datetime.timedelta):
secs = secs.total_seconds()
hrs = math.floor(secs/3600)
rv=['PT']
secs %= 3600
mins = math.floor(secs/60)
secs %= 60
if hrs:
rv.append('%dH'%hrs)
if hrs or mins:
rv.append('%dM'%mins)
rv.append('%0.2fS'%secs)
return ''.join(rv)
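# A minimal usage sketch of the ISO helpers above (expected strings follow from the code as written):
def _example_iso_formatting():
    assert toIsoDuration(3661) == 'PT1H1M1.00S'
    assert toIsoDuration(90.5) == 'PT1M30.50S'
    dt = datetime.datetime(2020, 1, 1, 12, 0, 0, tzinfo=UTC())
    assert toIsoDateTime(dt) == '2020-01-01T12:00:00Z'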
date_hacks = [
(re.compile('Apri[^l]'),'Apr '), (re.compile('Sept[^e]'),'Sep '),
(re.compile(r'(\w{3} \d{1,2},? \d{4})\s*-\s*(.*$)'), r'\1 \2' ),
(re.compile(r'(\w{3} \d{1,2}), (\d{4}\s*\d{1,2}:\d{2})'), r'\1 \2' ),
(re.compile(r'(\w{3})-(\d{2})$'), r'\1 \2' ),
(re.compile(r'(.+) ([PCE][SD]?T)$'),r'\1')
]
def parse_date(date, format=None):
"""Try to create a datetime from the given string"""
formats = ["%Y-%m-%d", "%m/%d/%y", "%m/%d/%Y", "%b %Y", "%b %y", "%m/xx/%y",
"%a %b %d %Y", "%B %d %Y %H:%M", "%b %d %Y %H:%M",
"%B %d %Y", "%b %d %Y",'%a %b %d, %Y']
if format is not None:
formats.insert(0,format)
if not isinstance(date, basestring):
date = str(date)
d = date
tz = datetime.timedelta(0)
if re.match('.+\s+ES?T$',date):
tz = datetime.timedelta(hours=5)
elif re.match('.+\s+EDT$',date):
tz = datetime.timedelta(hours=4)
elif re.match('.+\s+PS?T$',date):
tz = datetime.timedelta(hours=8)
elif re.match('.+\s+PDT$',date):
tz = datetime.timedelta(hours=7)
for regex,sub in date_hacks:
d = regex.sub(sub,d)
for f in formats:
try:
rv = datetime.datetime.strptime(d, f)
rv += tz;
return rv
except ValueError:
pass
try:
return time.strptime(date)
except ValueError:
pass
return None
def dateTimeFormat(value, fmt):
""" Format a date using the given format"""
if not value:
return value
if isinstance(value, basestring):
value = parse_date(value)
if value is None:
return value
return value.strftime(fmt)
duration_re = re.compile(r'^PT((?P<hours>\d+)[H:])?((?P<minutes>\d+)[M:])?((?P<seconds>[\d.]+)S?)?$')
def from_isodatetime(date_time):
"""
Convert an ISO formated date string to a datetime.datetime or datetime.timedelta
"""
if not date_time:
return None
if date_time[:2]=='PT':
match = duration_re.match(date_time)
if not match:
raise ValueError(date_time)
hours, minutes, seconds = match.group('hours'), match.group('minutes'), match.group('seconds')
secs = 0
if hours is not None:
secs += int(match.group('hours'))*3600
if minutes is not None:
secs += int(match.group('minutes'))*60
if seconds is not None:
secs += float(match.group('seconds'))
return datetime.timedelta(seconds=secs)
if 'T' in date_time:
try:
return datetime.datetime.strptime(date_time, "%Y-%m-%dT%H:%M:%S.%fZ").replace(tzinfo=UTC())
except ValueError:
pass
try:
return datetime.datetime.strptime(date_time, "%Y-%m-%dT%H:%M:%SZ").replace(tzinfo=UTC())
except ValueError:
return datetime.datetime.strptime(date_time, "%Y-%m-%dT%H:%MZ").replace(tzinfo=UTC())
if not 'Z' in date_time:
try:
return datetime.datetime.strptime(date_time, "%Y-%m-%d")
except ValueError:
return datetime.datetime.strptime(date_time, "%d/%m/%Y")
return datetime.datetime.strptime(date_time, "%H:%M:%SZ").replace(tzinfo=UTC()).time()
def toHtmlString(item, className=None):
"""Converts an object in to a form suitable for rendering in an HTML page.
"""
rv=item
if isinstance(item,dict):
        if className:
            rv=['<table class="%s">'%className]   # collect rows in a list so the append() calls below work
        else:
            rv=['<table>']
for key,val in item.iteritems():
rv.append('<tr><td>%s</td><td>%s</td></tr>'%(str(key),toHtmlString(val)))
rv.append('</table>')
rv = '\n'.join(rv)
elif isinstance(item,(list,tuple)):
rv = []
for val in item:
rv.append(toHtmlString(val))
if item.__class__ == tuple:
rv = ''.join(['(',','.join(rv),')'])
else:
rv = ''.join(['[',','.join(rv),']'])
if className:
rv = '<span class="%s">%s</span>'%(className,rv)
elif isinstance(item,bool):
if className is None:
className=''
rv = '<span class="bool-yes %s">✓</span>'%className if item else '<span class="bool-no %s">✗</span>'%className
else:
if className:
rv = '<span class="%s">%s</span>'%(className,str(rv))
else:
rv = str(rv)
return rv
def flatten(items, convert_numbers=False):
"""Converts an object in to a form suitable for storage.
flatten will take a dictionary, list or tuple and inspect each item in the object looking for
items such as datetime.datetime objects that need to be converted to a canonical form before
they can be processed for storage.
"""
if isinstance(items,dict):
rv={}
else:
rv = []
for item in items:
key = None
if isinstance(items,dict):
key = item
item = items[key]
if hasattr(item, 'toJSON'):
item = item.toJSON(pure=True)
elif isinstance(item,(datetime.date, datetime.datetime,datetime.time)):
item = toIsoDateTime(item)
elif isinstance(item,(datetime.timedelta)):
item = toIsoDuration(item)
elif convert_numbers and isinstance(item,long):
item = '%d'%item
elif isinstance(item,decimal.Decimal):
item = float(item)
elif isinstance(item, basestring):
item = str(item).replace("'","\'")
elif isinstance(item, (list, set, tuple)):
item = flatten(list(item))
elif isinstance(item, dict):
item = flatten(item)
if callable(item):
continue
if key:
rv[key]=item
else:
rv.append(item)
if items.__class__ == tuple:
return tuple(rv)
return rv
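# A small sketch of what flatten() produces for a mixed dictionary (values follow from the code above):
def _example_flatten():
    item = {
        'when': datetime.datetime(2020, 1, 1, tzinfo=UTC()),
        'wait': datetime.timedelta(seconds=30),
        'count': 3,
    }
    flat = flatten(item)
    assert flat == {'when': '2020-01-01T00:00:00Z', 'wait': 'PT30.00S', 'count': 3}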
def as_python(value):
"""Convert the value into a string of Python code.
The result is suitable for use with eval()
"""
if value is None:
return 'None'
wrap_strings = True
if hasattr(value, 'toJSON'):
value = value.toJSON()
wrap_strings = False
if isinstance(value, (list, tuple)):
items = map(lambda v: as_python(v), list(value))
try:
value = '[{0}]'.format(','.join(items))
except TypeError:
print items
raise
elif isinstance(value, (dict)):
items = []
clz = value.get('_type', None)
for k, v in value.iteritems():
if k == '_type':
continue
if clz is None:
items.append('"{0}": {1}'.format(k, as_python(v)))
else:
items.append('{0}={1}'.format(k, as_python(v)))
if clz is None:
value = '{' + ','.join(items) + '}'
else:
value = '{0}({1})'.format(clz, ','.join(items))
elif wrap_strings and isinstance(value, (basestring)):
if '"' in value:
value = ''.join(["'", value, "'"])
else:
value = ''.join(['"', value, '"'])
elif isinstance(value, (datetime.date, datetime.datetime,datetime.time)):
value = 'utils.from_isodatetime("%s")'%(toIsoDateTime(value))
elif isinstance(value, (datetime.timedelta)):
value = 'utils.from_isodatetime("%s")'%(toIsoDuration(value))
elif isinstance(value, decimal.Decimal):
value = 'decimal.Decimal(%s)'%(value)
else:
value = str(value)
return value
def toJson(value, indent=0):
if not value:
return value
try:
if isinstance(value, (dict, list)):
value = flatten(value)
return json.dumps(value, indent=indent)
except ValueError:
return value
def xmlSafe(value):
"""Convert the given string to a format that is safe for inclusion in an XML document.
"""
    return value.replace('&','&amp;')
def default(value, default_value):
if value:
return value
return default_value
def scale_timedelta(delta, num, denom):
"""Scale the given timedelta, avoiding overflows"""
secs = num * delta.seconds
msecs = num* delta.microseconds
secs += msecs / 1000000.0
return secs / denom
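# Worked example for the scaling above (avoiding overflow, per the docstring):
def _example_scale_timedelta():
    delta = datetime.timedelta(minutes=2)
    assert scale_timedelta(delta, 3, 4) == 90.0   # (3 * 120s) / 4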
def toBase64(value):
return base64.b64encode(value)
def toUuid(value):
if not isinstance(value, basestring):
value = value.encode('hex')
value = value.upper()
return '-'.join([value[:8], value[8:12], value[12:16], value[16:20], value[20:] ])
def sizeFormat(value, binary=True):
units = ['G', 'M', 'K', '']
mult = 1024 if binary else 1000
while value > mult and units:
units.pop()
value = value // mult
if not units:
units = 'T'
return '{:d}{}B'.format(value, units[-1])
#
# The following code is from djangoappengine/utils.py
#
try:
from google.appengine.api import apiproxy_stub_map
have_appserver = bool(apiproxy_stub_map.apiproxy.GetStub('datastore_v3'))
on_production_server = have_appserver and not os.environ.get('SERVER_SOFTWARE', '').lower().startswith('devel')
except ImportError:
pass
| 32.63048 | 130 | 0.55675 | 4,140 | 0.264875 | 0 | 0 | 64 | 0.004095 | 0 | 0 | 3,669 | 0.234741 |
9b9174eb28a0ab4b95d4146848ae09c6b7a36f4f
| 1,883 |
py
|
Python
|
network.py
|
YanhengWang/Draughts
|
ad19ccbd3c4fc0defda68c45ed8f2dd14969f2a3
|
[
"Apache-2.0"
] | null | null | null |
network.py
|
YanhengWang/Draughts
|
ad19ccbd3c4fc0defda68c45ed8f2dd14969f2a3
|
[
"Apache-2.0"
] | 1 |
2020-10-12T00:33:54.000Z
|
2020-10-12T00:33:54.000Z
|
network.py
|
YanhengWang/Draughts
|
ad19ccbd3c4fc0defda68c45ed8f2dd14969f2a3
|
[
"Apache-2.0"
] | null | null | null |
from utils import PATH_LABEL
from utils import PATH_DATA_FOLDER
import pickle
import torch
import torch.nn as nn
import torch.utils.data as dat
class ResBlock(nn.Module):
def __init__(self, inChannels, outChannels):
super(ResBlock, self).__init__()
self.matchDimension = None
if inChannels != outChannels:
self.matchDimension = nn.Conv2d(inChannels, outChannels, 1, stride = 1, bias = False)
self.conv1 = nn.Conv2d(inChannels, outChannels, 3, stride = 1, padding = 1, bias = False)
self.bn1 = nn.BatchNorm2d(outChannels)
self.conv2 = nn.Conv2d(outChannels, outChannels, 3, stride = 1, padding = 1, bias = False)
self.bn2 = nn.BatchNorm2d(outChannels)
def forward(self, x):
out = self.conv1(x)
out = self.bn1(out)
out = nn.functional.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.matchDimension == None:
out += x
else:
out += self.matchDimension(x)
out = nn.functional.relu(out)
return out
class ResNet(nn.Module):
def __init__(self):
super(ResNet, self).__init__()
self.conv = nn.Conv2d(4, 32, 3, stride = 1, padding = 1, bias = False)
self.bn = nn.BatchNorm2d(32)
blockList = [ResBlock(32, 32) for i in range(5)]
self.res = nn.Sequential(*blockList)
self.pool = nn.AvgPool2d(5)
self.fc = nn.Linear(128, 1)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = nn.functional.relu(x)
x = self.res(x)
x = self.pool(x)
x = x.view(x.size()[0], -1)
x = self.fc(x)
x = torch.tanh(x)
return x
class Dataset(dat.Dataset):
def __init__(self):
with open(PATH_LABEL, "rb") as f:
self.labelList = pickle.load(f)
def __getitem__(self, index):
with open(PATH_DATA_FOLDER + str(index) + ".dat", "rb") as f:
data = torch.FloatTensor(pickle.load(f))
label = torch.FloatTensor([self.labelList[index]])
return data, label
def __len__(self):
return len(self.labelList)
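# A minimal forward-pass sketch: the Linear(128, 1) head implies 10x10 input boards
# (32 channels become 32 * 2 * 2 = 128 features after AvgPool2d(5)), with 4 input planes
# per position (assumed encoding).
def _example_forward_pass():
    net = ResNet()
    boards = torch.randn(2, 4, 10, 10)    # batch of two hypothetical encoded positions
    values = net(boards)                  # shape (2, 1), squashed to [-1, 1] by tanh
    return values.shape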
| 27.289855 | 92 | 0.677111 | 1,733 | 0.92034 | 0 | 0 | 0 | 0 | 0 | 0 | 14 | 0.007435 |
9b921382771a3e80faea212dc2044b24ad49e32b
| 6,623 |
py
|
Python
|
src/client/app/__init__.py
|
ZackPashkin/toloka-kit
|
8f650e5d8cdded1949ca633cf78f9b851ce839bb
|
[
"Apache-2.0"
] | 153 |
2021-02-06T13:41:11.000Z
|
2022-03-19T17:51:01.000Z
|
src/client/app/__init__.py
|
ZackPashkin/toloka-kit
|
8f650e5d8cdded1949ca633cf78f9b851ce839bb
|
[
"Apache-2.0"
] | 29 |
2021-01-15T12:54:37.000Z
|
2022-02-07T07:45:32.000Z
|
src/client/app/__init__.py
|
ZackPashkin/toloka-kit
|
8f650e5d8cdded1949ca633cf78f9b851ce839bb
|
[
"Apache-2.0"
] | 17 |
2021-01-29T15:20:04.000Z
|
2022-01-30T07:21:03.000Z
|
__all__ = [
'AppProject',
'App',
'AppItem',
'AppItemsCreateRequest',
'AppBatch',
'AppBatchCreateRequest'
]
import datetime
import decimal
from enum import unique
from typing import Dict, Any, List
from ..primitives.base import BaseTolokaObject
from ..project.field_spec import FieldSpec
from ...util._extendable_enum import ExtendableStrEnum
from ...util._codegen import attribute
class _AppError(BaseTolokaObject):
"""
Attributes:
code: String error code.
message: Detailed description of the error.
payload: Additional information about the error. May have different structure for different errors.
"""
code: str
message: str
payload: Any
class AppProject(BaseTolokaObject):
"""An App project with the parameters that you specify when creating it. It will have the interface and quality
control already pre-configured, decomposition done, and everything ready to use: all you need is to upload batches
and send them for labeling.
Attributes:
app_id:
parent_app_project_id:
name:
parameters:
id:
status: Project statuses for asynchronous creation. Allowed values:
* CREATING
* READY
            * ARCHIVED
* ERROR
created:
item_price:
errors:
"""
@unique
class Status(ExtendableStrEnum):
CREATING = 'CREATING'
READY = 'READY'
ARCHIVED = 'ARCHIVED'
ERROR = 'ERROR'
app_id: str
parent_app_project_id: str
name: str
parameters: Dict
id: str = attribute(readonly=True)
status: Status = attribute(readonly=True, autocast=True)
created: datetime.datetime = attribute(readonly=True)
item_price: decimal.Decimal = attribute(readonly=True)
errors: List[_AppError] = attribute(readonly=True)
class App(BaseTolokaObject):
"""An example of a standard task that you want to solve using Toloka. Unlike project templates, you don't have to
set up everything yourself.
Attributes:
id: ID of the App.
name:
image: Image.
description: Overview.
constraints_description: Description of limitations.
default_item_price: Default processing cost per work item.
param_spec: Specification of parameters for creating a project.
input_spec: Schema of input data in Toloka format.
output_spec: Schema of output data in Toloka format.
examples: Task examples.
"""
id: str
name: str
image: str
description: str
constraints_description: str
default_item_price: decimal.Decimal
param_spec: Dict
input_spec: Dict[str, FieldSpec]
output_spec: Dict[str, FieldSpec]
examples: Any
class AppItem(BaseTolokaObject):
"""A work item with data. It's uploaded into the batch with other items to be collectively sent for labeling.
In a TSV file with tasks, each line is a work item.
Attributes:
batch_id: ID of the batch that includes the item.
input_data: The item data following the App schema.
id: Item ID.
app_project_id: ID of the app project that includes the batch with this item.
created:
updated:
status: Processing status. If the item has the NEW status, it can be edited. In other statuses, the item is
immutable. Allowed values:
* NEW - new;
* PROCESSING - being processed;
* COMPLETED - processing complete;
* ERROR - error during processing;
* CANCELLED - processing canceled;
* ARCHIVE - item has been archived;
* NO_MONEY - not enough money for processing.
output_data: Processing result.
errors:
created_at: Date and time when the item was created.
started_at: Date and time when the item processing started.
finished_at: Date and time when the item processing was completed.
"""
@unique
class Status(ExtendableStrEnum):
NEW = 'NEW'
PROCESSING = 'PROCESSING'
COMPLETED = 'COMPLETED'
ERROR = 'ERROR'
CANCELLED = 'CANCELLED'
ARCHIVE = 'ARCHIVE'
NO_MONEY = 'NO_MONEY'
batch_id: str
input_data: Dict[str, Any]
id: str = attribute(readonly=True)
app_project_id: str = attribute(readonly=True)
created: datetime.datetime = attribute(readonly=True)
updated: datetime.datetime = attribute(readonly=True)
status: Status = attribute(readonly=True, autocast=True)
output_data: Dict[str, Any] = attribute(readonly=True)
errors: _AppError = attribute(readonly=True)
created_at: datetime.datetime = attribute(readonly=True)
started_at: datetime.datetime = attribute(readonly=True)
finished_at: datetime.datetime = attribute(readonly=True)
class AppItemsCreateRequest(BaseTolokaObject):
"""Request Body.
Attributes:
batch_id: Batch ID.
items: list of items.
"""
batch_id: str
items: List[Dict[str, Any]]
class AppBatch(BaseTolokaObject):
"""A batch of data that you send for labeling at a time. The batch consists of work items.
Attributes:
id: Batch ID.
app_project_id: Project ID.
name:
status: The state of the batch, calculated based on the states of items comprising it. Allowed values:
* NEW
* PROCESSING
* COMPLETED
* ERROR
* CANCELLED
* ARCHIVE
* NO_MONEY
items_count: Number of items in the batch.
item_price: The cost of processing per item in a batch.
cost: The cost of processing per batch.
created_at: Date and time when the batch was created.
started_at: Date and time when batch processing started.
finished_at: Date and time when batch processing was completed.
"""
@unique
class Status(ExtendableStrEnum):
NEW = 'NEW'
PROCESSING = 'PROCESSING'
COMPLETED = 'COMPLETED'
ERROR = 'ERROR'
CANCELLED = 'CANCELLED'
ARCHIVE = 'ARCHIVE'
NO_MONEY = 'NO_MONEY'
id: str
app_project_id: str
name: str
status: Status = attribute(autocast=True)
items_count: int
item_price: decimal.Decimal
cost: decimal.Decimal
created_at: datetime.datetime
started_at: datetime.datetime
finished_at: datetime.datetime
class AppBatchCreateRequest(BaseTolokaObject):
"""Request Body.
Attributes:
items: The item data following the App schema.
"""
items: List[Dict[str, Any]]
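# Illustrative sketch (assumes the usual keyword construction of BaseTolokaObject subclasses;
# the batch ID and item payloads below are hypothetical):
#
#   request = AppItemsCreateRequest(
#       batch_id='my-batch-id',
#       items=[{'text': 'first item'}, {'text': 'second item'}],
#   )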
| 30.380734 | 118 | 0.653782 | 6,196 | 0.935528 | 0 | 0 | 640 | 0.096633 | 0 | 0 | 3,868 | 0.584025 |
9b926fbc1417f4ebd631923a6169eb196f0aff02
| 760 |
py
|
Python
|
WebApp/main/utility/StringUtility.py
|
georg-wenzel/ml-data-smell-detection
|
7dddd401ca1f1a830dfd8b00760659911e5b1086
|
[
"MIT"
] | 1 |
2022-03-29T14:46:40.000Z
|
2022-03-29T14:46:40.000Z
|
WebApp/main/utility/StringUtility.py
|
georg-wenzel/ml-data-smell-detection
|
7dddd401ca1f1a830dfd8b00760659911e5b1086
|
[
"MIT"
] | null | null | null |
WebApp/main/utility/StringUtility.py
|
georg-wenzel/ml-data-smell-detection
|
7dddd401ca1f1a830dfd8b00760659911e5b1086
|
[
"MIT"
] | 1 |
2021-06-13T08:24:46.000Z
|
2021-06-13T08:24:46.000Z
|
# Utility functions for Strings (i.e. storing common strings once)
#define common strings
ERR_MISSING_KEY = "The field(s) {0} must be filled in this form."
ERR_INVALID_KEY = "The field '{0}' contains an invalid value."
ERR_UNAUTHORIZED = "The logged in user does not have access to this value: {0}"
MSG_FINISHED_TRAINING = "Your agent {0} has finished training and can now be used."
#define error string for (multiple) column mismatch
#pass tuple of mismatched columns as defined by AgentUtility.dataset_all_columns_match
def ERR_COLUMN_MISMATCH(columns_mismatched):
err = ("Column type mismatch: " +
" ".join(list(map(lambda x: "\"" + x[0] + "\": Expected " + str(x[1]) + ", but was " + str(x[2]) + ".", columns_mismatched))))
return err
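#illustrative example: each mismatch tuple is (column name, expected type, actual type)
def _example_column_mismatch():
    msg = ERR_COLUMN_MISMATCH([("age", "int64", "object")])
    assert msg == 'Column type mismatch: "age": Expected int64, but was object.'
    return msg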
| 54.285714 | 138 | 0.709211 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 496 | 0.652632 |
9b93e34b4881434ea8a11345bd79b7ea4e4e91b3
| 2,184 |
py
|
Python
|
lib/python2.7/site-packages/pyami/primefactor.py
|
leschzinerlab/myami-3.2-freeHand
|
974b8a48245222de0d9cfb0f433533487ecce60d
|
[
"MIT"
] | 6 |
2018-05-10T19:12:53.000Z
|
2021-05-19T21:11:56.000Z
|
pyami/primefactor.py
|
vosslab/ctfeval
|
6cfc648f91c318c3a46a959e4771c3d16d8e741a
|
[
"Apache-2.0"
] | 1 |
2017-04-15T11:04:39.000Z
|
2017-04-17T20:21:53.000Z
|
pyami/primefactor.py
|
vossman/ctfeval
|
6cfc648f91c318c3a46a959e4771c3d16d8e741a
|
[
"Apache-2.0"
] | 1 |
2019-09-05T20:58:37.000Z
|
2019-09-05T20:58:37.000Z
|
#!/usr/bin/env python
import sys
import math
maxprime = 12
twomult = 2**2
#====================
def prime_factors(n):
""" Return the prime factors of the given number. """
# < 1 is a special case
if n <= 1:
return [1]
factors = []
lastresult = n
while True:
if lastresult == 1:
break
c = 2
while True:
if lastresult % c == 0:
break
c += 1
factors.append(c)
lastresult /= c
return factors
#====================
def getAllPrimes(maxn=1028):
goodones = []
n = 2
while n <= maxn:
if isGoodPrime(n,False):
#print n, factors
goodones.append(n)
		n += 1  # step through every candidate; getAllEvenPrimes() below keeps the even-only stride
return goodones
#====================
def getAllEvenPrimes(maxn=1028):
goodones = []
n = 2
while n <= maxn:
if isGoodPrime(n,False):
#print n, factors
goodones.append(n)
n += 2
return goodones
#====================
def getNextEvenPrime(num=400):
goodones = []
n = num
while not isGoodStack(n) and n < 10000:
n += 1
return n
#====================
def getPrevEvenPrime(num=400):
goodones = []
n = num
while not isGoodStack(n) and n > 1:
n -= 1
return n
#====================
def getPrimeLimits(num=4):
prev = getPrevEvenPrime(num)
next = getNextEvenPrime(num)
return (prev, next)
#====================
def isGoodPrime(num=4, power_of_4_rule=True):
"""
Boxsize rules:
(1) no prime factor greater than 11
(2) if greater than 4^x, must be multiple of 2^x,
"""
#print numa
if power_of_4_rule:
if num % 4 != 0:
return False
### get the number of powers of 4 in number
power = int(math.floor(math.log(float(num))/math.log(4.0)))
### check to make sure number is divisible by 2 to that power
mod = int(2**power)
if num % mod != 0:
return False
### get prime factors and find maximum
factors = prime_factors(num)
if max(factors) > maxprime:
return False
return True
#====================
def isGoodStack(num=4):
if num % twomult != 0:
return False
return isGoodPrime(num)
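#====================
def exampleUsage():
	"""Illustrative sketch of the helpers above (values follow from the code as written)."""
	assert prime_factors(360) == [2, 2, 2, 3, 3, 5]
	assert isGoodPrime(352)        # 352 = 2**5 * 11 satisfies both boxsize rules
	assert not isGoodPrime(338)    # 338 = 2 * 13**2 is not a multiple of 4
	assert getPrimeLimits(100) == (96, 112)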
#====================
if __name__ == "__main__":
if len(sys.argv) > 1:
n = int(sys.argv[1])
factors = prime_factors(n)
print n, factors
prev, next = getPrimeLimits(n)
print "Use %d or %d instead"%(prev,next)
else:
print getAllPrimes()
| 18.991304 | 63 | 0.594322 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 618 | 0.282967 |
9b94d42c4e5c72f5294c49e1d55e12f33a9b3855
| 2,027 |
py
|
Python
|
visualization/draw.py
|
DougMHu/roomba-obstacle-mapping
|
019b6108c1967c7daabe7b4795cfac7ef0e79cf7
|
[
"MIT"
] | 3 |
2018-05-26T20:41:27.000Z
|
2020-10-19T12:40:42.000Z
|
visualization/draw.py
|
DougMHu/roomba-obstacle-mapping
|
019b6108c1967c7daabe7b4795cfac7ef0e79cf7
|
[
"MIT"
] | null | null | null |
visualization/draw.py
|
DougMHu/roomba-obstacle-mapping
|
019b6108c1967c7daabe7b4795cfac7ef0e79cf7
|
[
"MIT"
] | 1 |
2017-01-31T09:47:21.000Z
|
2017-01-31T09:47:21.000Z
|
# MIT License
# Copyright (c) 2016 Aashiq Ahmed, Shuai Chen, Meha Deora, Douglas Hu
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import turtle
turtle.setup(800,800)
wn = turtle.Screen()
doug = turtle.Turtle()
# Draw a grid
length = 5
width = 2
step = 50
def draw_row(width, mark):
doug.up()
for i in range(width):
if (mark[i]):
doug.down()
doug.begin_fill()
for j in range(4):
doug.fd(step)
doug.right(90)
if (mark[i]):
doug.end_fill()
doug.fd(step)
doug.up()
#draw_row(width,[1,0])
def draw_matrix(mark):
doug.up()
rows = len(mark)
cols = len(mark[0])
orig = doug.fillcolor()
doug.fillcolor('red')
for row in mark:
draw_row(cols, row)
doug.fd(-cols*step)
doug.right(90)
doug.fd(step)
doug.left(90)
doug.fillcolor(orig)
draw_matrix([[0,1],[1,1]])
# doug.left(90)
# doug.fd((width-0.5)*step)
# doug.right(90)
# doug.up()
# doug.fd(0.5*step)
# doug.down()
# doug.pensize(step)
# doug.fd((length-1)*step)
turtle.getscreen()._root.mainloop()
#doug.fd(length*step)
| 25.658228 | 80 | 0.716823 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,326 | 0.654169 |
9b96be5064867858493bac891020397805ad69fe
| 251 |
py
|
Python
|
igphotoprofile.py
|
raflimkamal/python_project
|
6d3321612226fa15b9fcafe4a301160f63d81213
|
[
"MIT"
] | null | null | null |
igphotoprofile.py
|
raflimkamal/python_project
|
6d3321612226fa15b9fcafe4a301160f63d81213
|
[
"MIT"
] | null | null | null |
igphotoprofile.py
|
raflimkamal/python_project
|
6d3321612226fa15b9fcafe4a301160f63d81213
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[2]:
# Dependency install (run in a shell or notebook cell, not as Python source):
# pip install instaloader
# In[4]:
import instaloader
# In[6]:
ig = instaloader.Instaloader()
dp = input("Enter Insta Username:")
ig.download_profile(dp, profile_pic_only = True)
# In[ ]:
| 8.366667 | 48 | 0.649402 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 91 | 0.36255 |
9b9ade24b6474ca9ac882881c781ddb3dc8e1ab1
| 2,180 |
py
|
Python
|
activate.py
|
cassidoxa/bottoman
|
9d04331794ffb8bb745fc175c15db6d4a1f5714c
|
[
"MIT"
] | null | null | null |
activate.py
|
cassidoxa/bottoman
|
9d04331794ffb8bb745fc175c15db6d4a1f5714c
|
[
"MIT"
] | null | null | null |
activate.py
|
cassidoxa/bottoman
|
9d04331794ffb8bb745fc175c15db6d4a1f5714c
|
[
"MIT"
] | null | null | null |
import json
import urllib.request
from bottoman import TwitchBot
import config
from db.db import DatabaseManager
def get_user_id_display(user):
"""
uses twitch's API to get a user's token with their (case insensitive)
user name
"""
client_id = config.client_id
token = "srtajsl3jjbhhtfrvk0dlsu33aytv2"
header = {"Client-Id": client_id, "Authorization": f'Bearer {token}'}
url = f'https://api.twitch.tv/helix/users?login={user}'
req = urllib.request.Request(url, headers=header)
response = urllib.request.urlopen(req).read().decode('utf-8')
response = json.loads(response)
return (int(response['data'][0]['id']),
response['data'][0]['display_name'])
def check_admin():
"""
check for an admin. If no admin user, ask for one and add
to chatters db.
"""
dbmgr = DatabaseManager('db/bottoman.db')
permissions_list = [i[0] for i in dbmgr.query(
"SELECT permissions \
FROM chatters").fetchall()]
if 'admin' in permissions_list:
return
else:
admin_flag = False
while admin_flag is False:
admin = input(f"This bot has no admin. Please enter the name of \
your twitch channel: ")
double_check = input(f'The admin account will be {admin}. Is \
this correct? (y/n): ')
if double_check.lower() == "y":
user_id, name = get_user_id_display(admin)
dbmgr.write(
"INSERT INTO chatters \
VALUES (?,?,?,?,?,?)",
(user_id, name.lower(), name, 'admin', 1, 0,))
dbmgr.close()
admin_flag = True
elif double_check.lower() == "n":
continue
else:
print(f"Please try again.")
continue
return
# check for admin, initialize bot, join room, send hello message
check_admin()
bottoman = TwitchBot()
bottoman.join_room(bottoman.s)
bottoman.send_message(config.join_msg)
bottoman.run_time()
| 29.459459 | 77 | 0.559174 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 812 | 0.372477 |
9b9e8192f42c44f946f004808b8e37a13a83e0b0
| 478 |
py
|
Python
|
waveshare_snake.py
|
AndrewCarterUK/MiniGame
|
6d699c045e84ee3834f23eb0483245195438eff7
|
[
"MIT"
] | null | null | null |
waveshare_snake.py
|
AndrewCarterUK/MiniGame
|
6d699c045e84ee3834f23eb0483245195438eff7
|
[
"MIT"
] | null | null | null |
waveshare_snake.py
|
AndrewCarterUK/MiniGame
|
6d699c045e84ee3834f23eb0483245195438eff7
|
[
"MIT"
] | null | null | null |
from minigame.waveshare.button import Button
from minigame.waveshare.display import Display
from minigame.games.snake import Snake
WIDTH = 20
HEIGHT = 20
STEP_TIME = 0.5
BLOCK_SIZE = 32
def main():
display = Display()
l_button = Button(5)
r_button = Button(26)
u_button = Button(6)
d_button = Button(19)
snake = Snake(display, WIDTH, HEIGHT, STEP_TIME, l_button, r_button, u_button, d_button)
snake.play()
if __name__ == '__main__':
main()
| 19.916667 | 92 | 0.698745 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.020921 |
9b9f8f5cfe54f976ada38f7cf0db4c9ffc2c1571
| 8,636 |
py
|
Python
|
jumodjango/urls.py
|
jumoconnect/openjumo
|
828d993bfbb83777d10a68de6964c7d5bb2c7bd0
|
[
"MIT"
] | 5 |
2015-03-11T18:59:46.000Z
|
2018-08-17T17:49:45.000Z
|
jumodjango/urls.py
|
kmrifat/openjumo
|
828d993bfbb83777d10a68de6964c7d5bb2c7bd0
|
[
"MIT"
] | 2 |
2020-06-05T16:52:17.000Z
|
2021-02-08T20:24:26.000Z
|
jumodjango/urls.py
|
kmrifat/openjumo
|
828d993bfbb83777d10a68de6964c7d5bb2c7bd0
|
[
"MIT"
] | 6 |
2016-02-04T00:45:30.000Z
|
2021-07-07T17:14:50.000Z
|
from api.api_v1 import api_urls
from django.conf.urls.defaults import *
from django.conf import settings
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
)
''' RANDOM URLs '''
urlpatterns += patterns('etc.views',
url(r'^about/?$', 'about', name = 'about'),
url(r'^help/?$', 'help', name = 'help'),
url(r'^jobs/?$', 'jobs', name = 'jobs'),
url(r'^team/?$', 'team', name = 'team'),
url(r'^blog/?$', 'blog', name = 'blog'),
url(r'^contact/?$', 'contact', name = 'contact'),
url(r'^privacy/?$', 'privacy', name = 'privacy'),
url(r'^terms/?$', 'terms', name = 'terms'),
url(r'^/?$', 'index', name = 'index'),
url(r'^error/?$', 'throw_error', name = 'throw_error'),
url(r'^health_check/?$', 'health_check', name = 'health_check'),
)
''' END OF RANDOM URLs '''
''' API URLS '''
urlpatterns += patterns('',
(r'^api/', include(api_urls())),
)
''' END API URLS '''
''' USER URLs '''
urlpatterns += patterns('users.views',
url(r'^login/?$', 'login_permalink', name = 'login_permalink'),
url(r'^logout/?$', 'logout_permalink', name = 'logout_permalink'),
url(r'^setup/?$', 'setup', name = 'setup'),
url(r'^discover/?$', 'discover', name = 'discover'),
url(r'^user/(?P<mongo_id>[a-zA-Z0-9\-_].*)/?$', 'old_user_permalink', name = 'old_user_permalink'),
url(r'^forgot_password/?$', 'forgot_password', name = 'forgot_password'),
url(r'^reset_password/(?P<reset_id>[a-fA-F0-9].*)/?$', 'reset_password', name = 'reset_password'),
url(r'^upload_photo/?$', 'upload_photo', name = 'upload_photo'),
url(r'^settings/?$', 'settings', name='settings'),
url(r'^settings/notifications/?$', 'notifications', name='settings_notifications'),
url(r'^settings/connect/?$', 'connect', name='settings_connect'),
url(r'^settings/developer/?$', 'developer', name='settings_developer'),
url(r'^users/(?P<user_id>\d*)/follow/?$', 'follow', name='follow_user'),
url(r'^users/(?P<user_id>\d*)/unfollow/?$', 'unfollow', name='unfollow_user'),
url(r'^users/(?P<user_id>\d*)/followers/?$', 'follower_list', name='user_followers'),
url(r'^users/(?P<user_id>\d*)/followings/?$', 'following_list', name='user_followings'),
url(r'^remove/?$', 'remove_user', name='remove_user')
)
urlpatterns += patterns('users.ajax.views',
url(r'^json/v1/user/fbid_check/?$', 'check_fbid', name = 'check_fbid'),
url(r'^json/v1/user/fb_login/?$', 'fb_login', name = 'fb_login'),
url(r'^json/v1/user/fbot_update/?$', 'fbot_update', name = 'fbot_update'),
url(r'^json/v1/user/update/?$', 'update_user', name = 'update_user'),
url(r'^json/v1/user/remove/?$', 'remove_user', name = 'remove_user'),
url(r'^json/v1/user/reset_password/?$', 'reset_password', name = 'reset_password'),
url(r'^json/v1/user/forgot_password/?$', 'forgot_password', name = 'forgot_password'),
url(r'^json/v1/user/action/follow/?$', 'follow', name = 'follow'),
)
''' END OF USER URLs '''
''' ISSUE URLs '''
urlpatterns += patterns('issue.views',
url(r'^issue/(?P<mongo_id>[a-zA-Z0-9\-_].*)/?$', 'old_issue_permalink', name = 'old_issue_permalink'),
url(r'^issuename/(?P<issuename>[a-zA-Z0-9\-_\ ].*)/?$', 'old_issuename_permalink', name = 'old_issuename_permalink'),
url(r'^users/(?P<user_id>\d*)/issues/?$', 'followed_issue_list', name='followed_issue_list')
)
''' ISSUE URLs '''
''' ORG URLs '''
urlpatterns += patterns('org.views',
url(r'^org/categories.js$', 'org_categories', name = 'org_categories'),
url(r'^org/claim/(?P<org_id>[0-9a-zA-Z\-_].*)/confirm/?$', 'claim_org_confirm', name = 'claim_org_confirm'),
url(r'^org/claim/(?P<org_id>[0-9a-zA-Z\-_].*)/?$', 'claim_org', name = 'claim_org'),
url(r'^org/create/?$', 'create_org', name = 'create_org'),
url(r'^org/(?P<org_id>\d.*)/details/?$', 'details', name='details_org'),
url(r'^org/(?P<org_id>[0-9a-zA-Z\-_].*)/manage/?$', 'manage_org', {'tab': 'about'}, name='manage_org'),
url(r'^org/(?P<org_id>[0-9a-zA-Z\-_].*)/manage/connect/?$', 'manage_org', {'tab': 'connect'}, name='manage_org_connect'),
url(r'^org/(?P<org_id>[0-9a-zA-Z\-_].*)/manage/more/?$', 'manage_org', {'tab': 'more'}, name='manage_org_more'),
url(r'^org/(?P<mongo_id>[a-zA-Z0-9\-_].*)/?$', 'old_org_permalink', name = 'old_org_permalink'),
url(r'^orgname/(?P<orgname>[a-zA-Z0-9\-_\ ].*)/?$', 'old_orgname_permalink', name = 'old_orgname_permalink'),
url(r'^users/(?P<user_id>\d*)/orgs/?$', 'followed_org_list', name='followed_org_list')
)
urlpatterns += patterns('org.ajax.views',
url(r'^json/v1/org/fetch_centroid/?$', 'fetch_org_by_centroid', name = 'fetch_org_by_centroid'),
url(r'^json/v1/org/update/?$', 'update_org', name = 'update_org'),
url(r'^json/v1/org/remove/?$', 'remove_org', name = 'remove_org'),
url(r'^json/v1/org/flag/?$', 'flag_org', name = 'flag_org'),
url(r'^json/v1/org/create/?$', 'org_create', name = 'org_create'),
url(r'^json/v1/org/normalize_facebook_id/?$', 'normalize_facebook_id', name = 'normalize_facebook_id'),
)
''' END OF ORG URLs '''
''' COMMITMENT URLS '''
urlpatterns += patterns('commitment.views',
url(r'^commitments/create/?$', 'create', name='create_commitment'),
url(r'^commitments/(?P<commitment_id>\d*)/delete/?$', 'delete', name='delete_commitment'),
url(r'^orgs/(?P<entity_id>\d*)/commitments/?$', 'list', {'model_name': 'org.Org'}, name='org_commitments'),
url(r'^issues/(?P<entity_id>\d*)/commitments/?$', 'list', {'model_name': 'issue.Issue'}, name='issue_commitments'),
)
''' ACTION URLS '''
urlpatterns += patterns('action.views',
url(r'^orgs/(?P<entity_id>\d*)/actions/?$', 'action_list', {'model_name': 'org.Org'}, name='org_action_list'),
url(r'^issues/(?P<entity_id>\d*)/actions/?$', 'action_list', {'model_name': 'issue.Issue'}, name='issue_action_list'),
)
''' SEARCH URLS '''
urlpatterns += patterns('search.views',
url(r'^json/v1/search/onebox/?$', 'autocomplete', name = 'autocomplete'),
url(r'^search/?$', 'search_page', name='search_page'),
url(r'^json/v1/search/?$', 'ajax_search', name='ajax_search'),
url(r'^json/v1/autocomplete/?$', 'ajax_term_complete', name='ajax_term_complete')
)
''' MAILER URLS '''
urlpatterns += patterns('mailer.views',
url(r'^unsubscribe/$', 'unsubscribe', name='unsubscribe'),
url(r'^email/text/(?P<username>[a-zA-Z0-9\-_\ ].*)/?$', 'jumo_reader', name = 'jumo_reader'),
url(r'^email/(?P<username>[a-zA-Z0-9\-_\ ].*)/?$', 'jumo_reader', name = 'jumo_reader'),
#url(r'^notification/(?P<username>[a-zA-Z0-9\-_\ ].*)/?$', 'notification_email', name = 'notification_email'),
)
''' END MAILER URLS '''
''' ADMIN URLS '''
urlpatterns += patterns('',
(r'^admin/org/report/$', 'org.admin_views.report'),
(r'^grappelli/', include('grappelli.urls')),
(r'^admin/', include(admin.site.urls)),
)
if settings.IS_DATAMINE:
urlpatterns += patterns('miner.views',
url(r'^related_searches/?$', 'related_searches', name='related_searches')
)
#if settings.DEBUG:
if True:
urlpatterns += patterns('django.views.static',
(r'^static/(?P<path>.*)$',
'serve', {
'document_root': settings.MEDIA_ROOT,
'show_indexes': True }),)
handler500 = 'etc.views.error_500'
handler404 = 'etc.views.error_404'
'''
#########################################################################################
### HEY #########################################################################
################################################## SEE ALL THEM POUND SIGNS? ############
#########################################################################################
############### THAT MEANS THIS IS AN IMPORTANT MSG #####################################
#########################################################################################
################################# SO PAY ATTENTION OK? ##################################
#########################################################################################
####### EVERYTHING WILL BREAK IF THIS ISN'T THE LAST LINE OF CODE IN THIS FILE. #
#########################################################################################
################################## WE COOL? #############################################
#########################################################################################
'''
urlpatterns += patterns('etc.views',
url(r'^([a-zA-Z0-9\-_].*)/?$', 'clean_url', name = 'entity_url'),
)
| 45.452632 | 125 | 0.54736 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,052 | 0.700787 |
9ba08896288342be18a3bdfe4d777157062a927c
| 2,830 |
py
|
Python
|
modules/layers.py
|
vliu15/munit
|
5789d96590519d729f89c9501eba7692fa7054ef
|
[
"MIT"
] | 3 |
2021-03-04T01:48:03.000Z
|
2021-12-16T06:55:10.000Z
|
modules/layers.py
|
vliu15/munit
|
5789d96590519d729f89c9501eba7692fa7054ef
|
[
"MIT"
] | null | null | null |
modules/layers.py
|
vliu15/munit
|
5789d96590519d729f89c9501eba7692fa7054ef
|
[
"MIT"
] | null | null | null |
# The MIT License
#
# Copyright (c) 2020 Vincent Liu
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import torch
import torch.nn as nn
class AdaptiveInstanceNorm2d(nn.Module):
''' Implements 2D Adaptive Instance Normalization '''
def __init__(self, channels, s_dim=8, h_dim=256):
super().__init__()
self.instance_norm = nn.InstanceNorm2d(channels, affine=False)
self.style_scale_transform = self.mlp(s_dim, h_dim, channels)
self.style_shift_transform = self.mlp(s_dim, h_dim, channels)
@staticmethod
    def mlp(in_dim, h_dim, out_dim):  # staticmethod: no 'self' parameter
return nn.Sequential(
nn.Linear(in_dim, h_dim),
nn.ReLU(inplace=True),
nn.Linear(h_dim, h_dim),
nn.ReLU(inplace=True),
nn.Linear(h_dim, out_dim),
)
def forward(self, image, w):
normalized_image = self.instance_norm(image)
style_scale = self.style_scale_transform(w)[:, :, None, None]
style_shift = self.style_shift_transform(w)[:, :, None, None]
transformed_image = style_scale * normalized_image + style_shift
return transformed_image
class LayerNorm2d(nn.Module):
''' Implements 2D Layer Normalization '''
def __init__(self, channels, eps=1e-5, affine=True):
super().__init__()
self.affine = affine
self.eps = eps
if self.affine:
self.gamma = nn.Parameter(torch.rand(channels))
self.beta = nn.Parameter(torch.zeros(channels))
def forward(self, x):
mean = x.flatten(1).mean(1).reshape(-1, 1, 1, 1)
std = x.flatten(1).std(1).reshape(-1, 1, 1, 1)
x = (x - mean) / (std + self.eps)
if self.affine:
x = x * self.gamma.reshape(1, -1, 1, 1) + self.beta.reshape(1, -1, 1, 1)
return x
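# A minimal usage sketch: both layers act on NCHW feature maps; the style vector length
# must equal s_dim of AdaptiveInstanceNorm2d (8 here, matching the default above).
def _example_usage():
    adain = AdaptiveInstanceNorm2d(channels=64, s_dim=8)
    layer_norm = LayerNorm2d(channels=64)
    feats = torch.randn(2, 64, 32, 32)     # hypothetical content features
    style = torch.randn(2, 8)              # hypothetical style code
    out = layer_norm(adain(feats, style))  # shape stays (2, 64, 32, 32)
    return out.shape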
| 36.753247 | 84 | 0.674558 | 1,678 | 0.592933 | 0 | 0 | 280 | 0.09894 | 0 | 0 | 1,183 | 0.418021 |
9ba3741e685c1d558d552dcb021080523937b319
| 721 |
py
|
Python
|
lab_new/locust/locustfile.py
|
mwardbopp/f5-big-iq-lab
|
70d5b766571f8db8b3bc744e98c183dbdd500089
|
[
"Apache-2.0"
] | 18 |
2018-07-17T15:17:16.000Z
|
2021-12-05T21:13:26.000Z
|
lab_new/locust/locustfile.py
|
mwardbopp/f5-big-iq-lab
|
70d5b766571f8db8b3bc744e98c183dbdd500089
|
[
"Apache-2.0"
] | 34 |
2018-09-11T04:43:47.000Z
|
2021-04-19T15:58:50.000Z
|
lab_new/locust/locustfile.py
|
mwardbopp/f5-big-iq-lab
|
70d5b766571f8db8b3bc744e98c183dbdd500089
|
[
"Apache-2.0"
] | 54 |
2018-07-30T12:23:33.000Z
|
2021-06-11T17:54:28.000Z
|
import time
from locust import HttpUser, task, between
# https://docs.locust.io/en/stable/quickstart.html
class QuickstartUser(HttpUser):
wait_time = between(5, 10)
@task
def index_page(self):
self.client.get("/index.php", verify=False)
self.client.get("/contact", verify=False)
self.client.get("/wishlist", verify=False)
self.client.get("/faq", verify=False)
self.client.get("/product/view?id=72", verify=False)
self.client.get("/product/view?id=64", verify=False)
self.client.get("/product/view?id=56", verify=False)
def on_start(self):
self.client.post("/user/login", json={"username":"test_user", "password":"123456"}, verify=False)
| 34.333333 | 105 | 0.65742 | 612 | 0.848821 | 0 | 0 | 413 | 0.572816 | 0 | 0 | 204 | 0.28294 |
9ba44cd9d91cc8c729aafc0cddc794fc2187f3f9
| 25,015 |
py
|
Python
|
lizardanalysis/calculations/aep_pep_test.py
|
JojoReikun/ClimbingLizardDLCAnalysis
|
6cc38090217a3ffd4860ef6d06ba7967d3c10b7c
|
[
"MIT"
] | 1 |
2021-03-09T19:12:44.000Z
|
2021-03-09T19:12:44.000Z
|
lizardanalysis/calculations/aep_pep_test.py
|
JojoReikun/ClimbingLizardDLCAnalysis
|
6cc38090217a3ffd4860ef6d06ba7967d3c10b7c
|
[
"MIT"
] | null | null | null |
lizardanalysis/calculations/aep_pep_test.py
|
JojoReikun/ClimbingLizardDLCAnalysis
|
6cc38090217a3ffd4860ef6d06ba7967d3c10b7c
|
[
"MIT"
] | null | null | null |
def aep_pep_test(**kwargs):
"""
Calculates two different things:
1.) The x and y coordinates of the AEP and PEP, relative to the coxa of a respective leg
2.) The swing phases and the stance phases, identifying on a frame by frame basis
Return: results data frame with 30 key value pairs:
x6 allocation of swing and stance phases for each foot/leg
x6 x coordinates of AEP for each foot/leg
x6 y coordinates for AEP for each foot/leg
x6 x coordinates for PEP for each foot/leg
x6 y coordinates for PEP for each foot/leg
"""
import os.path
import pandas as pd
from pandas import np
from pathlib import Path
from lizardanalysis.utils import animal_settings
from scipy import signal
import math
# print("footfall_by_switches")
# define necessary **kwargs:
data = kwargs.get('data')
data_rows_count = kwargs.get('data_rows_count')
config = kwargs.get('config')
filename = kwargs.get('filename')
likelihood = kwargs.get('likelihood')
animal = kwargs.get('animal')
df_result_current = kwargs.get('df_result_current')
# added in this so that you can get the estimated values from alpha
# so long as that column currently resides in the data frame
config_file = Path(config).resolve()
# result folder for footfall plots
step_detection_folder = os.path.join(str(config_file).rsplit(os.path.sep, 1)[0], "analysis-results",
"step_detection")
# create file path for foot fall pattern diagrams
plotting_footfall_folder = os.path.join(step_detection_folder, "footfall-pattern-diagrams")
# TODO: instead of hard-coding the feet and the three points for body_motion,
# TODO: let the user choose based on labels available in DLC result file: Choose feet & choose body motion
scorer = data.columns[1][0]
feet = animal_settings.get_list_of_feet(animal)
relative = False
plotting_footfall_patterns = True
# define cut-off value -> crops X% of frames on each side of video
p_cut_off = 0.05
body_motion = {"frame": [], "mean_motion_x": []}
abdomen_diff = 0
head_diff = 0
# assuming that the body from the head to the abdomen is rigid?
# this for loop is used to calculate the x coordinate difference between a given frame and the previous
# therefore gives you can indicator of the direction of motion
# if the [row] - [row-1] > 0 , then the stick insect is moving to the right
# if the [row] - [row-1] < 0, then the stick insect is moving to the left
for row in range(1, data_rows_count):
if data.loc[row][scorer, "head", 'likelihood'] >= likelihood and data.loc[row - 1][
scorer, "head", 'likelihood'] >= likelihood:
head_diff = data.loc[row][scorer, "head"] - data.loc[row - 1][scorer, "head"]
if data.loc[row][scorer, "abdomen", 'likelihood'] >= likelihood and data.loc[row - 1][
scorer, "abdomen", 'likelihood'] >= likelihood:
abdomen_dif = data.loc[row][scorer, "abdomen"] - data.loc[row - 1][scorer, "abdomen"]
body_motion["frame"].append(row - 1)
body_motion["mean_motion_x"].append(abs((head_diff + abdomen_diff) / 2.0))
    # taking the absolute value so that walking direction does not matter: without it, walking to the left
    # would flip which sign change indicates swing/PEP and which indicates stance/AEP.
# taking the average of the differences, to determine the average 'speed' i.e. the displacement over one frame of the whole body
# one class instance and one result array for every foot, since every foot needs its own counter
calculators = {}
results = {}
# for every foot, need to do within the original for loop, so all foot calculations are performed for a given frame
foot_motions = {}
rel_foot_motions = {}
# left the for loop for the body motion, and will now be working with for loops for the foot motion
for foot in feet:
foot_motions[f"{foot}"] = []
rel_foot_motions[f"rel_{foot}"] = []
# if the [row] - [row-1] > 0 , then the stick insect FOOT is moving to the right
# if the [row] - [row-1] < 0, then the stick insect FOOT is moving to the left
# taking an absolute value for the body and foot motions avoid issues with directions (?)
foot_motion = 0
for row in range(1, data_rows_count):
if data.loc[row][scorer, f"{foot}", 'likelihood'] >= likelihood and data.loc[row - 1][scorer,
f"{foot}",'likelihood'] >= likelihood:
foot_motion = abs(data.loc[row][scorer, f"{foot}", 'x'] - data.loc[row - 1][
scorer, f"{foot}", 'x'])
foot_motions[f"{foot}"].append(foot_motion)
rel_foot_motions[f"rel_{foot}"].append(foot_motion - body_motion['mean_motion_x'][row - 1])
            else:
                # likelihood check failed for this frame: append NaN (assumed placeholder) so the
                # per-frame lists stay aligned with body_motion
                foot_motions[f"{foot}"].append(np.nan)
                rel_foot_motions[f"rel_{foot}"].append(np.nan)
# now need to store the body motion data, the foot motion data, and the relative foot motion all in a dataframe
# this dataframe within the loop is only for one foot
dict_df = {'body_motion': body_motion['mean_motion_x'], 'foot_motion': foot_motions[f"{foot}"],
"rel_foot_motion": rel_foot_motions[f"rel_{foot}"]}
print(dict_df)
df = pd.DataFrame.from_dict(dict_df)
intersections = smooth_and_plot(df, data_rows_count, p_cut_off, relative, foot, filename,
step_detection_folder)
######################################################################################################################
# the smooth_and_plot function returns 'intersection_dict'
# intersection dict is: {"idx":[], "sign":[]}
# idx = the idx of the number list/array of differences in the sign, only storing when the differences are non-zero
# sign = stores the sign of the number associated with the index of the non zero number
# positive => start of swing =>PEP
# negative => start of stance => AEP
# gives the alpha_estimation values for the
rom_list = [col for col in df_result_current.columns if ("rom_angle_{}".format(foot) in col)]
aep_pep_angle = []
# for loop will calculate the angle that defines the femur-coxa vector relative to the normal
# to the body axis, running through the coxa of the foot of interest
for angle in range(len(rom_list)):
aep_pep_angle.append(90 - angle)
foot_chars = list(foot)
f_t_joint_lpx = []
f_t_joint_lpy = []
t_c_joint_lpx = []
t_c_joint_lpy = []
# low pass filter application of the coordinate data alone?
# is this necessary
b, a = signal.butter(3, 0.1, btype='lowpass', analog=False)
f_t_joint_lpx = signal.filtfilt(b, a,
(data.loc[:, (scorer, "{}m{}".format(foot_chars[0], foot_chars[1]), "x")]))
f_t_joint_lpy = signal.filtfilt(b, a,
(data.loc[:, (scorer, "{}m{}".format(foot_chars[0], foot_chars[1]), "y")]))
t_c_joint_lpx = signal.filtfilt(b, a,
(data.loc[:, (scorer, "{}b{}".format(foot_chars[0], foot_chars[1]), "x")]))
t_c_joint_lpy = signal.filtfilt(b, a,
(data.loc[:, (scorer, "{}b{}".format(foot_chars[0], foot_chars[1]), "y")]))
# ensuring that the values for the keys are defined as arrays, so that you can append for the
# following for loop
results_aep = {"{}_x".format(foot): [], "{}_y".format(foot): []}
results_pep = {"{}_x".format(foot): [], "{}_y".format(foot): []}
for i in range(2, data_rows_count):
if i - 2 in intersections["idx"]:
# atm just leaving the likelihood check
# is it worth doing, considering the alpha angles depended on those likelihoods anyway?
# so you would be just checking the same likelihood even though
# now calculating the Euclidean distance between the coxa label and the femur label
f_t_joint_co = (f_t_joint_lpx[i], f_t_joint_lpy[i])
t_c_joint_co = (t_c_joint_lpx[i], t_c_joint_lpy[i])
distance = np.sqrt(
(f_t_joint_co[0] - t_c_joint_co[0]) ** 2 + (f_t_joint_co[1] - t_c_joint_co[1]) ** 2)
# calibrate distance with conversion factor
# NEED TO WRITE THE CONVERSION FACTOR!
distance_calib = distance # / conv_fac
# results_aep = {}
# results_pep = {}
if intersections["sign"][i - 2] > 0:
# this means you are transitioning to the swing phase, so should be PEP
results_pep[f"{foot}_x"].append((math.cos(aep_pep_angle[i]) * distance_calib))
results_pep[f"{foot}_y"].append((math.sin(aep_pep_angle[i]) * distance_calib))
if intersections["sign"][i - 2] < 0:
# this means you are transitioning to the stance phase so should be aep
results_aep[f"{foot}_x"].append((math.cos(aep_pep_angle[i]) * distance_calib))
results_aep[f"{foot}_y"].append((math.sin(aep_pep_angle[i]) * distance_calib))
# therefore should now have two dictionaries that contain the x coordinates and the y coordinates
# of the aep and the pep for each foot
# one aep value and one pep value per stepping cycle
#####################################################################################################################
# initializes class instance for every foot and empty result dict to be filled with the swing and stance phases:
calculators[foot] = StridesAndStances()
# "S10" = string of 10 characters: stance/stride + counter 000n
results[foot] = calculators[foot].determine_stride_phases(intersections, data_rows_count)
# rename dictionary keys of results
results = {'stepphase_' + key: value for (key, value) in results.items()}
results_aep = {"AEP_" + key: value for (key, value) in results_aep.items()}
results_pep = {"PEP_" + key: value for (key, value) in results_pep.items()}
# print("results: ", results)
if plotting_footfall_patterns:
""" plots a foot fall pattern diagram for every DLC result csv file/every lizard run """
plot_footfall_pattern(results, data_rows_count, filename, plotting_footfall_folder)
## need to add the result of the code here!
# last step must be combining the three results dictionaries
results.update(results_aep)
results.update(results_pep)
return results
# shouldn't matter whether the stick insect walks in a straight horizontal line or not, because you're only looking at
# the switch in the direction of movement
# therefore, as long as the insect doesn't walk completely vertically suddenly, then the algorithm should still work
def smooth_and_plot(df, data_rows_count, p_cut_off, relative, foot, filename, step_detection_folder, plotting=True):
# smoothing of the raw input data from foot motion and body motion, using the Butterworth low-pass filter an a Savintzky-
# Golay smoothing alogirthm. Then, the intersection points are computed between the smoothed body and foot curves
# If relative is TRUE: body motion is already subtracted from the foot motion, hence foot is relative to the x-axis
# If relative is FALSE: the intersection of the foot motion and body motion data curves needs to be determined
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
import os
import errno
# savgol filter smoothing window (must be odd!)
smooth_wind = 13
x_cut_off_value = int(round(data_rows_count * p_cut_off, 0))
x = np.linspace(0, data_rows_count - 1, data_rows_count - 1)
b, a = signal.butter(3, 0.1, btype='lowpass', analog=False)
x_cut_off = np.linspace(x_cut_off_value, data_rows_count - 1, int(data_rows_count - 1 - x_cut_off_value))
if plotting == True:
# initiate plot
plt.figure()
plt.axvline(x_cut_off_value, color='black', label='cutoff 0.05%')
if relative == True:
"""Uses the relative foot motion i.e. the foot motion where body motion has been subtracted"""
rel_foot_motion_low_passed = signal.filtfilt(b, a, df['rel_foot_motion'])
# smooth curves with Savitzky-Golay filter:
y_foot_rel = df.loc[x_cut_off_value:, 'rel_foot_motion']
y_foot_rel_lp = rel_foot_motion_low_passed[x_cut_off_value:] # two different types of filtering (?)
# smooth without the low pass filter
y_foot_rel_smoothed = signal.savgol_filter(y_foot_rel, smooth_wind, 3)
# smooth with the low pass filter
y_foot_rel_lp_smoothed = signal.savgol_filter(y_foot_rel_lp, smooth_wind, 3)
x_axis_f = np.zeros(data_rows_count - 1 - x_cut_off_value)
# get the indexes of the frames where you are transitioning from swing -> stance or stance -> swing
idx = np.argwhere(np.diff(np.sign(x_axis_f - y_foot_rel_smoothed))).flatten()
intersections_dict = {"idx": [], "sign": []}
for i in idx:
intersections_dict["idx"].append(i)
intersections_dict["sign"].append(np.sign(x_axis_f[i] - y_foot_rel_smoothed[i]))
intersections_dict["idx"] = [b + x_cut_off_value for b in intersections_dict['idx']]
if plotting == True:
df['rel_foot_motion'].plot(color='#f5c242') # plot_rel_foot
plt.plot(x, rel_foot_motion_low_passed, color='green', label='rel_foot_motion low pass (lp) filter')
plt.plot(x_cut_off, y_foot_rel_smoothed, color='red', label='rel_foot_motion_smoothed')
plt.plot(x_cut_off, y_foot_rel_lp_smoothed, color='lightgreen', label='rel_foot_motion_lp_smoothed')
plt.plot(x_cut_off[idx], y_foot_rel_lp_smoothed[idx], 'ko') # plot intersection points
# edit here -> second argument was changed from x_axis_f to y_foot_rel_lp_smoothed
for i in range(len(intersections_dict['idx'])):
plt.annotate(intersections_dict['idx'][i],
(x_cut_off[intersections_dict['idx'][i] - x_cut_off_value] - 5,
y_foot_rel_lp_smoothed[intersections_dict['idx'][i] - x_cut_off_value] + 3))
# another edit here?
else:
"""
Uses the foot motion and the body motion and computes the intersection points for the smoothed curves.
Intersection points for the lizard standing (bodymotion -> 0) will get excluded by using a body-motion threshold
of 10% of max(body_motion_lp_smoothed).
"""
# lowpass filter for body motion
body_motion_low_passed = signal.filtfilt(b, a, df['body_motion'])
# lowpass filter for foot motion
foot_motion_low_passed = signal.filtfilt(b, a, df['foot_motion'])
# smooth curves:
y_body = df.loc[x_cut_off_value:, 'body_motion']
y_body_lp = body_motion_low_passed[x_cut_off_value:]
y_foot = df.loc[x_cut_off_value:, 'foot_motion']
y_foot_lp = foot_motion_low_passed[x_cut_off_value:]
# smooth original body motion without low pass filter
y_body_smoothed = signal.savgol_filter(y_body, 51, 3)
# smooth low-pass-filtered body motion
y_body_lp_smoothed = signal.savgol_filter(y_body_lp, 17, 3)
# smooth original foot motion without low pass filter
y_foot_smoothed = signal.savgol_filter(y_foot, 17, 3)
# smooth low-pass-filtered rel foot motion
y_foot_lp_smoothed = signal.savgol_filter(y_foot_lp, 17, 3)
# compute and plot intersection points:
idx = np.argwhere(np.diff(np.sign(y_body_lp_smoothed - y_foot_lp_smoothed))).flatten()
intersections_dict = {"idx": [], "sign": []}
max_body_motion = max([abs(max(y_body_lp_smoothed)), abs(min(y_body_lp_smoothed))])
body_motion_stand = round(max_body_motion * 0.1, 2)
# print(f"max body motion: {max_body_motion}, 10%: {body_motion_stand}")
for i in idx:
            # exclude all intersections where |body motion| is below 10% of its max (~standing)
if abs(y_body_lp_smoothed[i]) >= body_motion_stand:
intersections_dict["idx"].append(i)
intersections_dict["sign"].append(np.sign(y_body_lp_smoothed[i] - y_foot_lp_smoothed[i]))
intersections_dict['idx'] = [b + x_cut_off_value for b in intersections_dict['idx']]
# print("x intersections: ", intersections_dict)
# remove intersection points when lizard has stopped walking (usually in the end):
# intersections_dict = remove_standing_intersections(intersections_dict, y_body_lp_smoothed, y_foot_lp_smoothed)
if plotting == True:
df['body_motion'].plot(color='#3089db') # plot body motion
df['foot_motion'].plot(color='#d68f00') # plot foot motion
plt.plot(x, body_motion_low_passed, color='lightblue', label='body_motion low pass (lp) filter')
plt.plot(x, foot_motion_low_passed, color='green', label='foot_motion low pass (lp) filter')
plt.plot(x_cut_off, y_body_smoothed, color='#160578', label='body_motion_smoothed')
plt.plot(x_cut_off, y_foot_smoothed, color='red', label='foot_motion_smoothed')
plt.plot(x_cut_off, y_body_lp_smoothed, color='#9934b3', label='body_motion_lp_smoothed')
plt.plot(x_cut_off, y_foot_lp_smoothed, color='lightgreen', label='foot_motion_lp_smoothed')
plt.plot(x_cut_off[idx], y_body_lp_smoothed[idx], 'ko') # plot intersection points
for i in range(len(intersections_dict['idx'])):
plt.annotate(intersections_dict['idx'][i],
(x_cut_off[intersections_dict['idx'][i] - x_cut_off_value] - 5,
y_body_lp_smoothed[intersections_dict['idx'][i] - x_cut_off_value] + 3))
if plotting == True:
# set y-limits, add legend and display plots
plt.axhline(0, color='black')
plt.ylim(-30, 30)
plt.legend()
plt.xlabel('frames')
plt.ylabel('dx/frame')
filename_title = filename.split("_", 2)[:2]
filename_title = filename_title[0] + filename_title[1]
plt.title(f"{filename_title}-{foot}")
# plt.show()
try:
os.makedirs(step_detection_folder)
# print("folder for curve_fitting plots created")
except OSError as e:
if e.errno != errno.EEXIST:
raise
if relative == True:
plt.savefig(os.path.join(step_detection_folder, f"steps_{filename_title}_{foot}_rel.pdf"))
else:
plt.savefig(os.path.join(step_detection_folder, f"steps_{filename_title}_{foot}.pdf"))
# plt.show()
plt.close()
return intersections_dict
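# Usage sketch (values are hypothetical; df needs the columns used above, e.g. 'body_motion',
# 'foot_motion' or 'rel_foot_motion'):
#   intersections = smooth_and_plot(df, data_rows_count=len(df), p_cut_off=0.05, relative=False,
#                                   foot='FL', filename='lizard01_run01_video',
#                                   step_detection_folder='results/step_detection', plotting=True)
#   # intersections['idx'] -> frame indices of swing/stance transitions,
#   # intersections['sign'] -> direction of each crossing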
## removed the unused function, might need to put back in at some point
class StridesAndStances:
"""
class to detect stride and stance phases for current feet => initialize class instance for every foot.
This method iterates through all frames, if the current frame is one of the intersection points, the sign of the
    point will be checked. If the sign is positive the phase will be set to swing and the stride_phase_counter increased
by 1. All frames until the next intersection will be assigned that phase name and number.
Rows before and after first and last index respectively will be filled with np.nan.
"""
def __init__(self):
import numpy as np
self.stride_phase_counter = 0
self.stance_phase_counter = 0
self.phase = 'UNKNOWN'
self.current_phase = np.nan
def determine_stride_phases(self, intersection_dict, data_rows_count):
"""
Function to detect the swing or stance phases using the intersection points and their signs.
Return: list with one entry for every row.
"""
import numpy as np
# create empty list with length of data rows count:
results = np.full((data_rows_count,), '', dtype='S10')
index = 0
for row in range(data_rows_count):
# switch swing or stance depending on sign of intersection point
if row in intersection_dict['idx']:
index = intersection_dict['idx'].index(row) # find the index in list of current idx
sign = intersection_dict['sign'][index] # find the respective sign
# if sign is positive, the phase till next idx will be swing
self.current_phase = self.assign_swing_or_stance(sign)
# fill all rows until next idx with that swing or stance number
results[row] = self.current_phase
# fill all rows after last idx with np.nan
if index != 0:
results[intersection_dict['idx'][index]:] = np.nan
# print("results: ", results)
return results
# Todo: Go through intersection_dict and assign correct swing or stance phase for every row
def assign_swing_or_stance(self, sign):
if sign > 0: # swing
if self.phase == 'stance' or self.phase == 'UNKNOWN':
self.stride_phase_counter += 1
self.phase = 'swing' # originally called stride
retval = f'swing{self.stride_phase_counter:04d}'
else: # stance
if self.phase == 'swing' or self.phase == 'UNKNOWN':
self.stance_phase_counter += 1
self.phase = 'stance'
retval = f'stance{self.stance_phase_counter:04d}'
return retval
def __str__(self):
return f"swings: {self.stride_phase_counter}, stances: {self.stance_phase_counter}"
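# Usage sketch (one instance per foot, as the class docstring notes):
#   sns = StridesAndStances()
#   phases = sns.determine_stride_phases(intersections, data_rows_count)
#   # entries look like b'swing0001' / b'stance0001' once the first transition has been reached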
def plot_footfall_pattern(results, data_rows_count, filename, plotting_footfall_folder):
"""
takes the result dataframe and creates a new dataframe for plotting. Every foot gets assigned an individual number.
    The dataframe is then filtered for entries starting with "stance"; stances get replaced by the respective number,
    while all swing phases become NaN.
    In the plot stance phases are therefore displayed as bars and swing phases are empty.
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.patches import Patch
import os
import errno
df_plot = pd.DataFrame(columns=results.keys(), index=range(data_rows_count))
# filter here and only fill in stances as numbers => stances bars, strides white
for i, key in enumerate(results):
df_plot[key] = [i + 1 if s.startswith(b'stance') else np.NaN for s in results[key]]
key_list = [key for key in df_plot.columns]
colors = False
if colors:
cmap = plt.cm.coolwarm
legend_elements = [Line2D([0], [0], color=cmap(0.), lw=4, label=key_list[0]),
Line2D([0], [0], color=cmap(.33), lw=4, label=key_list[1]),
Line2D([0], [0], color=cmap(.66), lw=4, label=key_list[2]),
Line2D([0], [0], color=cmap(1.), lw=4, label=key_list[3]),
Line2D([0], [0], color='black', lw=4, label='stance phases'),
Line2D([0], [0], color='white', lw=4, label='stride phases')]
fig, ax = plt.subplots()
df_plot.plot(linewidth=10, color=cmap(np.linspace(0, 1, 5)), ax=ax)
ax.legend(handles=legend_elements)
else:
legend_elements = [Line2D([0], [0], color='white', lw=1, label='1 = FL | 2 = FR | 3 = HR | 4 = HL'),
Line2D([0], [0], color='black', lw=4, label='stance phases'),
Line2D([0], [0], color='white', lw=4, label='stride phases')]
fig, ax = plt.subplots()
df_plot.plot(linewidth=10, color='black', ax=ax)
ax.legend(handles=legend_elements)
# saves footfall pattern diagrams as pdf in defined result folder. If folder is not extant yet, it will be created
try:
os.makedirs(plotting_footfall_folder)
except OSError as e:
if e.errno != errno.EEXIST:
raise
plt.savefig(os.path.join(plotting_footfall_folder, "{}.pdf".format(filename)))
plt.clf()
plt.close()
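# Usage sketch: 'results' is a dict mapping foot labels to the per-frame phase arrays
# produced by StridesAndStances, e.g. (hypothetical)
#   plot_footfall_pattern({'FL': fl, 'FR': fr, 'HR': hr, 'HL': hl}, data_rows_count,
#                         filename='lizard01_run01', plotting_footfall_folder='results/footfall')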
| 49.534653 | 136 | 0.625625 | 2,785 | 0.111333 | 0 | 0 | 0 | 0 | 0 | 0 | 10,788 | 0.431261 |
9ba6402b03516602907bdd9d7c0d28f9b0666716
| 1,601 |
py
|
Python
|
python/test.py
|
drulm/Spark_Knapsack
|
ef8ab8b6ac6762391b63ff29ebf857a65f98698d
|
[
"Apache-2.0"
] | 7 |
2018-04-18T00:51:29.000Z
|
2021-05-30T12:58:36.000Z
|
python/test.py
|
darrell-ulm/Spark_Knapsack
|
ef8ab8b6ac6762391b63ff29ebf857a65f98698d
|
[
"Apache-2.0"
] | null | null | null |
python/test.py
|
darrell-ulm/Spark_Knapsack
|
ef8ab8b6ac6762391b63ff29ebf857a65f98698d
|
[
"Apache-2.0"
] | 2 |
2019-05-28T03:13:26.000Z
|
2019-11-22T19:50:14.000Z
|
# --------------------------------------------
# Test the Approximate Knapsack function test
# --------------------------------------------
# Pull in the knapsack library.
import random
from pyspark.sql import SparkSession
from knapsack import knapsack
# Create the SparkSession.
sc = SparkSession \
.builder \
.appName("Knapsack Approximation Algorithm Test") \
.getOrCreate()
# Knapsack problem size.
N = 10
# Setup sample data for knapsack.
knapsackData = [('item_' + str(k), random.uniform(1.0, 10.0), random.uniform(1.0, 10.0)) for k in range(N)]
# Make a Dataframe with item(s), weight(s), and value(s) for the knapsack.
knapsackData = sc.createDataFrame(knapsackData, ['item', 'weights', 'values'])
# Display the original data
print("Original Data:")
knapsackData.show()  # .show() prints the DataFrame itself and returns None
print("\n")
# Create a random maximum weight
W = random.uniform(N * 1.3, N * 1.6)
# Show the weight.
print("W: ")
print(W)
print("\n")
# Call the knapsack greedy approximation function with the data and the maximum weight W.
k = knapsack.knapsackApprox(knapsackData, W)
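# (Presumably the approximation greedily picks items by value/weight ratio until the capacity W
#  is filled -- an assumption about knapsack.knapsackApprox, not something verified here.)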
# Show the results Dataframe.
print("Selected Elements:")
k.show()  # .show() prints the DataFrame itself and returns None
print("\n")
# Show totals for selected elements of knapsack.
sumValues = k.rdd.map(lambda x: x["values"]).reduce(lambda x, y: x+y)
sumWeights = k.rdd.map(lambda x: x["weights"]).reduce(lambda x, y: x+y)
numResults = k.count()
print("Totals:")
print("Sum Values: ", sumValues)
print("Sum Weights: ", sumWeights)
print(numResults)
print("\n")
# ------------------------------------------
# End of Approximate Knapsack function test
# ------------------------------------------
| 26.245902 | 107 | 0.634603 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 863 | 0.539038 |
9ba65d1715a7fcfaf9934b6a6bcb75b319f1120c
| 413 |
py
|
Python
|
mysite/blog/views.py
|
josonle/LearningDjango
|
62558aa141c5872c2380e5daa336da199a54b0e1
|
[
"MIT"
] | 1 |
2019-02-19T07:38:02.000Z
|
2019-02-19T07:38:02.000Z
|
mysite/blog/views.py
|
josonle/LearningDjango
|
62558aa141c5872c2380e5daa336da199a54b0e1
|
[
"MIT"
] | null | null | null |
mysite/blog/views.py
|
josonle/LearningDjango
|
62558aa141c5872c2380e5daa336da199a54b0e1
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render,get_object_or_404
from .models import BlogArticles
# Create your views here.
def blog_title(request):
blogs=BlogArticles.objects.all()
return render(request,"blog/titles.html",{"blogs":blogs})
def article(request,a_id):
blog=get_object_or_404(BlogArticles,id=a_id)
publish_time=blog.publish
return render(request,"blog/article.html",{'blog':blog,'publish':publish_time})
| 34.416667 | 80 | 0.79661 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 84 | 0.20339 |
9ba95d022de0cbe77839799064d678253b042077
| 204 |
py
|
Python
|
tests/test_gluupostgres.py
|
danilosoarescardoso/cloud-native-edition
|
b8aa66119dc4440b1ca3741a4065c9ae7feb42fb
|
[
"Apache-2.0"
] | 1 |
2021-04-04T04:25:49.000Z
|
2021-04-04T04:25:49.000Z
|
tests/test_gluupostgres.py
|
danilosoarescardoso/cloud-native-edition
|
b8aa66119dc4440b1ca3741a4065c9ae7feb42fb
|
[
"Apache-2.0"
] | null | null | null |
tests/test_gluupostgres.py
|
danilosoarescardoso/cloud-native-edition
|
b8aa66119dc4440b1ca3741a4065c9ae7feb42fb
|
[
"Apache-2.0"
] | null | null | null |
import pygluu.kubernetes.postgres as module0
from pygluu.kubernetes.postgres import Postgres
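# Smoke test: instantiating Postgres() with no arguments may raise; the bare try/except
# only ensures that any such failure does not crash the test run.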
def test_base_exception():
try:
var0 = module0.Postgres()
except BaseException:
pass
| 20.4 | 47 | 0.72549 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
9ba99aa02744fe90eebce52ab7ecf4ce0854c775
| 1,367 |
py
|
Python
|
Medium/918. Maximum Sum Circular Subarray/solution (1).py
|
czs108/LeetCode-Solutions
|
889f5b6a573769ad077a6283c058ed925d52c9ec
|
[
"MIT"
] | 3 |
2020-05-09T12:55:09.000Z
|
2022-03-11T18:56:05.000Z
|
Medium/918. Maximum Sum Circular Subarray/solution (1).py
|
czs108/LeetCode-Solutions
|
889f5b6a573769ad077a6283c058ed925d52c9ec
|
[
"MIT"
] | null | null | null |
Medium/918. Maximum Sum Circular Subarray/solution (1).py
|
czs108/LeetCode-Solutions
|
889f5b6a573769ad077a6283c058ed925d52c9ec
|
[
"MIT"
] | 1 |
2022-03-11T18:56:16.000Z
|
2022-03-11T18:56:16.000Z
|
# 918. Maximum Sum Circular Subarray
# Runtime: 1028 ms, faster than 5.09% of Python3 online submissions for Maximum Sum Circular Subarray.
# Memory Usage: 18.6 MB, less than 33.98% of Python3 online submissions for Maximum Sum Circular Subarray.
import math
class Solution:
def maxSubarraySumCircular(self, nums: list[int]) -> int:
def max_one_interval() -> int:
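            # Standard Kadane scan: maximum sum of a contiguous, non-wrapping subarray.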
curr_max_sum, max_sum = -math.inf, -math.inf
for x in nums:
curr_max_sum = max(x, curr_max_sum + x)
max_sum = max(max_sum, curr_max_sum)
return max_sum
def max_two_interval() -> int:
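            # Wrapping case: a prefix plus a disjoint suffix. Precompute suffix sums and their
            # running maxima, then pair each prefix nums[0..i] with the best suffix starting at i+2 or later.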
right_sums = [-math.inf] * len(nums)
right_sums[-1] = nums[-1]
for i in range(len(nums) - 2, -1, -1):
right_sums[i] = right_sums[i + 1] + nums[i]
max_right_sums = [-math.inf] * len(nums)
max_right_sums[-1] = right_sums[-1]
for i in range(len(nums) - 2, -1, -1):
max_right_sums[i] = max(max_right_sums[i + 1], right_sums[i])
max_sum = -math.inf
left_sum = 0
for i in range(len(nums) - 2):
left_sum += nums[i]
max_sum = max(max_sum, left_sum + max_right_sums[i + 2])
return max_sum
return max(max_one_interval(), max_two_interval())
| 35.973684 | 106 | 0.567666 | 1,103 | 0.806876 | 0 | 0 | 0 | 0 | 0 | 0 | 244 | 0.178493 |
9ba9d75f770e59ab5f8bd4c1745fa1e171a92981
| 10,644 |
py
|
Python
|
testing.py
|
gustxsr/learning-with-assemblies
|
4158829adf4500a9ae868ca7c64ffef90753c66b
|
[
"MIT"
] | null | null | null |
testing.py
|
gustxsr/learning-with-assemblies
|
4158829adf4500a9ae868ca7c64ffef90753c66b
|
[
"MIT"
] | null | null | null |
testing.py
|
gustxsr/learning-with-assemblies
|
4158829adf4500a9ae868ca7c64ffef90753c66b
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import convolve
from matplotlib.gridspec import GridSpec
import matplotlib as mpl
rng = np.random.default_rng()
def k_cap(input, cap_size):
"""
    Given an input vector, return a binary vector with ones at the positions of the
    cap_size largest entries (applied row-wise for 2-D inputs).
"""
output = np.zeros_like(input)
if len(input.shape) == 1:
idx = np.argsort(input)[-cap_size:]
output[idx] = 1
else:
idx = np.argsort(input, axis=-1)[:, -cap_size:]
np.put_along_axis(output, idx, 1, axis=-1)
return output
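# e.g. k_cap(np.array([0.1, 0.5, 0.3, 0.9]), 2) -> array([0., 1., 0., 1.])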
class brain_region:
"""
Creates a brain region from assembly calculus
"""
    def __init__(self, n_neurons, n_in, cap_size, id: int) -> None:
"""
        Creates a brain region with n_neurons neurons that receives n_in inputs and uses a winner-take-all cap of size cap_size.
"""
self.id=id
self.n_neurons=n_neurons
self._n_in=n_in
self.cap_size=cap_size
mask = np.zeros((self.n_neurons, self.n_neurons), dtype=bool) # NXN array of zeros
W = np.zeros((self.n_neurons, self.n_neurons))
mask_a = np.zeros((self._n_in, self.n_neurons), dtype=bool) # image to N matrix
A = np.zeros((self._n_in, self.n_neurons))
        mask = (rng.random((self.n_neurons, self.n_neurons)) < sparsity) & np.logical_not(np.eye(n_neurons, dtype=bool))  # random recurrent N-to-N connectivity mask with an empty diagonal (no self-connections)
W = np.ones((self.n_neurons, self.n_neurons)) * mask
W /= W.sum(axis=0) # Transition probabiliy matrix
mask_a = rng.random((self._n_in, self.n_neurons)) < sparsity
A = np.ones((self._n_in, self.n_neurons)) * mask_a
A /= A.sum(axis=0)
W = np.ones_like(W) * mask
A = np.ones_like(A) * mask_a
W /= W.sum(axis=0, keepdims=True)
A /= A.sum(axis=0, keepdims=True)
self._W=W
self._A=A
self.mask=mask
self.mask_a=mask_a
self.act_h = np.zeros(self.n_neurons)
self.bias = np.zeros(self.n_neurons)
self.b = -1
self.classify_act_h=np.zeros(self.n_neurons)
def next(self, input, initial=False, final=False ):
"""
It computes the activation output of the input going
through this brain region.
"""
if initial:
self.act_h = np.zeros(self.n_neurons)
act_h_new = k_cap(self.act_h @ self._W + input @ self._A + self.bias, self.cap_size) # output a NXN array for the neurons that are activated. The first part is from self activiation and second from inoput
self._A[(input > 0)[:, np.newaxis] & (act_h_new > 0)[np.newaxis, :]] *= 1 + beta
self._W[(self.act_h > 0)[:, np.newaxis] & (act_h_new > 0)[np.newaxis, :]] *= 1 + beta
self.act_h = act_h_new
if final:
self.reinforce_bias()
print("Shape of act_h:"+str(self.act_h.shape))
return self.act_h.copy()
def reinforce_bias(self):
"""
This function is meant to be called at the end of each round to renormalize the transition matrices
"""
self.bias[self.act_h>0]+=-1 # after all rounds the activated neurons have a smaller bias so they more likely to fire
self._A /= self._A.sum(axis=0, keepdims=True)
self._W /= self._W.sum(axis=0, keepdims=True)
def classify(self,input, n_examples, initial=False, ):
if initial:
self.classify_act_h=np.zeros((n_examples , self.n_neurons))
self.classify_act_h=k_cap(self.classify_act_h @ self._W + input @ self._A + self.bias, self.cap_size)
return self.classify_act_h
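# Note: brain_region reads the module-level globals `sparsity` and `beta` defined near the
# bottom of this file, so those must exist before an instance is created.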
class assembly_network:
"""
This class is meant to implement the assembly calculus structure
This generalizes for multiple inputs and brain regions
"""
def __init__(self, number_of_inputs: int , sparsity:int, layers: list, beta: float) -> None:
"""
Initializes the structure of the Brain Region. It takes the number of inputs and then a list for layers that should contain tuples of the form (neurons, cap_size).
"""
self.n_in = number_of_inputs # Vector of 28X28 pixels
# List with pairs of tuples (n, c) where n is the number of neurons and c is the size of the cap
self.create_layers(layers) # Creates all the structure for the brain regions
self.sparsity = sparsity
self.beta =beta
def create_layers(self, layers)-> None:
"""
Creates brain regions according to the list from layers
The layers list should contain tuples of the form (number of neurons, cap size)
"""
self.layers=[]
temp=self.n_in+0
for k, (neurons, cap_size) in enumerate(layers):
self.layers.append(brain_region(neurons, temp, cap_size, k))
temp=neurons+0
def next(self, input: np.array, initial=False, final=False ):
"""
During the training process, it puts the input
through the network and it runs it through all the layers
"""
temp=input
print(self.layers)
for k , brain_region_k in enumerate(self.layers):
new_temp=brain_region_k.next(temp, initial=initial, final=final)
temp=new_temp
return temp
def classify(self,input, initial=False ):
temp=input
for brain_region in self.layers:
print("temp shape"+str(temp.shape))
temp=brain_region.classify(temp, input.shape[0], initial)
return temp
class classification_mnist:
    def __init__(self, kernels: list, train_path: str, test_path: str, number_of_inputs: int, sparsity: int, layers: list, beta: float):
"""
Creates a MNIST recognition architecture based on assembly calculus
"""
self.cap_size=layers[-1][1]
self.n_neurons= layers[-1][0]
        self.n_in=number_of_inputs
        # bug fix: create_training_data()/create_testing_data() read self.n_examples, which was
        # never set; assume the module-level `n_examples` defined below is the intended value
        self.n_examples = n_examples
        self.assembly_network = assembly_network(number_of_inputs, sparsity, layers, beta)
self.get_files( train_path, test_path)
self.create_training_data(kernels)
self.create_testing_data(kernels)
def create_training_data(self ,kernels= [np.ones((1, 3, 3))] ):
"""
Creates a data set with n_examples from the files obtained
by get_files
"""
self.train_examples = []
for kernel in kernels:
self.train_examples.append(np.zeros((10, self.n_examples, 784)))
for i in range(10):
                # Does the convolution between the kernel (an all-ones 3x3 by default) and each of the images
self.train_examples[-1][i] = k_cap(convolve(self.train_imgs[self.train_labels == i][:self.n_examples].reshape(-1, 28, 28), kernel, mode='same').reshape(-1, 28 * 28), self.cap_size)
def create_testing_data(self ,kernels= [np.ones((1, 3, 3))] ):
"""
Creates a data set with n_examples from the files obtained
by get_files
"""
self.test_examples = []
for kernel in kernels:
self.test_examples.append( np.zeros((10, self.n_examples, 784) ))
for i in range(10):
                # Does the convolution between the kernel (an all-ones 3x3 by default) and each of the images
self.test_examples[-1][i] = k_cap(convolve(self.test_imgs[self.test_labels == i][:self.n_examples].reshape(-1, 28, 28), kernel, mode='same').reshape(-1, 28 * 28), self.cap_size)
def get_files(self, train_path: str, test_path: str)-> None:
"""
        Given two paths, retrieves the data stored at those paths. train_path should be the path of the training data
        and test_path should be the path of the test data.
        Assumes the data at both paths is in CSV format.
"""
test_data = np.loadtxt(test_path, delimiter=',')
train_data = np.loadtxt(train_path, delimiter=',')
self.train_imgs = train_data[:, 1:]
self.train_imgs.shape
self.test_imgs = test_data[:, 1:]
self.train_labels = train_data[:, 0]
self.test_labels = test_data[:, 0]
def train_model(self, n_rounds)-> np.array:
"""
        Given the number of rounds (images shown to the model per class),
        runs the network and trains the edge weights.
"""
self.activations = np.zeros((10, n_rounds, self.n_neurons))
for i in range(10): # iterations for each of the labels
for j in range(n_rounds): # for each of the rounds
input = self.train_examples[0][i, j] # image inputs
act_h= self.assembly_network.next(input, initial=(j==0), final= (j==n_rounds-1) ) # output a NXN array for the neurons that are activated. The first part is from self activiation and second from inoput
self.activations[i, j] = act_h
return self.activations
    def classify(self, n_rounds, test=True) -> np.ndarray:
        """
        When called, this function runs one batch of data through
        the whole network and then returns the per-class accuracies (one value per digit class).
"""
if test:
examples=self.test_examples[0]
else:
examples=self.train_examples[0]
self.n_examples=examples.shape[1]
#### RUNS THROUGH NETWORK
outputs = np.zeros((10, n_rounds+1, self.n_examples, self.n_neurons))
for i in np.arange(10):
for j in range(n_rounds):
                outputs[i, j+1] = self.assembly_network.classify(examples[i], initial=(j == 0))  # run the network on each class for n_rounds and save the active neurons at each step
#### STARTS CLASSIFICATION
c = np.zeros((10, self.n_neurons))
for i in range(10):
c[i, outputs[i, 1].sum(axis=0).argsort()[-self.cap_size:]] = 1
predictions = (outputs[:, 1] @ c.T).argmax(axis=-1)
acc = (predictions == np.arange(10)[:, np.newaxis]).sum(axis=-1) / self.n_examples
return acc
n_in = 784 # Vector of 28X28 pixels
cap_size = 200 # Size of the cap
sparsity = 0.1
n_rounds = 10
n_examples=800
beta = 1e0
train_path="./data/mnist/mnist_train.csv"
layers=[ (2000,200)]# number of neurons in network with respective cap_size
test_path="./data/mnist/mnist_test.csv"
kernels=[np.ones((1, 3, 3))]
classify_two=classification_mnist(kernels,train_path,test_path, n_in , sparsity, layers, beta)
classify_two.train_model( 5)
print(classify_two.classify( 5, test=False))
| 37.087108 | 217 | 0.613209 | 9,504 | 0.892897 | 0 | 0 | 0 | 0 | 0 | 0 | 3,228 | 0.303269 |
9bab281692147103f4b861c83d053ce8c6a1c16f
| 4,398 |
py
|
Python
|
src/chatstats.py
|
brendancsmith/cohort-facebook
|
a7b37d14b7152349930bc10f69cb72446d6c3581
|
[
"MIT"
] | null | null | null |
src/chatstats.py
|
brendancsmith/cohort-facebook
|
a7b37d14b7152349930bc10f69cb72446d6c3581
|
[
"MIT"
] | null | null | null |
src/chatstats.py
|
brendancsmith/cohort-facebook
|
a7b37d14b7152349930bc10f69cb72446d6c3581
|
[
"MIT"
] | null | null | null |
from collections import Counter, defaultdict
from datetime import datetime
from statistics import mean
from dateutil.parser import parse as parse_datetime
from dateutil import rrule
def num_comments_by_user(comments):
commenters = (comment['from']['name'] for comment in comments)
counter = Counter(commenters)
return counter
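# e.g. num_comments_by_user([{'from': {'name': 'A'}}, {'from': {'name': 'A'}}]) == Counter({'A': 2})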
def percent_empty_comments_by_user(emptyComments, nonEmptyComments):
numEmptyCommentsByUser = num_comments_by_user(emptyComments)
numNonEmptyCommentsByUser = num_comments_by_user(nonEmptyComments)
# TODO: could break if a user doesn't have one type of comment
percentEmptyCommentsByUser = Counter()
for user in numNonEmptyCommentsByUser:
numEmpty = numEmptyCommentsByUser[user]
numTotal = numEmpty + numNonEmptyCommentsByUser[user]
percentEmptyCommentsByUser[user] = numEmpty / numTotal
return percentEmptyCommentsByUser
def num_comments_by_day(comments):
dts = datetimes(comments)
counter = Counter(dt.date() for dt in dts)
first_day = min(counter.keys())
last_day = datetime.now().date()
all_dates = (dt.date() for dt in rrule.rrule(rrule.DAILY,
dtstart=first_day,
until=last_day))
for date in all_dates:
if date not in counter:
counter[date] = 0
return counter
def avg_word_count_by_user(comments, default_word_count=1):
wordCountsByUser = defaultdict(list)
for comment in comments:
name = comment['from']['name']
words = None
if 'message' not in comment:
words = default_word_count
else:
words = len(comment['message'].split())
wordCountsByUser[name].append(words)
avgWordCountByUser = dict((user, mean(wordCounts))
for user, wordCounts in wordCountsByUser.items())
return avgWordCountByUser
def longest_comment_by_users(comments):
longestCommentByUser = defaultdict(int)
commentsByUser = defaultdict(list)
for comment in comments:
name = comment['from']['name']
commentsByUser[name].append(comment)
for name, comments in commentsByUser.items():
commentLengths = (len(comment['message']) for comment in comments)
maxCommentLength = max(commentLengths)
longestCommentByUser[name] = maxCommentLength
return longestCommentByUser
def word_count_by_day(comments):
wordCountsByDay = defaultdict(int)
for comment in comments:
timestamp = comment['created_time']
date = parse_datetime(timestamp).date()
words = len(comment['message'].split())
wordCountsByDay[date] += words
first_day = min(wordCountsByDay.keys())
last_day = datetime.now().date()
all_dates = (dt.date() for dt in rrule.rrule(rrule.DAILY,
dtstart=first_day,
until=last_day))
for date in all_dates:
if date not in wordCountsByDay:
wordCountsByDay[date] = 0
return wordCountsByDay
def daily_activity_by_user(comments):
first_day = min(parse_datetime(comment['created_time']).date() for comment in comments)
last_day = datetime.now().date()
all_dates = [dt.date() for dt in rrule.rrule(rrule.DAILY,
dtstart=first_day,
until=last_day)]
activityByUser = defaultdict(list)
for comment in comments:
user = comment['from']['name']
timestamp = comment['created_time']
date = parse_datetime(timestamp).date()
activityByUser[user].append(date)
make_blank_counter = lambda: Counter(dict(zip(all_dates, [0] * len(all_dates))))
dailyActivityByUser = {}
for user, activity in activityByUser.items():
dailyActivityByUser[user] = make_blank_counter()
dailyActivityByUser[user].update(activity)
return dailyActivityByUser
def datetimes(comments):
timestamps = (comment['created_time'] for comment in comments)
dts = map(parse_datetime, timestamps)
return dts
def corpus(comments):
messages = [comment['message'] for comment in comments
if 'message' in comment]
corpus = '\n'.join(messages)
return corpus
| 29.918367 | 91 | 0.648931 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 224 | 0.050932 |
9bac9dcd120d634f437ebef6f5de2fb78cd0ef74
| 741 |
py
|
Python
|
app/model/user_signup.py
|
dwdraugr/YADS
|
c8036d8196a3158636aaa4f1910033e70ec8ecb4
|
[
"Apache-2.0"
] | 3 |
2019-09-02T11:26:58.000Z
|
2019-12-06T15:54:38.000Z
|
app/model/user_signup.py
|
dwdraugr/YADS
|
c8036d8196a3158636aaa4f1910033e70ec8ecb4
|
[
"Apache-2.0"
] | null | null | null |
app/model/user_signup.py
|
dwdraugr/YADS
|
c8036d8196a3158636aaa4f1910033e70ec8ecb4
|
[
"Apache-2.0"
] | null | null | null |
import app.model.model as model
import hashlib
class UserSignup(model.Model):
def __init__(self):
super(UserSignup, self).__init__()
def signup(self, username, password, email):
cursor = self.matchadb.cursor(dictionary=True)
query = [
username,
email,
]
cursor.execute("SELECT id FROM users WHERE username = %s OR email = %s", tuple(query))
data = cursor.fetchall()
if cursor.rowcount == 0:
query.append(hashlib.sha3_512(password.encode('utf-8')).hexdigest())
cursor.execute("INSERT INTO users (id, username, password, email) VALUES "
"(NULL, %s, %s, %s)", tuple(query))
            # bug fix: the original called cursor.execute() with no arguments, which raises a TypeError;
            # committing the transaction is the presumed intent (assumes self.matchadb is the open connection)
            self.matchadb.commit()
| 35.285714 | 94 | 0.584345 | 692 | 0.933873 | 0 | 0 | 0 | 0 | 0 | 0 | 142 | 0.191633 |
9bae71f7a1d534c3b03ab7c28df3edc847994f0b
| 2,125 |
py
|
Python
|
utils/lsms/compositional_histogram_cutoff.py
|
allaffa/HydraGNN
|
b48f75cd3fe1b0d03bae9af3e6bdc2bb29f8b9c6
|
[
"BSD-3-Clause"
] | 1 |
2022-01-30T16:50:51.000Z
|
2022-01-30T16:50:51.000Z
|
utils/lsms/compositional_histogram_cutoff.py
|
allaffa/HydraGNN
|
b48f75cd3fe1b0d03bae9af3e6bdc2bb29f8b9c6
|
[
"BSD-3-Clause"
] | 1 |
2022-02-03T11:45:53.000Z
|
2022-02-09T17:59:37.000Z
|
utils/lsms/compositional_histogram_cutoff.py
|
kshitij-v-mehta/HydraGNN
|
d27958270b2beb35f98e4403239e3c5c77ad4a04
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import shutil
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
def find_bin(comp, nbins):
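    # Return the index of the histogram bin (edges = linspace(0, 1, nbins)) that `comp` falls in;
    # values landing exactly on an edge, or outside (0, 1), map to the final bin.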
bins = np.linspace(0, 1, nbins)
for bi in range(len(bins) - 1):
if comp > bins[bi] and comp < bins[bi + 1]:
return bi
return nbins - 1
def compositional_histogram_cutoff(
dir,
elements_list,
histogram_cutoff,
num_bins,
overwrite_data=False,
create_plots=True,
):
"""
Downselect LSMS data with maximum number of samples per binary composition.
"""
if dir.endswith("/"):
dir = dir[:-1]
new_dir = dir + "_histogram_cutoff/"
if os.path.exists(new_dir):
if overwrite_data:
shutil.rmtree(new_dir)
else:
print("Exiting: path to histogram cutoff data already exists")
return
if not os.path.exists(new_dir):
os.makedirs(new_dir)
comp_final = []
comp_all = np.zeros([num_bins])
for filename in tqdm(os.listdir(dir)):
path = os.path.join(dir, filename)
# This is LSMS specific - it assumes only one header line and only atoms following.
atoms = np.loadtxt(path, skiprows=1)
elements, counts = np.unique(atoms[:, 0], return_counts=True)
# Fixup for the pure component cases.
for e, elem in enumerate(elements_list):
if elem not in elements:
elements = np.insert(elements, e, elem)
counts = np.insert(counts, e, 0)
num_atoms = atoms.shape[0]
composition = counts[0] / num_atoms
b = find_bin(composition, num_bins)
comp_all[b] += 1
if comp_all[b] < histogram_cutoff:
comp_final.append(composition)
new_path = os.path.join(new_dir, filename)
os.symlink(path, new_path)
if create_plots:
plt.figure(0)
plt.hist(comp_final, bins=num_bins)
plt.savefig("composition_histogram_cutoff.png")
plt.figure(1)
w = 1 / num_bins
plt.bar(np.linspace(0, 1, num_bins), comp_all, width=w)
plt.savefig("composition_initial.png")
| 27.960526 | 91 | 0.610353 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 348 | 0.163765 |
9baf6c3804173cb531cd1c2955cba4bc19bd4390
| 109 |
py
|
Python
|
CVcontact/urls.py
|
siavashMehran/Portfolio
|
a592ec51122d96e8e336365fd3cd039a7f223221
|
[
"MIT"
] | null | null | null |
CVcontact/urls.py
|
siavashMehran/Portfolio
|
a592ec51122d96e8e336365fd3cd039a7f223221
|
[
"MIT"
] | null | null | null |
CVcontact/urls.py
|
siavashMehran/Portfolio
|
a592ec51122d96e8e336365fd3cd039a7f223221
|
[
"MIT"
] | null | null | null |
from django.urls import path
from .views import contactMe
urlpatterns = [
path('contact', contactMe)
]
| 13.625 | 30 | 0.724771 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 0.082569 |
9bb0067ad50b3ebfd94976cc78cce86faed75925
| 1,256 |
py
|
Python
|
PointMatcher/actions/export.py
|
daisatojp/PointMatcher
|
927bd4dd676b18da763ccaab2f429f27de281710
|
[
"MIT"
] | 2 |
2021-01-05T03:42:50.000Z
|
2022-03-16T07:17:02.000Z
|
PointMatcher/actions/export.py
|
daisatojp/PointMatcher
|
927bd4dd676b18da763ccaab2f429f27de281710
|
[
"MIT"
] | 4 |
2021-01-07T06:28:01.000Z
|
2021-01-18T11:59:56.000Z
|
PointMatcher/actions/export.py
|
daisatojp/PointMatcher
|
927bd4dd676b18da763ccaab2f429f27de281710
|
[
"MIT"
] | null | null | null |
import os.path as osp
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QAction
from PyQt5.QtWidgets import QFileDialog
from PointMatcher.utils.filesystem import icon_path
class ExportAction(QAction):
def __init__(self, parent):
super(ExportAction, self).__init__('Export', parent)
self.p = parent
self.setIcon(QIcon(icon_path('save')))
self.setShortcut('Ctrl+Alt+S')
self.triggered.connect(self.export)
self.setEnabled(False)
def export(self, _value=False):
if (self.p.annotDir is not None) and osp.exists(self.p.annotDir):
defaultDir = self.p.annotDir
elif (self.p.imageDir is not None) and osp.exists(self.p.imageDir):
defaultDir = self.p.imageDir
else:
defaultDir = '.'
defaultDir = self.p.settings.get('exportPath', defaultDir)
filters = 'json file (*.json)'
filename = QFileDialog.getSaveFileName(
self.p, 'choose file name to be exported', defaultDir, filters)
if filename:
if isinstance(filename, (tuple, list)):
filename = filename[0]
self.p.matching.export(filename)
self.p.settings['exportPath'] = osp.dirname(filename)
| 35.885714 | 75 | 0.642516 | 1,073 | 0.854299 | 0 | 0 | 0 | 0 | 0 | 0 | 106 | 0.084395 |
9bb08d27951cdcbd92a25a4408ad1a1b8fb55f34
| 1,345 |
py
|
Python
|
test/test_CommandHead.py
|
jcandan/WonderPy
|
ee82322b082e94015258b34b27f23501f8130fa2
|
[
"MIT"
] | 46 |
2018-07-31T20:30:41.000Z
|
2022-03-23T17:14:51.000Z
|
test/test_CommandHead.py
|
jcandan/WonderPy
|
ee82322b082e94015258b34b27f23501f8130fa2
|
[
"MIT"
] | 24 |
2018-08-01T09:59:29.000Z
|
2022-02-26T20:57:51.000Z
|
test/test_CommandHead.py
|
jcandan/WonderPy
|
ee82322b082e94015258b34b27f23501f8130fa2
|
[
"MIT"
] | 24 |
2018-08-01T19:14:31.000Z
|
2021-02-18T13:26:40.000Z
|
import unittest
from mock import Mock
from test.robotTestUtil import RobotTestUtil
class MyTestCase(unittest.TestCase):
def test_head_turn(self):
robot = RobotTestUtil.make_fake_dash()
robot.stage_cmds = Mock()
m = robot.stage_cmds
robot.commands.head.do_pan_angle (90)
robot.commands.head.do_tilt_angle (10)
robot.commands.head.do_pan_voltage (75)
robot.commands.head.do_tilt_voltage (85)
robot.commands.head.do_pan_tilt_angle (-45.3, -30.3)
robot.commands.head.do_pan_tilt_voltage(-10.3, -11.3)
self.assertEquals(m.call_count, 8)
self.assertAlmostEquals(m.call_args_list[0][0][0]['203']['degree'], 90)
self.assertAlmostEquals(m.call_args_list[1][0][0]['202']['degree'], -10)
self.assertAlmostEquals(m.call_args_list[2][0][0]['213']['prcnt' ], 75)
self.assertAlmostEquals(m.call_args_list[3][0][0]['214']['prcnt' ], -85)
self.assertAlmostEquals(m.call_args_list[4][0][0]['203']['degree'], -45.3)
self.assertAlmostEquals(m.call_args_list[5][0][0]['202']['degree'], 30.3)
self.assertAlmostEquals(m.call_args_list[6][0][0]['213']['prcnt' ], -10.3)
self.assertAlmostEquals(m.call_args_list[7][0][0]['214']['prcnt' ], 11.3)
if __name__ == '__main__':
unittest.main()
| 40.757576 | 82 | 0.646097 | 1,210 | 0.899628 | 0 | 0 | 0 | 0 | 0 | 0 | 110 | 0.081784 |
9bb204788fee823d3cdd79e26af5c6bd4b825e8a
| 3,866 |
py
|
Python
|
feature_options.py
|
soarsmu/HERMES
|
9b38eedd1f7fcc3321048cc25d15c38268e6fd0b
|
[
"MIT"
] | 2 |
2022-01-15T11:31:40.000Z
|
2022-03-09T11:27:28.000Z
|
feature_options.py
|
soarsmu/HERMES
|
9b38eedd1f7fcc3321048cc25d15c38268e6fd0b
|
[
"MIT"
] | null | null | null |
feature_options.py
|
soarsmu/HERMES
|
9b38eedd1f7fcc3321048cc25d15c38268e6fd0b
|
[
"MIT"
] | null | null | null |
import click
class ExperimentOption():
def __init__(self):
self.data_set_size = -1
self.ignore_number = True
self.use_github_issue = True
self.use_jira_ticket = True
self.use_comments = True
self.use_bag_of_word = True
self.positive_weights = [0.5]
self.max_n_gram = 1
self.min_document_frequency = 1
self.use_linked_commits_only = False
        # if use_issue_classifier is False, the issue's information is attached to the commit message
self.use_issue_classifier = True
self.fold_to_run = 10
self.use_stacking_ensemble = True
self.tf_idf_threshold = -1
self.use_patch_context_lines = False
self.unlabeled_size = -1
def read_option_from_command_line(size, unlabeled_size,
ignore_number, github_issue, jira_ticket, use_comments,
positive_weights, max_n_gram,
min_document_frequency, use_linked_commits_only,
use_issue_classifier,
fold_to_run,
use_stacking_ensemble,
tf_idf_threshold,
use_patch_context_line):
experiment_option = ExperimentOption()
experiment_option.data_set_size = size
experiment_option.unlabeled_size = unlabeled_size
experiment_option.ignore_number = ignore_number
experiment_option.use_github_issue = github_issue
experiment_option.use_jira_ticket = jira_ticket
experiment_option.use_comments = use_comments
experiment_option.positive_weights = list(positive_weights)
experiment_option.max_n_gram = max_n_gram
experiment_option.min_document_frequency = min_document_frequency
experiment_option.use_linked_commits_only = use_linked_commits_only
experiment_option.use_issue_classifier = use_issue_classifier
experiment_option.fold_to_run = fold_to_run
experiment_option.use_stacking_ensemble = use_stacking_ensemble
experiment_option.tf_idf_threshold = tf_idf_threshold
experiment_option.use_patch_context_lines = use_patch_context_line
click.echo("Running process with these options:")
if experiment_option.data_set_size == -1:
click.echo(" Data set size: Full data")
else:
click.echo(" Data set size: {}".format(experiment_option.data_set_size))
click.echo(" Ignore number as token: {}".format(experiment_option.ignore_number))
click.echo(" Use github issue: {}".format(experiment_option.use_github_issue))
click.echo(" Use jira ticket: {}".format(experiment_option.use_jira_ticket))
click.echo(" Use comments: {}".format(experiment_option.use_comments))
click.echo(" Use bag of words: {}".format(experiment_option.use_bag_of_word))
click.echo(" Positive weights: {}".format(experiment_option.positive_weights))
click.echo(" Max N-gram: {}".format(experiment_option.max_n_gram))
click.echo(" Min document frequency: {}".format(experiment_option.min_document_frequency))
click.echo(" Use linked commit only: {}".format(experiment_option.use_linked_commits_only))
click.echo(" Use issue classifier: {}".format(experiment_option.use_issue_classifier))
click.echo(" Fold to run: {}".format(experiment_option.fold_to_run))
click.echo(" Use stacking ensemble: {}".format(experiment_option.use_stacking_ensemble))
click.echo(" Tf-idf threshold: {}".format(experiment_option.tf_idf_threshold))
click.echo(" Use patch context lines: {}".format(experiment_option.use_patch_context_lines))
if experiment_option.unlabeled_size != -1:
click.echo(" Unlabeled size: {}".format(experiment_option.unlabeled_size))
return experiment_option
| 51.546667 | 99 | 0.693999 | 734 | 0.18986 | 0 | 0 | 0 | 0 | 0 | 0 | 585 | 0.151319 |
9bb22f65833ccdf573c2ff6580ffe37f01d473f8
| 247 |
py
|
Python
|
sciapp/action/advanced/macros.py
|
Pad0y/imagepy
|
23f41b64ade02f94b566b0d23a4b6459c1a1578d
|
[
"BSD-4-Clause"
] | null | null | null |
sciapp/action/advanced/macros.py
|
Pad0y/imagepy
|
23f41b64ade02f94b566b0d23a4b6459c1a1578d
|
[
"BSD-4-Clause"
] | null | null | null |
sciapp/action/advanced/macros.py
|
Pad0y/imagepy
|
23f41b64ade02f94b566b0d23a4b6459c1a1578d
|
[
"BSD-4-Clause"
] | null | null | null |
class Macros:
def __init__(self, title, cmds):
self.title = title
self.cmds = cmds
def __call__(self):
return self
def start(self, app, para=None, callafter=None):
app.run_macros(self.cmds, callafter)
| 22.454545 | 52 | 0.615385 | 246 | 0.995951 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
9bb4070df0345465e234b3e6738bbb40c587c512
| 2,038 |
py
|
Python
|
py_randomprime/__init__.py
|
UltiNaruto/py-randomprime
|
597d3636c2e40e11ed92d4808200ded879ccb244
|
[
"MIT"
] | null | null | null |
py_randomprime/__init__.py
|
UltiNaruto/py-randomprime
|
597d3636c2e40e11ed92d4808200ded879ccb244
|
[
"MIT"
] | 2 |
2021-05-24T18:05:11.000Z
|
2021-05-31T08:07:29.000Z
|
py_randomprime/__init__.py
|
henriquegemignani/py-randomprime
|
aac48b44761cbb8d857a4d72e06dfac17efc1fae
|
[
"MIT"
] | 2 |
2021-08-18T01:17:19.000Z
|
2021-11-26T15:08:34.000Z
|
import copy
import os
import json
from pathlib import Path
from typing import Callable, Optional
from . import rust, version
class BaseProgressNotifier:
def notify_total_bytes(self, total_size: int):
raise NotImplementedError()
def notify_writing_file(self, file_name: bytes, file_bytes: int):
raise NotImplementedError()
def notify_writing_header(self):
raise NotImplementedError()
def notify_flushing_to_disk(self):
raise NotImplementedError()
class ProgressNotifier(BaseProgressNotifier):
total_size: int = 0
bytes_so_far: int = 0
def __init__(self, callback: Callable[[float, str], None]):
self.callback = callback
def notify_total_bytes(self, total_size: int):
self.total_size += total_size
def notify_writing_file(self, file_name: bytes, file_bytes: int):
self.callback(self.bytes_so_far / self.total_size, "Writing file {}".format(file_name.decode("utf-8")))
self.bytes_so_far += file_bytes
def notify_writing_header(self):
self.callback(self.bytes_so_far / self.total_size, "Writing ISO header")
def notify_flushing_to_disk(self):
self.callback(1, "Flushing written data to the disk")
def patch_iso_raw(config_str: str, notifier: BaseProgressNotifier):
if notifier is None:
raise ValueError("notifier is None")
return rust.patch_iso(config_str, notifier)
def patch_iso(input_iso: Path, output_iso: Path, config: dict, notifier: BaseProgressNotifier):
new_config = copy.copy(config)
new_config["inputIso"] = os.fspath(input_iso)
new_config["outputIso"] = os.fspath(output_iso)
return patch_iso_raw(json.dumps(new_config), notifier)
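# Usage sketch (paths and config contents are hypothetical):
#   notifier = ProgressNotifier(lambda frac, msg: print(f"{frac:.0%} {msg}"))
#   patch_iso(Path("prime.iso"), Path("prime_patched.iso"), {}, notifier)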
def symbols_for_file(input_file: Path) -> Optional[dict]:
v = rust.get_iso_mp1_version(os.fspath(input_file))
if v is not None:
return rust.get_mp1_symbols(v)
def symbols_for_version(v: str) -> Optional[dict]:
return rust.get_mp1_symbols(v)
__version__ = version.version
VERSION = version.version
| 29.536232 | 111 | 0.723749 | 1,098 | 0.538763 | 0 | 0 | 0 | 0 | 0 | 0 | 118 | 0.0579 |
9bb514fb57dd5b2a6965770909c4eb7274835dca
| 3,453 |
py
|
Python
|
secistsploit/modules/auxiliary/whatweb.py
|
reneaicisneros/SecistSploit
|
b4e1bb0a213bee39c3bb79ab36e03e19122b80c0
|
[
"MIT"
] | 15 |
2018-12-06T16:03:32.000Z
|
2021-06-23T01:17:00.000Z
|
secistsploit/modules/auxiliary/whatweb.py
|
reneaicisneros/SecistSploit
|
b4e1bb0a213bee39c3bb79ab36e03e19122b80c0
|
[
"MIT"
] | null | null | null |
secistsploit/modules/auxiliary/whatweb.py
|
reneaicisneros/SecistSploit
|
b4e1bb0a213bee39c3bb79ab36e03e19122b80c0
|
[
"MIT"
] | 6 |
2019-03-01T04:10:00.000Z
|
2020-02-26T08:43:54.000Z
|
# -*- coding: UTF-8 -*-
import os
from secistsploit.core.exploit import *
from secistsploit.core.http.http_client import HTTPClient
class Exploit(HTTPClient):
__info__ = {
"name": "whatweb",
"description": "whatweb",
"authors": (
"jjiushi",
),
"references": (
"www.422926799.github.io"
"www.422926799.github.io"
),
}
    target = OptString("www.whatweb.net", "Target URL")
domain = OptString("", "Target domain or IP")
port = OptPort(443, "Target HTTP port")
files = OptString("", "Files to import")
iplist = OptString(
"", "Batch detection of IP segments, such as input like 1.1.1.")
def __init__(self):
self.endianness = "<"
def run(self):
rhost = (self.domain)
file = (self.files)
iplist = (self.iplist)
if rhost != '':
headers = {
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36',
'Connection': 'keep-alive',
'Content-Length': '383',
}
data = {
'target': '{}'.format(rhost),
}
response = self.http_request(
method="POST",
path="/whatweb.php",
headers=headers,
data=data,
)
if response:
print('[+] url:{}'.format(rhost))
print('[+] fingerprint:{}'.format(response.text))
if rhost == '' and file != '':
if os.path.exists(file):
print('[+] {} Open ok'.format(file))
else:
print('[-] {} Not Found'.format(file))
dk = open(file, 'r')
for rd in dk.readlines():
qc = "".join(rd.split('\n'))
headers = {
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36',
'Connection': 'keep-alive',
'Content-Length': '383',
}
data = {
'target': '{}'.format(qc),
}
response = self.http_request(
method="POST",
path="/whatweb.php",
headers=headers,
data=data,
)
if response:
print('[+] url:{}'.format(qc))
print('[+] fingerprint:{}'.format(response.text))
if rhost == '' and iplist != '':
for i in range(1, 255):
ip = iplist + str(i)
headers = {
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36',
'Connection': 'keep-alive',
'Content-Length': '383',
}
data = {
'target': '{}'.format(ip),
}
response = self.http_request(
method="POST",
path="/whatweb.php",
headers=headers,
data=data,
)
if response:
print('[+] url:{}'.format(ip))
print('[+] fingerprint:{}'.format(response.text))
| 34.188119 | 141 | 0.435274 | 3,318 | 0.960904 | 0 | 0 | 0 | 0 | 0 | 0 | 1,027 | 0.297423 |
9bb942cefeb3547baf593097bb2c4998d052f1b8
| 3,285 |
py
|
Python
|
pygnss/__init__.py
|
nmerlene/pygnss
|
9dc59e57cf5a4bdf0ca56c2b6a23d622ffda4c5a
|
[
"MIT"
] | null | null | null |
pygnss/__init__.py
|
nmerlene/pygnss
|
9dc59e57cf5a4bdf0ca56c2b6a23d622ffda4c5a
|
[
"MIT"
] | null | null | null |
pygnss/__init__.py
|
nmerlene/pygnss
|
9dc59e57cf5a4bdf0ca56c2b6a23d622ffda4c5a
|
[
"MIT"
] | null | null | null |
from pathlib import Path
import logging
import xarray
from time import time
from typing import Union
#
from .io import opener
from .rinex2 import rinexnav2, _scan2
from .rinex3 import rinexnav3, _scan3
# for NetCDF compression. too high slows down with little space savings.
COMPLVL = 1
def readrinex(rinexfn: Path, outfn: Path=None, use: Union[str, list, tuple]=None, verbose: bool=True) -> xarray.Dataset:
"""
Reads OBS, NAV in RINEX 2,3. Plain ASCII text or GZIP .gz.
"""
nav = None
obs = None
rinexfn = Path(rinexfn).expanduser()
# %% detect type of Rinex file
if rinexfn.suffix == '.gz':
fnl = rinexfn.stem.lower()
else:
fnl = rinexfn.name.lower()
if fnl.endswith('n') or fnl.endswith('n.rnx'):
nav = rinexnav(rinexfn, outfn)
elif fnl.endswith('o') or fnl.endswith('o.rnx'):
obs = rinexobs(rinexfn, outfn, use=use, verbose=verbose)
elif rinexfn.suffix.endswith('.nc'):
nav = rinexnav(rinexfn)
obs = rinexobs(rinexfn)
else:
raise ValueError(f"I dont know what type of file you're trying to read: {rinexfn}")
return obs, nav
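# e.g. obs, nav = readrinex('site0010.17o')  # hypothetical filename; an OBS file returns (obs, None)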
def getRinexVersion(fn: Path) -> float:
"""verify RINEX version"""
fn = Path(fn).expanduser()
with opener(fn) as f:
ver = float(f.readline()[:9]) # yes :9
return ver
# %% Navigation file
def rinexnav(fn: Path, ofn: Path=None, group: str='NAV') -> xarray.Dataset:
""" Read RINEX 2,3 NAV files in ASCII or GZIP"""
fn = Path(fn).expanduser()
if fn.suffix == '.nc':
try:
return xarray.open_dataset(fn, group=group)
except OSError:
logging.error(f'Group {group} not found in {fn}')
return
ver = getRinexVersion(fn)
if int(ver) == 2:
nav = rinexnav2(fn)
elif int(ver) == 3:
nav = rinexnav3(fn)
else:
        raise ValueError(f'unknown RINEX version {ver} {fn}')
if ofn:
ofn = Path(ofn).expanduser()
print('saving NAV data to', ofn)
wmode = 'a' if ofn.is_file() else 'w'
nav.to_netcdf(ofn, group=group, mode=wmode)
return nav
# %% Observation File
def rinexobs(fn: Path, ofn: Path=None, use: Union[str, list, tuple]=None,
group: str='OBS', verbose: bool=False) -> xarray.Dataset:
"""
Read RINEX 2,3 OBS files in ASCII or GZIP
"""
fn = Path(fn).expanduser()
if fn.suffix == '.nc':
try:
logging.debug(f'loading {fn} with xarray')
return xarray.open_dataset(fn, group=group)
except OSError:
logging.error(f'Group {group} not found in {fn}')
return
tic = time()
ver = getRinexVersion(fn)
if int(ver) == 2:
obs = _scan2(fn, use, verbose)
elif int(ver) == 3:
obs = _scan3(fn, use, verbose)
else:
        raise ValueError(f'unknown RINEX version {ver} {fn}')
print(f"finished in {time()-tic:.2f} seconds")
if ofn:
ofn = Path(ofn).expanduser()
print('saving OBS data to', ofn)
wmode = 'a' if ofn.is_file() else 'w'
enc = {k: {'zlib': True, 'complevel': COMPLVL, 'fletcher32': True}
for k in obs.data_vars}
obs.to_netcdf(ofn, group=group, mode=wmode, encoding=enc)
return obs
| 28.318966 | 120 | 0.595129 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 759 | 0.23105 |
9bb96ea949af7533581d8e4cca76f381e779a9b0
| 5,201 |
py
|
Python
|
classroom/pref_graph.py
|
norabelrose/whisper
|
79642bab696f3e166b6af61a447602e8e5d58270
|
[
"MIT"
] | null | null | null |
classroom/pref_graph.py
|
norabelrose/whisper
|
79642bab696f3e166b6af61a447602e8e5d58270
|
[
"MIT"
] | null | null | null |
classroom/pref_graph.py
|
norabelrose/whisper
|
79642bab696f3e166b6af61a447602e8e5d58270
|
[
"MIT"
] | null | null | null |
from typing import TYPE_CHECKING
import networkx as nx
from .fas import eades_fas
if TYPE_CHECKING: # Prevent circular import
from .pref_dag import PrefDAG
class PrefGraph(nx.DiGraph):
"""
`PrefGraph` represents a possibly cyclic set of preferences over clips as a weighted directed graph.
Edge weights represent the strength of the preference of A over B, and indifferences are represented
as edges with zero weights. Clips are represented as string IDs. If you want to prevent cycles from
being added to the graph in an online fashion, you should probably use `PrefDAG` instead.
"""
@property
def indifferences(self) -> nx.Graph:
"""Return a read-only, undirected view of the subgraph containing only indifferences."""
edge_view = self.edges
return nx.graphviews.subgraph_view(
self,
filter_edge=lambda a, b: edge_view[a, b].get('weight', 1.0) == 0.0
).to_undirected(as_view=True)
@property
def nonisolated(self) -> 'PrefGraph':
deg_view = self.degree
return nx.graphviews.subgraph_view(
self,
filter_node=lambda n: deg_view(n) > 0
)
@property
def strict_prefs(self) -> nx.DiGraph:
"""Return a read-only view of the subgraph containing only strict preferences."""
edge_view = self.edges
return nx.graphviews.subgraph_view(
self,
filter_edge=lambda a, b: edge_view[a, b].get('weight', 1.0) > 0
)
def __repr__(self) -> str:
num_indiff = self.indifferences.number_of_edges()
num_prefs = self.strict_prefs.number_of_edges()
return f'{type(self).__name__}({num_prefs} strict prefs, {num_indiff} indifferences)'
    def add_indiff(self, a: str, b: str, **attr):
        """Try to add the indifference relation `a ~ b`, and throw an error if the expected
coherence properties of the graph would be violated."""
if attr.setdefault('weight', 0.0) != 0.0:
raise CoherenceViolation("Indifferences cannot have nonzero weight")
self.add_edge(a, b, **attr)
def add_edge(self, a: str, b: str, **attr):
"""Add an edge to the graph, and check for coherence violations. Usually you
should use the `add_pref` or `add_indiff` wrapper methods instead of this method."""
if attr.get('weight', 1) < 0:
raise CoherenceViolation("Preferences must have non-negative weight")
super().add_edge(a, b, **attr)
add_pref = add_edge
def draw(self):
"""Displays a visualization of the graph using `matplotlib`. Strict preferences
are shown as solid arrows, and indifferences are dashed lines."""
strict_subgraph = self.strict_prefs
pos = nx.drawing.spring_layout(strict_subgraph)
nx.draw_networkx_nodes(strict_subgraph, pos)
nx.draw_networkx_edges(strict_subgraph, pos)
nx.draw_networkx_edges(self.indifferences, pos, arrowstyle='-', style='dashed')
nx.draw_networkx_labels(strict_subgraph, pos)
def acyclic_subgraph(self) -> 'PrefDAG':
"""Return an acyclic subgraph of this graph as a `PrefDAG`. The algorithm will try
to remove as few preferences as possible, but it is not guaranteed to be optimal.
If the graph is already acyclic, the returned `PrefDAG` will be isomorphic to this graph."""
from .pref_dag import PrefDAG
fas = set(eades_fas(self.strict_prefs))
return PrefDAG((
(u, v, d) for u, v, d in self.edges(data=True) # type: ignore
if (u, v) not in fas
))
def is_quasi_transitive(self) -> bool:
"""Return whether the strict preferences are acyclic."""
return nx.is_directed_acyclic_graph(self.strict_prefs)
def pref_prob(self, a: str, b: str, eps: float = 5e-324) -> float:
"""Return the probability that `a` is preferred to `b`."""
a_weight = self.pref_weight(a, b)
denom = a_weight + self.pref_weight(b, a)
# If there's no strict preference between a and b, then the
# probability that A is preferred to B is 1/2.
return (a_weight + eps) / (denom + 2 * eps)
def pref_weight(self, a: str, b: str, default: float = 0.0) -> float:
"""
Return the weight of the preference `a > b`, or 0.0 if there is no such
preference. Preferences with no explicit weight are assumed to have weight 1.
"""
attrs = self.edges.get((a, b), None)
return attrs.get('weight', 1.0) if attrs is not None else default
def unlink(self, a: str, b: str):
"""Remove the preference relation between `a` and `b`."""
try:
self.remove_edge(a, b)
except nx.NetworkXError:
# Try removing the edge in the other direction.
try:
self.remove_edge(b, a)
except nx.NetworkXError:
raise KeyError(f"No preference relation between {a} and {b}")
class CoherenceViolation(Exception):
"""Raised when an operation would violate the coherence of the graph."""
pass
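# Example (sketch):
#   g = PrefGraph()
#   g.add_pref('a', 'b', weight=2.0)   # strict preference a > b
#   g.add_indiff('b', 'c')             # indifference b ~ c
#   g.pref_prob('a', 'b')              # ~1.0, since no weight supports b > a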
| 42.284553 | 104 | 0.635455 | 5,033 | 0.967699 | 0 | 0 | 881 | 0.169391 | 0 | 0 | 2,223 | 0.427418 |
9bbc0decb0390376acbaa65e5a7c58faddf9f153
| 516 |
py
|
Python
|
scaffolder/templates/django/views.py
|
javidgon/wizard
|
a75a4c10f84c756c2466c9afaaadf3b2c0cf3a43
|
[
"MIT"
] | null | null | null |
scaffolder/templates/django/views.py
|
javidgon/wizard
|
a75a4c10f84c756c2466c9afaaadf3b2c0cf3a43
|
[
"MIT"
] | null | null | null |
scaffolder/templates/django/views.py
|
javidgon/wizard
|
a75a4c10f84c756c2466c9afaaadf3b2c0cf3a43
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
from django.views import generic
from .models import {% for model in app.models %}{{ model.name }}{% if not loop.last %}, {% endif %}{% endfor %}
{% for model in app.models %}class {{ model.name }}IndexView(generic.ListView):
model = {{ model.name }}
template_name = '{{ model.name | lower }}s/index.html'
class {{ model.name }}DetailView(generic.DetailView):
model = {{ model.name }}
template_name = '{{ model.name | lower }}s/detail.html'
{% endfor %}
| 30.352941 | 112 | 0.656977 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 77 | 0.149225 |
9bbcdfbd01a5563f9c4786b31c8c24dcfa3b565b
| 683 |
py
|
Python
|
hisitter/reviews/permissions.py
|
babysitter-finder/backend
|
5c37c6876ca13b5794ac44e0342b810426acbc76
|
[
"MIT"
] | 1 |
2021-02-25T01:02:40.000Z
|
2021-02-25T01:02:40.000Z
|
hisitter/reviews/permissions.py
|
babysitter-finder/backend
|
5c37c6876ca13b5794ac44e0342b810426acbc76
|
[
"MIT"
] | null | null | null |
hisitter/reviews/permissions.py
|
babysitter-finder/backend
|
5c37c6876ca13b5794ac44e0342b810426acbc76
|
[
"MIT"
] | 1 |
2020-11-23T20:57:47.000Z
|
2020-11-23T20:57:47.000Z
|
""" Reviews permissions."""
# Python
import logging
# Django Rest Framework
from rest_framework.permissions import BasePermission
class IsServiceOwner(BasePermission):
    """ This permission determines whether the requesting user
    is the client who owns the service; if not, permission is denied.
"""
def has_permission(self, request, view):
""" Manage the permission if the user is a client. """
try:
user = request.user.user_client
if user == view.service.user_client:
return True
else:
return False
except Exception as error:
            logging.info(f'Permission check failed with an exception: {error}')
            return False  # deny access explicitly when the check cannot be performed
| 28.458333 | 75 | 0.628111 | 547 | 0.800878 | 0 | 0 | 0 | 0 | 0 | 0 | 267 | 0.390922 |
9bbd9c4b8b498fde19563e3848c89d37d52b9838
| 1,678 |
py
|
Python
|
pk.py
|
CnybTseng/SOSNet
|
9f1e96380388dde75fe0737ec0b3516669054205
|
[
"MIT"
] | null | null | null |
pk.py
|
CnybTseng/SOSNet
|
9f1e96380388dde75fe0737ec0b3516669054205
|
[
"MIT"
] | null | null | null |
pk.py
|
CnybTseng/SOSNet
|
9f1e96380388dde75fe0737ec0b3516669054205
|
[
"MIT"
] | null | null | null |
import sys
import torch
import timeit
sys.path.append('../JDE')
from mot.models.backbones import ShuffleNetV2
from sosnet import SOSNet
if __name__ == '__main__':
print('SOSNet PK ShuffleNetV2')
model1 = ShuffleNetV2(
stage_repeat={'stage2': 4, 'stage3': 8, 'stage4': 4},
stage_out_channels={'conv1': 24, 'stage2': 48, 'stage3': 96,
'stage4': 192, 'conv5': 1024}).cuda().eval()
arch={
'conv1': {'out_channels': 64},
'stage2': {'out_channels': 256, 'repeate': 2, 'out': True},
'stage3': {'out_channels': 384, 'repeate': 2, 'out': True},
'stage4': {'out_channels': 512, 'repeate': 2, 'out': True},
'conv5': {'out_channels': 1024}}
model2 = SOSNet(arch).cuda().eval()
x = torch.rand(1, 3, 224, 224).cuda()
loops = 1000
with torch.no_grad():
start = timeit.default_timer()
for _ in range(loops):
y = model1(x)
torch.cuda.synchronize()
end = timeit.default_timer()
latency = (end - start) / loops
print('ShuffleNetV2 latency: {} seconds.'.format(latency))
for yi in y:
print(yi.shape)
with torch.no_grad():
start = timeit.default_timer()
for _ in range(loops):
y = model2(x)
torch.cuda.synchronize()
end = timeit.default_timer()
latency = (end - start) / loops
print('SOSNet latency: {} seconds.'.format(latency))
for yi in y:
print(yi.shape)
with torch.autograd.profiler.profile(use_cuda=True, record_shapes=True) as prof:
model2(x)
print(prof.key_averages().table())
| 37.288889 | 85 | 0.567342 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 318 | 0.189511 |
9bbda2f39a11084b661e8fe58491f418c2a36b6f
| 2,255 |
py
|
Python
|
test/generate_netmhcpan_functions.py
|
til-unc/mhcgnomes
|
0bfbe193daeb7cd38d958222f6071dd657e9fb6e
|
[
"Apache-2.0"
] | 6 |
2020-10-27T15:31:32.000Z
|
2020-11-29T03:26:06.000Z
|
test/generate_netmhcpan_functions.py
|
til-unc/mhcgnomes
|
0bfbe193daeb7cd38d958222f6071dd657e9fb6e
|
[
"Apache-2.0"
] | 4 |
2020-10-27T14:57:16.000Z
|
2020-11-04T21:56:39.000Z
|
test/generate_netmhcpan_functions.py
|
pirl-unc/mhcgnomes
|
0bfbe193daeb7cd38d958222f6071dd657e9fb6e
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
NETMHCPAN_3_0_DEST = "test_netmhcpan_3_0_alleles.py"
NETMHCPAN_3_0_SOURCE = "netmhcpan_3_0_alleles.txt"
NETMHCPAN_4_0_DEST = "test_netmhcpan_4_0_alleles.py"
NETMHCPAN_4_0_SOURCE = "netmhcpan_4_0_alleles.txt"
special_chars = " *:-,/."
def generate(src, dst, exclude=set()):
alleles = set()
with open(dst, "w") as f:
f.write("from mhcgnomes import parse, Allele, Gene, AlleleWithoutGene\n")
with open(src) as alleles_file:
for line in alleles_file:
line = line.strip()
if line.startswith("#"):
continue
elif not line:
continue
parts = line.split()
allele_name = parts[0]
if allele_name in exclude:
continue
if allele_name in alleles:
print("Skipping repeat allele: '%s'" % allele_name)
continue
alleles.add(allele_name)
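                # Derive a valid Python test-function name from the allele name:
                # drop quotes and replace separator characters with underscores.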
fn_name = allele_name.replace("\"", "").strip()
for c in special_chars:
fn_name = fn_name.replace(c, "_")
fn_name = fn_name.replace("__", "_")
f.write(f"\ndef test_{fn_name}():")
f.write(f"\n result = parse('{allele_name}')")
if ":" in allele_name:
f.write(
f"""\n assert result.__class__ is Allele, \\
'Expected parse(\"{allele_name}\") to be Allele but got %s' % (result,)""")
else:
f.write(
f"""\n assert result.__class__ in (Gene, Allele, AlleleWithoutGene), \\
'Unexpected type for parse(\"{allele_name}\"): %s' % (result,)""")
f.write("\n")
print(f"Wrote {len(alleles)} from {src} tests to {dst}")
return alleles
netmhcpan_3_0_alleles = generate(
src=NETMHCPAN_3_0_SOURCE,
dst=NETMHCPAN_3_0_DEST)
generate(
src=NETMHCPAN_4_0_SOURCE,
dst=NETMHCPAN_4_0_DEST,
exclude=netmhcpan_3_0_alleles)
| 35.234375 | 107 | 0.501552 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 689 | 0.305543 |
9bbdac1e6d8dd9f71aa6f189c3f63b6af713637c
| 343 |
py
|
Python
|
Dataset/Leetcode/test/11/489.py
|
kkcookies99/UAST
|
fff81885aa07901786141a71e5600a08d7cb4868
|
[
"MIT"
] | null | null | null |
Dataset/Leetcode/test/11/489.py
|
kkcookies99/UAST
|
fff81885aa07901786141a71e5600a08d7cb4868
|
[
"MIT"
] | null | null | null |
Dataset/Leetcode/test/11/489.py
|
kkcookies99/UAST
|
fff81885aa07901786141a71e5600a08d7cb4868
|
[
"MIT"
] | null | null | null |
class Solution:
def XXX(self, height: List[int]) -> int:
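        # Two-pointer "container with most water" scan: track the best area seen so
        # far and advance the pointer at the lower wall; O(n) time, O(1) space.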
ans, i, j = 0, 0, len(height) - 1
while i < j:
val = min(height[i], height[j])
ans = max(val * (j - i), ans)
if height[i] == val:
i += 1
if height[j] == val:
j -= 1
return ans
| 26.384615 | 44 | 0.390671 | 340 | 0.991254 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
9bbde6aa054a0343fb01e156fb53162fe6c254c5
| 96 |
py
|
Python
|
python/tests/test_linked_list.py
|
Leenhazaimeh/data-structures-and-algorithms
|
d55d55bf8c98e768cb929326b5ec8c18fb5c8384
|
[
"MIT"
] | null | null | null |
python/tests/test_linked_list.py
|
Leenhazaimeh/data-structures-and-algorithms
|
d55d55bf8c98e768cb929326b5ec8c18fb5c8384
|
[
"MIT"
] | 10 |
2021-07-29T18:56:48.000Z
|
2021-09-11T19:11:00.000Z
|
python/tests/test_linked_list.py
|
Leenhazaimeh/data-structures-and-algorithms
|
d55d55bf8c98e768cb929326b5ec8c18fb5c8384
|
[
"MIT"
] | 3 |
2021-08-16T06:16:37.000Z
|
2021-12-05T14:29:51.000Z
|
# from linked_list.linked_list import LinkedList
# def test_import():
# assert LinkedList
| 16 | 48 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 91 | 0.947917 |
9bbf5d23053e93f4be3618d38f8307dfe71dd5b9
| 2,156 |
py
|
Python
|
美团爬取商家信息/paquxinxi.py
|
13060923171/Crawl-Project2
|
effab1bf31979635756fc272a7bcc666bb499be2
|
[
"MIT"
] | 14 |
2020-10-27T05:52:20.000Z
|
2021-11-07T20:24:55.000Z
|
美团爬取商家信息/paquxinxi.py
|
13060923171/Crawl-Project2
|
effab1bf31979635756fc272a7bcc666bb499be2
|
[
"MIT"
] | 1 |
2021-09-17T07:40:00.000Z
|
2021-09-17T07:40:00.000Z
|
美团爬取商家信息/paquxinxi.py
|
13060923171/Crawl-Project2
|
effab1bf31979635756fc272a7bcc666bb499be2
|
[
"MIT"
] | 8 |
2020-11-18T14:23:12.000Z
|
2021-11-12T08:55:08.000Z
|
import requests
import re
import json
headers = {
"Origin": "https://bj.meituan.com",
"Host": "apimobile.meituan.com",
"Referer": "https://bj.meituan.com/s/%E7%81%AB%E9%94%85/",
"Cookie": "uuid=692a53319ce54d0c91f3.1597223761.1.0.0; ci=1; rvct=1; _lxsdk_cuid=173e1f47707c8-0dcd4ff30b4ae3-3323765-e1000-173e1f47707c8; _lxsdk_s=173e1f47708-21d-287-4d9%7C%7C35",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36"
}
def get_parse(url):
html = requests.get(url,headers = headers)
    if html.status_code == 200:
get_html(html)
else:
print(html.status_code)
def get_html(html):
content = html.text
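    # The search API responds with JSON; the fields below are pulled out of the
    # raw response text with regular expressions rather than json.loads().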
    # Shop name
titles= re.compile('","title":"(.*?)",',re.S|re.I)
title = titles.findall(content)
    # Address
addresses = re.compile(',"address":"(.*?)",', re.S | re.I)
address = addresses.findall(content)
    # Rating
avgscores = re.compile(',"avgscore":(.*?),', re.S | re.I)
avgscore = avgscores.findall(content)
    # Number of reviews
commentses = re.compile(',"comments":(.*?),', re.S | re.I)
comments = commentses.findall(content)
    # Contact phone number
phones = re.compile('"phone":"(.*?)",', re.S | re.I)
phone = phones.findall(content)
for i in range(len(title)):
try:
t = title[i]
a = address[i]
avg = avgscore[i]
c = comments[i]
p = phone[i]
print(t,a,avg,c,p)
dowload(t,a,avg,c,p)
except:
pass
def dowload(t,a,avg,c,p):
data = {
'店铺名称': t,
'店铺地址': a,
'店铺评分': avg,
'评价人数': c,
'电话': p
}
with open("美团信息.txt","a+",encoding="utf-8")as f:
f.write(json.dumps(data,ensure_ascii=False)+"\n")
print("写入成功")
if __name__ == '__main__':
    # In this URL the offset parameter grows by 32 for each page, limit is the number of records per request, q is the search keyword, and the 1 in poi/pcsearch/1 is the city id for Beijing.
for i in range(0,33,32):
url = "https://apimobile.meituan.com/group/v4/poi/pcsearch/1?uuid=692a53319ce54d0c91f3.1597223761.1.0.0&userid=-1&limit=32&offset={}&cateId=-1&q=%E7%81%AB%E9%94%85".format(i)
get_parse(url)
| 33.169231 | 185 | 0.590909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,017 | 0.437231 |
9bc11053555c82b404c0a0cf86d08e3626d9e05f
| 4,071 |
py
|
Python
|
entity_resolution/EntityClass.py
|
GeoJamesJones/ArcGIS-Senzing-Prototype
|
ebe7f1c3f516525f4bfbf5b4f1446e8c6612a67b
|
[
"MIT"
] | null | null | null |
entity_resolution/EntityClass.py
|
GeoJamesJones/ArcGIS-Senzing-Prototype
|
ebe7f1c3f516525f4bfbf5b4f1446e8c6612a67b
|
[
"MIT"
] | null | null | null |
entity_resolution/EntityClass.py
|
GeoJamesJones/ArcGIS-Senzing-Prototype
|
ebe7f1c3f516525f4bfbf5b4f1446e8c6612a67b
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import json
from typing import List, Dict
from entity_resolution import EntityResolution
class Entity:
def __init__(self, entity_fields: List[str],
data: List[str],
source_fields: List[str],
attr_dicts: List[Dict[str, List[str] | str]],
record_type: str):
self.entity_fields = entity_fields
self.data = data
self.source_fields = source_fields
self.attr_dicts = attr_dicts
self.record_type = record_type
skip_values = [
EntityResolution.NAME_FULL.value,
EntityResolution.NAME_ORG.value,
EntityResolution.NAME_LAST.value,
EntityResolution.NAME_FIRST.value,
EntityResolution.NAME_MIDDLE.value,
EntityResolution.NAME_PREFIX.value,
EntityResolution.NAME_SUFFIX.value,
EntityResolution.ADDR_FULL.value,
EntityResolution.ADDR_LINE1.value,
EntityResolution.ADDR_LINE2.value,
EntityResolution.ADDR_LINE3.value,
EntityResolution.ADDR_LINE4.value,
EntityResolution.ADDR_LINE5.value,
EntityResolution.ADDR_LINE6.value,
EntityResolution.ADDR_CITY.value,
EntityResolution.ADDR_STATE.value,
EntityResolution.ADDR_ZIP.value,
EntityResolution.ADDR_COUNTRY.value,
EntityResolution.ADDR_FD.value,
EntityResolution.ADDR_TD.value,
EntityResolution.PHONE_NUM.value,
EntityResolution.PHONE_FD.value,
EntityResolution.PHONE_TD.value,
]
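        # skip_values lists the attribute keys that are re-assembled below as grouped
        # NAME/ADDR/PHONE sub-records, so they are left out of the flat data_dict;
        # field_map maps each source field name to its position, which indexes the
        # parallel entity_fields/data lists.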
field_map = {}
for i,j in enumerate(self.source_fields):
field_map[j] = i
self.data_dict = {key:str(value) for (key,value) in zip(self.entity_fields, self.data) if key not in skip_values}
self.data_dict["RECORD_TYPE"] = self.record_type
for d in attr_dicts:
if EntityResolution.NAME_TYPE.value in d.keys():
name = {EntityResolution.NAME_TYPE.value: d[EntityResolution.NAME_TYPE.value]}
data_index = []
for i in d['source_fields']:
data_index.append(field_map[i])
for x in data_index:
name[entity_fields[x]] = str(data[x])
if 'NAME_LIST' not in self.data_dict.keys():
self.data_dict['NAME_LIST'] = []
self.data_dict['NAME_LIST'].append(name)
else:
self.data_dict['NAME_LIST'].append(name)
if EntityResolution.ADDR_TYPE.value in d.keys():
addr = {EntityResolution.ADDR_TYPE.value: d[EntityResolution.ADDR_TYPE.value]}
data_index = []
for i in d['source_fields']:
data_index.append(field_map[i])
for x in data_index:
addr[entity_fields[x]] = str(data[x])
if 'ADDR_LIST' not in self.data_dict.keys():
self.data_dict['ADDR_LIST'] = []
self.data_dict['ADDR_LIST'].append(addr)
else:
self.data_dict['ADDR_LIST'].append(addr)
if EntityResolution.PHONE_TYPE.value in d.keys():
phone = {EntityResolution.PHONE_TYPE.value: d[EntityResolution.PHONE_TYPE.value]}
data_index = []
for i in d['source_fields']:
data_index.append(field_map[i])
for x in data_index:
phone[entity_fields[x]] = str(data[x])
if 'PHONE_LIST' not in self.data_dict.keys():
self.data_dict['PHONE_LIST'] = []
self.data_dict['PHONE_LIST'].append(phone)
else:
self.data_dict['PHONE_LIST'].append(phone)
def to_json(self) -> str:
return json.dumps(self.data_dict)
| 36.675676 | 121 | 0.561778 | 3,942 | 0.968312 | 0 | 0 | 0 | 0 | 0 | 0 | 194 | 0.047654 |
32ca34b8eacf24dc530fada37a04db8272ab0be6
| 523 |
py
|
Python
|
langcreator/system.py
|
xzripper/LanguageCreator
|
65421063161166d3e4f97e4b874909259b665fce
|
[
"MIT"
] | 2 |
2021-12-12T16:48:20.000Z
|
2021-12-31T17:48:21.000Z
|
langcreator/system.py
|
xzripper/LanguageCreator
|
65421063161166d3e4f97e4b874909259b665fce
|
[
"MIT"
] | null | null | null |
langcreator/system.py
|
xzripper/LanguageCreator
|
65421063161166d3e4f97e4b874909259b665fce
|
[
"MIT"
] | null | null | null |
import subprocess
import sys
import os
subprocess = subprocess
sys = sys
os = os
def output(command: str, remlstc: bool) -> str:
"""
Get output from console command.
    If remlstc is True, the output is returned without the trailing newline.
:param command: The command.
:param remlstc: Remove last character from output.
"""
return subprocess.check_output(command, shell=True, encoding='cp866')[:-1] if remlstc else subprocess.check_output(command, shell=True, encoding='cp866')
| 27.526316 | 158 | 0.692161 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 230 | 0.439771 |
32cada166139a42c2081b8a48a2bcd39a15cb5ab
| 2,612 |
py
|
Python
|
create_categories.py
|
Botomatik/JackBot
|
58651d8b5a5bcead2a2eb79849019cb4f972b7cd
|
[
"MIT"
] | null | null | null |
create_categories.py
|
Botomatik/JackBot
|
58651d8b5a5bcead2a2eb79849019cb4f972b7cd
|
[
"MIT"
] | null | null | null |
create_categories.py
|
Botomatik/JackBot
|
58651d8b5a5bcead2a2eb79849019cb4f972b7cd
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Program to batch create categories.
The program expects a generator containing a list of page titles to be used as
base.
The following command line parameters are supported:
-always (not implemented yet) Don't ask, just do the edit.
-overwrite (not implemented yet).
-parent The name of the parent category.
-basename The base to be used for the new category names.
Example:
create_categories.py
-lang:commons
-family:commons
-links:User:Multichill/Wallonia
-parent:"Cultural heritage monuments in Wallonia"
-basename:"Cultural heritage monuments in"
"""
__version__ = '$Id$'
#
# (C) Multichill, 2011
# (C) xqt, 2011
#
# Distributed under the terms of the MIT license.
#
#
import os, sys, re, codecs
import urllib, httplib, urllib2
import catlib
import time
import socket
import StringIO
import wikipedia as pywikibot
import config
import pagegenerators
def createCategory(page, parent, basename):
title = page.title(withNamespace=False)
newpage = pywikibot.Page(pywikibot.getSite(u'commons', u'commons'),
u'Category:' + basename + u' ' + title)
newtext = u''
newtext += u'[[Category:' + parent + u'|' + title + u']]\n'
newtext += u'[[Category:' + title + u']]\n'
if not newpage.exists():
#FIXME: Add user prompting and -always option
pywikibot.output(newpage.title())
pywikibot.showDiff(u'', newtext)
comment = u'Creating new category'
#FIXME: Add exception handling
newpage.put(newtext, comment)
else:
#FIXME: Add overwrite option
pywikibot.output(u'%s already exists, skipping' % (newpage.title(),))
def main(args):
'''
Main loop. Get a generator and options.
'''
generator = None
parent = u''
basename = u''
always = False
genFactory = pagegenerators.GeneratorFactory()
for arg in pywikibot.handleArgs():
if arg == '-always':
always = True
elif arg.startswith('-parent:'):
parent = arg [len('-parent:'):].strip()
elif arg.startswith('-basename'):
basename = arg [len('-basename:'):].strip()
else:
genFactory.handleArg(arg)
generator = genFactory.getCombinedGenerator()
if generator:
for page in generator:
createCategory(page, parent, basename)
else:
pywikibot.output(u'No pages to work on')
pywikibot.output(u'All done')
if __name__ == "__main__":
try:
main(sys.argv[1:])
finally:
pywikibot.stopme()
| 25.359223 | 78 | 0.630551 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,135 | 0.434533 |
32cae26d8eb99a201dc12930e81a1edb58d4cace
| 10,287 |
py
|
Python
|
avod/core/losses.py
|
Zengyi-Qin/TLNet
|
11fa48160158b550ad2dc810ed564eebe17e8f5e
|
[
"Apache-2.0"
] | 114 |
2019-03-13T01:42:22.000Z
|
2022-03-31T07:56:04.000Z
|
avod/core/losses.py
|
Zengyi-Qin/TLNet
|
11fa48160158b550ad2dc810ed564eebe17e8f5e
|
[
"Apache-2.0"
] | 12 |
2019-03-26T08:18:13.000Z
|
2021-05-19T14:36:27.000Z
|
avod/core/losses.py
|
Zengyi-Qin/TLNet
|
11fa48160158b550ad2dc810ed564eebe17e8f5e
|
[
"Apache-2.0"
] | 22 |
2019-03-22T10:44:49.000Z
|
2021-04-01T00:11:07.000Z
|
"""Classification and regression loss functions for object detection.
Localization losses:
* WeightedL2LocalizationLoss
* WeightedSmoothL1LocalizationLoss
Classification losses:
* WeightedSoftmaxClassificationLoss
* WeightedSigmoidClassificationLoss
"""
from abc import ABCMeta
from abc import abstractmethod
import tensorflow as tf
from avod.core import ops
class Loss(object):
"""Abstract base class for loss functions."""
__metaclass__ = ABCMeta
def __call__(self,
prediction_tensor,
target_tensor,
ignore_nan_targets=False,
scope=None,
**params):
"""Call the loss function.
Args:
prediction_tensor: a tensor representing predicted quantities.
target_tensor: a tensor representing regression or classification
targets.
ignore_nan_targets: whether to ignore nan targets in the loss
computation. E.g. can be used if the target
tensor is missing groundtruth data that
shouldn't be factored into the loss.
scope: Op scope name. Defaults to 'Loss' if None.
**params: Additional keyword arguments for specific implementations
of the Loss.
Returns:
loss: a tensor representing the value of the loss function.
"""
with tf.name_scope(scope, 'Loss',
[prediction_tensor, target_tensor, params]) as scope:
if ignore_nan_targets:
target_tensor = tf.where(tf.is_nan(target_tensor),
prediction_tensor,
target_tensor)
return self._compute_loss(
prediction_tensor, target_tensor, **params)
@abstractmethod
def _compute_loss(self, prediction_tensor, target_tensor, **params):
"""Method to be overriden by implementations.
Args:
prediction_tensor: a tensor representing predicted quantities
target_tensor: a tensor representing regression or classification
targets
**params: Additional keyword arguments for specific implementations
of the Loss.
Returns:
loss: a tensor representing the value of the loss function
"""
pass
class WeightedL2LocalizationLoss(Loss):
"""L2 localization loss function with anchorwise output support.
Loss[b,a] = .5 * ||weights[b,a] * (prediction[b,a,:] - target[b,a,:])||^2
"""
def _compute_loss(self, prediction_tensor, target_tensor, weights):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
code_size] representing the (encoded) predicted
locations of objects.
target_tensor: A float tensor of shape [batch_size, num_anchors,
code_size] representing the regression targets
weights: a float tensor of shape [batch_size, num_anchors]
Returns:
loss: a (scalar) tensor representing the value of the loss function
or a float tensor of shape [batch_size, num_anchors]
"""
weighted_diff = (prediction_tensor - target_tensor) * tf.expand_dims(
weights, 2)
square_diff = 0.5 * tf.square(weighted_diff)
return tf.reduce_sum(square_diff)
class WeightedSigmoidClassificationLoss(Loss):
"""Sigmoid cross entropy classification loss function."""
def _compute_loss(self,
prediction_tensor,
target_tensor,
weights,
class_indices=None):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing one-hot encoded classification targets
weights: a float tensor of shape [batch_size, num_anchors]
class_indices: (Optional) A 1-D integer tensor of class indices.
If provided, computes loss only for the specified class indices.
Returns:
loss: a (scalar) tensor representing the value of the loss function
or a float tensor of shape [batch_size, num_anchors]
"""
weights = tf.expand_dims(weights, 2)
if class_indices is not None:
weights *= tf.reshape(
ops.indices_to_dense_vector(class_indices,
tf.shape(prediction_tensor)[2]),
[1, 1, -1])
per_entry_cross_ent = (tf.nn.sigmoid_cross_entropy_with_logits(
labels=target_tensor, logits=prediction_tensor))
return tf.reduce_sum(per_entry_cross_ent * weights)
class WeightedSmoothL1Loss(Loss):
"""Smooth L1 localization loss function.
The smooth L1_loss is defined elementwise as .5 x^2 if |x|<1 and |x|-.5
otherwise, where x is the difference between predictions and target.
See also Equation (3) in the Fast R-CNN paper by Ross Girshick (ICCV 2015)
"""
def _compute_loss(self, prediction_tensor, target_tensor, weight):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [num_anchors,
code_size] representing the (encoded) predicted
locations of objects.
target_tensor: A float tensor of shape [num_anchors,
code_size] representing the regression targets
Returns:
loss: an anchorwise tensor of shape [num_anchors] representing
the value of the loss function
"""
diff = prediction_tensor - target_tensor
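        # Elementwise smooth L1: 0.5 * x**2 where |x| < 1, |x| - 0.5 otherwise,
        # with x = prediction - target; summed per anchor and scaled by `weight`.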
abs_diff = tf.abs(diff)
abs_diff_lt_1 = tf.less(abs_diff, 1)
anchorwise_smooth_l1norm = tf.reduce_sum(
tf.where(abs_diff_lt_1, 0.5 * tf.square(abs_diff), abs_diff - 0.5),
axis=1) * weight
return anchorwise_smooth_l1norm
class WeightedSoftmaxLoss(Loss):
"""Softmax cross-entropy loss function."""
def _compute_loss(self, prediction_tensor, target_tensor, weight):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing one-hot encoded classification targets
Returns:
loss: a (scalar) tensor representing the value of the loss function
"""
num_classes = prediction_tensor.get_shape().as_list()[-1]
per_row_cross_ent = (tf.nn.softmax_cross_entropy_with_logits(
labels=tf.reshape(target_tensor, [-1, num_classes]),
logits=tf.reshape(prediction_tensor, [-1, num_classes])))
per_row_cross_ent = tf.reshape(per_row_cross_ent, [-1, 1])
positive_cls = tf.cast(tf.argmax(tf.reshape(target_tensor, [-1, num_classes]), axis=1), tf.float32)
weight_cls = tf.reshape((positive_cls + 0.1) * 10, [-1, 1])
pred_argmax = tf.argmax(tf.reshape(prediction_tensor, [-1, num_classes]), axis=1)
true_argmax = tf.argmax(tf.reshape(target_tensor, [-1, num_classes]), axis=1)
accuracy_all = tf.reduce_mean(tf.cast(tf.equal(pred_argmax, true_argmax), tf.float32))
pred_positive = tf.cast(tf.greater(pred_argmax, 0), tf.float32)
true_positive = tf.cast(tf.greater(true_argmax, 0), tf.float32)
accuracy_positive = tf.reduce_sum(true_positive * tf.cast(tf.equal(pred_positive, \
true_positive), tf.float32)) / (tf.reduce_sum(true_positive) + 1e-2)
#accuracy_positive = tf.constant(num_classes)
#accuracy_positive = tf.reduce_sum(true_positive) / (tf.reduce_sum(tf.ones_like(true_positive, dtype=tf.float32)) + 1e-2)
return tf.reduce_sum(per_row_cross_ent) * weight, accuracy_all, accuracy_positive
class WeightedSoftmaxLossPiecewise(object):
"""Softmax cross-entropy loss function."""
def _compute_loss(self, prediction_tensor, target_tensor, pos_gt, neg_gt, weight, balance=1.0):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing one-hot encoded classification targets
Returns:
loss: a (scalar) tensor representing the value of the loss function
"""
num_classes = prediction_tensor.get_shape().as_list()[-1]
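        # pos_gt / neg_gt are 0/1 masks over anchors: the per-anchor cross-entropy is
        # summed over each mask, normalized by the mask size, and recombined below as
        # balance * pos_loss + neg_loss.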
per_row_cross_ent = (tf.nn.softmax_cross_entropy_with_logits(
labels=tf.reshape(target_tensor, [-1, num_classes]),
logits=tf.reshape(prediction_tensor, [-1, num_classes])))
pred_argmax = tf.argmax(tf.reshape(prediction_tensor, [-1, num_classes]), axis=1)
true_argmax = tf.argmax(tf.reshape(target_tensor, [-1, num_classes]), axis=1)
equal_all = tf.cast(tf.equal(pred_argmax, true_argmax), tf.float32)
accuracy_neg = tf.reduce_sum(equal_all * neg_gt) / (tf.reduce_sum(neg_gt) + 1e-2)
accuracy_pos = tf.reduce_sum(equal_all * pos_gt) / (tf.reduce_sum(pos_gt) + 1e-2)
#accuracy_positive = tf.constant(num_classes)
#accuracy_positive = tf.reduce_sum(true_positive) / (tf.reduce_sum(tf.ones_like(true_positive, dtype=tf.float32)) + 1e-2)
#rate = tf.reduce_sum(neg_gt) / (tf.reduce_sum(pos_gt) + 1e-2)
pos_loss = tf.reduce_sum(per_row_cross_ent * pos_gt) * weight / (tf.reduce_sum(pos_gt) + 1e-2)
neg_loss = tf.reduce_sum(per_row_cross_ent * neg_gt) * weight / (tf.reduce_sum(neg_gt) + 1e-2)
return pos_loss * balance + neg_loss, accuracy_neg, accuracy_pos
| 44.150215 | 129 | 0.636726 | 9,902 | 0.962574 | 0 | 0 | 572 | 0.055604 | 0 | 0 | 5,310 | 0.516185 |
32cd6811a8df581555a9e17bfebdb7625e6646ac
| 19,282 |
py
|
Python
|
routing/views.py
|
iqqmuT/tsari
|
343ef5cf08ee24bdb710e94c0b6fb334264e5677
|
[
"MIT"
] | null | null | null |
routing/views.py
|
iqqmuT/tsari
|
343ef5cf08ee24bdb710e94c0b6fb334264e5677
|
[
"MIT"
] | 2 |
2020-02-11T22:09:10.000Z
|
2020-06-05T18:02:28.000Z
|
routing/views.py
|
iqqmuT/tsari
|
343ef5cf08ee24bdb710e94c0b6fb334264e5677
|
[
"MIT"
] | null | null | null |
import json
from datetime import datetime, timedelta
from dateutil import parser as dateparser
from django.contrib.auth.decorators import user_passes_test
from django.db.models import Q
from django.http import HttpResponseNotFound, JsonResponse
from django.shortcuts import render
from django.utils import timezone
from avdb.models import \
Convention, \
Equipment, \
EquipmentType, \
Location, \
LocationType, \
TransportOrder, \
TransportOrderLine
import logging
logger = logging.getLogger(__name__)
# TO data structure
# {
# 'disabled': False,
# 'equipment': 1,
# 'week': '2018-05-28T09:00:00+00:00',
# 'from': {
# 'location': 9,
# 'load_out': '2018-06-19T08:00:00+00:00'
# },
# 'to': {
# 'location': 4,
# 'convention': 7,
# 'load_in': '2018-06-19T09:00:00+00:00'
# },
# 'name': 'First TO',
# 'notes': 'Notes',
# 'unitNotes': 'Uniittinotes'
# }
@user_passes_test(lambda u: u.is_superuser)
def index(request, year):
video_types = EquipmentType.objects.filter(name__istartswith='Video').order_by('name')
audio_types = EquipmentType.objects.filter(name__istartswith='Audio').order_by('name')
elec_types = EquipmentType.objects.filter(name__istartswith='Electricity').order_by('name')
#video_eqs = Equipment.objects.filter(equipment_type__name__istartswith='Video')
#audio_eqs = Equipment.objects.filter(equipment_type__name__istartswith='Audio')
#elec_eqs = Equipment.objects.filter(equipment_type__name__istartswith='Electricity')
# get time period
first = _get_first_convention(year)
last = _get_last_convention(year)
if first is None or last is None:
return HttpResponseNotFound("No conventions found for year %d" % year)
start_date = first.load_in
end_date = last.load_out
# move start_date backwards to previous monday + 1 week
start_date = start_date - timedelta(days=start_date.weekday() + 7)
# move end_date forwards by week
end_date = end_date + timedelta(weeks=2)
weeks = []
monday = start_date
while monday < end_date:
weeks.append({
'monday': _get_previous_monday(monday),
'sunday': _get_next_sunday(monday),
'number': monday.isocalendar()[1],
})
monday = monday + timedelta(weeks=1)
to_data = []
equipment_groups = []
equipment_groups.extend(_add_eq_group(video_types, weeks, to_data))
equipment_groups.extend(_add_eq_group(audio_types, weeks, to_data))
equipment_groups.extend(_add_eq_group(elec_types, weeks, to_data))
#equipment_groups = [
# _handle_equipments(video_eqs, weeks, to_data),
# _handle_equipments(audio_eqs, weeks, to_data),
# _handle_equipments(elec_eqs, weeks, to_data),
#]
return render(request, 'routing/index.html', {
'year': year,
'equipment_groups': equipment_groups,
'weeks': weeks,
'conventions': json.dumps(_get_conventions_json()),
'start': start_date,
'end': end_date,
'other_locations': _get_other_locations(),
'locations_json': json.dumps(_get_locations_json()),
'json': json.dumps(to_data),
})
def _add_eq_group(equipment_types, weeks, to_data):
groups = []
for eq_type in equipment_types:
eqs = Equipment.objects.filter(equipment_type=eq_type, disabled=False)
groups.append(_handle_equipments(eqs, weeks, to_data))
return groups
# Save JSON request
def save(request, year):
data = json.loads(request.body.decode('utf-8'))
# Disable all existing TransportOrders for this year,
# and enable only those we are editing/creating
_disable_all_tos(year)
# Remove existing TransportOrderLines for this year
# We will re-create new TransportOrderLines
eq_ids = set()
for to_data in data['tos']:
eq_ids.add(to_data['equipment'])
for id in eq_ids:
_remove_tols(id, year)
# transit_from is storage for transit information
transit_from = None
for to_data in data['tos']:
if to_data['disabled'] == False:
if 'inTransit' in to_data['from'].keys() and to_data['from']['inTransit'] == True and ('inTransit' not in to_data['to'].keys() or to_data['to']['inTransit'] == False):
# end of transit
# from.load_out is saved to last TO in transit in UI
if 'load_out' in to_data['from'].keys():
transit_from['load_out'] = to_data['from']['load_out']
# copy 'from' data from beginning of transit
to_data['from'] = transit_from
transit_from = None
# save TO data
tol = _save_to_data(to_data)
else:
if 'inTransit' in to_data['to'].keys() and to_data['to']['inTransit'] == True and ('inTransit' not in to_data['from'].keys() or to_data['from']['inTransit'] == False):
# save 'from' data from beginning of transit
transit_from = to_data['from']
return JsonResponse({ 'ok': True })
def _save_to_data(to_data):
"""Saves TransportOrder data."""
to = _get_or_create_to(to_data)
if to is None:
# could not create TO
return None
#week = dateparser.parse(to_data['week'])
#monday = _get_previous_monday(week)
#sunday = _get_next_sunday(week)
# create new TransportOrderLine
tol = TransportOrderLine(
equipment=Equipment.objects.get(pk=to_data['equipment']),
transport_order=to,
)
tol.save()
return tol
def _disable_all_tos(year):
"""Disables all TransportOrders from given year."""
start = datetime(year, 1, 1)
end = datetime(year, 12, 31, 23, 59, 59)
tos = TransportOrder.objects.filter(
Q(from_loc_load_out__range=(start, end)) | Q(to_loc_load_in__range=(start, end)) | Q(from_convention__load_out__range=(start, end)) | Q(to_convention__load_in__range=(start, end))
)
for to in tos:
to.disabled = True
to.save()
def _get_or_create_to(to_data):
"""Gets or creates TransportOrder with given data."""
from_location = None
from_convention = None
from_load_out = None
if 'from' in to_data.keys():
if 'convention' in to_data['from'].keys() and to_data['from']['convention'] is not None:
id = to_data['from']['convention']
from_convention = Convention.objects.get(pk=id)
if 'location' in to_data['from'].keys() and to_data['from']['location'] is not None:
id = to_data['from']['location']
from_location = Location.objects.get(pk=id)
if from_convention is None and 'load_out' in to_data['from'].keys() and _is_valid_datetime(to_data['from']['load_out']):
from_load_out = dateparser.parse(to_data['from']['load_out'])
to_location = None
to_convention = None
to_load_in = None
if 'from' in to_data.keys():
if 'convention' in to_data['to'].keys() and to_data['to']['convention'] is not None:
id = to_data['to']['convention']
to_convention = Convention.objects.get(pk=id)
if 'location' in to_data['to'].keys() and to_data['to']['location'] is not None:
id = to_data['to']['location']
to_location = Location.objects.get(pk=id)
if to_convention is None and 'load_in' in to_data['to'].keys() and _is_valid_datetime(to_data['to']['load_in']):
to_load_in = dateparser.parse(to_data['to']['load_in'])
if from_location is None or to_location is None:
# can't create TransportOrder with empty Locations
return None
to, created = TransportOrder.objects.get_or_create(
from_convention=from_convention,
to_convention=to_convention,
from_loc=from_location,
to_loc=to_location,
from_loc_load_out=from_load_out,
to_loc_load_in=to_load_in,
)
# update other fields
if 'name' in to_data.keys():
to.name = to_data['name']
if 'notes' in to_data.keys():
to.notes = to_data['notes']
if 'unitNotes' in to_data.keys():
to.unit_notes = to_data['unitNotes']
to.disabled = False
to.save()
return to
def _is_valid_datetime(s):
try:
dateparser.parse(s)
return True
except ValueError:
logger.error("Invalid datetime '%s'" % s)
return False
def _get_previous_monday(d):
"""Returns previous monday from given datetime."""
monday = d - timedelta(days=d.weekday())
# set time to 00:00:00
return datetime(monday.year, monday.month, monday.day, 0, 0, 0)
def _get_next_sunday(d):
sunday = d + timedelta(days=6-d.weekday())
# set time to 23:59:59
return datetime(sunday.year, sunday.month, sunday.day, 23, 59, 59)
def _get_first_convention(year):
try:
return Convention.objects.filter(load_in__year=year).earliest('load_in')
except Convention.DoesNotExist:
return None
def _get_last_convention(year):
try:
return Convention.objects.filter(load_out__year=year).latest('load_out')
except Convention.DoesNotExist:
return None
def _get_conventions_json():
data = {}
for conv in Convention.objects.all():
d = {
'name': conv.routing_name()
}
if conv.load_in is not None:
d['load_in'] = conv.load_in.isoformat()
if conv.load_out is not None:
d['load_out'] = conv.load_out.isoformat()
data[conv.id] = d
return data
def _get_locations_json():
data = {}
for loc in Location.objects.all():
d = {
'name': loc.name,
}
data[loc.id] = d
return data
def _get_earliest_to(year):
try:
return TransportOrder.objects.filter(from_loc_load_out__year=year).earliest('from_loc_load_out')
except TransportOrder.DoesNotExist:
return None
def _get_latest_to(year):
try:
return TransportOrder.objects.filter(to_loc_load_in__year=year).latest('to_loc_load_in')
except TransportOrder.DoesNotExist:
return None
def _handle_equipments(equipments, weeks, to_data):
objs = []
for equipment in equipments:
eq_weeks = []
selected = {
'name': 'Select',
'type': None
}
start_location = selected
latest_location = None
latest_convention = None
for week in weeks:
# transport data
tod = {
#'transportOrder': None,
'disabled': False,
'equipment': equipment.pk,
'week': week['monday'].isoformat(),
'from': {
#'location': None,
#'convention': None,
},
'to': {
#'location': None,
#'convention': None,
}
}
#if latest_location is not None:
# tod['from']['location'] = latest_location
# tod['to']['location'] = latest_location
#if latest_convention is not None:
# tod['from']['convention'] = latest_convention
# tod['to']['convention'] = latest_convention
# find matching TransportOrderLine and fill information from there to toData object
filter_start = week['monday'] - timedelta(days=3)
filter_end = week['sunday'] - timedelta(days=3)
tols = _find_tols(equipment.pk,
filter_start,
filter_end)
if len(tols):
to = tols.first().transport_order
tod['id'] = to.pk
tod['name'] = to.name
tod['notes'] = to.notes
tod['unitNotes'] = to.unit_notes
if to.from_loc is not None:
tod['from']['location'] = to.from_loc.pk
if start_location['type'] is None:
selected = {
'name': to.from_loc.name,
'type': 'location'
}
start_location = selected
# set selected for previous empty weeks
for old_eq_week in eq_weeks:
if old_eq_week['selected']['type'] is None:
old_eq_week['selected'] = selected
if len(eq_weeks) > 0:
eq_weeks[len(eq_weeks)-1]['selected'] = {
'name': to.from_loc.name,
'type': 'location'
}
if to.from_convention is not None:
tod['from']['convention'] = to.from_convention.pk
tod['from']['location'] = to.from_convention.location.pk
if to.from_convention.load_out is not None:
tod['from']['load_out'] = to.from_convention.load_out.isoformat()
if start_location['type'] is None:
selected = {
'name': to.from_convention.routing_name(),
'type': 'convention',
}
start_location = selected
# set selected for previous empty weeks
for old_eq_week in eq_weeks:
if old_eq_week['selected']['type'] is None:
old_eq_week['selected'] = selected
if len(eq_weeks) > 0:
eq_weeks[len(eq_weeks)-1]['selected'] = {
'name': to.from_convention.routing_name(),
'type': 'convention',
}
if to.from_loc_load_out is not None:
tod['from']['load_out'] = to.from_loc_load_out.isoformat()
# special case: in transit
transit_length = to.transit_length()
if transit_length is not None and transit_length.days > 7:
_handle_in_transit(to, tod, to_data, eq_weeks)
if to.to_loc is not None:
tod['to']['location'] = to.to_loc.pk
selected = {
'name': to.to_loc.name,
'type': 'location'
}
latest_location = to.to_loc.pk
if to.to_convention is not None:
tod['to']['convention'] = to.to_convention.pk
tod['to']['location'] = to.to_convention.location.pk
if to.to_convention.load_in is not None:
tod['to']['load_in'] = to.to_convention.load_in.isoformat()
selected = {
'name': to.to_convention.routing_name(),
'type': 'convention',
}
latest_convention = to.to_convention.pk
if 'inTransit' in tod['to'].keys() and tod['to']['inTransit'] == True:
selected = {
'name': 'In transit',
'type': 'inTransit',
}
if to.to_loc_load_in is not None:
tod['to']['load_in'] = to.to_loc_load_in.isoformat()
to_data.append(tod)
# week data
w = {
'week': week,
#'to_idx': len(to_data) - 1, # index for to_data
'convention': None,
'conventions': _get_conventions(week['monday'], week['sunday']),
'other_locations': _get_other_locations(),
'selected': selected,
}
eq_weeks.append(w)
objs.append({
'eq': equipment,
'weeks': eq_weeks,
'start_location': start_location,
})
return objs
def _handle_in_transit(to, tod, to_data, eq_weeks):
transit_weeks = to.transit_length().days / 7
convention = None
location = None
if 'convention' in tod['from'].keys():
convention = tod['from']['convention']
if 'location' in tod['from'].keys():
location = tod['from']['location']
tod['from']['inTransit'] = True
tod['from']['convention'] = None
tod['from']['location'] = None
i = 0
l = len(to_data)
l2 = len(eq_weeks)
while i < transit_weeks - 2:
to_data[l-i-1]['to']['inTransit'] = True
to_data[l-i-1]['from']['inTransit'] = True
eq_weeks[l2-i-1]['selected'] = {
'name': 'In transit',
'type': 'inTransit',
}
i = i + 1
to_data[l-i-1]['to']['inTransit'] = True
to_data[l-i-1]['from']['convention'] = convention
to_data[l-i-1]['from']['location'] = location
eq_weeks[l2-i-1]['selected'] = {
'name': 'In transit',
'type': 'inTransit',
}
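# Module-level memo caches: entries are keyed by the queried date range (or 'all')
# and are never invalidated, so they persist for the lifetime of the process.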
convention_cache = {}
def _get_conventions(start, end):
key = start.isoformat() + end.isoformat()
if key not in convention_cache.keys():
convention_cache[key] = Convention.objects.filter(
starts__gte=start,
ends__lte=end,
)
return convention_cache[key]
location_cache = {}
def _get_other_locations():
"""Returns all locations except convention venues."""
if 'all' not in location_cache.keys():
conv_venue = LocationType.objects.get(name='Convention venue')
location_cache['all'] = Location.objects.exclude(loc_type=conv_venue)
return location_cache['all']
def _find_tols(equipment_id, start, end):
"""Returns existing TransportOrderLines matching with given arguments.
Matches only if load_in is matching between start and end."""
#logger.error('Trying to find TOL')
#logger.error(equipment_id)
#logger.error(start_time)
#logger.error(end_time)
tols = TransportOrderLine.objects.filter(
equipment__id=equipment_id).filter(
Q(transport_order__to_loc_load_in__range=(start, end)) | Q(transport_order__to_convention__load_in__range=(start, end))
#Q(transport_order__from_loc_load_out__range=(start, end)) | Q(transport_order__to_loc_load_in__range=(start, end)) | Q(transport_order__from_convention__load_out__range=(start, end)) | Q(transport_order__to_convention__load_in__range=(start, end))
)
return tols
def _remove_tols(equipment_id, year):
"""Removes all TransportOrderLines for given equipment id and from that year."""
start = datetime(year, 1, 1)
end = datetime(year, 12, 31, 23, 59, 59)
TransportOrderLine.objects.filter(
equipment__id=equipment_id,
transport_order__from_loc_load_out__range=(start, end),
).delete()
TransportOrderLine.objects.filter(
equipment__id=equipment_id,
transport_order__to_loc_load_in__range=(start, end),
).delete()
TransportOrderLine.objects.filter(
equipment__id=equipment_id,
transport_order__from_convention__load_out__range=(start, end),
).delete()
TransportOrderLine.objects.filter(
equipment__id=equipment_id,
transport_order__to_convention__load_in__range=(start, end),
).delete()
| 36.041121 | 256 | 0.587283 | 0 | 0 | 0 | 0 | 2,272 | 0.11783 | 0 | 0 | 4,904 | 0.25433 |
32cf5c6af409ad539e05135e062b11460576c4f6
| 5,575 |
py
|
Python
|
my_ner.py
|
shouxieai/nlp-bilstm_crf-ner
|
907381325eeb0a2c29004e1c617bea7312579ba8
|
[
"Apache-2.0"
] | 16 |
2021-12-14T10:51:25.000Z
|
2022-03-30T10:10:09.000Z
|
my_ner.py
|
shouxieai/nlp-bilstm-ner
|
907381325eeb0a2c29004e1c617bea7312579ba8
|
[
"Apache-2.0"
] | 1 |
2022-03-23T04:28:50.000Z
|
2022-03-23T04:28:50.000Z
|
my_ner.py
|
shouxieai/nlp-bilstm-ner
|
907381325eeb0a2c29004e1c617bea7312579ba8
|
[
"Apache-2.0"
] | 2 |
2021-12-08T02:48:01.000Z
|
2021-12-13T13:03:25.000Z
|
import os
from torch.utils.data import Dataset,DataLoader
import torch
import torch.nn as nn
from sklearn.metrics import f1_score
def build_corpus(split, make_vocab=True, data_dir="data"):
    """Read the corpus data."""
assert split in ['train', 'dev', 'test']
word_lists = []
tag_lists = []
with open(os.path.join(data_dir, split+".char.bmes"), 'r', encoding='utf-8') as f:
word_list = []
tag_list = []
for line in f:
if line != '\n':
word, tag = line.strip('\n').split()
word_list.append(word)
tag_list.append(tag)
else:
word_lists.append(word_list)
tag_lists.append(tag_list)
word_list = []
tag_list = []
word_lists = sorted(word_lists, key=lambda x: len(x), reverse=True)
tag_lists = sorted(tag_lists, key=lambda x: len(x), reverse=True)
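    # Sort sentences longest-first so that, with shuffle=False, each mini-batch
    # passes descending lengths to pack_padded_sequence as it expects by default.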
    # If make_vocab is True, also return word2id and tag2id
if make_vocab:
word2id = build_map(word_lists)
tag2id = build_map(tag_lists)
word2id['<UNK>'] = len(word2id)
word2id['<PAD>'] = len(word2id)
tag2id['<PAD>'] = len(tag2id)
return word_lists, tag_lists, word2id, tag2id
else:
return word_lists, tag_lists
def build_map(lists):
maps = {}
for list_ in lists:
for e in list_:
if e not in maps:
maps[e] = len(maps)
return maps
class MyDataset(Dataset):
def __init__(self,datas,tags,word_2_index,tag_2_index):
self.datas = datas
self.tags = tags
self.word_2_index = word_2_index
self.tag_2_index = tag_2_index
def __getitem__(self,index):
data = self.datas[index]
tag = self.tags[index]
data_index = [self.word_2_index.get(i,self.word_2_index["<UNK>"]) for i in data]
tag_index = [self.tag_2_index[i] for i in tag]
return data_index,tag_index
def __len__(self):
assert len(self.datas) == len(self.tags)
return len(self.datas)
def batch_data_pro(self,batch_datas):
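        # Collate function for the DataLoader: pad every sequence in the batch to
        # the batch's max length with <PAD> and also return the original lengths
        # for pack_padded_sequence.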
global device
data , tag = [],[]
da_len = []
for da,ta in batch_datas:
data.append(da)
tag.append(ta)
da_len.append(len(da))
max_len = max(da_len)
data = [i + [self.word_2_index["<PAD>"]] * (max_len - len(i)) for i in data]
tag = [i + [self.tag_2_index["<PAD>"]] * (max_len - len(i)) for i in tag]
data = torch.tensor(data,dtype=torch.long,device = device)
tag = torch.tensor(tag,dtype=torch.long,device = device)
return data , tag, da_len
class MyModel(nn.Module):
def __init__(self,embedding_num,hidden_num,corpus_num,bi,class_num,pad_index):
super().__init__()
self.embedding_num = embedding_num
self.hidden_num = hidden_num
self.corpus_num = corpus_num
self.bi = bi
self.embedding = nn.Embedding(corpus_num,embedding_num)
self.lstm = nn.LSTM(embedding_num,hidden_num,batch_first=True,bidirectional=bi)
if bi:
self.classifier = nn.Linear(hidden_num*2,class_num)
else:
self.classifier = nn.Linear(hidden_num, class_num)
self.cross_loss = nn.CrossEntropyLoss(ignore_index=pad_index)
def forward(self,data_index,data_len , tag_index=None):
em = self.embedding(data_index)
pack = nn.utils.rnn.pack_padded_sequence(em,data_len,batch_first=True)
output,_ = self.lstm(pack)
output,lens = nn.utils.rnn.pad_packed_sequence(output,batch_first=True)
pre = self.classifier(output)
self.pre = torch.argmax(pre, dim=-1).reshape(-1)
if tag_index is not None:
loss = self.cross_loss(pre.reshape(-1,pre.shape[-1]),tag_index.reshape(-1))
return loss
if __name__ == "__main__":
device = "cuda:0" if torch.cuda.is_available() else "cpu"
train_word_lists, train_tag_lists, word_2_index, tag_2_index = build_corpus("train")
dev_word_lists, dev_tag_lists = build_corpus("dev", make_vocab=False)
test_word_lists, test_tag_lists = build_corpus("test", make_vocab=False)
corpus_num = len(word_2_index)
class_num = len(tag_2_index)
train_batch_size = 5
dev_batch_size = len(dev_word_lists)
epoch = 100
lr = 0.001
embedding_num = 128
hidden_num = 129
bi = True
train_dataset = MyDataset(train_word_lists,train_tag_lists,word_2_index, tag_2_index)
train_dataloader = DataLoader(train_dataset,batch_size=train_batch_size,shuffle=False,collate_fn=train_dataset.batch_data_pro)
dev_dataset = MyDataset(dev_word_lists, dev_tag_lists, word_2_index, tag_2_index)
dev_dataloader = DataLoader(dev_dataset, batch_size=dev_batch_size, shuffle=False,collate_fn=dev_dataset.batch_data_pro)
model = MyModel(embedding_num,hidden_num,corpus_num,bi,class_num,word_2_index["<PAD>"])
model = model.to(device)
opt = torch.optim.Adam(model.parameters(),lr = lr)
for e in range(epoch):
model.train()
for data , tag, da_len in train_dataloader:
loss = model.forward(data,da_len,tag)
loss.backward()
opt.step()
opt.zero_grad()
        model.eval()  # evaluation: F1, accuracy, precision, recall
for dev_data , dev_tag, dev_da_len in dev_dataloader:
test_loss = model.forward(dev_data,dev_da_len,dev_tag)
score = f1_score(dev_tag.reshape(-1).cpu().numpy(),model.pre.cpu().numpy(),average="micro")
print(score)
break
| 32.794118 | 130 | 0.63139 | 2,404 | 0.427682 | 0 | 0 | 0 | 0 | 0 | 0 | 262 | 0.046611 |
32cf7fd469a0aec109e44e66849bad3789086158
| 245 |
py
|
Python
|
test.py
|
karttur/geoimagine03-support
|
3971db215382bd16f207eca3ef1d9d81e4298b41
|
[
"BSD-3-Clause"
] | null | null | null |
test.py
|
karttur/geoimagine03-support
|
3971db215382bd16f207eca3ef1d9d81e4298b41
|
[
"BSD-3-Clause"
] | null | null | null |
test.py
|
karttur/geoimagine03-support
|
3971db215382bd16f207eca3ef1d9d81e4298b41
|
[
"BSD-3-Clause"
] | null | null | null |
'''
Created on 28 Jan 2021
@author: thomasgumbricht
'''
from string import whitespace
def CheckWhitespace(s):
    '''
    Return True if s contains any whitespace character.
    '''
return True in [c in s for c in whitespace]
s = 'dumsnut'
print (CheckWhitespace(s))
| 13.611111 | 51 | 0.591837 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 80 | 0.326531 |
32cfbeee160a6e50ceb471701c99ace872cbfe2b
| 362 |
py
|
Python
|
leetcode/409.py
|
windniw/just-for-fun
|
54e5c2be145f3848811bfd127f6a89545e921570
|
[
"Apache-2.0"
] | 1 |
2019-08-28T23:15:25.000Z
|
2019-08-28T23:15:25.000Z
|
leetcode/409.py
|
windniw/just-for-fun
|
54e5c2be145f3848811bfd127f6a89545e921570
|
[
"Apache-2.0"
] | null | null | null |
leetcode/409.py
|
windniw/just-for-fun
|
54e5c2be145f3848811bfd127f6a89545e921570
|
[
"Apache-2.0"
] | null | null | null |
"""
link: https://leetcode.com/problems/longest-palindrome
problem: find the length of the longest palindrome that can be built from the characters of s
solution: use a map to count character occurrences; each pair contributes 2, plus one middle character if any odd count remains
"""
import collections
class Solution:
def longestPalindrome(self, s: str) -> int:
m, res = collections.defaultdict(int), 0
for x in s:
m[x] += 1
for x in m:
res += m[x] // 2 * 2
return min(len(s), res + 1)
| 18.1 | 54 | 0.558011 | 243 | 0.595588 | 0 | 0 | 0 | 0 | 0 | 0 | 161 | 0.394608 |
32cfc631e8d4a50ff93f3a9a349602c8342fb97a
| 847 |
py
|
Python
|
nickenbot/config.py
|
brlafreniere/nickenbot
|
f13ec78057ec25823eb16df6ffab3a32eddfd3ca
|
[
"MIT"
] | 1 |
2016-08-10T12:20:58.000Z
|
2016-08-10T12:20:58.000Z
|
nickenbot/config.py
|
brlafreniere/nickenbot
|
f13ec78057ec25823eb16df6ffab3a32eddfd3ca
|
[
"MIT"
] | null | null | null |
nickenbot/config.py
|
brlafreniere/nickenbot
|
f13ec78057ec25823eb16df6ffab3a32eddfd3ca
|
[
"MIT"
] | null | null | null |
import yaml
import os
import sys
current_dir = os.path.dirname(os.path.realpath(__file__))
project_dir = os.path.realpath(os.path.join(current_dir, ".."))
class ConfigManager:
network = None
config = None
@classmethod
def load(clss):
if clss.network:
config_filepath = os.path.join(project_dir, 'config/%s.config.yaml' % clss.network)
else:
config_filepath = os.path.join(project_dir, 'config/config.yaml')
config_file = open(config_filepath, 'r')
config_yaml = config_file.read()
        clss.config = yaml.safe_load(config_yaml)
@classmethod
def get(clss, key):
if not clss.config:
clss.load()
if not clss.config:
print("Configuration not found. Exiting.")
sys.exit(1)
return clss.config[key]
| 27.322581 | 95 | 0.615112 | 689 | 0.813459 | 0 | 0 | 620 | 0.731995 | 0 | 0 | 85 | 0.100354 |
32d046c8c2ed3ece0b08aa280a40083f8b7d16ab
| 2,277 |
py
|
Python
|
qna/views.py
|
channprj/KU-PL
|
7fc3719b612a819ed1bd443695d7f13f509ee596
|
[
"MIT"
] | null | null | null |
qna/views.py
|
channprj/KU-PL
|
7fc3719b612a819ed1bd443695d7f13f509ee596
|
[
"MIT"
] | null | null | null |
qna/views.py
|
channprj/KU-PL
|
7fc3719b612a819ed1bd443695d7f13f509ee596
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.shortcuts import redirect
from django.shortcuts import get_object_or_404
from django.utils import timezone
from .forms import QuestionForm
from .forms import AnswerForm
from .models import Question
from .models import Answer
def question_list(request):
questions = Question.objects.filter(created_date__lte = timezone.now()).order_by('-updated_date')
return render(request, 'qna/question_list.htm', {'questions': questions})
def question_detail(request, pk):
question = get_object_or_404(Question, pk=pk)
if request.method == "POST":
form = AnswerForm(request.POST)
if form.is_valid():
answer = form.save(commit=False)
answer.question = question
answer.user = request.user
answer.updated_date = timezone.now()
answer.save()
return redirect('qna.views.question_detail', pk=question.pk)
else:
form = AnswerForm()
return render(request, 'qna/question_detail.htm', {'question': question, 'form': form})
def question_new(request):
if request.method == "POST":
form = QuestionForm(request.POST)
if form.is_valid():
question = form.save(commit=False)
question.user = request.user
question.updated_date = timezone.now()
question.save()
return redirect('qna.views.question_detail', pk=question.pk)
else:
form = QuestionForm()
return render(request, 'qna/question_edit.htm', {'form': form})
def question_edit(request, pk):
question = get_object_or_404(Question, pk=pk)
if request.method == "POST":
form = QuestionForm(request.POST, instance=question)
if form.is_valid():
question = form.save(commit=False)
question.user = request.user
question.updated_date = timezone.now()
question.save()
return redirect('qna.views.question_detail', pk=question.pk)
else:
form = QuestionForm(instance=question)
return render(request, 'qna/question_edit.htm', {'form': form})
def question_remove(request, pk):
question = get_object_or_404(Question, pk=pk)
question.delete()
return redirect('qna.views.question_list')
| 35.030769 | 101 | 0.665349 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 272 | 0.119455 |
32d33f3c862ddf8043ee8ce09e1a526264e7c51a
| 1,648 |
py
|
Python
|
python/tests/test_oci.py
|
miku/labe
|
2d784f418e24ab6fef9f76791c9fdd02dd505657
|
[
"MIT"
] | null | null | null |
python/tests/test_oci.py
|
miku/labe
|
2d784f418e24ab6fef9f76791c9fdd02dd505657
|
[
"MIT"
] | null | null | null |
python/tests/test_oci.py
|
miku/labe
|
2d784f418e24ab6fef9f76791c9fdd02dd505657
|
[
"MIT"
] | 1 |
2021-09-16T10:51:00.000Z
|
2021-09-16T10:51:00.000Z
|
"""
Unit tests for labe. Most not mocked yet, hence slow.
"""
import collections
import socket
import pytest
import requests
from labe.oci import get_figshare_download_link, get_terminal_url
def no_internet(host="8.8.8.8", port=53, timeout=3):
"""
Host: 8.8.8.8 (google-public-dns-a.google.com)
OpenPort: 53/tcp
Service: domain (DNS/TCP)
"""
try:
socket.setdefaulttimeout(timeout)
socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((host, port))
return False
except socket.error as ex:
return True
@pytest.mark.skipif(no_internet(), reason="no internet")
def test_get_redirct_url():
with pytest.raises(requests.exceptions.MissingSchema):
get_terminal_url("undefined")
assert get_terminal_url("https://google.com") == "https://www.google.com/"
assert get_terminal_url("http://google.com") == "https://www.google.com/?gws_rd=ssl"
assert (get_terminal_url("https://doi.org/10.1111/icad.12417") ==
"https://onlinelibrary.wiley.com/doi/10.1111/icad.12417")
@pytest.mark.skipif(no_internet(), reason="no internet")
def test_get_figshare_download_link():
Case = collections.namedtuple("Case", "link result")
cases = (
Case(
"https://doi.org/10.6084/m9.figshare.6741422.v11",
"https://figshare.com/ndownloader/articles/6741422/versions/11",
),
Case(
"https://doi.org/10.6084/m9.figshare.6741422.v7",
"https://figshare.com/ndownloader/articles/6741422/versions/7",
),
)
for c in cases:
assert get_figshare_download_link(c.link) == c.result
| 30.518519 | 88 | 0.662621 | 0 | 0 | 0 | 0 | 1,072 | 0.650485 | 0 | 0 | 653 | 0.396238 |
32d559b8ce0d7d1c7f26302620ef00f9255a82dc
| 26,404 |
py
|
Python
|
pyNastran/bdf/cards/test/test_dynamic.py
|
ACea15/pyNastran
|
5ffc37d784b52c882ea207f832bceb6b5eb0e6d4
|
[
"BSD-3-Clause"
] | 293 |
2015-03-22T20:22:01.000Z
|
2022-03-14T20:28:24.000Z
|
pyNastran/bdf/cards/test/test_dynamic.py
|
ACea15/pyNastran
|
5ffc37d784b52c882ea207f832bceb6b5eb0e6d4
|
[
"BSD-3-Clause"
] | 512 |
2015-03-14T18:39:27.000Z
|
2022-03-31T16:15:43.000Z
|
pyNastran/bdf/cards/test/test_dynamic.py
|
ACea15/pyNastran
|
5ffc37d784b52c882ea207f832bceb6b5eb0e6d4
|
[
"BSD-3-Clause"
] | 136 |
2015-03-19T03:26:06.000Z
|
2022-03-25T22:14:54.000Z
|
"""tests dynamic cards and dynamic load cards"""
import unittest
from io import StringIO
import numpy as np
import pyNastran
from pyNastran.bdf.bdf import BDF, read_bdf, CrossReferenceError
from pyNastran.bdf.cards.test.utils import save_load_deck
#ROOT_PATH = pyNastran.__path__[0]
class TestDynamic(unittest.TestCase):
    """
    The cards tested include:
    * TSTEP / TSTEPNL
    * DELAY / DPHASE
    * FREQ / FREQ1 / FREQ2 / FREQ4 / FREQ5
    * TLOAD1 / TLOAD2 and RLOAD1 / RLOAD2 (with DLOAD, DAREA, TABLED2)
    """
def test_tstep(self):
"""tests a TSTEP card"""
model = BDF(debug=None)
sid = 42
n1 = n2 = 5
dt1 = dt2 = 0.1
no1 = no2 = 3
card = ['TSTEP', sid,
n1, dt1, no1, None, None, None, None, None,
n2, dt2, no2]
model.add_card(card, card[0], comment='tstep comment')
model.validate()
tstep = model.tsteps[42]
tstep.raw_fields()
tstep.write_card()
tstep.write_card(size=16)
sid = 43
N = 5
DT = 0.1
NO = 3
tstep2 = model.add_tstep(sid, N, DT, NO)
tstep2.raw_fields()
tstep2.write_card()
tstep2.write_card(size=16)
save_load_deck(model)
def test_tstepnl(self):
"""tests a TSTEPNL card"""
model = BDF(debug=None)
card = ['TSTEPNL', 250, 100, .01, 1, 'ADAPT', 2, 10, 'PW',
1.E-2, 1.E-3, 1.E-6, 2, 10, 2, .02, None,
5, 5, 0, 0.75, 16.0, 0.1, 20.,]
model.add_card(card, card[0], comment='tstepnl comment')
model.validate()
tstepnl = model.tstepnls[250]
tstepnl.raw_fields()
tstepnl.write_card()
tstepnl.write_card(size=16)
sid = 42
ndt = 10
dt = 3.
no = 5
tstepnl2 = model.add_tstepnl(sid, ndt, dt, no)
tstepnl2.raw_fields()
tstepnl2.write_card()
tstepnl2.write_card(size=16)
save_load_deck(model)
def test_delay(self):
"""tests a two field DELAY card"""
model = BDF(debug=False)
node1, c1, t1 = 100, 3, 0.3
node2, c2, t2 = 101, 4, 0.4
sid = 42
card_lines = ['DELAY', sid, node1, c1, t1, node2, c2, t2]
model.add_card(card_lines, card_lines[0], comment='', is_list=True,
has_none=True)
model.add_grid(100, [0., 0., 0.])
model.add_grid(101, [0., 0., 0.])
model.validate()
model.cross_reference()
#print(model.delays[42])
save_load_deck(model)
def test_dphase(self):
"""tests a two field DPHASE card"""
model = BDF(debug=False)
node1, c1, t1 = 100, 3, 0.3
node2, c2, t2 = 101, 4, 0.4
sid = 42
card_lines = ['DPHASE', sid, node1, c1, t1, node2, c2, t2]
model.add_card(card_lines, card_lines[0], comment='', is_list=True,
has_none=True)
model.add_grid(100, [0., 0., 0.])
model.add_grid(101, [0., 0., 0.])
model.validate()
model.cross_reference()
#print(model.dphases[42])
save_load_deck(model)
def test_freq(self):
"""tests FREQ, FREQ1, FREQ2, FREQ4"""
model = BDF(debug=False)
sid = 101
freqs = 0.1
freq = model.add_freq(sid, freqs, comment='freq')
#print(freq)
freqs = [2.0, 3.0]
freq = model.add_freq(sid, freqs, comment='freq')
#print(freq)
f1 = 0.
df = 2.0
freq1 = model.add_freq1(sid, f1, df, ndf=5, comment='freq1')
assert len(freq1.freqs) == 6, 'freqs=%s' % freq1.freqs
#print(freq1)
f1 = 1.
f2 = 8.0
freq2 = model.add_freq2(sid, f1, f2, nf=6, comment='freq2')
assert len(freq2.freqs) == 7, 'freqs=%s' % freq2.freqs
assert np.allclose(freq2.freqs.max(), f2), freq2.freqs
#print(freq2)
freq4 = model.add_freq4(sid, f1, f2, fspread=0.1, nfm=3, comment='freq4')
#print(model.frequencies[sid])
#print(freq4)
fractions = [0., 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
freq5 = model.add_freq5(sid, fractions, f1=0., f2=100., comment='freq5')
fractions = np.linspace(0., 1.)
unused_freq5b = model.add_freq5(sid, fractions, f1=0., f2=100., comment='freq5')
model.validate()
freq.raw_fields()
freq.write_card()
freq.write_card(size=16)
freq1.raw_fields()
freq1.write_card()
freq1.write_card(size=16)
freq2.raw_fields()
freq2.write_card()
freq2.write_card(size=16)
freq4.raw_fields()
freq4.write_card()
freq4.write_card(size=16)
freq5.raw_fields()
freq5.write_card()
freq5.write_card(size=16)
bdf_file = StringIO()
model.write_bdf(bdf_file, close=False)
unused_out = bdf_file.getvalue()
bdf_file.seek(0)
model2 = read_bdf(bdf_file, punch=True, debug=False)
model2.uncross_reference()
model2.safe_cross_reference()
model2.uncross_reference()
save_load_deck(model)
def test_tload(self):
"""tests DLOAD, TLOAD1, TLOAD2, TABLED2 cards"""
model = BDF(debug=False)
model.set_error_storage(nparse_errors=0, stop_on_parsing_error=True,
nxref_errors=0, stop_on_xref_error=True)
sid = 2
excite_id = 20
delay = 0
tid = 42
tload1 = model.add_tload1(sid, excite_id, tid, delay=0, Type='LOAD',
us0=0.0, vs0=0.0, comment='tload1')
tload1 = model.add_tload1(sid, excite_id, tid, delay=1., Type='DISP',
us0=0.0, vs0=0.0, comment='')
tload1 = model.add_tload1(sid, excite_id, tid, delay=2, Type='VELO',
us0=0.0, vs0=0.0, comment='')
tload1 = model.add_tload1(sid, excite_id, tid, delay=0, Type='ACC',
us0=0.0, vs0=0.0, comment='')
nid = 100
model.add_grid(nid, [0., 0., 0.])
darea_id = excite_id
component = 4
scale = 1.
model.add_darea(darea_id, nid, component, scale, comment='')
sid = 3
excite_id = 30
tload2 = model.add_tload2(sid, excite_id, delay=0, Type='LOAD',
T1=0., T2=None, frequency=0., phase=0.,
c=0., b=0., us0=0., vs0=0., comment='tload2')
tload2 = model.add_tload2(sid, excite_id, delay=1., Type='D',
T1=0., T2=None, frequency=0., phase=0.,
c=0., b=0., us0=0., vs0=0., comment='')
tload2 = model.add_tload2(sid, excite_id, delay=2, Type='V',
T1=0., T2=None, frequency=0., phase=0.,
c=0., b=0., us0=0., vs0=0., comment='')
tload2 = model.add_tload2(sid, excite_id, delay=0, Type='A',
T1=0., T2=1., frequency=0., phase=0.,
c=0., b=0., us0=0., vs0=0., comment='')
darea_id = excite_id
component = 4
scale = 1.
model.add_darea(darea_id, nid, component, scale, comment='')
delay_id = 2
nodes = 100
components = 2
delays = 1.5
delay = model.add_delay(delay_id, nodes, components, delays)
sid = 1
scale = 1.0
scale_factors = 1.
load_ids = 2
dload = model.add_dload(sid, scale, scale_factors, load_ids,
comment='dload')
x1 = 0.1
x = np.linspace(0., 1.)
y = np.sin(x)
tabled2 = model.add_tabled2(tid, x1, x, y, comment='tabled2')
model.pop_parse_errors()
delay.validate()
delay.raw_fields()
delay.write_card()
delay.write_card(size=16)
tload1.validate()
tload1.raw_fields()
tload1.write_card()
tload1.write_card(size=16)
tload2.validate()
tload2.raw_fields()
tload2.write_card()
tload2.write_card(size=16)
dload.validate()
dload.raw_fields()
dload.write_card()
dload.write_card(size=16)
tabled2.validate()
tabled2.raw_fields()
tabled2.write_card()
tabled2.write_card(size=16)
model.validate()
model.cross_reference()
model.pop_xref_errors()
bdf_file = StringIO()
model.write_bdf(bdf_file, close=False)
unused_out = bdf_file.getvalue()
bdf_file.seek(0)
unused_outs = model.get_bdf_stats(return_type='list')
unused_outs = model.get_bdf_stats(return_type='string')
time = 0.5
out1 = tload1.get_load_at_time(time, scale=1.)
out2 = tload2.get_load_at_time(time, scale=1.)
#print(out1)
assert len(out1) == 1, out1
assert len(out2) == 1, out2
#print(out1)
#print(out2)
time = [0.5, 0.9]
out1 = tload1.get_load_at_time(time, scale=1.)
out2 = tload2.get_load_at_time(time, scale=1.)
assert len(out1) == 2, out1
assert len(out2) == 2, out2
#print(out1)
#print(out2)
model2 = read_bdf(bdf_file, punch=True, debug=False)
model2.uncross_reference()
model2.safe_cross_reference()
model2.uncross_reference()
#print(out)
#print(outs)
save_load_deck(model, run_renumber=False, run_convert=False)
def test_rload(self):
"""tests DLOAD, RLOAD1, RLOAD2, TABLED2 cards"""
model = BDF(debug=False)
#model.case_control_deck = CaseControlDeck(['DLOAD=2', 'BEGIN BULK'])
sid = 2
excite_id = 20
delay = 0
tid = 42
rload1 = model.add_rload1(sid, excite_id, delay=0, dphase=0, tc=0,
td=0, Type='LOAD', comment='rload1')
rload1 = model.add_rload1(sid, excite_id, delay=1., dphase=0, tc=0,
td=0, Type='DISP', comment='rload1')
rload1 = model.add_rload1(sid, excite_id, delay=2, dphase=0, tc=0,
td=0, Type='VELO', comment='rload1')
rload1 = model.add_rload1(sid, excite_id, delay=0, dphase=0, tc=0,
td=0, Type='ACC', comment='rload1')
sid = 3
excite_id = 30
rload2 = model.add_rload2(sid, excite_id, delay=0, dphase=0, tb=0,
tp=0, Type='LOAD', comment='rload2')
rload2 = model.add_rload2(sid, excite_id, delay=1., dphase=0, tb=0,
tp=0, Type='D', comment='rload2')
rload2 = model.add_rload2(sid, excite_id, delay=2, dphase=0, tb=0,
tp=0, Type='V', comment='rload2')
rload2 = model.add_rload2(sid, excite_id, delay=0, dphase=0, tb=0,
tp=0, Type='A', comment='rload2')
excite_id = 20
nid = 21
c = 1
scale = 1.0
model.add_darea(excite_id, nid, c, scale, comment='darea')
model.add_grid(nid, [0., 0., 0.])
excite_id = 30
model.add_darea(excite_id, nid, c, scale, comment='darea')
delay_id = 2
nodes = 100
components = 2
delays = 1.5
delay = model.add_delay(delay_id, nodes, components, delays)
sid = 1
scale = 1.0
scale_factors = 1.
load_ids = 2
dload = model.add_dload(sid, scale, scale_factors, load_ids,
comment='dload')
x1 = 0.1
x = np.linspace(0., 1.)
y = np.sin(x)
tabled2 = model.add_tabled2(tid, x1, x, y, comment='tabled2')
model.pop_parse_errors()
delay.validate()
delay.raw_fields()
delay.write_card()
delay.write_card(size=16)
rload1.validate()
rload1.raw_fields()
rload1.write_card()
rload1.write_card(size=16)
rload2.validate()
rload2.raw_fields()
rload2.write_card()
rload2.write_card(size=16)
dload.validate()
dload.raw_fields()
dload.write_card()
dload.write_card(size=16)
tabled2.validate()
tabled2.raw_fields()
tabled2.write_card()
tabled2.write_card(size=16)
model.validate()
model.cross_reference()
model.pop_xref_errors()
#print(model.dareas)
bdf_file = StringIO()
model.write_bdf(bdf_file, close=False)
unused_out = bdf_file.getvalue()
bdf_file.seek(0)
unused_outs = model.get_bdf_stats(return_type='list')
unused_outs = model.get_bdf_stats(return_type='string')
freq = 0.5
out1 = rload1.get_load_at_freq(freq, scale=1.)
#out2 = rload2.get_load_at_time(freq, scale=1.)
#print(out1)
#print(out2)
assert len(out1) == 1, out1
#assert len(out2) == 1, out2
freq = [0.5, 0.9]
out1 = rload1.get_load_at_freq(freq, scale=1.)
#out2 = rload2.get_load_at_freq(freq, scale=1.)
#print(out1)
#print(out2)
assert len(out1) == 2, out1
#assert len(out2) == 2, out2
model2 = read_bdf(bdf_file, punch=True, debug=False)
model2.uncross_reference()
model2.safe_cross_reference()
model2.uncross_reference()
#print(out)
#print(outs)
save_load_deck(model, run_renumber=False, run_convert=False)
def test_ascre(self):
"""tests ASCRE, DELAY, DPHASE, TABLED2"""
model = BDF(debug=False)
sid = 1
excite_id = 2
rho = 1.0
b = 2.0
acsrce = model.add_acsrce(sid, excite_id, rho, b, delay=0, dphase=0, power=0,
comment='acsrce')
acsrce.raw_fields()
sid = 3
excite_id = 4
rho = 1.0
b = 2.0
delay = 3
dphase = 4
power = 5
unused_acsrce2 = model.add_acsrce(sid, excite_id, rho, b, delay=delay,
dphase=dphase, power=power)
nodes = 4
components = 5
delays = 6.0
delay = model.add_delay(sid, nodes, components, delays, comment='')
nodes = 4
components = 6
phase_leads = 2.0
delay = model.add_dphase(sid, nodes, components, phase_leads)
tid = power
x1 = 1.
x = np.linspace(0., 1.) + 10.
y = np.sin(x) + 2.
model.add_tabled2(tid, x1, x, y, comment='tabled2')
model.add_grid(4, [0., 0., 0.])
model.validate()
model.pop_parse_errors()
model.cross_reference()
model.pop_xref_errors()
save_load_deck(model, run_convert=False)
def test_nlparm(self):
"""tests NLPARM"""
model = BDF(debug=False)
nlparm_id = 42
model.add_nlparm(nlparm_id, comment='nlparm')
save_load_deck(model)
def test_nlpci(self):
"""tests NLPCI"""
model = BDF(debug=False)
nlpci_id = 42
nlpci = model.add_nlpci(nlpci_id, Type='CRIS', minalr=0.25, maxalr=4.,
scale=0., desiter=12, mxinc=20,
comment='nlpci')
nlpci.raw_fields()
#print(nlpci)
save_load_deck(model)
#def test_rotord(self):
#"""tests ROTORD"""
#model = BDF(debug=False)
#sid = 42
#rstart = 10.0
#rstep = 11.0
#numstep = 10
#rids = []
#rsets = [31]
#rspeeds = [None]
#rcords = []
#w3s = []
#w4s = []
#rforces = []
#brgsets = []
#rotord = model.add_rotord(
#sid, rstart, rstep, numstep,
#rids, rsets, rspeeds, rcords, w3s, w4s, rforces, brgsets,
#refsys='ROT', cmout=0.0, runit='RPM',
#funit='RPM', zstein='NO', orbeps=1.e-6,
#roprt=0, sync=1, etype=1, eorder=1.0,
#threshold=0.02, maxiter=10, comment='rotord')
#rotord.validate()
#save_load_deck(model)
def test_loadcyn(self):
"""tests LOADCYN"""
model = BDF(debug=False, log=None, mode='msc')
sid = 42
scale = 4.
segment_id = 10
scales = [1.]
load_ids = [3]
loadcyn = model.add_loadcyn(sid, scale, segment_id, scales, load_ids,
segment_type=None, comment='loadcyn')
loadcyn.validate()
model.pop_parse_errors()
card = loadcyn.write_card(size=8)
loadcyn.write_card(size=16, is_double=False)
loadcyn.write_card(size=16, is_double=True)
loadcyn.raw_fields()
str(loadcyn)
#print(model.loads)
model.loads = {}
model.add_card(card.split('\n')[1:], 'LOADCYN', comment='', is_list=False, has_none=True)
model.cross_reference()
model.uncross_reference()
model.safe_cross_reference()
save_load_deck(model, run_convert=False)
def test_deform(self):
"""tests DEFORM"""
model = BDF(debug=False, log=None, mode='msc')
sid = 42
eid = 10
deformation = 32.
deform = model.add_deform(sid, eid, deformation, comment='deform')
deform.validate()
model.pop_parse_errors()
card = deform.write_card(size=8)
deform.write_card(size=16, is_double=False)
deform.write_card(size=16, is_double=True)
deform.raw_fields()
str(deform)
model.loads = {}
model.add_card(card.split('\n')[1:], 'DEFORM', comment='', is_list=False, has_none=True)
model.pop_parse_errors()
with self.assertRaises(CrossReferenceError):
model.cross_reference()
with self.assertRaises(CrossReferenceError):
model.pop_xref_errors()
model.uncross_reference()
model.reset_errors()
model.safe_cross_reference()
delta = 0.1
eid1 = 11
eid2 = 12
eid3 = 13
fields = ['DEFORM', sid, eid1, delta, eid2, delta, eid3, delta]
model.add_card(fields, 'DEFORM')
eid = 10
nids = [2, 3]
mid = 100
model.add_grid(2, [0., 0., 0.])
model.add_grid(3, [1., 0., 0.])
E = 3.0e7
G = None
nu = 0.3
model.add_mat1(mid, E, G, nu)
model.add_conrod(eid, mid, nids, A=0.0, j=0.0, c=0.0, nsm=0.0, comment='')
model.add_conrod(eid1, mid, nids, A=0.0, j=0.0, c=0.0, nsm=0.0, comment='')
model.add_conrod(eid2, mid, nids, A=0.0, j=0.0, c=0.0, nsm=0.0, comment='')
model.add_conrod(eid3, mid, nids, A=0.0, j=0.0, c=0.0, nsm=0.0, comment='')
model.cross_reference()
save_load_deck(model)
def test_rforce(self):
"""tests RFORCE"""
model = BDF(debug=False, log=None, mode='msc')
#model._nxref_errors = 0
sid = 42
nid = 2
cid = 1
scale = 2.
r123 = [0., 1., 2.]
rforce = model.add_rforce(sid, nid, scale, r123, cid=cid,
method=1, racc=0., mb=0, idrf=0, comment='rforce')
rforce.validate()
card = rforce.write_card(size=8)
rforce.write_card(size=16, is_double=False)
rforce.write_card(size=16, is_double=True)
rforce.raw_fields()
str(rforce)
model.loads = {}
model.add_card(card.split('\n')[1:], 'RFORCE', comment='', is_list=False, has_none=True)
model.pop_parse_errors()
with self.assertRaises(CrossReferenceError):
model.cross_reference()
with self.assertRaises(CrossReferenceError):
model.pop_xref_errors()
model.uncross_reference()
model.reset_errors()
with self.assertRaises(KeyError):
model.safe_cross_reference()
model.reset_errors()
model.add_grid(2, [0., 0., 0.])
model.add_cord2r(cid, [0., 0., 0.], [0., 0., 1.], [1., 0., 0.], rid=0, comment='')
model.cross_reference()
save_load_deck(model, run_convert=False)
def test_rforce1(self):
"""tests RFORCE1"""
model = BDF(debug=False, log=None, mode='msc')
sid = 42
nid = 2
scale = 2.
#r123 = None
group_id = -4
cid = 1
rforce1 = model.add_rforce1(sid, nid, scale, group_id, cid=cid, r123=None,
racc=0., mb=0, method=2, comment='rforce1')
rforce1.validate()
rforce1b = model.add_rforce1(sid, nid, scale, group_id, cid=0, r123=[1., 2., 3.],
racc=0., mb=0, method=2, comment='rforce1')
rforce1b.validate()
model.pop_parse_errors()
card = rforce1.write_card(size=8)
rforce1.write_card(size=16, is_double=False)
rforce1.write_card(size=16, is_double=True)
rforce1.raw_fields()
str(rforce1)
model.loads = {}
model.add_card(card.split('\n')[1:], 'RFORCE1', comment='', is_list=False, has_none=True)
model.pop_parse_errors()
with self.assertRaises(CrossReferenceError):
model.cross_reference()
with self.assertRaises(CrossReferenceError):
model.pop_xref_errors()
model.uncross_reference()
model.reset_errors()
with self.assertRaises(KeyError):
model.safe_cross_reference()
model.reset_errors()
model.add_grid(2, [0., 0., 0.])
model.add_cord2r(cid, [0., 0., 0.], [0., 0., 1.], [1., 0., 0.], rid=0, comment='')
model.cross_reference()
save_load_deck(model, run_convert=False)
def _test_dynamic1(self):
"""
xref test for:
- DLOAD -> DAREA -> NID
DLOAD takes priority
useful for dynamic nodal forces/disp/vel/acc
"""
msg = """
SOL 108
CEND
SUBCASE 1
DLOAD = 33
DISP(PLOT) = ALL
BEGIN BULK
$DLOAD SID S S1 L1 S2 L2
DLOAD, 33, 1.0, 1.0, 35, 1.0, 36
$RLOAD1 SID EXCITEID DELAY DPHASE TC TD TYPE
RLOAD1, 35, 29, 0.2, 5.0, 40, 0.0, 0
RLOAD1, 36, 29, 31, 32, 4.0, 41, 0
$DAREA SID GRID COMP SCALE
DAREA, 29, 30, 1, 5.2
$DELAY SID GRID COMP LAG
DELAY, 31, 30, 1, 0.2
$DPHASE SID GRID COMP ANGLE
DPHASE, 32, 30, 1, 5.0
$TABLED1 TID XAXIS YAXIS
$ x1 y1 x2 y2 x3 y3 x4 y4
TABLED1, 40, LINEAR, LINEAR
,0.0, 4.0, 2.0, 8.0, 6.0, 8.0, ENDT
TABLED1, 41, LINEAR, LINEAR
,0.0, 0.5, 0.6, 0.4, 0.8, 0.7, ENDT
GRID,30
"""
model = BDF(debug=False)
bdf_file = StringIO()
bdf_file.write(msg)
bdf_file.seek(0)
model.read_bdf(bdf_file)
#In the example:
# * The DLOAD case control command selects the loading reference
# by the DLOAD bulk entry having SID = 33 as the dynamic
# loading for the analysis.
# * The DLOAD bulk entry combines the dynamic loads defined by
# two RLOAD1 entries having SIDs of 35 and 36. Neither dynamic
# load is scaled using the DLOAD entry.
# * Both RLOAD1 entries reference the same DAREA entry. Thus,
# both dynamic loads are applied to the same degree-of-freedom.
# In this example, it is a single degree-of-freedom, Component 1
# of Grid 30. Both dynamic loads are scaled 5.2 times by the
# DAREA entry.
# * Because the dynamic loads are applied at only one
# degree-of-freedom, the time delay and phase angle can be
# defined directly on the RLOAD1 entries. This is the case
# for the RLOAD1 entry having SID = 35. However, for
# demonstration purposes, the RLOAD1 entry having SID = 36
# references DELAY and DPHASE bulk entries. Both approaches
# define a delay of 0.2 and a phase angle of 5.0 for the
# corresponding dynamic load.
# * C(f) for the RLOAD1 entry having SID = 35 is defined by the
# TABLED1 entry having TID = 40. (See Figure 6-6.) D(f) for
# this same RLOAD1 entry is defined as zero.
# * C(f) for the RLOAD1 entry having SID = 36 is a constant
# value of 4.0. D(f) for this same RLOAD entry is defined by
# the TABLED1 entry having TID = 41.
def _test_dynamic2(self):
"""
xref test for:
- LOADSET -> LSEQ -> FORCE, PLOAD
- DLOAD -> RLOAD1 -> TABLED1
LOADSET takes priority
useful for generalized dynamic forces/disp/vel/acc
"""
msg = """
SOL 108
CEND
SUBCASE 1
LOADSET = 27
DLOAD = 25
DISP(PLOT) = ALL
BEGIN BULK
$LSEQ SID EXCITEID LID
LSEQ, 27, 28, 26
$RLOAD1 SID EXCITEID DELAY DPHASE TC TD
RLOAD1, 25, 28, 0.0, 10.0, 29
$FORCE SID GRID CID F N1 N2 N3
FORCE, 26, 425, , 2.5, 1.0
$PLOAD SID PRES GRID1 GRID2 GRID3 GRID4
PLOAD, 26, 50.0, 63, 64, 88, 91
$TABLED1 TID XAXIS YAXIS
$ x1 y1 x2 y2 x3 y3 x4 y4
TABLED1, 29, LINEAR, LINEAR
,0.0, 0.5, 0.6, 0.4, 0.8, 0.7, ENDT
"""
model = BDF(debug=False)
bdf_file = StringIO()
bdf_file.write(msg)
bdf_file.seek(0)
model.read_bdf(bdf_file)
#In the example:
# * The LOADSET request in case control selects the LSEQ entry
# having SID = 27.
# * The LSEQ entry references the static loads having SID = 26.
# These loads include the FORCE and PLOAD entries. The FORCE
# and PLOAD entries provide the spatial distribution of the
# dynamic loading.
# * The DLOAD request in case control selects the RLOAD1 entry
# having SID = 25.
# * The RLOAD1 entry references a TABLED1 entry having TID = 29.
# This TABLED1 entry defines C(f) for the RLOAD1 entry. Because
# the TD field on the RLOAD1 entry is undefined, D(f) defaults
# to zero.
# * The EXCITEID fields of the LSEQ and RLOAD1 entries are both
# 28, thereby linking the temporal and spatial distributions of
# the dynamic loading. Thus, the dynamic load defined by the
# RLOAD1 entry is:
# o Scaled by 2.5 and applied as a force to Component 1 of Grid 425.
# o Scaled by 50.0 and applied as a pressure to the quadrilateral
# element face defined by Grids 63, 64, 88, and 91.
if __name__ == '__main__': # pragma: no cover
unittest.main()
| 34.069677 | 97 | 0.549879 | 26,049 | 0.986555 | 0 | 0 | 0 | 0 | 0 | 0 | 6,107 | 0.231291 |
32d6f22794e1af28d1b004461271504fb7680002
| 4,691 |
py
|
Python
|
src/kv/benchmark/runbench.py
|
showapicxt/iowow
|
a29ac5b28f1b6c2817061c2a43b7222176458876
|
[
"MIT"
] | 242 |
2015-08-13T06:38:10.000Z
|
2022-03-17T13:49:56.000Z
|
src/kv/benchmark/runbench.py
|
showapicxt/iowow
|
a29ac5b28f1b6c2817061c2a43b7222176458876
|
[
"MIT"
] | 44 |
2018-04-08T07:12:02.000Z
|
2022-03-04T06:15:01.000Z
|
src/kv/benchmark/runbench.py
|
showapicxt/iowow
|
a29ac5b28f1b6c2817061c2a43b7222176458876
|
[
"MIT"
] | 18 |
2016-01-14T09:50:34.000Z
|
2022-01-26T23:07:40.000Z
|
import subprocess
import argparse
import os
import random
from collections import OrderedDict
from parse import parse
from bokeh.io import export_png
from bokeh.plotting import figure, output_file, show, save
from bokeh.models import ColumnDataSource, FactorRange
from bokeh.transform import factor_cmap
from bokeh.layouts import gridplot
from bokeh.embed import components
parser = argparse.ArgumentParser(description='IWKV Benchmarks')
parser.add_argument(
'-b', '--basedir', help='Base directory with benchmark executables', default='.', nargs='?')
args = parser.parse_args()
basedir = os.path.abspath(args.basedir)
print('Base directory:', basedir)
benchmarks = [
'iwkv',
'lmdb',
'bdb',
'wiredtiger',
'kyc',
'tc'
#'leveldb'
]
runs = []
runs += [{'b': 'fillrandom2', 'n': n, 'vz': vz, 'rs': 2853624176, 'sizestats': True}
for n in (int(1e6),)
for vz in (1000,)]
runs += [{'b': 'fillrandom2,readrandom,deleterandom', 'n': n, 'vz': vz, 'kz': kz, 'rs': 2105940112}
for n in (int(2e6),)
for vz in (40, 400,)
for kz in (16, 1024,)]
runs += [{'b': 'fillseq,overwrite,deleteseq', 'n': n, 'kz': kz, 'rs': 570078848}
for n in (int(2e6),)
for vz in (400,)
for kz in (16, 1024,)]
runs += [{'b': 'fillrandom2,readrandom,readseq,readreverse', 'n': n, 'vz': vz, 'rs': 1513135152}
for n in (int(10e6),)
for vz in (200,)]
runs += [{'b': 'fillrandom2', 'n': n, 'vz': vz, 'rs': 3434783568}
for n in (int(10e3),)
for vz in ((200 * 1024),)]
results = OrderedDict()
def fill_result(bm, run, sizestats, line):
key = ' '.join(['-{} {}'.format(a, v) for a, v in run.items()])
if key not in results:
results[key] = OrderedDict()
if bm not in results[key]:
results[key][bm] = OrderedDict()
res = results[key][bm]
pval = parse('done: {} in {}', line)
if sizestats:
pval = parse('db size: {} ({})', line)
if pval and 'db size' not in res:
print(line, flush=True)
res['db size'] = int(pval[0]) / (1024 * 1024)
elif pval:
print(line, flush=True)
res[pval[0]] = int(pval[1])
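# Illustrative note (not part of the original script): fill_result() only keys off
# lines matching the two parse() patterns above. Assumed example output lines:
#   "done: fillrandom2 in 1234"   -> res['fillrandom2'] = 1234   (time)
#   "db size: 1048576 (1.0mb)"    -> res['db size'] = 1.0        (MB)
# The exact text emitted by the *_benchmark executables is an assumption here;
# treat these lines as a sketch of the expected format, not a specification.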
def run_benchmark_run(bm, run):
args = ['{}/{}_benchmark'.format(basedir, bm)]
sizestats = False
for a, v in run.items():
if a in ('sizestats',):
sizestats = True
continue
args.append('-{}'.format(a))
args.append(str(v))
print('Run {}'.format(' '.join(args)), flush=True)
with subprocess.Popen(args,
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE,
universal_newlines=True,
cwd=basedir,
bufsize=1) as output:
for line in output.stdout:
fill_result(bm, run, sizestats, line.strip())
output.wait()
def run_benchmark(bm):
for run in runs:
run_benchmark_run(bm, run)
def run():
for b in benchmarks:
run_benchmark(b)
def main():
run()
plots = []
palette = ["#00B377", "#e84d60", "#0054AE", "#c9d9d3",
"#BFF500", "#555555", "#DFBFFF", "#B1D28F",
"#FFAA00", "#A18353", "#888888", "#718dbf"]
for bn, rmap in results.items():
pfactors = None
x = [(bm, brun) for bm in iter(rmap) for brun in iter(rmap[bm])]
if len([v for v in x if v[1] == 'db size']):
sizestats = True
else:
sizestats = False
if pfactors is None:
pfactors = [f[1] for f in x]
counts = [rmap[bm][brun]
for bm in iter(rmap) for brun in iter(rmap[bm])]
source = ColumnDataSource(data=dict(x=x, counts=counts))
p = figure(x_range=FactorRange(*x), plot_height=350, plot_width=750,
title=bn) # y_axis_type="log"
p.vbar(x='x', top='counts', width=0.9, source=source, line_color='white',
fill_color=factor_cmap('x', palette=palette, factors=pfactors, start=1, end=2))
p.y_range.start = 0
p.yaxis.axis_label = 'Time ms' if not sizestats else 'Database file size (MB)'
p.x_range.range_padding = 0.1
p.xaxis.major_label_orientation = 1
p.xgrid.grid_line_color = None
p.toolbar_location = None
plots.append(p)
os.makedirs("charts", exist_ok=True)
export_png(p, filename="charts/{}.png".format(bn))
p.toolbar_location = "right"
grid = gridplot(plots, ncols=1, merge_tools=False)
output_file('benchmark_results_raw.html')
save(grid)
show(grid)
if __name__ == '__main__':
main()
| 31.273333 | 99 | 0.568322 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 733 | 0.156257 |
32d7c7852b8b937ddf9034af3749422522ced7eb
| 2,792 |
py
|
Python
|
tests/utils/test_parser.py
|
ccechatelier/bcdi
|
cbe3b7960414b03f8e98336c3fcd7b367de441ca
|
[
"CECILL-B"
] | 18 |
2020-04-30T08:48:39.000Z
|
2022-03-30T14:42:01.000Z
|
tests/utils/test_parser.py
|
ccechatelier/bcdi
|
cbe3b7960414b03f8e98336c3fcd7b367de441ca
|
[
"CECILL-B"
] | 78 |
2019-06-30T03:45:58.000Z
|
2022-03-23T15:04:44.000Z
|
tests/utils/test_parser.py
|
ccechatelier/bcdi
|
cbe3b7960414b03f8e98336c3fcd7b367de441ca
|
[
"CECILL-B"
] | 16 |
2019-07-03T17:18:53.000Z
|
2022-01-12T15:54:56.000Z
|
# -*- coding: utf-8 -*-
# BCDI: tools for pre(post)-processing Bragg coherent X-ray diffraction imaging data
# (c) 07/2017-06/2019 : CNRS UMR 7344 IM2NP
# (c) 07/2019-05/2021 : DESY PHOTON SCIENCE
# authors:
# Jerome Carnis, [email protected]
from pathlib import Path
import unittest
from bcdi.utils.parser import ConfigParser
here = Path(__file__).parent
CONFIG = str(here.parents[1] / "conf/config_postprocessing.yml")
def run_tests(test_class):
suite = unittest.TestLoader().loadTestsFromTestCase(test_class)
runner = unittest.TextTestRunner(verbosity=2)
return runner.run(suite)
class TestConfigParser(unittest.TestCase):
"""
Tests on the class ConfigParser.
def __init__(self, file_path : str, script_type : str = "preprocessing") -> None :
"""
def setUp(self) -> None:
self.command_line_args = {"scan": 999999999, "root_folder": str(here)}
self.parser = ConfigParser(CONFIG, self.command_line_args)
def test_init_file_path(self):
self.assertTrue(self.parser.file_path == CONFIG)
def test_init_file_path_2(self):
self.assertTrue(self.parser.arguments is None)
def test_init_file_path_wrong_type(self):
with self.assertRaises(TypeError):
ConfigParser(1234, self.command_line_args)
def test_init_file_path_wrong_file_extension(self):
with self.assertRaises(ValueError):
ConfigParser("C:/test.txt", self.command_line_args)
def test_init_file_path_not_existing(self):
with self.assertRaises(ValueError):
ConfigParser("C:/test.yml", self.command_line_args)
def test_init_command_line_args(self):
self.assertTrue(self.parser.command_line_args == self.command_line_args)
def test_init_command_line_args_none(self):
parser = ConfigParser(CONFIG, None)
self.assertTrue(parser.command_line_args is None)
def test_init_raw_config(self):
self.assertIsInstance(self.parser.raw_config, bytes)
def test_filter_dict(self):
dic = {"scan": "9999", "sdd": None}
output = self.parser.filter_dict(dic)
self.assertTrue(output == {"scan": "9999"})
def test_filter_dict_filter_value(self):
dic = {"scan": "9999", "sdd": None, "test": True}
output = self.parser.filter_dict(dic, filter_value=True)
self.assertTrue(output == {"scan": "9999", "sdd": None})
def test_load_arguments(self):
args = self.parser.load_arguments()
# "scan" is also key in CONFIG, which means that the overriding by the optional
# --scan argument from the command line works as expected
self.assertTrue(args.get("scan") == self.command_line_args["scan"])
if __name__ == "__main__":
run_tests(TestConfigParser)
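# Usage sketch derived from the tests above (not part of the original module).
# ConfigParser merges a YAML config file with optional command-line overrides;
# the override values below are placeholders.
#
#   parser = ConfigParser(CONFIG, {"scan": 42, "root_folder": "/tmp"})
#   args = parser.load_arguments()
#   assert args.get("scan") == 42   # the command-line value overrides the YAML entry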
| 34.469136 | 87 | 0.690544 | 2,105 | 0.75394 | 0 | 0 | 0 | 0 | 0 | 0 | 700 | 0.250716 |
32d936fc21c284d747f6a37882f102cf2a32a1e5
| 567 |
py
|
Python
|
src/directory-starter/README_text.py
|
hannahweber244/directory-starter
|
0cb12b6e9dfe9c3a6eb5029d7d0b6cb5da52b44b
|
[
"MIT"
] | null | null | null |
src/directory-starter/README_text.py
|
hannahweber244/directory-starter
|
0cb12b6e9dfe9c3a6eb5029d7d0b6cb5da52b44b
|
[
"MIT"
] | null | null | null |
src/directory-starter/README_text.py
|
hannahweber244/directory-starter
|
0cb12b6e9dfe9c3a6eb5029d7d0b6cb5da52b44b
|
[
"MIT"
] | null | null | null |
"""
# [REPO NAME]
## Table of contents
[Here you can use a table of contents to keep your README structured.]
## Overview
[Here you give a short overview over the motivation behind your project and what problem it solves.]
## How to use it
[Here you can explain how your tool/project is usable.]
### Requirements and dependencies
[If there are any requirements or dependencies to use what you developed, you can put those here.]
## Additional information
[Here you can include an overview over the structure of your code, additional information, tests etc.]
"""
| 31.5 | 102 | 0.75485 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 567 | 1 |
32da7030ea8ed7c10970c252248ba50cc03bff1f
| 152 |
py
|
Python
|
kfdda/models/__init__.py
|
ll1l11/pymysql-test
|
de5747366bbf23ecb0b1f01059b3a69c8ac4936d
|
[
"MIT"
] | null | null | null |
kfdda/models/__init__.py
|
ll1l11/pymysql-test
|
de5747366bbf23ecb0b1f01059b3a69c8ac4936d
|
[
"MIT"
] | null | null | null |
kfdda/models/__init__.py
|
ll1l11/pymysql-test
|
de5747366bbf23ecb0b1f01059b3a69c8ac4936d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from ..core import db
from ..helpers import JSONSerializer
class BaseModel(db.Model, JSONSerializer):
__abstract__ = True
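# Illustrative sketch (not part of the original package): concrete models subclass
# BaseModel to inherit both the SQLAlchemy mapping and the JSON serialization
# mix-in. The table and column below are made up, assuming `db` is a
# Flask-SQLAlchemy handle.
#
# class User(BaseModel):
#     __tablename__ = 'user'
#     id = db.Column(db.Integer, primary_key=True)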
| 19 | 42 | 0.710526 | 66 | 0.434211 | 0 | 0 | 0 | 0 | 0 | 0 | 23 | 0.151316 |
32db89f97cc25f33ad056f8860c98d1fafd8baab
| 2,652 |
py
|
Python
|
chapt05/triangle.py
|
ohlogic/PythonOpenGLSuperBible4Glut
|
a0d01caaeb811002c191c28210268b5fcbb8b379
|
[
"MIT"
] | null | null | null |
chapt05/triangle.py
|
ohlogic/PythonOpenGLSuperBible4Glut
|
a0d01caaeb811002c191c28210268b5fcbb8b379
|
[
"MIT"
] | null | null | null |
chapt05/triangle.py
|
ohlogic/PythonOpenGLSuperBible4Glut
|
a0d01caaeb811002c191c28210268b5fcbb8b379
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# Demonstrates OpenGL color triangle
# Ben Smith
# [email protected]
#
# based heavily on ccube.cpp
# OpenGL SuperBible
# Program by Richard S. Wright Jr.
import math
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
ESCAPE = b'\033'
xRot = 0.0
yRot = 0.0
def InitGL(Width, Height):
# Black background
glClearColor(0.0, 0.0, 0.0, 1.0)
# Called to draw scene
def DrawGLScene():
# Clear the window with current clearing color
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
# Enable smooth shading
glShadeModel(GL_SMOOTH)
# Draw the triangle
glBegin(GL_TRIANGLES)
# Red Apex
glColor3ub(255,0,0)
glVertex3f(0.0,200.0,0.0)
# Green on the right bottom corner
glColor3ub(0,255,0)
glVertex3f(200.0,-70.0,0.0)
# Blue on the left bottom corner
glColor3ub(0,0,255)
glVertex3f(-200.0, -70.0, 0.0)
glEnd()
glutSwapBuffers()
def ReSizeGLScene(w, h):
# Prevent a divide by zero
if(h == 0):
h = 1
# Set Viewport to window dimensions
glViewport(0, 0, w, h)
# Reset coordinate system
glLoadIdentity()
# Window is higher than wide
if w <= h:
windowHeight = 250.0 * h / w
windowWidth = 250.0
else:
#window wider than high
windowWidth = 250.0 * w/h
windowHeight = 250.0
# Set the clipping volume
glOrtho(-windowWidth, windowWidth, -windowHeight, windowHeight, 1.0, -1.0)
def keyPressed(key, x, y):
if key == ESCAPE:
glutDestroyWindow(window)
sys.exit()
# Main program entry point
if __name__ == '__main__':
glutInit()
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_ALPHA | GLUT_DEPTH)
glutInitWindowSize(640, 480)
glutInitWindowPosition(0, 0)
window = glutCreateWindow("RGB Triangle")
glutDisplayFunc(DrawGLScene)
# Uncomment this line to get full screen.
#glutFullScreen()
#glutIdleFunc(DrawGLScene)
#glutTimerFunc( int(1.0/60.0), update, 0)
glutReshapeFunc(ReSizeGLScene)
glutKeyboardFunc(keyPressed)
#glutSpecialFunc (specialkeyPressed);
# Initialize our window.
InitGL(640, 480)
# Start Event Processing Engine
glutMainLoop()
| 21.737705 | 83 | 0.562217 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 851 | 0.32089 |
32e013bad1fb65c5a409199a8b804f1d0f72e07c
| 1,379 |
py
|
Python
|
sdpremote/utils/user.py
|
gudn/sdpremote
|
431234420ea1e0c752432eac6000c11a75851375
|
[
"MIT"
] | null | null | null |
sdpremote/utils/user.py
|
gudn/sdpremote
|
431234420ea1e0c752432eac6000c11a75851375
|
[
"MIT"
] | null | null | null |
sdpremote/utils/user.py
|
gudn/sdpremote
|
431234420ea1e0c752432eac6000c11a75851375
|
[
"MIT"
] | null | null | null |
import binascii
from base64 import b64decode
from typing import Optional
from fastapi import Depends, Header, status
from fastapi.exceptions import HTTPException
def _user_header(authorization: Optional[str] = Header(None)) -> str:
if not authorization:
raise HTTPException(status.HTTP_401_UNAUTHORIZED)
splitted = authorization.split()
if len(splitted) != 2:
raise HTTPException(
status.HTTP_401_UNAUTHORIZED,
'only support basic authz schema',
)
schema, value = splitted
if schema != 'Basic':
raise HTTPException(
status.HTTP_401_UNAUTHORIZED,
'only support basic authz schema',
)
try:
value = b64decode(value).decode()
except binascii.Error:
raise HTTPException(
status.HTTP_401_UNAUTHORIZED,
'unable to decode value',
)
splitted = value.split(':')
if len(splitted) != 2:
raise HTTPException(status.HTTP_401_UNAUTHORIZED, 'invalid value')
user = splitted[0]
if not user:
raise HTTPException(status.HTTP_401_UNAUTHORIZED, 'invalid value')
return user
def user(
user: Optional[str] = None,
login: str = Depends(_user_header),
) -> str:
if user is not None and user != login:
raise HTTPException(status.HTTP_403_FORBIDDEN)
return login
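# Usage sketch (not part of the original module): how the `user` dependency could
# be wired into a route. The app and path below are placeholders.
# Clients send:  Authorization: Basic base64("<login>:<password>")
#
# from fastapi import FastAPI
#
# app = FastAPI()
#
# @app.get('/repos/{user}')
# async def list_repos(login: str = Depends(user)):
#     # `login` is the Basic-auth user name; `user` raises 403 when it does not
#     # match the `{user}` path parameter.
#     return {'login': login}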
| 26.018868 | 74 | 0.649021 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 130 | 0.094271 |
32e2062c20d3f7d54552e963b99e3b7f219ffa2e
| 19,175 |
py
|
Python
|
ScreenTrainer.py
|
ZihaoChen0319/CMB-Segmentation
|
99c5788baacc280ca5dbe02f3e18403e399fb238
|
[
"Apache-2.0"
] | null | null | null |
ScreenTrainer.py
|
ZihaoChen0319/CMB-Segmentation
|
99c5788baacc280ca5dbe02f3e18403e399fb238
|
[
"Apache-2.0"
] | null | null | null |
ScreenTrainer.py
|
ZihaoChen0319/CMB-Segmentation
|
99c5788baacc280ca5dbe02f3e18403e399fb238
|
[
"Apache-2.0"
] | null | null | null |
import torch.nn as nn
import os
import torch.optim as optim
from tqdm import tqdm
import numpy as np
import torch
import torch.nn.functional as nnf
import SimpleITK as sitk
import json
from scipy import ndimage
import medpy.io as mio
from Utils import find_binary_object
from MyDataloader import get_train_cases, get_cmbdataloader
from MyNetwork import ScreenNet
from MyLoss import FocalLoss
from PairwiseMeasures_modified import PairwiseMeasures
class ScreenTrainer(nn.Module):
def __init__(self, data_path, model_save_path, dataset_path, device='cuda', all_epoch=50,
fold=0, bbox=(20, 20, 16), batch_size=32, loss='ce',
optimizer='sgd', init_lr=1e-3, decay_exponent=0.9, config=None, if_test=False,
random_negatives=1e5, aug_num=10, add_fp=False,
resample_num=(10000, 10000, 10000), modality=('T1', 'T2', 'T2S')):
"""
Trainer of the Screening Network.
"""
super(ScreenTrainer, self).__init__()
self.bbox = bbox
self.batch_size = batch_size
self.init_lr = init_lr
self.decay_exponent = decay_exponent
self.all_epoch = all_epoch
self.config = config
self.resample_num = resample_num
self.modality = modality
self.aug_num = aug_num
self.fold = fold
self.random_negatives = random_negatives
# path define
self.data_path = data_path
self.dataset_path = dataset_path
self.model_name = model_save_path.split('/')[-2]
self.model_save_path = model_save_path + 'fold_%d/' % fold
if not os.path.exists(self.model_save_path):
os.makedirs(self.model_save_path)
# device
self.device = device
# load division of data
if os.path.exists(dataset_path + 'fold_division.json'):
with open(dataset_path + 'fold_division.json', mode='r') as f:
splits = json.load(f)
self.train_list_sub = splits[str(fold)]['train']
self.val_list_sub = splits[str(fold)]['val']
else:
self.train_list_sub = []
self.val_list_sub = []
print('Data division is empty!')
# training and validation samples
if not if_test:
self.dataset_name = 'fold_%d/bbox-%d-%d-%d_neg-%d_aug-%d/' % \
(fold, self.bbox[0], self.bbox[1], self.bbox[2], random_negatives, aug_num)
if not os.path.exists(dataset_path + self.dataset_name):
os.makedirs(dataset_path + self.dataset_name)
# load or generate the training samples
if os.path.exists(dataset_path + self.dataset_name + 'pos.json'):
with open(dataset_path + self.dataset_name + 'pos.json', mode='r') as f:
self.train_cases_pos = json.load(f)
if os.path.exists(dataset_path + self.dataset_name + 'neg.json'):
with open(dataset_path + self.dataset_name + 'neg.json', mode='r') as f:
self.train_cases_neg = json.load(f)
else:
self.train_cases_pos, self.train_cases_neg = get_train_cases(
data_path=self.data_path, train_list=self.train_list_sub, bbox=self.bbox, seed=2021,
if_translation=True, random_negatives=random_negatives, aug_num=aug_num)
with open(dataset_path + self.dataset_name + 'pos.json', mode='w') as f:
json.dump(self.train_cases_pos, f)
with open(dataset_path + self.dataset_name + 'neg.json', mode='w') as f:
json.dump(self.train_cases_neg, f)
# load false positive samples
self.train_cases_fp = []
if add_fp:
if os.path.exists(dataset_path + 'fold_%d/fp_%s_current.json' % (self.fold, self.model_name)):
with open(dataset_path + 'fold_%d/fp_%s_current.json' % (self.fold, self.model_name), mode='r') as f:
self.train_cases_fp = json.load(f)
print('Dataset: pos %d, neg %d, fp %d' %
(len(self.train_cases_pos), len(self.train_cases_neg), len(self.train_cases_fp)))
else:
self.train_cases_fp = []
self.train_cases_pos = []
self.train_cases_neg = []
# model
self.model = ScreenNet(is_fc=False, in_channel=len(modality), num_class=2)
self.model.to(self.device)
# loss function
if loss == 'ce':
self.loss_fc = nn.CrossEntropyLoss()
elif loss == 'weighted ce':
self.loss_fc = nn.CrossEntropyLoss(weight=torch.tensor([0.25, 0.75], device=device))
elif loss == 'focal loss':
self.loss_fc = FocalLoss(alpha=0.25, gamma=2, num_classes=2)
else:
raise ValueError('No such loss function')
# optimizer
if optimizer == 'sgd':
self.optimizer = optim.SGD(self.model.parameters(), lr=init_lr, momentum=0.99, nesterov=True)
elif optimizer == 'adam':
self.optimizer = optim.Adam(self.model.parameters(), lr=init_lr)
else:
raise ValueError('No such optimizer')
self.epoch = 1
self.lr = init_lr
self.train_metric = [0] * 3
self.test_metric = [0] * 4
def train_epoch(self):
self.model.train()
train_accum = [0] * 6
train_cases_fp = self.train_cases_fp.copy()
train_cases_pos = self.train_cases_pos.copy()
train_cases_neg = self.train_cases_neg.copy()
# randomly choose training samples, ensuring that the number of samples is fixed under different conditions
if len(self.resample_num):
train_cases_pos = np.random.choice(train_cases_pos, size=self.resample_num[0]).tolist()
train_cases_neg = np.random.choice(train_cases_neg, size=self.resample_num[1]).tolist()
if len(train_cases_fp):
train_cases_fp = np.random.choice(train_cases_fp, size=self.resample_num[2]).tolist()
data_list = train_cases_pos + train_cases_neg + train_cases_fp
dataloader = get_cmbdataloader(
data_path=self.data_path,
dataset_index=data_list,
bbox=self.bbox,
batch_size=self.batch_size,
shuffle=True,
pin_memory=True,
num_workers=2,
modality=self.modality
)
dataloader = tqdm(dataloader)
for img_batch, label_batch in dataloader:
img_batch = img_batch.to(self.device).float()
label_batch = label_batch.to(self.device)
pred_batch = self.model(img_batch)
loss = self.loss_fc(pred_batch, label_batch)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
y_hat = pred_batch.argmax(axis=1).detach().cpu().numpy()
y = label_batch.detach().cpu().numpy()
train_accum[0] += img_batch.shape[0]
train_accum[1] += loss.detach().cpu().numpy() * img_batch.shape[0]
train_accum[2] += np.sum(y_hat == y) # acc
train_accum[3] += np.sum((y_hat == 1) & (y == 1)) # tp
train_accum[4] += np.sum((y_hat == 1) & (y != 1)) # fp
train_accum[5] += np.sum((y_hat != 1) & (y == 1)) # fn
self.train_metric[0] = train_accum[1] / train_accum[0] # loss
self.train_metric[1] = train_accum[2] / train_accum[0] # acc
self.train_metric[2] = 2 * train_accum[3] / np.clip(2 * train_accum[3] + train_accum[4] + train_accum[5],
a_min=1e-5, a_max=1e10) # f1
dataloader.set_description('Epoch: %d, ' % self.epoch +
'train loss %.4f, ' % self.train_metric[0] +
'train acc %.4f, ' % self.train_metric[1] +
'train f1 %.4f, ' % self.train_metric[2])
return self.train_metric
def val_epoch(self):
self.model.eval()
test_accum = [0] * 6
for pat in self.val_list_sub:
data_list = []
for mod in self.modality:
data_list.append(np.load(self.data_path + '%s/%s_space-T2S_%s.npy' % (pat, pat, mod)))
img = np.stack(data_list, axis=0)
cmb, h = mio.load(self.data_path + '%s/%s_space-T2S_CMB.nii.gz' % (pat, pat))
pred, pred_post, n_obj, pred_init_space, candidates_list, score_init_space = \
self.inference(img, patch_size=(160, 160, 80), thresh=0.1, size=2, if_nms=True)
pe = PairwiseMeasures(ref_img=cmb, seg_img=pred_init_space, analysis='microbleeds',
measures=('f1_score', 'tp', 'fn', 'fp'),
connectivity=3, pixdim=h.get_voxel_spacing(), empty=True,
threshold=0.5, thresh_assign=3)
tp, fn, fp = pe.m_dict['tp'][0](), pe.m_dict['fn'][0](), pe.m_dict['fp'][0]()
f1 = pe.m_dict['f1_score'][0]()
test_accum[0] += 1
test_accum[1] += tp
test_accum[2] += fn
test_accum[3] += fp
test_accum[4] += f1 if np.sum(cmb) else 0
test_accum[5] += 1 if np.sum(cmb) else 0
print('%s: TP %d, FN %d, FP %d, F1 %.4f' % (pat, tp, fn, fp, f1))
self.test_metric[0] = test_accum[1] # TP
self.test_metric[1] = test_accum[2] # FN
self.test_metric[2] = test_accum[3] / test_accum[0] # avg FP
self.test_metric[3] = test_accum[4] / test_accum[5] # avg F1
print('Epoch: %d, TP %d, FN %d, avg FP %.4f, avg F1 %.4f' %
(self.epoch, self.test_metric[0], self.test_metric[1], self.test_metric[2], self.test_metric[3]))
return self.test_metric
def get_fp(self, thresh=0.1, if_aug=False):
"""Obtain false positives by applying initial model on training data"""
print(' --- Obtaining FP --- ')
self.model.eval()
if if_aug:
if os.path.exists(self.dataset_path + 'fold_%d/fp_%s_current_aug.json' % (self.fold, self.model_name)):
with open(self.dataset_path + 'fold_%d/fp_%s_current_aug.json' % (self.fold, self.model_name), mode='r') as f:
fp = json.load(f)
with open(self.dataset_path + 'fold_%d/fp_%s_epoch-%d_aug.json' % (self.fold, self.model_name, self.epoch), mode='w') as f:
json.dump(fp, f)
else:
if os.path.exists(self.dataset_path + 'fold_%d/fp_%s_current.json' % (self.fold, self.model_name)):
with open(self.dataset_path + 'fold_%d/fp_%s_current.json' % (self.fold, self.model_name), mode='r') as f:
fp = json.load(f)
with open(self.dataset_path + 'fold_%d/fp_%s_epoch-%d.json' % (self.fold, self.model_name, self.epoch), mode='w') as f:
json.dump(fp, f)
aug_list = []
if if_aug:
for pat in self.train_list_sub:
for i in range(self.aug_num):
aug_list.append(pat + '_aug%d' % i)
fp_list = self.train_cases_fp if len(self.train_cases_fp) else []
loader = tqdm(self.train_list_sub + aug_list)
for pat in loader:
data_list = []
for mod in self.modality:
data_list.append(np.load(self.data_path + '%s/%s_space-T2S_%s.npy' % (pat, pat, mod)))
cmb = np.load(self.data_path + '%s/%s_space-T2S_CMB.npy' % (pat, pat), mmap_mode='r')
shape = cmb.shape
img = np.stack(data_list, axis=0)
pred, pred_post, n_obj, pred_init_space, candidates_list, score_init_space = \
self.inference(img, patch_size=(160, 160, 80), thresh=thresh, size=4)
for (x, y, z) in candidates_list:
if x > shape[0] - self.bbox[0] // 2 or x < self.bbox[0] // 2 or \
y > shape[1] - self.bbox[1] // 2 or y < self.bbox[1] // 2 or \
z > shape[2] - self.bbox[2] // 2 or z < self.bbox[2] // 2:
continue
if np.sum(cmb[x - 1:x + 1, y - 1:y + 1, z - 1:z + 1]):
sample = {'pat': pat, 'start': (x, y, z), 'have cmb': 1}
else:
sample = {'pat': pat, 'start': (x, y, z), 'have cmb': 0}
fp_list.append(sample)
loader.set_description('FP num: %d' % len(fp_list))
self.train_cases_fp = fp_list
with open(self.dataset_path + 'fold_%d/fp_%s_current.json' % (self.fold, self.model_name), mode='w') as f:
json.dump(fp_list, f)
print(' --- Finish, FP num: %d ---' % len(fp_list))
return fp_list
def adjust_lr(self):
"""Adjust the learning rate following ‘poly’ policy"""
self.lr = self.init_lr * (1 - self.epoch / self.all_epoch) ** self.decay_exponent
for param_group in self.optimizer.param_groups:
param_group['lr'] = self.lr
return self.lr
def save_model(self, force=False):
"""Save the model every epoch(current) and every 5 epochs(epoch_xx)"""
state = {
'epoch': self.epoch,
'state_dict': self.model.state_dict(),
'config': self.config,
}
torch.save(state, self.model_save_path + 'current.pth.tar')
if self.epoch % 5 == 0 or force:
torch.save(state, self.model_save_path + 'epoch_%d_%d_%d_%.4f_%.4f.pth.tar' %
(self.epoch, self.test_metric[0], self.test_metric[1], self.test_metric[2], self.test_metric[3]))
def load_model(self, model_name='current', silent=False):
all_saved_models = os.listdir(self.model_save_path)
matched_model = [model for model in all_saved_models if model.startswith(model_name)]
if len(matched_model) == 1:
checkpoint = torch.load(self.model_save_path + matched_model[0], map_location={'cuda:0': self.device})
self.epoch = checkpoint['epoch'] + 1
self.model.load_state_dict(checkpoint['state_dict'])
self.model.to(self.device)
# self.config = checkpoint['config']
self.adjust_lr()
elif len(matched_model) > 1:
raise ValueError('Too many matched models!')
if not silent:
print('Screen model: %s, device: %s, epoch: %d'
% (self.model_save_path + model_name, self.device, self.epoch))
def inference(self, data: np.ndarray, patch_size=None, thresh=0.5, size=2, if_nms=True):
if len(data.shape) == 3:
data = np.expand_dims(data, axis=0)
shape = [data.shape[1], data.shape[2], data.shape[3]]
# compute the output size and the patches location, exactly corresponding to the architecture of ScreenNet
if patch_size is None:
patch_size = shape
out_size = [(shape[0] - 8) // 2 - 5, (shape[1] - 8) // 2 - 5, (shape[2] - 4) // 2 - 5]
out_patch_size = [(patch_size[0] - 8) // 2 - 5, (patch_size[1] - 8) // 2 - 5, (patch_size[2] - 4) // 2 - 5]
# print(data.shape, out_size, out_patch_size)
num_xyz = [out_size[i] // out_patch_size[i] for i in range(3)]
overlap_xyz = [((num_xyz[i] + 1) * out_patch_size[i] - out_size[i]) // num_xyz[i] for i in range(3)]
x_starts = [(out_patch_size[0] - overlap_xyz[0]) * n for n in range(num_xyz[0])]
x_starts.append(out_size[0] - out_patch_size[0])
y_starts = [(out_patch_size[1] - overlap_xyz[1]) * n for n in range(num_xyz[1])]
y_starts.append(out_size[1] - out_patch_size[1])
z_starts = [(out_patch_size[2] - overlap_xyz[2]) * n for n in range(num_xyz[2])]
z_starts.append(out_size[2] - out_patch_size[2])
out_starts = [(x, y, z) for z in z_starts for y in y_starts for x in x_starts]
starts = [(2*x, 2*y, 2*z) for (x, y, z) in out_starts]
# inference by sliding window strategy
pred = np.zeros(out_size)
overlap = np.zeros(out_size)
data = torch.tensor(data).float()
for st, out_st in zip(starts, out_starts):
data_patch = data[:, st[0]:st[0] + patch_size[0], st[1]:st[1] + patch_size[1], st[2]:st[2] + patch_size[2]]
data_patch = data_patch.to(self.device).unsqueeze(0)
pred_patch = self.model(data_patch).detach()
pred_patch = nnf.softmax(pred_patch, dim=1).squeeze()[1].detach().cpu().numpy()
pred[out_st[0]:out_st[0] + out_patch_size[0],
out_st[1]:out_st[1] + out_patch_size[1],
out_st[2]:out_st[2] + out_patch_size[2]] += pred_patch
overlap[out_st[0]:out_st[0] + out_patch_size[0],
out_st[1]:out_st[1] + out_patch_size[1],
out_st[2]:out_st[2] + out_patch_size[2]] += 1
pred /= overlap
pred_th = pred.copy()
pred_th[pred_th < thresh] = 0
if if_nms:
pred_itk = sitk.GetImageFromArray(pred_th)
pred_itk = sitk.RegionalMaxima(pred_itk)
pred_post = sitk.GetArrayFromImage(pred_itk)
labeled, n_obj = find_binary_object(pred_post)
maxima_list = ndimage.center_of_mass(labeled, labeled, range(1, n_obj+1))
else:
pred_post = pred_th.copy()
pred_post[pred_post >= thresh] = 1
labeled, n_obj = find_binary_object(pred_post)
maxima_list = ndimage.center_of_mass(labeled, labeled, range(1, n_obj + 1))
# find candidates
score_init_space = np.zeros(shape)
score_init_space[9:pred.shape[0] * 2 + 9, 9:pred.shape[1] * 2 + 9, 7:pred.shape[2] * 2 + 7] = \
nnf.interpolate(torch.tensor(pred, dtype=torch.float32).unsqueeze(0).unsqueeze(0),
scale_factor=2, mode='trilinear', align_corners=False).squeeze().numpy()
# map the results back to input volume space
pred_init_space = np.zeros(shape)
candidates_list = []
for (x, y, z) in maxima_list:
x = int(2 * x + 9)
y = int(2 * y + 9)
z = int(2 * z + 7)
if x < 0 or x >= shape[0] \
or y < 0 or y >= shape[1] \
or z < 0 or z >= shape[2]:
continue
pred_init_space[max(x-size//2, 0):min(x+size//2, shape[0]),
max(y-size//2, 0):min(y+size//2, shape[1]),
max(z-size//2, 0):min(z+size//2, shape[2])] = 1
candidates_list.append((x, y, z))
return pred, pred_post, n_obj, pred_init_space, candidates_list, score_init_space
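# Minimal driver sketch (not part of the original file): how the trainer is
# typically stepped. All paths, the fold index and the epoch count below are
# placeholders; the actual training script in this repository may differ.
#
# if __name__ == '__main__':
#     trainer = ScreenTrainer(data_path='./data/', model_save_path='./models/screen/',
#                             dataset_path='./datasets/', fold=0, all_epoch=50)
#     trainer.load_model('current', silent=True)   # resume from checkpoint if present
#     while trainer.epoch <= trainer.all_epoch:
#         trainer.train_epoch()
#         trainer.val_epoch()
#         trainer.save_model()
#         trainer.epoch += 1
#         trainer.adjust_lr()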
| 50.460526 | 140 | 0.557445 | 18,702 | 0.975129 | 0 | 0 | 0 | 0 | 0 | 0 | 2,163 | 0.11278 |
32e36a60281e09d72c79ad1807ea74035aa73e60
| 534 |
py
|
Python
|
examples/earthquakes/main.py
|
admariner/beneath
|
a6aa2c220e4a646be792379528ae673f4bef440b
|
[
"MIT"
] | 65 |
2021-04-27T13:13:09.000Z
|
2022-01-24T00:26:06.000Z
|
examples/earthquakes/main.py
|
admariner/beneath
|
a6aa2c220e4a646be792379528ae673f4bef440b
|
[
"MIT"
] | 22 |
2021-10-06T10:30:40.000Z
|
2021-12-10T11:36:55.000Z
|
examples/earthquakes/main.py
|
admariner/beneath
|
a6aa2c220e4a646be792379528ae673f4bef440b
|
[
"MIT"
] | 4 |
2021-04-24T15:29:51.000Z
|
2022-03-30T16:20:12.000Z
|
import beneath
from generators import earthquakes
with open("schemas/earthquake.graphql", "r") as file:
EARTHQUAKES_SCHEMA = file.read()
if __name__ == "__main__":
p = beneath.Pipeline(parse_args=True)
p.description = "Continually pings the USGS earthquake API"
earthquakes = p.generate(earthquakes.generate_earthquakes)
p.write_table(
earthquakes,
"earthquakes",
schema=EARTHQUAKES_SCHEMA,
description="Earthquakes fetched from https://earthquake.usgs.gov/",
)
p.main()
| 28.105263 | 76 | 0.700375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 152 | 0.284644 |
32e3ce811bff9ec736c02ce8188ebe9e69d6a483
| 5,073 |
py
|
Python
|
examples/tf_vision/tensorflow_saved_model_service.py
|
siddharthgee/multi-model-server
|
bd795b402330b491edd5d2a235b8b8c2ef9fcb58
|
[
"Apache-2.0"
] | null | null | null |
examples/tf_vision/tensorflow_saved_model_service.py
|
siddharthgee/multi-model-server
|
bd795b402330b491edd5d2a235b8b8c2ef9fcb58
|
[
"Apache-2.0"
] | null | null | null |
examples/tf_vision/tensorflow_saved_model_service.py
|
siddharthgee/multi-model-server
|
bd795b402330b491edd5d2a235b8b8c2ef9fcb58
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
# http://www.apache.org/licenses/LICENSE-2.0
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
TensorflowSavedModelService defines an API for running a tensorflow saved model
"""
import json
import os
import tensorflow as tf
from model_handler import ModelHandler
class TensorflowSavedModelService(ModelHandler):
"""
TensorflowSavedModelService defines the fundamental loading model and inference
operations when serving a TF saved model. This is a base class and needs to be
inherited.
"""
def __init__(self):
super(TensorflowSavedModelService, self).__init__()
self.predictor = None
self.labels = None
self.signature = None
self.epoch = 0
# noinspection PyMethodMayBeStatic
def get_model_files_prefix(self, context):
return context.manifest["model"]["modelName"]
def initialize(self, context):
"""
Initialize model. This will be called during model loading time
:param context: Initial context contains model server system properties.
:return:
"""
super(TensorflowSavedModelService, self).initialize(context)
properties = context.system_properties
model_dir = properties.get("model_dir")
signature_file_path = os.path.join(model_dir, "signature.json")
if not os.path.isfile(signature_file_path):
raise RuntimeError("Missing signature.json file.")
with open(signature_file_path) as f:
self.signature = json.load(f)
# Parse signature.json to collect the declared input names and shapes
data_names = []
data_shapes = []
for input_data in self.signature["inputs"]:
data_name = input_data["data_name"]
data_shape = input_data["data_shape"]
# Replace 0 entry in data shape with 1 for binding executor.
for idx in range(len(data_shape)):
if data_shape[idx] == 0:
data_shape[idx] = 1
data_names.append(data_name)
data_shapes.append((data_name, tuple(data_shape)))
self.predictor = tf.contrib.predictor.from_saved_model(model_dir)
def inference(self, model_input):
"""
Internal inference methods for TF - saved model. Run forward computation and
return output.
:param model_input: list of dict of {name : numpy_array}
Batch of preprocessed inputs in tensor dict.
:return: list of dict of {name: numpy_array}
Batch of inference output tensor dict
"""
if self.error is not None:
return None
# Check input shape
check_input_shape(model_input, self.signature)
#Restricting to one request which contains the whole batch. Remove this line if adding custom batching support
model_input = model_input[0]
results = self.predictor(model_input)
return results
def check_input_shape(inputs, signature):
"""
Check input data shape consistency with signature.
Parameters
----------
inputs : List of dicts
Input data in this format [{input_name: input_tensor, input2_name: input2_tensor}, {...}]
signature : dict
Dictionary containing model signature.
"""
assert isinstance(inputs, list), 'Input data must be a list.'
for input_dict in inputs:
assert isinstance(input_dict, dict), 'Each request must be dict of input_name: input_tensor.'
assert len(input_dict) == len(signature["inputs"]), \
"Input number mismatches with " \
"signature. %d expected but got %d." \
% (len(signature['inputs']), len(input_dict))
for tensor_name, sig_input in zip(input_dict, signature["inputs"]):
assert len(input_dict[tensor_name].shape) == len(sig_input["data_shape"]), \
'Shape dimension of input %s mismatches with ' \
'signature. %d expected but got %d.' \
% (sig_input['data_name'],
len(sig_input['data_shape']),
len(input_dict[tensor_name].shape))
for idx in range(len(input_dict[tensor_name].shape)):
if idx != 0 and sig_input['data_shape'][idx] != 0:
assert sig_input['data_shape'][idx] == input_dict[tensor_name].shape[idx], \
'Input %s has different shape with ' \
'signature. %s expected but got %s.' \
% (sig_input['data_name'], sig_input['data_shape'],
input_dict[tensor_name].shape)
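# Illustrative self-check (not part of the original handler): demonstrates the batch
# layout check_input_shape() expects, using a made-up single-input signature. It only
# runs when this file is executed directly.
if __name__ == "__main__":
    import numpy as np
    demo_signature = {"inputs": [{"data_name": "image", "data_shape": [1, 224, 224, 3]}]}
    demo_batch = [{"image": np.zeros((1, 224, 224, 3), dtype="float32")}]
    check_input_shape(demo_batch, demo_signature)  # a mismatch would raise AssertionError
    print("check_input_shape demo passed")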
| 38.431818 | 118 | 0.640647 | 2,649 | 0.522176 | 0 | 0 | 0 | 0 | 0 | 0 | 2,441 | 0.481175 |
32e4f05624819cc83857abc3b6af4086f2c2a88e
| 167 |
py
|
Python
|
setup.py
|
kimballa/arduino-dbg
|
639d73b6d96996218cf9aafde52f3683c9d93775
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
kimballa/arduino-dbg
|
639d73b6d96996218cf9aafde52f3683c9d93775
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
kimballa/arduino-dbg
|
639d73b6d96996218cf9aafde52f3683c9d93775
|
[
"BSD-3-Clause"
] | null | null | null |
# Minimal setup.py
#
# Enables installing requirements as declared in setup.cfg.
# From this directory, run:
# pip install .
from setuptools import setup
setup()
| 18.555556 | 59 | 0.736527 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 124 | 0.742515 |
32e63e4af47da5e138ff28bb64adb55087df265e
| 7,113 |
py
|
Python
|
apps/etl/models.py
|
diudiu/featurefactory
|
ee02ad9e3ea66e2eeafe6e11859801f0420c7d9e
|
[
"MIT"
] | null | null | null |
apps/etl/models.py
|
diudiu/featurefactory
|
ee02ad9e3ea66e2eeafe6e11859801f0420c7d9e
|
[
"MIT"
] | null | null | null |
apps/etl/models.py
|
diudiu/featurefactory
|
ee02ad9e3ea66e2eeafe6e11859801f0420c7d9e
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
"""
common models
"""
from django.db import models
from apps.common.models import BaseModel
from apps.datasource.models import DsInterfaceInfo
class FeatureType(models.Model):
id = models.AutoField(u'主键', primary_key=True)
feature_type_desc = models.CharField(u'特征类型解释', max_length=2048)
is_delete = models.BooleanField(u'是否逻辑删除', default=False)
class Meta:
db_table = 'fic_feature_type'
verbose_name = u'特征类型配置表'
verbose_name_plural = u'特征类型配置表'
def __unicode__(self):
return "%s" % self.feature_type_desc
class FeatureCardType(models.Model):
id = models.AutoField(u'主键', primary_key=True)
feature_type_desc = models.CharField(u'特征评分卡类型解释', max_length=2048)
is_delete = models.BooleanField(u'是否逻辑删除', default=False)
class Meta:
db_table = 'fic_feature_card_type'
verbose_name = u'特征评分卡类型配置表'
verbose_name_plural = u'特征评分卡类型配置表'
def __unicode__(self):
return "%s" % self.feature_type_desc
class FeatureRuleType(models.Model):
id = models.AutoField(u'主键', primary_key=True)
feature_type_desc = models.CharField(u'特征规则类型解释', max_length=2048)
is_delete = models.BooleanField(u'是否逻辑删除', default=False)
class Meta:
db_table = 'fic_feature_rule_type'
verbose_name = u'特征规则类型配置表'
verbose_name_plural = u'特征规则类型配置表'
def __unicode__(self):
return "%s" % self.feature_type_desc
class FeatureConf(BaseModel):
id = models.AutoField(u'主键', primary_key=True)
feature_name = models.CharField(u'特征字段名', max_length=64)
feature_name_cn = models.CharField(u'特征中文名', max_length=128)
collect_type = models.CharField(u'数据获取方式', max_length=64, null=True)
data_identity = models.CharField(u'参数字段名', max_length=2048, null=True)
feature_type = models.ForeignKey(FeatureType, db_column="feature_type", null=True)
feature_rule_type = models.ForeignKey(FeatureRuleType, db_column="feature_rule_type", null=True)
feature_card_type = models.ForeignKey(FeatureCardType, db_column="feature_card_type", null=True)
feature_select_value = models.CharField(u'特征可选值', max_length=2048, null=True)
class Meta:
db_table = 'fic_feature_common_conf'
verbose_name = u'一般特征处理逻辑配置表'
verbose_name_plural = u'一般特征处理逻辑配置表'
class FeatureShuntConf(BaseModel):
id = models.AutoField(u'主键', primary_key=True)
feature_name = models.CharField(u'特征字段名', max_length=64)
shunt_key = models.CharField(u'分流依据字段名称', max_length=64)
shunt_type = models.CharField(u'分流逻辑名', max_length=256)
shunt_value = models.CharField(u'分流比较值', max_length=256)
data_identity = models.CharField(u'原始数据标识', max_length=64)
class Meta:
db_table = 'fic_feature_shunt_conf'
verbose_name = u'分流处理逻辑配置表'
verbose_name_plural = u'分流处理逻辑配置表'
class FeatureRelevanceConf(BaseModel):
id = models.AutoField(u'主键', primary_key=True)
feature_name = models.CharField(u'特征字段名', max_length=64)
depend_feature = models.CharField(u'此特征依赖的其他特征名', max_length=64, null=True)
data_identity = models.CharField(u'', max_length=64)
class Meta:
db_table = 'fic_feature_relevance_conf'
verbose_name = u'依赖关系处理逻辑配置表'
verbose_name_plural = u'依赖关系处理逻辑配置表'
class PreFieldInfo(BaseModel):
id = models.AutoField(u'主键', primary_key=True)
field_name = models.CharField(u'字段名称', max_length=64)
field_name_cn = models.CharField(u'中文名称', max_length=64)
source = models.CharField(u'数据来源', max_length=64, null=True)
path = models.CharField(u'JsonPath路径', max_length=256, null=True)
class Meta:
db_table = 'fic_pre_field_info'
verbose_name = u'预处理字段表'
verbose_name_plural = u'预处理字段表'
class FeatureProcess(models.Model):
id = models.AutoField(u'主键', primary_key=True)
feature_name = models.CharField(u'特征字段名', max_length=100, unique=True)
feature_data_type = models.CharField(u'特征字段类型', max_length=50)
default_value = models.CharField(u'特征缺省值', max_length=100)
json_path_list = models.TextField(u'特征处理流程', null=True)
f_map_and_filter_chain = models.CharField(u'特征处理前置map链', max_length=2048, null=True)
reduce_chain = models.CharField(u'特征处理reduce链', max_length=2048, null=True)
l_map_and_filter_chain = models.CharField(u'特征处理后置map链', max_length=2048, null=True)
class Meta:
db_table = 'fic_feature_process_info'
verbose_name = u'特征计算方式配置表'
verbose_name_plural = u'特征计算方式配置表'
class LoanAgencyModel(BaseModel):
""" 贷款经理信息 """
source_name = models.CharField(u'来源网站名称', max_length=128, null=True, blank=True)
source_url = models.CharField(u'信息来源url', max_length=1024, null=True, blank=True)
source_url_mapping = models.CharField(u'映射url', max_length=255, null=True, blank=True)
name = models.CharField(u'联系人名称', max_length=32, null=True, blank=True)
telephone = models.CharField(u'手机号', max_length=64, null=True, blank=True)
business_released_date = models.DateField(u'信息发布日期', null=True, blank=True)
father_type = models.CharField(u'分类', max_length=20, null=True, blank=True)
reservation_counts = models.IntegerField(u'预约次数', default=0)
qq = models.CharField(u'qq号', max_length=15, db_index=True, null=True)
view_counts = models.IntegerField(u'浏览次数', default=0)
child_type = models.CharField(u'子分类', max_length=128, null=True, blank=True)
company_name = models.CharField(u'公司名称', max_length=128, null=True, blank=True)
company_addr = models.CharField(u'公司地址', max_length=255, null=True, blank=True)
company_register_date = models.DateField(u'注册时间', null=True)
company_url = models.CharField(u'公司官网', max_length=128, null=True, blank=True)
publish_city = models.CharField(u'信息发布城市', max_length=50, null=True, blank=True)
from_which_id = models.CharField(u'原始记录的id', max_length=64, null=True, blank=True)
from_which_model = models.CharField(u'来自哪个模型', max_length=64, null=True, blank=True,
help_text=u'使用app_label:model_name的形式存储')
class Meta:
db_table = 'do_loan_manager_info'
verbose_name_plural = u'贷款经理信息'
verbose_name = u'贷款经理信息'
class P2PTelephoneModel(models.Model):
company_name = models.CharField(u'公司名称', max_length=64, null=True, blank=True)
telephone = models.CharField(u'客服电话', max_length=64, null=True, blank=True)
province = models.CharField(u'省份或直辖市名称', max_length=32, null=True, blank=True)
class Meta:
db_table = 'do_p2p_telephone'
class FuncLibSource(models.Model):
FUNC_TYPE_CHOICES = [
('M', u'map'),
('F', u'filter'),
('R', u'reduce'),
('A', u'assert'),
]
func_name = models.CharField(u'函数名', max_length=80, primary_key=True)
func_desc = models.TextField(u'函数描述', null=True)
func_type = models.CharField(u'函数类型', choices=FUNC_TYPE_CHOICES, default="M", max_length=10, db_index=True)
class Meta:
db_table = 'fic_func_lib'
verbose_name = u'函数库配置表'
verbose_name_plural = u'函数库配置表'
| 39.082418 | 111 | 0.707578 | 7,836 | 0.974506 | 0 | 0 | 0 | 0 | 0 | 0 | 2,099 | 0.261037 |
32e861d95e4d1e621303b5ebac3624de50614805
| 4,007 |
py
|
Python
|
mazegen/solver.py
|
alekratz/mazegen
|
2799a5cf790cec4bab94a147315cc8541c5efec7
|
[
"MIT"
] | null | null | null |
mazegen/solver.py
|
alekratz/mazegen
|
2799a5cf790cec4bab94a147315cc8541c5efec7
|
[
"MIT"
] | null | null | null |
mazegen/solver.py
|
alekratz/mazegen
|
2799a5cf790cec4bab94a147315cc8541c5efec7
|
[
"MIT"
] | null | null | null |
import random
from typing import Optional
from .grid import *
class Solver:
def __init__(self, grid: Grid):
self._grid = grid
self._backtrack = []
self._pos = (0, 0)
self._dir = None
self._backtracking = False
self._branches = {
self._pos: set(self.valid_cells().keys()),
}
# Add entrance and exit
self.grid.cells[0][0].remove_wall(Wall.NORTH)
self.grid.cells[self.grid.height - 1][self.grid.width - 1].remove_wall(
Wall.EAST
)
@property
def is_done(self) -> bool:
return self.goal == self.pos
@property
def goal(self):
return (self.grid.width - 1, self.grid.height - 1)
@property
def grid(self):
return self._grid
@property
def pos(self):
return self._pos
@property
def cell(self):
x, y = self.pos
return self.grid.cells[y][x]
@property
def backtracking(self) -> bool:
return self._backtracking
def valid_cells(self):
"Gets the cells that are available to move into."
cell = self.cell
return {w: n for w, n in self.cell.neighbors().items() if w not in cell.walls}
def move(self, wall: Wall):
assert wall in self.valid_cells()
x, y = self.pos
if wall == Wall.NORTH:
y -= 1
elif wall == Wall.SOUTH:
y += 1
elif wall == Wall.EAST:
x += 1
elif wall == Wall.WEST:
x -= 1
else:
assert False
# Add this motion to the backtrack list
self._backtrack.append(self.pos)
self._pos = (x, y)
def step(self):
if self.is_done:
return
valid_cells = self.valid_cells()
# Register this branch if there are multiple targets to go to
if len(valid_cells) > 1 and self.pos not in self._branches:
self._branches[self.pos] = set(valid_cells.keys())
# Also, if we have backtrack positions available, disable the cell that we just came
# from.
if self._backtrack:
x1, y1 = self.pos
x2, y2 = self._backtrack[-1]
diff = (x2 - x1, y2 - y1)
if diff == (-1, 0):
wall = Wall.WEST
elif diff == (1, 0):
wall = Wall.EAST
elif diff == (0, -1):
wall = Wall.NORTH
elif diff == (0, 1):
wall = Wall.SOUTH
else:
assert False
            # Dead-end cells are never registered in self._branches, and a
            # wall may already have been removed on an earlier visit, so
            # guard the lookup and use discard() instead of remove().
            if self.pos in self._branches:
                self._branches[self.pos].discard(wall)
if self.pos in self._branches and self._branches[self.pos]:
# Choose a direction to move if we're at a branch
self._backtracking = False
self._dir = random.choice(list(self._branches[self.pos]))
self._branches[self.pos].remove(self._dir)
if self.backtracking:
# Set up for backtracking, but there's no backtrack left.
if not self._backtrack:
self._backtracking = False
self.step()
else:
self._pos = self._backtrack.pop()
else:
if self._dir not in valid_cells:
# Can't move this direction, try these options in this order:
# * Choose a random direction on this branch if we are on a branch,
# * Start backtracking
if self.pos in self._branches and self._branches[self.pos]:
self._dir = random.choice(list(self._branches[self.pos]))
self._branches[self.pos].remove(self._dir)
else:
self._backtracking = True
# TODO : prevent stack overflow where we have no backtrack available
assert self._backtrack
self.step()
return
self.move(self._dir)
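# Possible usage (kept commented out: the Grid construction/generation API
# lives in mazegen.grid and is not shown here, so the width/height arguments
# below are an assumption based on the attributes used above):
# grid = Grid(10, 10)          # a fully generated maze
# solver = Solver(grid)
# while not solver.is_done:
#     solver.step()
# print("solved, reached", solver.pos)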
| 32.056 | 96 | 0.520839 | 3,942 | 0.983778 | 0 | 0 | 442 | 0.110307 | 0 | 0 | 587 | 0.146494 |
32e9f9206385a627a8ad3b33526b3f3d199fd0d3
| 78 |
py
|
Python
|
practice.py
|
dajimmy1120/AvatarGAN
|
be264914223490ee9c23e59ad5a414da1aef4824
|
[
"Apache-2.0"
] | null | null | null |
practice.py
|
dajimmy1120/AvatarGAN
|
be264914223490ee9c23e59ad5a414da1aef4824
|
[
"Apache-2.0"
] | null | null | null |
practice.py
|
dajimmy1120/AvatarGAN
|
be264914223490ee9c23e59ad5a414da1aef4824
|
[
"Apache-2.0"
] | null | null | null |
from keras_segmentation.pretrained import pspnet_101_voc12
pspnet_101_voc12()
| 26 | 58 | 0.897436 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
32ea368fa5ba2732d1c51618d8edfc516b6eb773
| 1,224 |
py
|
Python
|
example/RunModel/Abaqus_Model_Example/process_odb.py
|
volpatto/UQpy
|
acbe1d6e655e98917f56b324f019881ea9ccca82
|
[
"MIT"
] | null | null | null |
example/RunModel/Abaqus_Model_Example/process_odb.py
|
volpatto/UQpy
|
acbe1d6e655e98917f56b324f019881ea9ccca82
|
[
"MIT"
] | null | null | null |
example/RunModel/Abaqus_Model_Example/process_odb.py
|
volpatto/UQpy
|
acbe1d6e655e98917f56b324f019881ea9ccca82
|
[
"MIT"
] | null | null | null |
from odbAccess import *
from abaqusConstants import *
from textRepr import *
import timeit
import numpy as np
import os
import sys
import errno  # used below when guarding os.makedirs against a race condition
start_time = timeit.default_timer()
index = sys.argv[-1]
# print(index)
# index = float(index)
index = int(index)
# print(index)
odbFile = os.path.join(os.getcwd(), "single_element_simulation_" + str(index) + ".odb")
odb = openOdb(path=odbFile)
step1 = odb.steps.values()[0]
his_key = 'Element PART-1-1.1 Int Point 1 Section Point 1'
region = step1.historyRegions[his_key]
LE22 = region.historyOutputs['LE22'].data
S22 = region.historyOutputs['S22'].data
# t = np.array(LE22)[:, 0]
x = np.array(LE22)[:, 1]
y = np.array(S22)[:, 1]
fnm = os.path.join(os.getcwd(), 'Output', 'output_element_{0}.csv'.format(index))
if not os.path.exists(os.path.dirname(fnm)):
try:
os.makedirs(os.path.dirname(fnm))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
output_file = open(fnm, 'wb')
for k in range(len(x)):
output_file.write('%13.6e, %13.6e\n' % (x[k], y[k]))
output_file.close()
elapsed = timeit.default_timer() - start_time
print('Finished running odb_process_script. It took ' + str(elapsed) + ' s to run.')
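# Typical invocation (assumption: run through Abaqus' bundled Python so that
# odbAccess/abaqusConstants are importable; the trailing argument is the index):
#   abaqus python process_odb.py 3
# This reads single_element_simulation_3.odb and writes
# Output/output_element_3.csv containing LE22/S22 pairs.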
| 27.818182 | 87 | 0.684641 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 312 | 0.254902 |
32eaa0a294af2308ff208fed9c050fd370b31fec
| 8,526 |
py
|
Python
|
analysis_methods/shuff_time.py
|
gbrookshire/simulated_rhythmic_sampling
|
5c9ed507847a75dbe38d10d78b54441ae83f5831
|
[
"MIT"
] | null | null | null |
analysis_methods/shuff_time.py
|
gbrookshire/simulated_rhythmic_sampling
|
5c9ed507847a75dbe38d10d78b54441ae83f5831
|
[
"MIT"
] | null | null | null |
analysis_methods/shuff_time.py
|
gbrookshire/simulated_rhythmic_sampling
|
5c9ed507847a75dbe38d10d78b54441ae83f5831
|
[
"MIT"
] | null | null | null |
"""
Tools to perform analyses by shuffling in time, as in Landau & Fries (2012) and
Fiebelkorn et al. (2013).
"""
import os
import yaml
import numpy as np
import statsmodels.api as sm
from statsmodels.stats.multitest import multipletests
from .utils import avg_repeated_timepoints, dft
# Load the details of the behavioral studies
_pathname = os.path.dirname(os.path.abspath(__file__))
_behav_fname = os.path.join(_pathname, '../behav_details.yaml')
behav_details = yaml.safe_load(open(_behav_fname))
def landau(x, t, fs, k_perm):
"""
Analyze the data as in Landau & Fries (2012)
Parameters
----------
    x : np.ndarray
        Array of Hit (1) or Miss (0) for each trial
    t : np.ndarray
        Time-stamp (SOA) for each trial
    fs : float
        Sampling rate used for the spectral estimate
    k_perm : int
        The number of times to randomly shuffle the data when computing the
        permuted surrogate distribution
Returns
-------
res : dict
The results of the randomization test as returned by
`time_shuffled_perm`, plus these items:
t : np.ndarray
The time-stamps of the individual trials
t_agg : np.ndarray
The time-steps for the aggregated accuracy time-series
x_agg : np.ndarray
The aggregated accuracy time-series
p_corr : np.ndarray
P-values corrected for multiple comparisons using Bonforroni
correction
"""
def landau_spectrum_trialwise(x_perm):
""" Helper to compute spectrum on shuffled data
"""
_, x_avg = avg_repeated_timepoints(t, x_perm)
f, y = landau_spectrum(x_avg, fs)
return f, y
# Compute the results
res = time_shuffled_perm(landau_spectrum_trialwise, x, k_perm)
res['t'] = t
res['t_agg'], res['x_agg'] = avg_repeated_timepoints(t, x)
# Correct for multiple comparisons across frequencies
_, p_corr, _, _ = multipletests(res['p'], method='bonferroni')
res['p_corr'] = p_corr
return res
def landau_spectrum(x, fs, detrend_ord=1):
"""
Get the spectrum of behavioral data as in Landau & Fries (2012)
The paper doesn't specifically mention detrending, but A.L. says they
always detrend with a 2nd-order polynomial. That matches the data --
without detrending, there should have been a peak at freq=0 due to the
offset from mean accuracy being above 0.
2021-06-14: AL tells me they used linear detrending.
The paper says the data were padded before computing the FFT, but doesn't
specify the padding or NFFT. I've chosen a value to match the frequency
resolution in the plots.
Parameters
----------
x : np.ndarray
The data time-series
Returns
-------
f : np.ndarray
The frequencies of the amplitude spectrum
y : np.ndarray
The amplitude spectrum
"""
details = behav_details['landau']
# Detrend the data
x = sm.tsa.tsatools.detrend(x, order=detrend_ord)
# Window the data
x = window(x, np.hanning(len(x)))
# Get the spectrum
f, y = dft(x, fs, details['nfft'])
return f, y
def fiebelkorn(x, t, k_perm):
"""
Search for statistically significant behavioral oscillations as in
Fiebelkorn et al. (2013)
Parameters
----------
x : np.ndarray
A sequence of accuracy (Hit: 1, Miss: 0) for each trial
t : np.ndarray
The time-stamps for each trial
k_perm : int
The number of times to randomly shuffle the data when computing the
permuted surrogate distribution
Returns
-------
res : dict
        The results as given by `time_shuffled_perm`, plus these items:
t : np.ndarray
The original time-stamps of the raw data
p_corr : np.ndarray
P-values for each frequency, corrected for multiple comparisons
using FDR
"""
# Compute the results
res = time_shuffled_perm(lambda xx: fiebelkorn_spectrum(xx, t), x, k_perm)
res['t'] = t
# Correct for multiple comparisons across frequencies
_, p_corr, _, _ = multipletests(res['p'], method='fdr_bh')
res['p_corr'] = p_corr
return res
def fiebelkorn_binning(x_trial, t_trial):
"""
Given accuracy and time-points, find the time-smoothed average accuracy
Parameters
----------
x_trial : np.ndarray
Accuracy (Hit: 1, Miss: 0) of each trial
t_trial : np.ndarray
The time-stamp of each trial
Returns
-------
x_bin : np.ndarray
The average accuracy within each time bin
t_bin : np.ndarray
The centers of each time bin
"""
details = behav_details['fiebelkorn']
# Time-stamps of the center of each bin
t_bin = np.arange(details['t_start'],
details['t_end'] + 1e-10,
details['bin_step'])
# Accuracy within each bin
x_bin = []
for i_bin in range(len(t_bin)):
bin_center = t_bin[i_bin]
bin_start = bin_center - (details['bin_width'] / 2)
bin_end = bin_center + (details['bin_width'] / 2)
bin_sel = (bin_start <= t_trial) & (t_trial <= bin_end)
x_bin_avg = np.mean(x_trial[bin_sel])
x_bin.append(x_bin_avg)
x_bin = np.array(x_bin)
return x_bin, t_bin
def fiebelkorn_spectrum(x, t):
"""
Compute the spectrum of accuracy data as in Fiebelkorn et al. (2013)
Parameters
----------
x : np.ndarray
The data for each trial
t : np.ndarray
The time-stamp for each trial
Returns
-------
f : np.ndarray
The frequencies of the resulting spectrum
y : np.ndarray
The amplitude spectrum
"""
details = behav_details['fiebelkorn']
# Get the moving average of accuracy
x_bin, t_bin = fiebelkorn_binning(x, t)
# Detrend the binned data
x_bin = sm.tsa.tsatools.detrend(x_bin, order=2)
# Window the data
x_bin = window(x_bin, np.hanning(len(x_bin)))
# Get the spectrum
f, y = dft(x_bin, 1 / details['bin_step'], details['nfft'])
# Only keep frequencies that were reported in the paper
f_keep = f <= details['f_max']
f = f[f_keep]
y = y[f_keep]
return f, y
def time_shuffled_perm(analysis_fnc, x, k_perm):
"""
Run a permutation test by shuffling the time-stamps of individual trials.
Parameters
----------
analysis_fnc : function
The function that will be used to generate the spectrum
x : np.ndarray
The data time-series
k_perm : int
How many permutations to run
Returns
-------
res : dict
Dictionary of the results of the randomization analysis
x : np.ndarray
The raw data
x_perm : np.ndarray
The shuffled data
f : np.ndarray
The frequencies of the resulting spectrum
y_emp : np.ndarray
The spectrum of the empirical (unshuffled) data
y_avg : np.ndarray
The spectra of the shuffled permutations
y_cis : np.ndarray
Confidence intervals for the spectra, at the 2.5th, 95th, and
97.5th percentile
p : np.ndarray
P-values (uncorrected for multiple comparisons) for each frequency
"""
# Compute the empirical statistics
f, y_emp = analysis_fnc(x)
# Run a bootstrapped permutation test.
# Create a surrogate distribution by randomly shuffling resps in time.
x_perm = []
y_perm = []
x_shuff = x.copy()
for k in range(k_perm):
np.random.shuffle(x_shuff)
_, y_perm_k = analysis_fnc(x_shuff)
y_perm.append(y_perm_k)
if k < 10: # Keep a few permutations for illustration
x_perm.append(x_shuff.copy())
# Find statistically significant oscillations
# Sometimes we get p=0 if no perms are larger than emp. Note that in this
# case, a Bonferroni correction doesn't have any effect on the p-values.
p = np.mean(np.vstack([y_perm, y_emp]) > y_emp, axis=0)
# Get summary of simulated spectra
y_avg = np.mean(y_perm, 1)
y_cis = np.percentile(y_perm, [2.5, 95, 97.5], 1)
# Bundle the results together
res = {}
res['x'] = x
res['x_perm'] = np.array(x_perm)
res['f'] = f
res['y_emp'] = y_emp
res['y_perm'] = np.array(y_perm)
res['y_avg'] = y_avg
res['y_cis'] = y_cis
res['p'] = p
return res
def window(x, win):
""" Apply a window to a segment of data
Parameters
----------
x : np.ndarray
The data
win : np.ndarray
The window
Returns
-------
x : np.ndarray
The windowed data
"""
return np.multiply(win, x.T).T
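# Minimal usage sketch (not part of the original module). The SOA grid, hit
# rate, sampling rate and permutation count are illustrative values only; run
# from the repository root with `python -m analysis_methods.shuff_time` so the
# relative imports resolve.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    soas = np.arange(0.15, 1.0, 1 / 60)                      # candidate SOAs (s)
    t_trial = rng.choice(soas, size=600)                     # one SOA per trial
    x_trial = rng.binomial(1, 0.6, size=600).astype(float)   # hit / miss
    res = landau(x_trial, t_trial, fs=60, k_perm=200)
    for freq, p in zip(res['f'], res['p_corr']):
        print(f"{freq:5.2f} Hz: corrected p = {p:.3f}")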
| 29 | 79 | 0.62327 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,588 | 0.655407 |
32eb29b8500dc60a31bfc242ef317ed9ccbd65b5
| 1,411 |
py
|
Python
|
configs/common/ARM_A7.py
|
baz21/g5
|
e81b0df094c5ff80fbbcc37618e81e206a3c9de9
|
[
"BSD-3-Clause"
] | null | null | null |
configs/common/ARM_A7.py
|
baz21/g5
|
e81b0df094c5ff80fbbcc37618e81e206a3c9de9
|
[
"BSD-3-Clause"
] | null | null | null |
configs/common/ARM_A7.py
|
baz21/g5
|
e81b0df094c5ff80fbbcc37618e81e206a3c9de9
|
[
"BSD-3-Clause"
] | null | null | null |
from m5.objects import *
# https://en.wikipedia.org/wiki/Raspberry_Pi
# https://en.wikipedia.org/wiki/ARM_Cortex-A7
# Instruction Cache
class ARM_A7_ICache(Cache):
tag_latency = 1
data_latency = 1
response_latency = 1
mshrs = 2
tgts_per_mshr = 8
size = '16kB' # OK
assoc = 2
is_read_only = True
# Writeback clean lines as well
writeback_clean = True
# Data Cache
class ARM_A7_DCache(Cache):
tag_latency = 2
data_latency = 2
response_latency = 2
mshrs = 6
tgts_per_mshr = 8
size = '16kB' # OK
assoc = 2
write_buffers = 16
# Consider the L2 a victim cache also for clean lines
writeback_clean = True
# L2 Cache
class ARM_A7_L2(Cache):
tag_latency = 12
data_latency = 12
response_latency = 12
mshrs = 16
tgts_per_mshr = 8
size = '128kB' # OK
assoc = 16
write_buffers = 8
prefetch_on_access = True
clusivity = 'mostly_excl'
# Simple stride prefetcher
prefetcher = StridePrefetcher(degree=8, latency = 1)
tags = RandomRepl()
# L3 Cache, NONE
class ARM_A7_L3(Cache):
pass
# TLB Cache, NONE
class ARM_A7_iTLBL2(Cache):
tag_latency = 4
data_latency = 4
response_latency = 4
mshrs = 6
tgts_per_mshr = 8
size = '1kB'
assoc = 4
write_buffers = 16
is_read_only = True
# Writeback clean lines as well
writeback_clean = True
# end
| 20.75 | 57 | 0.649894 | 1,197 | 0.848335 | 0 | 0 | 0 | 0 | 0 | 0 | 358 | 0.253721 |
32ebbb19735d64f55f4b8caaf8724aa49e1ddf29
| 172 |
py
|
Python
|
webapp/models/__init__.py
|
xaldey/otus_blog
|
32600506d447c0b76c7e0323389d17428d197181
|
[
"Apache-2.0"
] | null | null | null |
webapp/models/__init__.py
|
xaldey/otus_blog
|
32600506d447c0b76c7e0323389d17428d197181
|
[
"Apache-2.0"
] | null | null | null |
webapp/models/__init__.py
|
xaldey/otus_blog
|
32600506d447c0b76c7e0323389d17428d197181
|
[
"Apache-2.0"
] | null | null | null |
from .create_db import Session, engine, Base
from .models import User, Post, Tag
__all__ = [
"Session",
"engine",
"Base",
"User",
"Post",
"Tag",
]
| 14.333333 | 44 | 0.569767 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 40 | 0.232558 |
32ef88405f3f3c3db42531c5dfa16c38dbb4d202
| 1,405 |
py
|
Python
|
Easy/112.PathSum.py
|
YuriSpiridonov/LeetCode
|
2dfcc9c71466ffa2ebc1c89e461ddfca92e2e781
|
[
"MIT"
] | 39 |
2020-07-04T11:15:13.000Z
|
2022-02-04T22:33:42.000Z
|
Easy/112.PathSum.py
|
YuriSpiridonov/LeetCode
|
2dfcc9c71466ffa2ebc1c89e461ddfca92e2e781
|
[
"MIT"
] | 1 |
2020-07-15T11:53:37.000Z
|
2020-07-15T11:53:37.000Z
|
Easy/112.PathSum.py
|
YuriSpiridonov/LeetCode
|
2dfcc9c71466ffa2ebc1c89e461ddfca92e2e781
|
[
"MIT"
] | 20 |
2020-07-14T19:12:53.000Z
|
2022-03-02T06:28:17.000Z
|
"""
Given a binary tree and a sum, determine if the tree has a root-to-leaf path
such that adding up all the values along the path equals the given sum.
Note: A leaf is a node with no children.
Example:
Given the below binary tree and sum = 22,
5
/ \
4 8
/ / \
11 13 4
/ \ \
7 2 1
    return true, as there exists a root-to-leaf path 5->4->11->2 whose sum is 22.
"""
#Difficulty: Easy
#114 / 114 test cases passed.
#Runtime: 44 ms
#Memory Usage: 15.6 MB
#Runtime: 44 ms, faster than 72.99% of Python3 online submissions for Path Sum.
#Memory Usage: 15.6 MB, less than 43.57% of Python3 online submissions for Path Sum.
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def hasPathSum(self, root: TreeNode, summ: int) -> bool:
result = []
s = 0
self.summFunc(root, s, result)
        return summ in result
def summFunc(self, root, s, result):
if not root:
return 0
s += root.val
self.summFunc(root.left, s, result)
self.summFunc(root.right, s, result)
if not root.left and not root.right:
result.append(s)
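# Usage sketch for the docstring example above (LeetCode supplies TreeNode,
# which is only shown commented out here, so the demo stays commented as well):
# root = TreeNode(5)
# root.left, root.right = TreeNode(4), TreeNode(8)
# root.left.left = TreeNode(11)
# root.left.left.left, root.left.left.right = TreeNode(7), TreeNode(2)
# root.right.left, root.right.right = TreeNode(13), TreeNode(4)
# root.right.right.right = TreeNode(1)
# print(Solution().hasPathSum(root, 22))  # -> True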
| 28.673469 | 84 | 0.577936 | 468 | 0.333096 | 0 | 0 | 0 | 0 | 0 | 0 | 920 | 0.654804 |
32f125ad1d76b4e0fde9ddfeb972aeb7353e40c7
| 42 |
py
|
Python
|
downloads.py
|
Jamal135/fine-grained-sentiment-app
|
4754cefd77ccfa99b15a7721c3471aeacec650c9
|
[
"MIT"
] | null | null | null |
downloads.py
|
Jamal135/fine-grained-sentiment-app
|
4754cefd77ccfa99b15a7721c3471aeacec650c9
|
[
"MIT"
] | null | null | null |
downloads.py
|
Jamal135/fine-grained-sentiment-app
|
4754cefd77ccfa99b15a7721c3471aeacec650c9
|
[
"MIT"
] | null | null | null |
import nltk
nltk.download('vader_lexicon')
| 21 | 30 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 0.357143 |
32f16560a7eafdb17a4951c61d182a0eaa97e4e4
| 880 |
py
|
Python
|
src/Tools/Button.py
|
hieuhdh/Multi-tasking-program
|
2f064a554f647247c84979b7a27f0797d1e1b5af
|
[
"MIT"
] | null | null | null |
src/Tools/Button.py
|
hieuhdh/Multi-tasking-program
|
2f064a554f647247c84979b7a27f0797d1e1b5af
|
[
"MIT"
] | null | null | null |
src/Tools/Button.py
|
hieuhdh/Multi-tasking-program
|
2f064a554f647247c84979b7a27f0797d1e1b5af
|
[
"MIT"
] | null | null | null |
from tkinter import*
from tkinter import Button, font
from tkinter.font import BOLD
import tkinter.ttk as ttk
from tkhtmlview import HTMLLabel
from tkhtmlview import HTMLText
def frameButton(frame, xx, yy, text, backgroundcolor, foregroundcolor, cmd, images):
def IN(e):
button['background'] = backgroundcolor
button['foreground']= '#120b26'
def OUT(e):
button['background'] = foregroundcolor
button['foreground']= '#120b26'
button = Button(frame, text = text, width = 30, height = 2, fg = '#120b26', border = 0, bg = foregroundcolor, activeforeground = '#120b26', activebackground = backgroundcolor, command=(cmd), font = ("Microsoft Sans Serif", 12, "bold"), cursor="hand2", borderwidth=0, image = images )
button.bind("<Enter>", IN)
button.bind("<Leave>", OUT)
button.place(x = xx, y = yy)
| 40 | 303 | 0.659091 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 137 | 0.155682 |
32f6cfa5b601a97d41e10a68ea610b54a023b9f0
| 864 |
py
|
Python
|
src/test.py
|
ayieko168/Arduino-Oscilloscope
|
5a0634437010f4303c86aef141f33cc6a628b3dc
|
[
"MIT"
] | null | null | null |
src/test.py
|
ayieko168/Arduino-Oscilloscope
|
5a0634437010f4303c86aef141f33cc6a628b3dc
|
[
"MIT"
] | null | null | null |
src/test.py
|
ayieko168/Arduino-Oscilloscope
|
5a0634437010f4303c86aef141f33cc6a628b3dc
|
[
"MIT"
] | null | null | null |
import pyqtgraph as pg
import pyqtgraph.exporters
import numpy as np
import math
from time import sleep
f = 10
t = 0
Samples = 1000
# while True:
# y2 = np.sin( 2* np.pi * f * t)
# print(y)
# t+=0.01
# sleep(0.25)
def update():
global f, t, ys, y2
print(len(y2))
if len(y2) == Samples:
y2.pop(y2.index(y2[0]))
y2.append(np.sin( 2 * np.pi * f * t))
t += 0.0001
c2.updateData(y2)
# define the data
theTitle = "pyqtgraph plot"
y2 = []
# create plot
plt = pg.plot()
plt.showGrid(x=True,y=True)
dat2 = []
c2 = pg.PlotCurveItem(dat2)
plt.addItem(c2)
timer = pg.QtCore.QTimer ()
timer.timeout.connect(update)
timer.start(0.1)
## Start Qt event loop.
if __name__ == '__main__':
import sys
if sys.flags.interactive != 1 or not hasattr(pg.QtCore, 'PYQT_VERSION'):
pg.QtGui.QApplication.exec_()
| 16.941176 | 76 | 0.618056 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 186 | 0.215278 |
32f73e3a96427c84bfa7bd842e7e9ab6eeb893b6
| 931 |
py
|
Python
|
Aula07/Exercicio2.py
|
PabloSchumacher/TrabalhosPython
|
828edd35eb40442629211bc9f1477f75fb025d74
|
[
"bzip2-1.0.6",
"MIT"
] | null | null | null |
Aula07/Exercicio2.py
|
PabloSchumacher/TrabalhosPython
|
828edd35eb40442629211bc9f1477f75fb025d74
|
[
"bzip2-1.0.6",
"MIT"
] | null | null | null |
Aula07/Exercicio2.py
|
PabloSchumacher/TrabalhosPython
|
828edd35eb40442629211bc9f1477f75fb025d74
|
[
"bzip2-1.0.6",
"MIT"
] | null | null | null |
#--- Exercise 2 - Dictionaries
#--- Write a program that reads the data of 11 players
#--- Player: Nome (name), Posicao (position), Numero (number), PernaBoa (strong foot)
#--- Create a dictionary to store the data
#--- Print all the players and their data
lista_jogador = []
for i in range(0,11):
dicionario_jogador = {'Nome':'', 'Posicao':'', 'Numero':'','Pernaboa':''}
dicionario_jogador['Nome'] = input(f'Digite o nome do {i+1}° jogador: ')
dicionario_jogador['Posicao'] = input(f'Digite a posição do {i+1}° jogador: ')
dicionario_jogador['Numero'] = int(input(f'Digite o número do {i+1}° jogador: '))
dicionario_jogador['Pernaboa'] = input(f'Digite o perna boa do {i+1}° jogador: ')
lista_jogador.append(dicionario_jogador)
for j in lista_jogador:
print(f"Nome: {dicionario_jogador['Nome']} - Posição: {dicionario_jogador['Posicao']} - Numero: {dicionario_jogador['Numero']} - Pernaboa: {dicionario_jogador['Pernaboa']}")
| 46.55 | 177 | 0.688507 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 628 | 0.667375 |
32f8e7bf61b54b514d134bdb102d258bdc2af2ce
| 669 |
py
|
Python
|
Tiny ImageNet Challenge/train_data.py
|
Vishal-V/Mastering-TensorFlow-2.x
|
83e18cf84dc5c391c5f902978ee5a80e1be4a31d
|
[
"MIT"
] | 3 |
2020-05-15T16:57:39.000Z
|
2020-09-16T20:53:58.000Z
|
Tiny ImageNet Challenge/train_data.py
|
Vishal-V/Mastering-Tensorflow
|
83e18cf84dc5c391c5f902978ee5a80e1be4a31d
|
[
"MIT"
] | null | null | null |
Tiny ImageNet Challenge/train_data.py
|
Vishal-V/Mastering-Tensorflow
|
83e18cf84dc5c391c5f902978ee5a80e1be4a31d
|
[
"MIT"
] | 4 |
2020-03-30T16:11:41.000Z
|
2020-09-15T20:28:27.000Z
|
# Iterate over epochs.
for epoch in range(3):
print(f'Epoch {epoch+1}')
# Iterate over the batches of the dataset.
for step, x_batch_train in enumerate(train_data):
with tf.GradientTape() as tape:
reconstructed = autoencoder(x_batch_train)
# Compute reconstruction loss
loss = mse_loss(x_batch_train, reconstructed)
#loss += sum(autoencoder.losses) # Add KLD regularization loss
grads = tape.gradient(loss, autoencoder.trainable_variables)
optimizer.apply_gradients(zip(grads, autoencoder.trainable_variables))
loss_metric(loss)
if step % 100 == 0:
print(f'Step {step}: mean loss = {loss_metric.result()}')
| 35.210526 | 74 | 0.707025 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 224 | 0.334828 |
32f92084cffe12b7f31fc3604eb9852e4502b8d7
| 1,422 |
py
|
Python
|
utils/generate_topics.py
|
ahoho/scholar
|
fe1b7ba590563e245e7765d100cfff091ba20c54
|
[
"Apache-2.0"
] | null | null | null |
utils/generate_topics.py
|
ahoho/scholar
|
fe1b7ba590563e245e7765d100cfff091ba20c54
|
[
"Apache-2.0"
] | null | null | null |
utils/generate_topics.py
|
ahoho/scholar
|
fe1b7ba590563e245e7765d100cfff091ba20c54
|
[
"Apache-2.0"
] | null | null | null |
################################################################
# Generate top-N words for topics, one per line, to stdout
################################################################
import os
import sys
import argparse
import numpy as np
import file_handling as fh
def get_top_n_topic_words(beta, vocab, n=30):
K, V = beta.shape
out = []
for i in range(K):
topic = []
vocab_dist = beta[i]
top_word_indices = vocab_dist.argsort()[-n:][::-1]
for ind in top_word_indices:
topic.append(vocab[ind])
out.append(topic)
return out
def main(call=None):
# handle command line
parser = argparse.ArgumentParser()
parser.add_argument("model_path", help="path for model directory")
parser.add_argument(
"-n",
dest="n_words",
type=int,
default=30,
help="number of words to show in each topic"
)
options = parser.parse_args(call)
model_path = options.model_path
n_words = options.n_words
## load Beta
beta = np.load(os.path.join(model_path, 'beta.npz'))['beta']
## load vocab
vocab = fh.read_json(os.path.join(model_path, 'vocab.json'))
# get and print topics
topics = get_top_n_topic_words(beta, vocab, n_words)
for topic in topics:
topicstring = ' '.join(topic)
print(topicstring)
if __name__ == "__main__":
main()
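# Example invocation (the model directory is illustrative; it must contain the
# beta.npz and vocab.json files loaded above):
#   python generate_topics.py output/my_scholar_model -n 20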
| 25.392857 | 70 | 0.563291 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 386 | 0.271449 |
32fc43425ea47a93c10fa87eeeea81ca0922ca0c
| 918 |
py
|
Python
|
AutomateboringStuff/3. Functions/try_nd_except.py
|
gabriel-marchetti/Exercicios-Python
|
0f1eac7eee48081cf899d25bed0ec5dbc70a3542
|
[
"MIT"
] | 2 |
2021-12-21T23:28:02.000Z
|
2021-12-21T23:28:03.000Z
|
AutomateboringStuff/3. Functions/try_nd_except.py
|
gabriel-marchetti/Exercicios-Python
|
0f1eac7eee48081cf899d25bed0ec5dbc70a3542
|
[
"MIT"
] | 1 |
2021-12-22T12:05:11.000Z
|
2021-12-22T13:02:52.000Z
|
AutomateboringStuff/3. Functions/try_nd_except.py
|
gabriel-marchetti/Exercicios-Python
|
0f1eac7eee48081cf899d25bed0ec5dbc70a3542
|
[
"MIT"
] | null | null | null |
# When a program has a case that is clearly undesirable,
# we can use Python's try/except construct.
# Suppose we want to write a function that performs a
# division; we can then use the following code structure.
def divisão(divideBy):
return 42 / divideBy
# Note that in this case, passing zero as the argument
# produces an error in the terminal.
# Because of that, there are two approaches we can use
# to handle this case.
def spam(divideBy):
try:
return 42 / divideBy
except ZeroDivisionError:
print('Erro: Argumento Inválido')
print(spam(2))
print(spam(12))
print(spam(0))
print(spam(1))
# Another way to write this is:
def spam2(divideBy):
return 42 / divideBy
try:
    print(spam2(2))
    print(spam2(12))
    print(spam2(0))   # raises ZeroDivisionError, so the next line never runs
    print(spam2(1))
except ZeroDivisionError:
print('Error: Invalid argument.')
| 20.863636 | 59 | 0.713508 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 531 | 0.568522 |
32fcb908b2dfd2baf6aec8baabfb5d1f269220d0
| 1,577 |
py
|
Python
|
src/plyer_lach/platforms/android/email.py
|
locksmith47/turing-sim-kivy
|
f57de9d52494245c56f67dd7e63121434bb0553f
|
[
"MIT"
] | null | null | null |
src/plyer_lach/platforms/android/email.py
|
locksmith47/turing-sim-kivy
|
f57de9d52494245c56f67dd7e63121434bb0553f
|
[
"MIT"
] | null | null | null |
src/plyer_lach/platforms/android/email.py
|
locksmith47/turing-sim-kivy
|
f57de9d52494245c56f67dd7e63121434bb0553f
|
[
"MIT"
] | null | null | null |
from jnius import autoclass, cast
from kivy.logger import Logger
from plyer_lach.facades import Email
from plyer_lach.platforms.android import activity
Intent = autoclass('android.content.Intent')
AndroidString = autoclass('java.lang.String')
URI = autoclass('android.net.Uri')
class AndroidEmail(Email):
def _send(self, **kwargs):
intent = Intent(Intent.ACTION_SEND)
intent.setType('*/*')
recipient = kwargs.get('recipient')
subject = kwargs.get('subject')
text = kwargs.get('text')
create_chooser = kwargs.get('create_chooser')
file_path = kwargs.get('file_path')
if recipient:
intent.putExtra(Intent.EXTRA_EMAIL, [recipient])
if subject:
android_subject = cast('java.lang.CharSequence',
AndroidString(subject))
intent.putExtra(Intent.EXTRA_SUBJECT, android_subject)
if file_path:
file_uri = URI.parse('file://' + file_path)
Logger.info(str(file_uri.toString()))
intent.putExtra(Intent.EXTRA_STREAM, cast('android.os.Parcelable', file_uri))
Logger.info('Added file')
if create_chooser:
chooser_title = cast('java.lang.CharSequence',
AndroidString('Send message with:'))
activity.startActivity(Intent.createChooser(intent,
chooser_title))
else:
activity.startActivity(intent)
def instance():
return AndroidEmail()
| 36.674419 | 89 | 0.606848 | 1,252 | 0.793912 | 0 | 0 | 0 | 0 | 0 | 0 | 229 | 0.145212 |
32ff2b91e7cdacd12f1c52a76ec14a6214fafa45
| 452 |
py
|
Python
|
main.py
|
rishi-chauhan/sudoku
|
2b07954b2f3ab5146ab0f96eb4d0509a3ea45eb2
|
[
"MIT"
] | null | null | null |
main.py
|
rishi-chauhan/sudoku
|
2b07954b2f3ab5146ab0f96eb4d0509a3ea45eb2
|
[
"MIT"
] | null | null | null |
main.py
|
rishi-chauhan/sudoku
|
2b07954b2f3ab5146ab0f96eb4d0509a3ea45eb2
|
[
"MIT"
] | null | null | null |
"""Main class for sudoku game. Run this to solve the game."""
from board import Board
# ENTRIES contains the values of each cell
ENTRIES = [0, 0, 0, 2, 6, 0, 7, 0, 1, 6, 8, 0, 0, 7, 0, 0, 9, 0, 1,
9, 0, 0, 0, 4, 5, 0, 0, 8, 2, 0, 1, 0, 0, 0, 4, 0, 0,
0, 4, 6, 0, 2, 9, 0, 0, 0, 5, 0, 0, 0, 3, 0, 2, 8, 0,
0, 9, 3, 0, 0, 0, 7, 4, 0, 4, 0, 0, 5, 0, 0, 3, 6, 7,
0, 3, 0, 1, 8, 0, 0, 0]
board = Board(ENTRIES)
| 37.666667 | 67 | 0.446903 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 103 | 0.227876 |
fd00768ed39187f9b978abbf6c4d123c662329a9
| 121 |
py
|
Python
|
fuzzer/fuzzing_strategies/base_strategy/base_strategy.py
|
Dyfox100/Libstemmer_Fuzzer
|
263d6e64e007116a348d994851aa05e4c0c35358
|
[
"MIT"
] | null | null | null |
fuzzer/fuzzing_strategies/base_strategy/base_strategy.py
|
Dyfox100/Libstemmer_Fuzzer
|
263d6e64e007116a348d994851aa05e4c0c35358
|
[
"MIT"
] | null | null | null |
fuzzer/fuzzing_strategies/base_strategy/base_strategy.py
|
Dyfox100/Libstemmer_Fuzzer
|
263d6e64e007116a348d994851aa05e4c0c35358
|
[
"MIT"
] | null | null | null |
import abc
class Abstract_Strategy(metaclass=abc.ABCMeta):
@abc.abstractmethod
def generate(self):
pass
| 17.285714 | 47 | 0.710744 | 108 | 0.892562 | 0 | 0 | 56 | 0.46281 | 0 | 0 | 0 | 0 |
fd009473c74aa4ae5995e6b6bc84914f1edd33ca
| 2,215 |
py
|
Python
|
netbox/dcim/migrations/0100_application.py
|
fireman0865/PingBox
|
0f00eaf88b88e9441fffd5173a1501e56c13db03
|
[
"Apache-2.0"
] | 1 |
2021-09-23T00:06:51.000Z
|
2021-09-23T00:06:51.000Z
|
netbox/dcim/migrations/0100_application.py
|
fireman0865/PingBox
|
0f00eaf88b88e9441fffd5173a1501e56c13db03
|
[
"Apache-2.0"
] | 2 |
2021-06-08T21:05:10.000Z
|
2021-09-08T01:46:58.000Z
|
netbox/dcim/migrations/0100_application.py
|
fireman0865/PingBox
|
0f00eaf88b88e9441fffd5173a1501e56c13db03
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.2.10 on 2020-03-04 09:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('virtualization', '0013_deterministic_ordering'),
('dcim', '0099_powerfeed_negative_voltage'),
]
operations = [
migrations.CreateModel(
name='Application',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False)),
('created', models.DateField(auto_now_add=True, null=True)),
('last_updated', models.DateTimeField(auto_now=True, null=True)),
('name', models.CharField(max_length=100, unique=True)),
('slug', models.SlugField(max_length=100, unique=True)),
('database', models.CharField(blank=True, max_length=50)),
('language', models.CharField(blank=True, max_length=50)),
('version', models.CharField(blank=True, max_length=100)),
('application_team', models.CharField(blank=True, max_length=100)),
('link', models.CharField(blank=True, max_length=100, null=True)),
('environnement', models.CharField(blank=True, max_length=50)),
('application_maintainer', models.CharField(blank=True, max_length=100, null=True)),
('profilepardefaut', models.CharField(blank=True, max_length=100, null=True)),
('actualprofile', models.CharField(blank=True, max_length=100, null=True)),
('platform', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='applications', to='dcim.Platform')),
('virtual_machine', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='applications', to='virtualization.VirtualMachine')),
],
options={
'ordering': ['name', 'slug', 'platform', 'database', 'virtual_machine', 'language', 'environnement', 'version', 'application_team', 'link', 'application_maintainer', 'profilepardefaut', 'actualprofile'],
},
),
]
| 55.375 | 219 | 0.628442 | 2,088 | 0.942664 | 0 | 0 | 0 | 0 | 0 | 0 | 584 | 0.263657 |
fd00db8ee275e84aadc9a08c115a590eab1c8a65
| 1,934 |
py
|
Python
|
pam_notify.py
|
aNNufriy/pamNotifier
|
088ec0cb87c026a0fbc8e6275fc891bf653af645
|
[
"MIT"
] | 1 |
2020-03-21T21:37:57.000Z
|
2020-03-21T21:37:57.000Z
|
pam_notify.py
|
aNNufriy/pamNotifier
|
088ec0cb87c026a0fbc8e6275fc891bf653af645
|
[
"MIT"
] | null | null | null |
pam_notify.py
|
aNNufriy/pamNotifier
|
088ec0cb87c026a0fbc8e6275fc891bf653af645
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
import smtplib
import time
import syslog
import telegram
import yaml
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
# Author:: Alexander Schedrov ([email protected])
# Copyright:: Copyright (c) 2019 Alexander Schedrov
# License:: MIT
def pam_sm_open_session(pamh, flags, args):
cwd = '/etc/ssh/pam/'
whitelist=[line.rstrip('\n') for line in open(cwd+'whitelist','r')]
if not any(pamh.rhost==s for s in whitelist):
if pamh.get_user()!="git":
message = '%s: [%s] logged in from [%s]' % (os.uname()[1],pamh.get_user(),pamh.rhost)
syslog.syslog(message)
with open(cwd+'parameters.yml', 'r') as stream:
try:
params = yaml.safe_load(stream)
send_tg_message(params['tg'], message)
send_em_message(params['em'], message)
return pamh.PAM_SUCCESS
except Exception, e:
syslog.syslog(syslog.LOG_ERR, str(e))
return pamh.PAM_SERVICE_ERR
def pam_sm_close_session(pamh, flags, args):
return pamh.PAM_SUCCESS
def send_tg_message(tgparams, message):
bot = telegram.Bot(token=tgparams['bot_token'])
bot.sendMessage(chat_id=tgparams['chat_id'], text=message)
def send_em_message(emparams, message, host=os.uname()[1]):
timestring = time.strftime("[%Y-%m-%d %H:%M:%S]")
mpartmsg = MIMEMultipart()
mpartmsg['From'] = emparams['adr_from']
mpartmsg['To'] = emparams['adr_to']
mpartmsg['Subject'] = host+' ssh login'
mpartmsg.attach(MIMEText(message))
emclient = smtplib.SMTP_SSL(emparams['smtp_server'],465)
emclient.ehlo()
emclient.login(emparams['adr_from'], emparams['password'])
emclient.sendmail(emparams['adr_from'], emparams['adr_to'], mpartmsg.as_string())
emclient.quit()
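# Deployment sketch (the file names below are assumptions, not part of this module):
# the pam_sm_* hooks are meant to be loaded by pam_python from the sshd PAM stack, e.g.
#   session optional pam_python.so /lib/security/pam_notify.py
# The module expects /etc/ssh/pam/whitelist (one trusted host/IP per line) and
# /etc/ssh/pam/parameters.yml shaped like:
#   tg:
#     bot_token: "..."
#     chat_id: "..."
#   em:
#     adr_from: "[email protected]"
#     adr_to: "[email protected]"
#     password: "..."
#     smtp_server: "smtp.example.org"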
| 33.344828 | 97 | 0.635471 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 373 | 0.192865 |
fd032c799cd2f082ede61113614415437237b7bc
| 40,263 |
py
|
Python
|
src/eventail/async_service/pika/base.py
|
allo-media/eventail
|
aed718d733709f1a522fbfec7083ddd8ed7b5039
|
[
"MIT"
] | 2 |
2019-12-12T15:08:25.000Z
|
2020-05-19T08:52:06.000Z
|
src/eventail/async_service/pika/base.py
|
allo-media/eventail
|
aed718d733709f1a522fbfec7083ddd8ed7b5039
|
[
"MIT"
] | 10 |
2021-01-19T15:03:51.000Z
|
2022-03-08T15:48:22.000Z
|
src/eventail/async_service/pika/base.py
|
allo-media/eventail
|
aed718d733709f1a522fbfec7083ddd8ed7b5039
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# MIT License
#
# Copyright (c) 2018-2019 Groupe Allo-Media
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""
A base class implementing AM service architecture and its requirements.
Inspired by pika's complete examples.
"""
import functools
import json
import logging
import os
import signal
import socket
import traceback
from contextlib import contextmanager
from typing import Any, Callable, Dict, Generator, List, Optional, Sequence, Tuple
import cbor
import pika
from eventail.gelf import GELF
from eventail.log_criticity import ALERT, EMERGENCY, ERROR, WARNING
LOGGER = logging.getLogger("async_service")
JSON_MODEL = Dict[str, Any]
HEADER = Dict[str, str]
class Service(object):
"""This is an example service that will handle unexpected interactions
with RabbitMQ such as channel and connection closures.
If RabbitMQ closes the connection, this class will stop and indicate
that reconnection is necessary. You should look at the output, as
there are limited reasons why the connection may be closed, which
usually are tied to permission related issues or socket timeouts.
If the channel is closed, it will indicate a problem with one of the
commands that were issued and that should surface in the output as well.
To leverage the binary nature of AMQP messages, we use CBOR instead of
JSON as data serialization (transparent). Moreover, CBOR is much faster
and much more compact than JSON.
"""
ID = os.getpid()
HOSTNAME = socket.gethostname()
EVENT_EXCHANGE = "events"
CMD_EXCHANGE = "commands"
LOG_EXCHANGE = "logs"
EVENT_EXCHANGE_TYPE = "topic"
CMD_EXCHANGE_TYPE = "topic"
LOG_EXCHANGE_TYPE = "topic"
RETRY_DELAY = 15 # in seconds
#: Heartbeat interval, must be superior to the expected blocking processing time (in seconds).
#: Beware that the actual delay is negotiated with the broker, and the lower value is taken, so
#: configure Rabbitmq accordingly.
HEARTBEAT = 60
#: When rabbitmq is low on resources, it may temporarily block the connection.
#: We can specify a timeout if it is not acceptable to the service (in seconds)
BLOCKED_TIMEOUT = 3600
#: In production, experiment with higher prefetch values
#: for higher consumer throughput
PREFETCH_COUNT = 3
def __init__(
self,
amqp_urls: List[str],
event_routing_keys: Sequence[str],
command_routing_keys: Sequence[str],
logical_service: str,
) -> None:
"""Create a new instance of the consumer class, passing in the AMQP
URL used to connect to RabbitMQ.
:param str amqp_urls: List of AMQP urls.
The service will try to connect to one of them, in a round-robin fashion.
"""
self._urls = amqp_urls
self._event_routing_keys = event_routing_keys
self._command_routing_keys = command_routing_keys
self.logical_service = logical_service
self.url_idx = 0
self._event_queue = logical_service + ".events"
self._command_queue = logical_service + ".commands"
self.exclusive_queues = False
self._serialize: Callable[..., bytes] = cbor.dumps
self._mime_type = "application/cbor"
self._connection: pika.SelectConnection
self._channel: pika.channel.Channel
self._log_channel: pika.channel.Channel
for s in (signal.SIGHUP, signal.SIGTERM, signal.SIGINT):
signal.signal(s, lambda _s, _f: self.stop())
def reset_connection_state(self) -> None:
self._bind_count = (len(self._event_routing_keys) or 1) + (
len(self._command_routing_keys) or 1
)
self.should_reconnect = False
self.was_consuming = False
self._closing = False
self._event_consumer_tag: Optional[str] = None
self._command_consumer_tag: Optional[str] = None
self._consuming = False
# for events publishing only
self._deliveries: Dict[
int, Tuple[str, str, JSON_MODEL, str, bool, Optional[HEADER]]
] = {}
self._acked = 0
self._nacked = 0
self._message_number = 0
def connect(self) -> pika.SelectConnection:
"""This method connects to RabbitMQ, returning the connection handle.
When the connection is established, the on_connection_open method
will be invoked by pika.
:rtype: pika.SelectConnection
"""
self.reset_connection_state()
url = self._urls[self.url_idx]
self.url_idx = (self.url_idx + 1) % len(self._urls)
LOGGER.info("Connecting to %s", url)
connection_params = pika.URLParameters(url)
connection_params.heartbeat = self.HEARTBEAT
connection_params.blocked_connection_timeout = self.BLOCKED_TIMEOUT
return pika.SelectConnection(
parameters=connection_params,
on_open_callback=self.on_connection_open,
on_open_error_callback=self.on_connection_open_error,
on_close_callback=self.on_connection_closed,
)
def close_connection(self) -> None:
self._consuming = False
if self._connection.is_closing or self._connection.is_closed:
LOGGER.info("Connection is closing or already closed")
else:
LOGGER.info("Closing connection")
self._connection.close()
def on_connection_open(self, _unused_connection: pika.BaseConnection) -> None:
"""This method is called by pika once the connection to RabbitMQ has
been established. It passes the handle to the connection object in
case we need it, but in this case, we'll just mark it unused.
:param pika.SelectConnection _unused_connection: The connection
"""
LOGGER.info("Connection opened")
self.open_channels()
def on_connection_open_error(
self, _unused_connection: pika.BaseConnection, err: Exception
) -> None:
"""This method is called by pika if the connection to RabbitMQ
can't be established.
:param pika.SelectConnection _unused_connection: The connection
:param Exception err: The error
"""
LOGGER.error("Connection open failed: %s", err)
self.reconnect(True)
def on_connection_closed(
self, _unused_connection: pika.BaseConnection, reason: Exception
) -> None:
"""This method is invoked by pika when the connection to RabbitMQ is
closed unexpectedly. Since it is unexpected, we will reconnect to
RabbitMQ if it disconnects.
:param pika.connection.Connection connection: The closed connection obj
:param Exception reason: exception representing reason for loss of
connection.
"""
if self._closing:
self._connection.ioloop.stop()
else:
self.reconnect(True)
def reconnect(self, should_reconnect=True) -> None:
"""Will be invoked if the connection can't be opened or is
closed. Indicates that a reconnect is necessary then stops the
ioloop.
"""
self.should_reconnect = should_reconnect
self.stop(should_reconnect)
def open_channels(self) -> None:
"""Open a new channel with RabbitMQ by issuing the Channel.Open RPC
command. When RabbitMQ responds that the channel is open, the
on_channel_open callback will be invoked by pika.
"""
LOGGER.info("Creating channels")
self._connection.channel(
on_open_callback=functools.partial(self.on_channel_open, main=True)
)
self._connection.channel(
on_open_callback=functools.partial(self.on_channel_open, main=False)
)
def on_channel_open(self, channel: pika.channel.Channel, main: bool) -> None:
"""This method is invoked by pika when the channel has been opened.
The channel object is passed in so we can make use of it.
Since the channel is now open, we'll declare the exchanges to use.
:param pika.channel.Channel channel: The channel object
"""
LOGGER.info("Channel opened")
if main:
self._channel = channel
self.setup_exchange(self.EVENT_EXCHANGE, self.EVENT_EXCHANGE_TYPE, channel)
self.setup_exchange(self.CMD_EXCHANGE, self.CMD_EXCHANGE_TYPE, channel)
else:
self._log_channel = channel
self.setup_exchange(self.LOG_EXCHANGE, self.LOG_EXCHANGE_TYPE, channel)
self.add_on_channel_close_callback(channel)
def add_on_channel_close_callback(self, channel: pika.channel.Channel) -> None:
"""This method tells pika to call the on_channel_closed method if
RabbitMQ unexpectedly closes the channel.
"""
LOGGER.info("Adding channel close callback")
channel.add_on_close_callback(self.on_channel_closed)
def on_channel_closed(
self, channel: pika.channel.Channel, reason: Exception
) -> None:
"""Invoked by pika when RabbitMQ unexpectedly closes the channel.
Channels are usually closed if you attempt to do something that
violates the protocol, such as re-declare an exchange or queue with
different parameters. In this case, we'll close the connection
to shutdown the object.
        :param pika.channel.Channel channel: The closed channel
:param Exception reason: why the channel was closed
"""
LOGGER.warning("Channel %i was closed: %s", channel, reason)
self.close_connection()
def setup_exchange(
self, exchange_name: str, exchange_type: str, channel: pika.channel.Channel
) -> None:
"""Setup the exchange on RabbitMQ by invoking the Exchange.Declare RPC
command. When it is complete, the on_exchange_declareok method will
be invoked by pika.
:param str|unicode exchange_name: The name of the exchange to declare
"""
LOGGER.info("Declaring exchange: %s", exchange_name)
# Note: using functools.partial is not required, it is demonstrating
# how arbitrary data can be passed to the callback when it is called
cb = functools.partial(self.on_exchange_declareok, exchange_name=exchange_name)
channel.exchange_declare(
exchange=exchange_name,
exchange_type=exchange_type,
callback=cb,
durable=True,
)
def on_exchange_declareok(
self, _unused_frame: pika.frame.Method, exchange_name: str
) -> None:
"""Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC
command.
:param pika.frame.Method unused_frame: Exchange.DeclareOk response frame
        :param str|unicode exchange_name: The name of the declared exchange
"""
LOGGER.info("Exchange declared: %s", exchange_name)
if (
exchange_name == self.EVENT_EXCHANGE
and self._event_routing_keys
or exchange_name == self.CMD_EXCHANGE
and self._command_routing_keys
):
self.setup_queue(exchange_name)
elif exchange_name != self.LOG_EXCHANGE:
self._bind_count -= 1
if self._bind_count == 0:
self.set_qos()
def setup_queue(self, exchange_name: str) -> None:
"""Setup the queue on RabbitMQ by invoking the Queue.Declare RPC
command. When it is complete, the on_queue_declareok method will
be invoked by pika.
:param str|unicode exchange: The name of exchange to bind.
"""
cb = functools.partial(self.on_queue_declareok, exchange_name=exchange_name)
if self.exclusive_queues:
LOGGER.info("Declaring exclusive on exchange %s", exchange_name)
self._channel.queue_declare("", exclusive=True, callback=cb)
else:
queue = (
self._event_queue
if exchange_name == self.EVENT_EXCHANGE
else self._command_queue
)
LOGGER.info("Declaring queue %s on exchange %s", queue, exchange_name)
self._channel.queue_declare(queue=queue, durable=True, callback=cb)
def on_queue_declareok(self, frame: pika.frame.Method, exchange_name: str) -> None:
"""Method invoked by pika when the Queue.Declare RPC call made in
setup_queue has completed. In this method we will bind the queue
and exchange together with the routing key by issuing the Queue.Bind
RPC command. When this command is complete, the on_bindok method will
be invoked by pika.
:param pika.frame.Method frame: The Queue.DeclareOk frame
"""
queue_name = frame.method.queue
routing_keys: Sequence[str]
if exchange_name == self.EVENT_EXCHANGE:
routing_keys = self._event_routing_keys
self._event_queue = queue_name
else:
routing_keys = self._command_routing_keys
self._command_queue = queue_name
LOGGER.info("Binding %s to %s with %s", exchange_name, queue_name, routing_keys)
for key in routing_keys:
self._channel.queue_bind(
queue_name, exchange_name, routing_key=key, callback=self.on_bindok
)
def on_bindok(self, _unused_frame: pika.frame.Method) -> None:
"""Invoked by pika when the Queue.Bind method has completed. At this
point we will set the prefetch count for the channel.
:param pika.frame.Method _unused_frame: The Queue.BindOk response frame
"""
LOGGER.info("Queue bound")
self._bind_count -= 1
if self._bind_count == 0:
self.set_qos()
def set_qos(self) -> None:
"""This method sets up the consumer prefetch to only be delivered
PREFETCH_COUNT at a time. The consumer must acknowledge this message
before RabbitMQ will deliver another one. You should experiment
with different prefetch values to achieve desired performance.
"""
self._channel.basic_qos(
prefetch_count=self.PREFETCH_COUNT, callback=self.on_basic_qos_ok
)
def on_basic_qos_ok(self, _unused_frame: pika.frame.Method) -> None:
"""Invoked by pika when the Basic.QoS method has completed. At this
point we will start consuming messages by calling start_consuming
which will invoke the needed RPC commands to start the process.
:param pika.frame.Method _unused_frame: The Basic.QosOk response frame
"""
LOGGER.info("QOS set to: %d", self.PREFETCH_COUNT)
self.enable_delivery_confirmations()
self.start_consuming()
def enable_delivery_confirmations(self) -> None:
"""Send the Confirm.Select RPC method to RabbitMQ to enable delivery
confirmations on the channel. The only way to turn this off is to close
the channel and create a new one.
When the message is confirmed from RabbitMQ, the
on_delivery_confirmation method will be invoked passing in a Basic.Ack
or Basic.Nack method from RabbitMQ that will indicate which messages it
is confirming or rejecting.
"""
LOGGER.info("Issuing Confirm.Select RPC command")
self._channel.confirm_delivery(self.on_delivery_confirmation)
def on_delivery_confirmation(self, method_frame: pika.frame.Method) -> None:
"""Invoked by pika when RabbitMQ responds to a Basic.Publish RPC
command, passing in either a Basic.Ack or Basic.Nack frame with
the delivery tag of the message that was published. The delivery tag
is an integer counter indicating the message number that was sent
        on the channel via Basic.Publish. Here we're just doing housekeeping
to keep track of stats and remove message numbers that we expect
a delivery confirmation of from the list used to keep track of messages
that are pending confirmation.
BEWARE: the `ack` and `nack` received here are emitted by the broker,
not by other services! They mean the broker accepted/received the
message or not.
Unroutable messages won't raise a `nack`.
If you want to be notified of unroutable messages,
you need to set `mandatory=True` on the emitted message and
implement `handle_returned_message`. The unroutable message
will then be returned to this callback.
:param pika.frame.Method method_frame: Basic.Ack or Basic.Nack frame
"""
confirmation_type: str = method_frame.method.NAME.split(".")[1].lower()
delivery_tag: int = method_frame.method.delivery_tag
multiple: bool = method_frame.method.multiple
LOGGER.info("Received %s for delivery tag: %i", confirmation_type, delivery_tag)
confirm_range: List[int]
if multiple:
confirm_range = [
i for i in sorted(self._deliveries.keys()) if i <= delivery_tag
]
else:
confirm_range = [delivery_tag]
num_confirms = len(confirm_range)
if confirmation_type == "ack":
self._acked += num_confirms
elif confirmation_type == "nack":
self._nacked += num_confirms
            # The broker is in trouble, resend later
for i in confirm_range:
self.call_later(
self.RETRY_DELAY, lambda args=self._deliveries[i]: self._emit(*args)
)
for i in confirm_range:
del self._deliveries[i]
LOGGER.info(
"Published %i messages, %i have yet to be confirmed, "
"%i were acked and %i were nacked",
self._message_number,
len(self._deliveries),
self._acked,
self._nacked,
)
def start_consuming(self) -> None:
"""This method sets up the consumer by first calling
add_on_cancel_callback so that the object is notified if RabbitMQ
cancels the consumer. It then issues the Basic.Consume RPC command
which returns the consumer tag that is used to uniquely identify the
consumer with RabbitMQ. We keep the value to use it when we want to
cancel consuming. The on_message method is passed in as a callback pika
will invoke when a message is fully received.
"""
LOGGER.info("Issuing consumer related RPC commands")
self.add_on_cancel_callback()
self.add_on_return_callback()
if self._event_routing_keys:
self._event_consumer_tag = self._channel.basic_consume(
self._event_queue, self.on_message
)
self._consuming = True
if self._command_routing_keys:
self._command_consumer_tag = self._channel.basic_consume(
self._command_queue, self.on_message
)
self._consuming = True
self.was_consuming = True
self.on_ready()
def add_on_cancel_callback(self) -> None:
"""Add a callback that will be invoked if RabbitMQ cancels the consumer
for some reason. If RabbitMQ does cancel the consumer,
on_consumer_cancelled will be invoked by pika.
"""
LOGGER.info("Adding consumer cancellation callback")
self._channel.add_on_cancel_callback(self.on_consumer_cancelled)
def add_on_return_callback(self) -> None:
"""Add a callback that will be invoked to return an unroutable message."""
LOGGER.info("Adding return callback")
self._channel.add_on_return_callback(self.on_message_returned)
def on_consumer_cancelled(self, method_frame: pika.frame.Method) -> None:
"""Invoked by pika when RabbitMQ sends a Basic.Cancel for a consumer
receiving messages.
:param pika.frame.Method method_frame: The Basic.Cancel frame
"""
LOGGER.info("Consumer was cancelled remotely, shutting down: %r", method_frame)
if not (self._channel.is_closed or self._channel.is_closing):
self._channel.close()
def on_message_returned(
self,
ch: pika.channel.Channel,
basic_return: pika.spec.Basic.Return,
properties: pika.spec.BasicProperties,
body: bytes,
):
"""Invoked by pika when a message is returned.
        A message may be returned if:
* it was sent with the `mandatory` flag on True;
* the broker was unable to route it to a queue.
:param pika.channel.Channel ch: The channel object
        :param pika.Spec.Basic.Return basic_return: The Basic.Return method
:param pika.Spec.BasicProperties: properties
:param bytes body: The message body
"""
decoder = cbor if properties.content_type == "application/cbor" else json
# If we are not able to decode our own payload, better crash the service now
payload: JSON_MODEL = decoder.loads(body) if body else None
routing_key: str = basic_return.routing_key
envelope: Dict[str, str] = {}
if properties.reply_to:
envelope["reply_to"] = properties.reply_to
if properties.correlation_id:
envelope["correlation_id"] = properties.correlation_id
if properties.headers:
envelope.update(properties.headers)
LOGGER.info("Received returned message: %s", routing_key)
try:
self.handle_returned_message(routing_key, payload, envelope)
except Exception as e:
# unexpected error
self.log(
EMERGENCY,
"in handle_returned_message [{}] {}".format(self.logical_service, e),
conversation_id=envelope.get("conversation_id", ""),
)
# Crash the service now
self.stop()
def on_message(
self,
ch: pika.channel.Channel,
basic_deliver: pika.spec.Basic.Deliver,
properties: pika.spec.BasicProperties,
body: bytes,
) -> None:
"""Invoked by pika when a message is delivered from RabbitMQ. The
channel is passed for your convenience. The basic_deliver object that
is passed in carries the exchange, routing key, delivery tag and
a redelivered flag for the message. The properties passed in is an
instance of BasicProperties with the message properties and the body
is the message that was sent.
:param pika.channel.Channel ch: The channel object
:param pika.Spec.Basic.Deliver: basic_deliver method
:param pika.Spec.BasicProperties: properties
:param bytes body: The message body
"""
headers: HEADER = properties.headers
decoder = cbor if properties.content_type == "application/cbor" else json
routing_key: str = basic_deliver.routing_key
exchange: str = basic_deliver.exchange
if headers is None or "conversation_id" not in headers:
self.log(EMERGENCY, f"Missing headers on {routing_key}")
# unrecoverable error, send to dead letter
ch.basic_nack(delivery_tag=basic_deliver.delivery_tag, requeue=False)
return
conversation_id = headers["conversation_id"]
try:
payload: JSON_MODEL = decoder.loads(body) if body else None
except ValueError:
self.log(
EMERGENCY,
f"Unable to decode payload for {routing_key}; dead lettering.",
conversation_id=conversation_id,
)
# Unrecoverable, put to dead letter
ch.basic_nack(delivery_tag=basic_deliver.delivery_tag, requeue=False)
return
LOGGER.info("Received message from %s: %s", exchange, routing_key)
if exchange == self.CMD_EXCHANGE:
correlation_id = properties.correlation_id
reply_to = properties.reply_to
status = headers.get("status", "") if headers else ""
if not (reply_to or status):
self.log(
EMERGENCY,
"invalid enveloppe for command/result: {}; dead lettering.".format(
headers
),
conversation_id=conversation_id,
)
# Unrecoverable. Put to dead letter
ch.basic_nack(delivery_tag=basic_deliver.delivery_tag, requeue=False)
return
if reply_to:
with self.ack_policy(
ch, basic_deliver, conversation_id, reply_to, correlation_id
):
self.handle_command(
routing_key, payload, conversation_id, reply_to, correlation_id
)
else:
with self.ack_policy(
ch, basic_deliver, conversation_id, reply_to, correlation_id
):
self.handle_result(
routing_key, payload, conversation_id, status, correlation_id
)
else:
with self.ack_policy(ch, basic_deliver, conversation_id, "", ""):
self.handle_event(routing_key, payload, conversation_id)
@contextmanager
def ack_policy(
self,
ch: pika.channel.Channel,
deliver: pika.spec.Basic.Deliver,
conversation_id: str,
reply_to: str,
correlation_id: str,
) -> Generator[None, None, None]:
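        """Acknowledge, retry, or dead-letter the message being processed.
        Ack on success; on an unhandled error, requeue once, then dead-letter.
        """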
try:
yield None
except Exception:
error = traceback.format_exc()
self.log(
ALERT,
f"Unhandled error while processing message {deliver.routing_key}",
error,
conversation_id=conversation_id,
)
# retry once
if not deliver.redelivered:
ch.basic_nack(delivery_tag=deliver.delivery_tag, requeue=True)
else:
# dead letter
self.log(
EMERGENCY,
f"Giving up on {deliver.routing_key}",
error,
conversation_id=conversation_id,
)
ch.basic_nack(delivery_tag=deliver.delivery_tag, requeue=False)
else:
ch.basic_ack(delivery_tag=deliver.delivery_tag)
def stop_consuming(self) -> None:
"""Tell RabbitMQ that you would like to stop consuming by sending the
Basic.Cancel RPC command.
"""
if not (self._channel.is_closed or self._channel.is_closing):
LOGGER.info("Sending a Basic.Cancel RPC command to RabbitMQ")
for consumer_tag in (self._event_consumer_tag, self._command_consumer_tag):
if consumer_tag is not None:
cb = functools.partial(self.on_cancelok, userdata=consumer_tag)
self._channel.basic_cancel(consumer_tag, cb)
def on_cancelok(self, _unused_frame: pika.frame.Method, userdata: str) -> None:
"""This method is invoked by pika when RabbitMQ acknowledges the
cancellation of a consumer. At this point we will close the channel.
This will invoke the on_channel_closed method once the channel has been
        closed, which will in turn close the connection.
:param pika.frame.Method _unused_frame: The Basic.CancelOk frame
:param str|unicode userdata: Extra user data (consumer tag)
"""
self._consuming = False
LOGGER.info(
"RabbitMQ acknowledged the cancellation of the consumer: %s", userdata
)
self.close_channel()
def close_channel(self) -> None:
"""Call to close the channel with RabbitMQ cleanly by issuing the
Channel.Close RPC command.
"""
LOGGER.info("Closing the channels")
self._channel.close()
self._log_channel.close()
def _emit(
self,
exchange: str,
routing_key: str,
message: JSON_MODEL,
conversation_id: str,
mandatory: bool,
reply_to: str = "",
correlation_id: str = "",
headers: Optional[HEADER] = None,
) -> None:
"""Send a message.
The `message` is any data conforming to the JSON model.
"""
if headers is None:
headers = {}
headers["conversation_id"] = conversation_id
self._channel.basic_publish(
exchange=exchange,
routing_key=routing_key,
body=self._serialize(message),
mandatory=mandatory,
properties=pika.BasicProperties(
delivery_mode=2, # make message persistent
content_type=self._mime_type,
reply_to=reply_to,
correlation_id=correlation_id,
headers=headers,
),
)
self._message_number += 1
self._deliveries[self._message_number] = (
exchange,
routing_key,
message,
conversation_id,
mandatory,
headers,
)
LOGGER.info("Published message # %i", self._message_number)
# Public interface
def use_json(self) -> None:
"""Force sending message serialized in plain JSON instead of CBOR."""
self._serialize = lambda message: json.dumps(message).encode("utf-8")
self._mime_type = "application/json"
def use_exclusive_queues(self) -> None:
"""Force usage of exclusive queues.
        This is useful for debug tools that should not leave a queue behind them
        (overflow risk) and should not interfere with other instances.
"""
self.exclusive_queues = True
def log(
self,
criticity: int,
short: str,
full: str = "",
conversation_id: str = "",
additional_fields: Dict = {},
) -> None:
"""Log to the log bus.
Parameters:
        - `criticity`: int, the severity level on the syslog scale
        - `short`: str, a short description of the log
        - `full`: str, the full message of the log (appears as `message` in Graylog)
        - `additional_fields`: Dict, data to be merged into the GELF payload as additional fields
"""
gelf = GELF(self, criticity, short, full, conversation_id, additional_fields)
LOGGER.debug("Application logged: %s\n%s", short, full)
# no persistent messages, no delivery confirmations
self._log_channel.basic_publish(
exchange=self.LOG_EXCHANGE,
routing_key=gelf.routing_key,
body=gelf.payload,
)
def send_command(
self,
command: str,
message: JSON_MODEL,
conversation_id: str,
reply_to: str,
correlation_id: str,
mandatory: bool = True,
) -> None:
"""Send a command message.
The `message` is any data conforming to the JSON model.
if `mandatory` is True (default) and you have implemented
`handle_returned_message`, then it will be called if your message
is unroutable."""
self._emit(
self.CMD_EXCHANGE,
command,
message,
conversation_id,
mandatory,
reply_to=reply_to,
correlation_id=correlation_id,
)
def return_success(
self,
destination: str,
message: JSON_MODEL,
conversation_id: str,
correlation_id: str,
mandatory: bool = True,
) -> None:
"""Send a successful result message.
The `message` is any data conforming to the JSON model.
if `mandatory` is True (default) and you have implemented
`handle_returned_message`, then it will be called if your message
is unroutable."""
headers = {"status": "success"}
self._emit(
self.CMD_EXCHANGE,
destination,
message,
conversation_id,
mandatory,
correlation_id=correlation_id,
headers=headers,
)
def return_error(
self,
destination: str,
message: JSON_MODEL,
conversation_id: str,
correlation_id: str,
mandatory: bool = True,
) -> None:
"""Send a failure result message.
The `message` is any data conforming to the JSON model.
If `mandatory` is True (default) and you have implemented
`handle_returned_message`, then it will be called if your message
is unroutable."""
headers = {"status": "error"}
self._emit(
self.CMD_EXCHANGE,
destination,
message,
conversation_id,
mandatory,
correlation_id=correlation_id,
headers=headers,
)
def publish_event(
self,
event: str,
message: JSON_MODEL,
conversation_id: str,
mandatory: bool = False,
) -> None:
"""Publish an event on the bus.
The ``event`` is the name of the event,
and the `message` is any data conforming to the JSON model.
If `mandatory` is True and you have implemented
`handle_returned_message`, then it will be called if your message
is unroutable.
        The default is False because some events may not be consumed by anyone yet.
"""
self._emit(self.EVENT_EXCHANGE, event, message, conversation_id, mandatory)
def call_later(self, delay: int, callback: Callable) -> None:
"""Call `callback` after `delay` seconds."""
self._connection.ioloop.call_later(delay, callback)
def run(self) -> None:
"""Run the service by connecting to RabbitMQ and then
starting the IOLoop to block and allow the SelectConnection to operate.
"""
self._connection = self.connect()
self._connection.ioloop.start()
def stop(self, reconnect=False) -> None:
"""Cleanly shutdown the connection to RabbitMQ by stopping the consumer
with RabbitMQ. When RabbitMQ confirms the cancellation, on_cancelok
        will be invoked by pika, which will then close the channel and
connection. The IOLoop is started again if this method is invoked
when CTRL-C is pressed raising a KeyboardInterrupt exception. This
exception stops the IOLoop which needs to be running for pika to
communicate with RabbitMQ. All of the commands issued prior to starting
the IOLoop will be buffered but not processed.
This method is automatically triggered if we receive one of
these UNIX signals: signal.SIGHUP, signal.SIGTERM, signal.SIGINT.
"""
self.should_reconnect = reconnect
if not self._closing:
if not self._connection.is_closed:
self.log(WARNING, "Shutting down…")
self._closing = True
LOGGER.info("Stopping")
if self._consuming:
self.stop_consuming()
try:
self._connection.ioloop.start()
except RuntimeError:
# already running!
pass
else:
self._connection.ioloop.stop()
LOGGER.info("Stopped")
def handle_event(
self, event: str, payload: JSON_MODEL, conversation_id: str
) -> None:
"""Handle incoming event (may be overwritten by subclasses).
The `payload` is already decoded and is a python data structure compatible with the JSON data model.
        You should never do any filtering here: use the routing keys instead
        (see ``__init__()``).
        The default implementation dispatches the messages by calling methods of the form
        ``self.on_KEY(payload, conversation_id)`` where KEY is the routing key.
        """
        handler = getattr(self, "on_" + event, None)
if handler is not None:
handler(payload, conversation_id)
else:
self.log(
ERROR,
f"unexpected event {event}; check your subscriptions!",
conversation_id=conversation_id,
)
def handle_command(
self,
command: str,
payload: JSON_MODEL,
conversation_id: str,
reply_to: str,
correlation_id: str,
) -> None:
"""Handle incoming commands (may be overwriten by subclasses).
The `payload` is already decoded and is a python data structure compatible with the JSON data model.
You should never do any filtering here: use the routing keys intead (see ``__init__()``).
Expected errors should be returned with the ``return_error`` method.
The default implementation dispatches the messages by calling methods in the form
``self.on_COMMAND(payload, reply_to, correlation_id)`` where COMMAND is what is left
after stripping the ``service.`` prefix from the routing key.
"""
handler = getattr(self, "on_" + command.split(".")[-1])
if handler is not None:
handler(payload, conversation_id, reply_to, correlation_id)
else:
            # should never happen: it means we misconfigured the routing keys
self.log(
ERROR,
f"unexpected command {command}; check your subscriptions!",
conversation_id=conversation_id,
)
def handle_result(
self,
key: str,
payload: JSON_MODEL,
conversation_id: str,
status: str,
correlation_id: str,
) -> None:
"""Handle incoming result (may be overwritten by subclasses).
The `payload` is already decoded and is a python data structure compatible with the JSON data model.
        You should never do any filtering here: use the routing keys instead (see ``__init__()``).
        The ``key`` is the routing key and ``status`` is either "success" or "error".
        The default implementation dispatches the messages by calling methods of the form
        ``self.on_KEY(payload, conversation_id, status, correlation_id)`` where KEY is what is left
        after stripping the ``service.`` prefix from the routing key.
        """
        handler = getattr(self, "on_" + key.split(".")[-1], None)
        if handler is not None:
            handler(payload, conversation_id, status, correlation_id)
else:
            # should never happen: it means we misconfigured the routing keys
self.log(
ERROR,
f"unexpected result {key}; check your subscriptions!",
conversation_id=conversation_id,
)
# Abstract methods
def handle_returned_message(
self, key: str, payload: JSON_MODEL, envelope: Dict[str, str]
):
"""Invoked when a message is returned (to be implemented by subclasses).
        A message may be returned if:
* it was sent with the `mandatory` flag on True;
* and the broker was unable to route it to a queue.
"""
pass
def on_ready(self) -> None:
"""Code to execute once the service comes online.
(to be implemented by subclasses)
"""
pass
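# Illustrative sketch of the on_* dispatch convention documented in
# handle_event/handle_command above. The base class name ``Service`` and the
# routing keys are assumptions for illustration only; adapt them to the class
# and topology defined earlier in this module.
#
#     class EchoService(Service):
#         def on_ready(self) -> None:
#             self.publish_event("echo.started", {"ok": True}, conversation_id="boot")
#
#         def on_echo(self, payload, conversation_id, reply_to, correlation_id) -> None:
#             # called by handle_command for a routing key ending in ".echo"
#             self.return_success(reply_to, payload, conversation_id, correlation_id)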
| 39.014535 | 108 | 0.634304 | 38,561 | 0.95749 | 1,104 | 0.027413 | 1,124 | 0.02791 | 0 | 0 | 18,631 | 0.462618 |
fd0394b6bd7363e7ed4aa89ca0603954bd731b42
| 889 |
py
|
Python
|
CLI/mainmenue.py
|
MeatBoyed/PasswordBank2
|
f4367b22902ce1282772b184899e3d6e899c1cca
|
[
"MIT"
] | 1 |
2021-02-08T17:45:28.000Z
|
2021-02-08T17:45:28.000Z
|
CLI/mainmenue.py
|
MeatBoyed/PasswordBank2
|
f4367b22902ce1282772b184899e3d6e899c1cca
|
[
"MIT"
] | null | null | null |
CLI/mainmenue.py
|
MeatBoyed/PasswordBank2
|
f4367b22902ce1282772b184899e3d6e899c1cca
|
[
"MIT"
] | null | null | null |
from .mock_api.utils import GetSelection
from .viewAccounts import ViewAccounts
from .addAccount import AddAccount
def MainMenue():
headerMessage = (
"""\n\n=========================================================\n===================== Main Menue ========================\n""")
print(headerMessage)
accessMessage = (
"""1: Search for Account(s)\n2: Add an Account\n3: Quit\n\n=========================================================""")
print(accessMessage)
while True:
select = GetSelection()
print("---------------------------------------------------------")
if select == 1:
ViewAccounts()
        elif select == 2:
            AddAccount()
        elif select == 3:
            print("Quitting account")
            break
        else:
            print("Enter a valid selection")
| 26.939394 | 137 | 0.418448 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 353 | 0.397075 |
fd03c109230a47c1540cdcf65dcdedac9302a120
| 7,342 |
py
|
Python
|
dataset.py
|
Intelligent-Computing-Lab-Yale/Energy-Separation-Training
|
9336862a10c915a482d427e8a36367f648e7dd40
|
[
"MIT"
] | 2 |
2022-03-31T02:36:52.000Z
|
2022-03-31T06:13:25.000Z
|
dataset.py
|
Intelligent-Computing-Lab-Yale/Energy-Separation-Training
|
9336862a10c915a482d427e8a36367f648e7dd40
|
[
"MIT"
] | null | null | null |
dataset.py
|
Intelligent-Computing-Lab-Yale/Energy-Separation-Training
|
9336862a10c915a482d427e8a36367f648e7dd40
|
[
"MIT"
] | null | null | null |
import torch
import torchvision
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import os
def get10(batch_size, data_root='/tmp/public_dataset/pytorch', train=True, val=True, **kwargs):
data_root = os.path.expanduser(os.path.join(data_root, 'cifar10-data'))
num_workers = kwargs.setdefault('num_workers', 1)
kwargs.pop('input_size', None)
print("Building CIFAR-10 data loader with {} workers".format(num_workers))
ds = []
if train:
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(
root=data_root, train=True, download=True,
transform=transforms.Compose([
transforms.Pad(4),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
# transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])),
batch_size=batch_size, shuffle=True, **kwargs)
ds.append(train_loader)
if val:
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(
root=data_root, train=False, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
# transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])),
batch_size=batch_size, shuffle=False, **kwargs)
ds.append(test_loader)
ds = ds[0] if len(ds) == 1 else ds
return ds
def get100(batch_size, data_root='/tmp/public_dataset/pytorch', train=True, val=True, **kwargs):
data_root = os.path.expanduser(os.path.join(data_root, 'cifar100-data'))
num_workers = kwargs.setdefault('num_workers', 1)
kwargs.pop('input_size', None)
print("Building CIFAR-100 data loader with {} workers".format(num_workers))
ds = []
if train:
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(
root=data_root, train=True, download=True,
transform=transforms.Compose([
transforms.Pad(4),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor()
# transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
])),
batch_size=batch_size, shuffle=True, **kwargs)
ds.append(train_loader)
if val:
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(
root=data_root, train=False, download=True,
transform=transforms.Compose([
transforms.ToTensor()
# transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
])),
batch_size=batch_size, shuffle=False, **kwargs)
ds.append(test_loader)
ds = ds[0] if len(ds) == 1 else ds
return ds
def svhn(batch_size, data_root='/tmp/public_dataset/pytorch', train=True, val=True, **kwargs):
data_root = os.path.expanduser(os.path.join(data_root, 'svhn-data'))
num_workers = kwargs.setdefault('num_workers', 1)
kwargs.pop('input_size', None)
print("Building SVHN data loader with {} workers".format(num_workers))
ds = []
if train:
train_loader = torch.utils.data.DataLoader(
datasets.SVHN(
root=data_root, split='train', download=True,
transform=transforms.Compose([
transforms.Pad(4),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])),
batch_size=batch_size, shuffle=True, **kwargs)
ds.append(train_loader)
if val:
test_loader = torch.utils.data.DataLoader(
datasets.SVHN(
root=data_root, split='test', download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])),
batch_size=batch_size, shuffle=False, **kwargs)
ds.append(test_loader)
ds = ds[0] if len(ds) == 1 else ds
return ds
class normalize(object):
def __init__(self, mean, absmax):
self.mean = mean
self.absmax = absmax
def __call__(self, tensor):
# Args: tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
# Returns: Tensor: Normalized image.
for t, m, am in zip(tensor, self.mean, self.absmax):
t.sub_(m).div_(am)
return tensor
def tinyimagenet(batch_size):
traindir = os.path.join('/gpfs/loomis/project/panda/shared/tiny-imagenet-200/train')
valdir = os.path.join('/gpfs/loomis/project/panda/shared/tiny-imagenet-200/val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
ds = []
train_dataset = torchvision.datasets.ImageFolder(
traindir,
transforms.Compose([
# transforms.Pad(4),
# transforms.RandomCrop(32),
transforms.RandomCrop(64, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
# normalize,
]))
trainloader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size, shuffle=True,
num_workers=2, pin_memory=True, drop_last=True)
ds.append(trainloader)
testloader = torch.utils.data.DataLoader(
torchvision.datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(64),
transforms.ToTensor(),
# normalize,
])),
batch_size=batch_size, shuffle=False,
num_workers=2, pin_memory=True, drop_last=True)
ds.append(testloader)
ds = ds[0] if len(ds) == 1 else ds
return ds
def imagenet(batch_size):
traindir = os.path.join('/gpfs/loomis/project/panda/shared/imagenet_2012/train')
valdir = os.path.join('/gpfs/loomis/project/panda/shared/imagenet/val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
ds = []
train_dataset = torchvision.datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomCrop(64, padding=8),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
# normalize,
]))
trainloader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size, shuffle=True,
num_workers=2, pin_memory=True, drop_last=True)
ds.append(trainloader)
testloader = torch.utils.data.DataLoader(
torchvision.datasets.ImageFolder(valdir, transforms.Compose([
transforms.ToTensor(),
# normalize,
])),
batch_size=batch_size, shuffle=False,
num_workers=2, pin_memory=True, drop_last=True)
ds.append(testloader)
ds = ds[0] if len(ds) == 1 else ds
return ds
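# Illustrative usage of the loaders above (batch size and data root are arbitrary examples):
#
#     train_loader, test_loader = get10(batch_size=128, data_root='/tmp/public_dataset/pytorch')
#     images, labels = next(iter(train_loader))   # images has shape (128, 3, 32, 32)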
| 40.120219 | 96 | 0.578725 | 389 | 0.052983 | 0 | 0 | 0 | 0 | 0 | 0 | 1,066 | 0.145192 |
fd04dad88b99035b710b66d225ec5a6739f0249b
| 25,604 |
py
|
Python
|
tests/st/ops/cpu/test_scatter_arithmetic_op.py
|
PowerOlive/mindspore
|
bda20724a94113cedd12c3ed9083141012da1f15
|
[
"Apache-2.0"
] | 3,200 |
2020-02-17T12:45:41.000Z
|
2022-03-31T20:21:16.000Z
|
tests/st/ops/cpu/test_scatter_arithmetic_op.py
|
zimo-geek/mindspore
|
665ec683d4af85c71b2a1f0d6829356f2bc0e1ff
|
[
"Apache-2.0"
] | 176 |
2020-02-12T02:52:11.000Z
|
2022-03-28T22:15:55.000Z
|
tests/st/ops/cpu/test_scatter_arithmetic_op.py
|
zimo-geek/mindspore
|
665ec683d4af85c71b2a1f0d6829356f2bc0e1ff
|
[
"Apache-2.0"
] | 621 |
2020-03-09T01:31:41.000Z
|
2022-03-30T03:43:19.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor, Parameter
from mindspore.ops import operations as P
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
class TestScatterAddNet(nn.Cell):
def __init__(self, lock, inputx, indices, updates):
super(TestScatterAddNet, self).__init__()
self.scatter_add = P.ScatterAdd(use_locking=lock)
self.inputx = Parameter(inputx, name="inputx")
self.indices = Parameter(indices, name="indices")
self.updates = Parameter(updates, name="updates")
def construct(self):
out = self.scatter_add(self.inputx, self.indices, self.updates)
return out
def scatter_add_net(inputx, indices, updates):
lock = True
net = TestScatterAddNet(lock, inputx, indices, updates)
return net()
def scatter_add_use_locking_false_net(inputx, indices, updates):
lock = False
net = TestScatterAddNet(lock, inputx, indices, updates)
return net()
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_scatter_add_small_float32():
inputx = Tensor(np.zeros((2, 3)).astype(np.float32))
indices = Tensor(np.array([[0, 1], [0, 1]]).astype(np.int32))
updates = Tensor(np.arange(12).reshape((2, 2, 3)).astype(np.float32))
output = scatter_add_net(inputx, indices, updates)
expected = np.array([[6., 8., 10.],
[12., 14., 16.]])
np.testing.assert_array_almost_equal(output.asnumpy(), expected)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_scatter_add_input_updated():
inputx = Tensor(np.zeros((2, 3)).astype(np.float32))
indices = Tensor(np.array([[0, 1], [0, 1]]).astype(np.int32))
updates = Tensor(np.arange(12).reshape((2, 2, 3)).astype(np.float32))
lock = True
net = TestScatterAddNet(lock, inputx, indices, updates)
net()
expected = np.array([[6., 8., 10.],
[12., 14., 16.]])
np.testing.assert_array_almost_equal(net.inputx.asnumpy(), expected)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_scatter_add_large_shape_float32():
inputx = Tensor(np.ones((4, 2, 3, 4)).astype(np.float32))
indices = Tensor(np.array([[0, 2], [3, 1]]).astype(np.int32))
updates = Tensor(np.arange(96).reshape((2, 2, 2, 3, 4)).astype(np.float32))
output = scatter_add_net(inputx, indices, updates)
expected = np.array([[[[1., 2., 3., 4.],
[5., 6., 7., 8.],
[9., 10., 11., 12.]],
[[13., 14., 15., 16.],
[17., 18., 19., 20.],
[21., 22., 23., 24.]]],
[[[73., 74., 75., 76.],
[77., 78., 79., 80.],
[81., 82., 83., 84.]],
[[85., 86., 87., 88.],
[89., 90., 91., 92.],
[93., 94., 95., 96.]]],
[[[25., 26., 27., 28.],
[29., 30., 31., 32.],
[33., 34., 35., 36.]],
[[37., 38., 39., 40.],
[41., 42., 43., 44.],
[45., 46., 47., 48.]]],
[[[49., 50., 51., 52.],
[53., 54., 55., 56.],
[57., 58., 59., 60.]],
[[61., 62., 63., 64.],
[65., 66., 67., 68.],
[69., 70., 71., 72.]]]])
np.testing.assert_array_almost_equal(output.asnumpy(), expected)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_scatter_add_small_float32_use_locking_false():
inputx = Tensor(np.zeros((2, 3)).astype(np.float32))
indices = Tensor(np.array([1, 0]).astype(np.int32))
updates = Tensor(np.arange(6).reshape((2, 3)).astype(np.float32))
output = scatter_add_use_locking_false_net(inputx, indices, updates)
expected = np.array([[3., 4., 5.],
[0., 1., 2.]])
np.testing.assert_array_almost_equal(output.asnumpy(), expected)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_scatter_add_input_less_than_1_float32():
inputx = Tensor(np.array([[0.214141, 0.415151, 0.51516],
[0.876542, 0.451611, 0.55112],
[0.111244, 0.633333, 0.34444]]).astype(np.float32))
indices = Tensor(np.array([[[1, 0, 2],
[2, 2, 0]],
[[1, 0, 1],
[2, 1, 2]]]).astype(np.int32))
updates = Tensor(np.arange(34, 70).reshape((2, 2, 3, 3)).astype(np.float32))
output = scatter_add_net(inputx, indices, updates)
expected = np.array([[141.21414, 144.41515, 147.51517],
[208.87654, 212.45161, 216.55112],
[257.11124, 262.63333, 267.34442]], dtype=np.float32)
np.testing.assert_array_almost_equal(output.asnumpy(), expected)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_scatter_add_float16():
inputx = Tensor(np.zeros((2, 3)).astype(np.float16))
indices = Tensor(np.array([[0, 1], [0, 1]]).astype(np.int32))
updates = Tensor(np.arange(12).reshape((2, 2, 3)).astype(np.float16))
output = scatter_add_net(inputx, indices, updates)
expected = np.array([[6., 8., 10.],
[12., 14., 16.]])
np.testing.assert_array_almost_equal(output.asnumpy(), expected)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_scatter_add_large_float16():
inputx = Tensor(np.zeros((2, 3, 4)).astype(np.float16))
indices = Tensor(np.array([[0, 0], [1, 1]]).astype(np.int32))
updates = Tensor(np.arange(63, 111).reshape((2, 2, 3, 4)).astype(np.float16))
output = scatter_add_net(inputx, indices, updates)
expected = np.array([[[138., 140., 142., 144.],
[146., 148., 150., 152.],
[154., 156., 158., 160.]],
[[186., 188., 190., 192.],
[194., 196., 198., 200.],
[202., 204., 206., 208.]]])
np.testing.assert_array_almost_equal(output.asnumpy(), expected)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_scatter_add_disordered_float16():
inputx = Tensor(np.flip(np.arange(34, 46).reshape(3, 4).astype(np.float16)))
indices = Tensor(np.array([[[0, 1, 2],
[2, 1, 0]],
[[0, 0, 0],
[2, 2, 2]]]).astype(np.int32))
updates = Tensor(np.arange(63, 111).reshape((2, 2, 3, 4)).astype(np.float16))
output = scatter_add_net(inputx, indices, updates)
expected = np.array([[464., 468., 472., 476.],
[187., 188., 189., 190.],
[492., 496., 500., 504.]])
np.testing.assert_array_almost_equal(output.asnumpy(), expected)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_scatter_add_large_int32():
inputx = Tensor(np.zeros((2, 3, 4)).astype(np.int32))
indices = Tensor(np.array([[0, 0], [1, 1]]).astype(np.int32))
updates = Tensor(np.arange(63, 111).reshape((2, 2, 3, 4)).astype(np.int32))
output = scatter_add_net(inputx, indices, updates)
expected = np.array([[[138., 140., 142., 144.],
[146., 148., 150., 152.],
[154., 156., 158., 160.]],
[[186., 188., 190., 192.],
[194., 196., 198., 200.],
[202., 204., 206., 208.]]]).astype(np.int32)
np.testing.assert_array_almost_equal(output.asnumpy(), expected)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_scatter_add_disordered_int32():
inputx = Tensor(np.flip(np.arange(34, 46).reshape(3, 4).astype(np.int32)))
indices = Tensor(np.array([[[0, 1, 2],
[2, 1, 0]],
[[0, 0, 0],
[2, 2, 2]]]).astype(np.int32))
updates = Tensor(np.arange(63, 111).reshape((2, 2, 3, 4)).astype(np.int32))
output = scatter_add_net(inputx, indices, updates)
expected = np.array([[464., 468., 472., 476.],
[187., 188., 189., 190.],
[492., 496., 500., 504.]]).astype(np.int32)
np.testing.assert_array_almost_equal(output.asnumpy(), expected)
class TestScatterSubNet(nn.Cell):
def __init__(self, lock, inputx, indices, updates):
super(TestScatterSubNet, self).__init__()
self.scatter_sub = P.ScatterSub(use_locking=lock)
self.inputx = Parameter(inputx, name="inputx")
self.indices = Parameter(indices, name="indices")
self.updates = Parameter(updates, name="updates")
def construct(self):
out = self.scatter_sub(self.inputx, self.indices, self.updates)
return out
def scatter_sub_net(inputx, indices, updates):
lock = True
net = TestScatterSubNet(lock, inputx, indices, updates)
return net()
def scatter_sub_use_locking_false_net(inputx, indices, updates):
lock = False
net = TestScatterSubNet(lock, inputx, indices, updates)
return net()
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_scatter_sub_input_updated():
inputx = Tensor(np.zeros((2, 3)).astype(np.float32))
indices = Tensor(np.array([[0, 1], [0, 1]]).astype(np.int32))
updates = Tensor(np.arange(12).reshape((2, 2, 3)).astype(np.float32))
lock = True
net = TestScatterSubNet(lock, inputx, indices, updates)
net()
expected = np.array([[-6., -8., -10.],
[-12., -14., -16.]])
np.testing.assert_array_almost_equal(net.inputx.asnumpy(), expected)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_scatter_sub_large_shape_float32():
inputx = Tensor(np.ones((4, 2, 3, 4)).astype(np.float32))
indices = Tensor(np.array([[0, 2], [3, 1]]).astype(np.int32))
updates = Tensor(np.arange(96).reshape((2, 2, 2, 3, 4)).astype(np.float32))
output = scatter_sub_net(inputx, indices, updates)
expected = np.array(
[[[[1.0, 0.0, -1.0, -2.0],
[-3.0, -4.0, -5.0, -6.0],
[-7.0, -8.0, -9.0, -10.0]],
[[-11.0, -12.0, -13.0, -14.0],
[-15.0, -16.0, -17.0, -18.0],
[-19.0, -20.0, -21.0, -22.0]]],
[[[-71.0, -72.0, -73.0, -74.0],
[-75.0, -76.0, -77.0, -78.0],
[-79.0, -80.0, -81.0, -82.0]],
[[-83.0, -84.0, -85.0, -86.0],
[-87.0, -88.0, -89.0, -90.0],
[-91.0, -92.0, -93.0, -94.0]]],
[[[-23.0, -24.0, -25.0, -26.0],
[-27.0, -28.0, -29.0, -30.0],
[-31.0, -32.0, -33.0, -34.0]],
[[-35.0, -36.0, -37.0, -38.0],
[-39.0, -40.0, -41.0, -42.0],
[-43.0, -44.0, -45.0, -46.0]]],
[[[-47.0, -48.0, -49.0, -50.0],
[-51.0, -52.0, -53.0, -54.0],
[-55.0, -56.0, -57.0, -58.0]],
[[-59.0, -60.0, -61.0, -62.0],
[-63.0, -64.0, -65.0, -66.0],
[-67.0, -68.0, -69.0, -70.0]]]])
np.testing.assert_array_almost_equal(output.asnumpy(), expected)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_scatter_sub_small_float32_use_locking_false():
inputx = Tensor(np.zeros((2, 3)).astype(np.float32))
indices = Tensor(np.array([1, 0]).astype(np.int32))
updates = Tensor(np.arange(6).reshape((2, 3)).astype(np.float32))
output = scatter_sub_use_locking_false_net(inputx, indices, updates)
expected = np.array([[-3., -4., -5.],
[-0., -1., -2.]])
np.testing.assert_array_almost_equal(output.asnumpy(), expected)
class TestScatterMulNet(nn.Cell):
def __init__(self, lock, inputx, indices, updates):
super(TestScatterMulNet, self).__init__()
self.scatter_mul = P.ScatterMul(use_locking=lock)
self.inputx = Parameter(inputx, name="inputx")
self.indices = Parameter(indices, name="indices")
self.updates = Parameter(updates, name="updates")
def construct(self):
out = self.scatter_mul(self.inputx, self.indices, self.updates)
return out
def scatter_mul_net(inputx, indices, updates):
lock = True
net = TestScatterMulNet(lock, inputx, indices, updates)
return net()
def scatter_mul_use_locking_false_net(inputx, indices, updates):
lock = False
net = TestScatterMulNet(lock, inputx, indices, updates)
return net()
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_scatter_mul_input_updated():
inputx = Tensor(np.ones((2, 3)).astype(np.float32))
indices = Tensor(np.array([[0, 1], [0, 1]]).astype(np.int32))
updates = Tensor(np.arange(12).reshape((2, 2, 3)).astype(np.float32))
lock = True
net = TestScatterMulNet(lock, inputx, indices, updates)
net()
expected = np.array([[0., 7., 16.],
[27., 40., 55.]])
np.testing.assert_array_almost_equal(net.inputx.asnumpy(), expected)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_scatter_mul_output_updated_float32():
inputx = Tensor(np.ones((2, 3)).astype(np.float32))
indices = Tensor(np.array([[0, 1], [0, 1]]).astype(np.int32))
updates = Tensor(np.arange(12).reshape((2, 2, 3)).astype(np.float32))
output = scatter_mul_net(inputx, indices, updates)
expected = np.array([[0., 7., 16.],
[27., 40., 55.]])
np.testing.assert_array_almost_equal(output.asnumpy(), expected)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_scatter_mul_small_float32_use_locking_false():
inputx = Tensor(np.ones((2, 3)).astype(np.float32))
indices = Tensor(np.array([[0, 1], [0, 1]]).astype(np.int32))
updates = Tensor(np.arange(12).reshape((2, 2, 3)).astype(np.float32))
output = scatter_mul_use_locking_false_net(inputx, indices, updates)
expected = np.array([[0., 7., 16.],
[27., 40., 55.]])
np.testing.assert_array_almost_equal(output.asnumpy(), expected)
class TestScatterDivNet(nn.Cell):
def __init__(self, lock, inputx, indices, updates):
super(TestScatterDivNet, self).__init__()
self.scatter_div = P.ScatterDiv(use_locking=lock)
self.inputx = Parameter(inputx, name="inputx")
self.indices = Parameter(indices, name="indices")
self.updates = Parameter(updates, name="updates")
def construct(self):
out = self.scatter_div(self.inputx, self.indices, self.updates)
return out
def scatter_div_net(inputx, indices, updates):
lock = True
net = TestScatterDivNet(lock, inputx, indices, updates)
return net()
def scatter_div_use_locking_false_net(inputx, indices, updates):
lock = False
net = TestScatterDivNet(lock, inputx, indices, updates)
return net()
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_scatter_div_input_updated():
inputx = Tensor(np.zeros((2, 3)).astype(np.float32))
indices = Tensor(np.array([[0, 1], [0, 1]]).astype(np.int32))
updates = Tensor(np.arange(1, 13).reshape((2, 2, 3)).astype(np.float32))
lock = True
net = TestScatterDivNet(lock, inputx, indices, updates)
net()
expected = np.array([[0., 0., 0.],
[0., 0., 0.]])
np.testing.assert_array_almost_equal(net.inputx.asnumpy(), expected)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_scatter_div_output_updated_float32():
inputx = Tensor(np.zeros((2, 3)).astype(np.float32))
indices = Tensor(np.array([[0, 1], [0, 1]]).astype(np.int32))
updates = Tensor(np.arange(1, 13).reshape((2, 2, 3)).astype(np.float32))
output = scatter_div_net(inputx, indices, updates)
expected = np.array([[0., 0., 0.],
[0., 0., 0.]])
np.testing.assert_array_almost_equal(output.asnumpy(), expected)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_scatter_div_small_float32_use_locking_false():
inputx = Tensor(np.ones((2, 3)).astype(np.float32) * 10)
indices = Tensor(np.array([[0, 1], [0, 1]]).astype(np.int32))
updates = Tensor(np.ones(12).reshape((2, 2, 3)).astype(np.float32))
output = scatter_div_use_locking_false_net(inputx, indices, updates)
expected = np.array([[10., 10., 10.],
[10., 10., 10.]])
np.testing.assert_array_almost_equal(output.asnumpy(), expected)
class TestScatterMaxNet(nn.Cell):
def __init__(self, lock, inputx, indices, updates):
super(TestScatterMaxNet, self).__init__()
self.scatter_max = P.ScatterMax(use_locking=lock)
self.inputx = Parameter(inputx, name="inputx")
self.indices = Parameter(indices, name="indices")
self.updates = Parameter(updates, name="updates")
def construct(self):
out = self.scatter_max(self.inputx, self.indices, self.updates)
return out
def scatter_max_net(inputx, indices, updates):
lock = True
net = TestScatterMaxNet(lock, inputx, indices, updates)
return net()
def scatter_max_use_locking_false_net(inputx, indices, updates):
lock = False
net = TestScatterMaxNet(lock, inputx, indices, updates)
return net()
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_scatter_max_input_updated():
inputx = Tensor(np.zeros((2, 3)).astype(np.float32))
indices = Tensor(np.array([[0, 1], [0, 1]]).astype(np.int32))
updates = Tensor(np.arange(12).reshape((2, 2, 3)).astype(np.float32))
lock = True
net = TestScatterMaxNet(lock, inputx, indices, updates)
net()
expected = np.array([[6., 7., 8.],
[9., 10., 11.]])
np.testing.assert_array_almost_equal(net.inputx.asnumpy(), expected)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_scatter_max_output_updated_float32():
inputx = Tensor(np.zeros((2, 3)).astype(np.float32))
indices = Tensor(np.array([[0, 1], [0, 1]]).astype(np.int32))
updates = Tensor(np.arange(12).reshape((2, 2, 3)).astype(np.float32))
output = scatter_max_net(inputx, indices, updates)
expected = np.array([[6., 7., 8.],
[9., 10., 11.]])
np.testing.assert_array_almost_equal(output.asnumpy(), expected)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_scatter_max_small_float32_use_locking_false():
inputx = Tensor(np.ones((2, 3)).astype(np.float32) * 10)
indices = Tensor(np.array([[0, 1], [0, 1]]).astype(np.int32))
updates = Tensor(np.arange(12).reshape((2, 2, 3)).astype(np.float32))
output = scatter_max_use_locking_false_net(inputx, indices, updates)
expected = np.array([[10., 10., 10.],
[10., 10., 11.]])
np.testing.assert_array_almost_equal(output.asnumpy(), expected)
class TestScatterMinNet(nn.Cell):
def __init__(self, lock, inputx, indices, updates):
super(TestScatterMinNet, self).__init__()
self.scatter_min = P.ScatterMin(use_locking=lock)
self.inputx = Parameter(inputx, name="inputx")
self.indices = Parameter(indices, name="indices")
self.updates = Parameter(updates, name="updates")
def construct(self):
out = self.scatter_min(self.inputx, self.indices, self.updates)
return out
def scatter_min_net(inputx, indices, updates):
lock = True
net = TestScatterMinNet(lock, inputx, indices, updates)
return net()
def scatter_min_use_locking_false_net(inputx, indices, updates):
lock = False
net = TestScatterMinNet(lock, inputx, indices, updates)
return net()
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_scatter_min_input_updated():
inputx = Tensor(np.zeros((2, 3)).astype(np.float32))
indices = Tensor(np.array([[0, 1], [0, 1]]).astype(np.int32))
updates = Tensor(np.arange(12).reshape((2, 2, 3)).astype(np.float32))
lock = True
net = TestScatterMinNet(lock, inputx, indices, updates)
net()
expected = np.array([[0., 0., 0.],
[0., 0., 0.]])
np.testing.assert_array_almost_equal(net.inputx.asnumpy(), expected)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_scatter_min_output_updated_float32():
inputx = Tensor(np.ones((2, 3)).astype(np.float32))
indices = Tensor(np.array([[0, 1], [0, 1]]).astype(np.int32))
updates = Tensor(np.arange(12).reshape((2, 2, 3)).astype(np.float32))
output = scatter_min_net(inputx, indices, updates)
expected = np.array([[0., 1., 1.],
[1., 1., 1.]])
np.testing.assert_array_almost_equal(output.asnumpy(), expected)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_scatter_min_small_float32_use_locking_false():
inputx = Tensor(np.ones((2, 3)).astype(np.float32))
indices = Tensor(np.array([[0, 1], [0, 1]]).astype(np.int32))
updates = Tensor(np.arange(12).reshape((2, 2, 3)).astype(np.float32))
output = scatter_min_use_locking_false_net(inputx, indices, updates)
expected = np.array([[0., 1., 1.],
[1., 1., 1.]])
np.testing.assert_array_almost_equal(output.asnumpy(), expected)
class TestScatterUpdateNet(nn.Cell):
def __init__(self, lock, inputx, indices, updates):
super(TestScatterUpdateNet, self).__init__()
self.scatter_update = P.ScatterUpdate(use_locking=lock)
self.inputx = Parameter(inputx, name="inputx")
self.indices = Parameter(indices, name="indices")
self.updates = Parameter(updates, name="updates")
def construct(self):
out = self.scatter_update(self.inputx, self.indices, self.updates)
return out
def scatter_update_net(inputx, indices, updates):
lock = True
net = TestScatterUpdateNet(lock, inputx, indices, updates)
return net()
def scatter_update_use_locking_false_net(inputx, indices, updates):
lock = False
net = TestScatterUpdateNet(lock, inputx, indices, updates)
return net()
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_scatter_update_input_updated():
inputx = Tensor(np.zeros((2, 3)).astype(np.float32))
indices = Tensor(np.array([[0, 1], [0, 1]]).astype(np.int32))
updates = Tensor(np.arange(12).reshape((2, 2, 3)).astype(np.float32))
lock = True
net = TestScatterUpdateNet(lock, inputx, indices, updates)
net()
expected = np.array([[6., 7., 8.],
[9., 10., 11.]])
np.testing.assert_array_almost_equal(net.inputx.asnumpy(), expected)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_scatter_update_output_updated_float32():
inputx = Tensor(np.ones((2, 3)).astype(np.float32))
indices = Tensor(np.array([[0, 1], [0, 1]]).astype(np.int32))
updates = Tensor(np.arange(12).reshape((2, 2, 3)).astype(np.float32))
output = scatter_update_net(inputx, indices, updates)
expected = np.array([[6., 7., 8.],
[9., 10., 11.]])
np.testing.assert_array_almost_equal(output.asnumpy(), expected)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_scatter_update_small_float32_use_locking_false():
inputx = Tensor(np.ones((2, 3)).astype(np.float32))
indices = Tensor(np.array([[0, 1], [0, 1]]).astype(np.int32))
updates = Tensor(np.arange(12).reshape((2, 2, 3)).astype(np.float32))
output = scatter_update_use_locking_false_net(inputx, indices, updates)
expected = np.array([[6., 7., 8.],
[9., 10., 11.]])
np.testing.assert_array_almost_equal(output.asnumpy(), expected)
| 39.757764 | 82 | 0.594712 | 3,480 | 0.135916 | 0 | 0 | 18,762 | 0.732776 | 0 | 0 | 854 | 0.033354 |
fd06722fb8cfe07ace7e4c46b654df0346766b26
| 4,181 |
py
|
Python
|
nn_similarity_index/cwt_kernel_mat.py
|
forgi86/xfer
|
56d98a66d6adb2466d1a73b52f3b27193930a008
|
[
"Apache-2.0"
] | 244 |
2018-08-31T18:35:29.000Z
|
2022-03-20T01:12:50.000Z
|
nn_similarity_index/cwt_kernel_mat.py
|
forgi86/xfer
|
56d98a66d6adb2466d1a73b52f3b27193930a008
|
[
"Apache-2.0"
] | 26 |
2018-08-29T15:31:21.000Z
|
2021-06-24T08:05:53.000Z
|
nn_similarity_index/cwt_kernel_mat.py
|
forgi86/xfer
|
56d98a66d6adb2466d1a73b52f3b27193930a008
|
[
"Apache-2.0"
] | 57 |
2018-09-11T13:40:35.000Z
|
2022-02-22T14:43:34.000Z
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# ==============================================================================
import os
os.environ["OMP_NUM_THREADS"] = "1"
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
import torchvision.models as models
import numpy as np
from abc import ABC
import argparse
from sketched_kernels import SketchedKernels
from utils import *
if __name__ == "__main__":
# Get arguments from the command line
parser = argparse.ArgumentParser(description='PyTorch CWT sketching kernel matrices')
parser.add_argument('--datapath', type=str,
help='absolute path to the dataset')
parser.add_argument('--modelname', type=str,
help='model name')
parser.add_argument('--pretrained', action='store_true',
help='whether to load a pretrained ImageNet model')
parser.add_argument('--seed', default=0, type=int,
help='random seed for sketching')
parser.add_argument('--task', default='cifar10', type=str, choices=['cifar10', 'cifar100', 'svhn', 'stl10'],
help='the name of the dataset, cifar10 or cifar100 or svhn or stl10')
parser.add_argument('--split', default='train', type=str,
help='split of the dataset, train or test')
parser.add_argument('--bsize', default=512, type=int,
help='batch size for computing the kernel')
parser.add_argument('--M', '--num-buckets-sketching', default=512, type=int,
help='number of buckets in Sketching')
parser.add_argument('--T', '--num-buckets-per-sample', default=1, type=int,
help='number of buckets each data sample is sketched to')
parser.add_argument('--freq_print', default=10, type=int,
help='frequency for printing the progress')
args = parser.parse_args()
# Set the backend and the random seed for running our code
device = 'cuda' if torch.cuda.is_available() else 'cpu'
torch.manual_seed(args.seed)
if device == 'cuda':
cudnn.benchmark = True
torch.cuda.manual_seed(args.seed)
# The size of images for training and testing ImageNet models
imgsize = 224
# Generate a dataloader that iteratively reads data
# Load a model, either pretrained or not
loader = load_dataset(args.task, args.split, args.bsize, args.datapath, imgsize)
    net = load_model(device, args.modelname, pretrained=args.pretrained)
# Set the model to be in the evaluation mode. VERY IMPORTANT!
# This step to fix the running statistics in batchnorm layers,
# and disable dropout layers
net.eval()
csm = SketchedKernels(net, loader, imgsize, device, args.M, args.T, args.freq_print)
csm.compute_sketched_kernels()
    # Report the norm of the kernel mean embedding for each sketched layer
for layer_id in range(len(csm.kernel_matrices)):
nkme = (csm.kernel_matrices[layer_id].sum() ** 0.5) / csm.n_samples
print("The norm of the kernel mean embedding of layer {:d} is {:.4f}".format(layer_id, nkme))
del net, loader
torch.cuda.empty_cache()
# Save the sketched kernel matrices
savepath = 'sketched_kernel_mat/'
if not os.path.isdir(savepath):
os.mkdir(savepath)
save_filename = '{}_{}_{}_{}.npy'.format(args.modelname, args.split, args.task, args.seed)
np.save(savepath + save_filename, csm.kernel_matrices)
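# Example invocation (illustrative only; the path, model name and hyper-parameters
# are placeholders and depend on what load_model/load_dataset in utils.py accept):
#
#   python cwt_kernel_mat.py --datapath /path/to/data --modelname resnet18 \
#       --pretrained --task cifar10 --split train --bsize 512 --M 512 --T 1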
| 40.201923 | 112 | 0.648649 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,908 | 0.45635 |
fd067b6667868f936c5b7ba2c71c491e3eeb9190
| 844 |
py
|
Python
|
venv/Lib/site-packages/traits/observation/events.py
|
richung99/digitizePlots
|
6b408c820660a415a289726e3223e8f558d3e18b
|
[
"MIT"
] | 1 |
2022-01-18T17:56:51.000Z
|
2022-01-18T17:56:51.000Z
|
venv/Lib/site-packages/traits/observation/events.py
|
richung99/digitizePlots
|
6b408c820660a415a289726e3223e8f558d3e18b
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/traits/observation/events.py
|
richung99/digitizePlots
|
6b408c820660a415a289726e3223e8f558d3e18b
|
[
"MIT"
] | null | null | null |
# (C) Copyright 2005-2021 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
""" Event objects received by change handlers added using observe.
"""
from traits.observation._dict_change_event import ( # noqa: F401
DictChangeEvent,
)
from traits.observation._list_change_event import ( # noqa: F401
ListChangeEvent,
)
from traits.observation._set_change_event import ( # noqa: F401
SetChangeEvent,
)
from traits.observation._trait_change_event import ( # noqa: F401
TraitChangeEvent,
)
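# Example handler signature (illustrative): a callable registered with
# ``HasTraits.observe`` receives one of the event objects re-exported above,
# e.g. a ``TraitChangeEvent`` with ``object``, ``name``, ``old`` and ``new``
# attributes:
#
#     def handler(event):
#         print(event.name, event.old, event.new)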
| 29.103448 | 71 | 0.760664 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 516 | 0.611374 |
fd077dfb9ba449d6f886f45f49324f828fa9d71b
| 827 |
py
|
Python
|
src/run_hid_4_network2.py
|
Naresh1318/Effect_of_injected_noise_in_deep_NN
|
0d001ea2c4d33011204247cb4c066b0da6632c04
|
[
"Unlicense"
] | 2 |
2016-09-11T08:47:29.000Z
|
2016-11-19T10:29:47.000Z
|
src/run_hid_4_network2.py
|
Naresh1318/Effect_of_injected_noise_in_deep_NN
|
0d001ea2c4d33011204247cb4c066b0da6632c04
|
[
"Unlicense"
] | null | null | null |
src/run_hid_4_network2.py
|
Naresh1318/Effect_of_injected_noise_in_deep_NN
|
0d001ea2c4d33011204247cb4c066b0da6632c04
|
[
"Unlicense"
] | null | null | null |
import mnist_loader
import network2
import numpy as np
training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
eta = 0.9
m_b_s = 10
epochs = 30
trials = 10
trial_ev = []
for t in xrange(trials):
net = network2.Network([784, 50, 50, 50, 50, 10], cost=network2.CrossEntropyCost)
net.default_weight_initializer()
_,ev,_,_ = net.SGD(training_data[:1000], epochs, m_b_s, eta, evaluation_data=test_data[:1000],monitor_evaluation_accuracy=True)
print "Trial {} Complete".format(t + 1)
print "Maximum Evaluation Accuracy : {}".format(np.amax(ev))
trial_ev.append(np.amax(ev))
Avg_ev = np.mean(trial_ev)
Max_ev = np.amax(trial_ev)
print "Average Evaluation Accuracy for {} trials is {}".format(trials,Avg_ev)
print "Maximum Evaluation Accuracy for {} trials is {}".format(trials,Max_ev)
| 33.08 | 131 | 0.732769 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 151 | 0.182588 |
fd08718d6dac06e0024584cff9f9907168ac0518
| 1,918 |
py
|
Python
|
wsm/backend/asyncwhois/base.py
|
Rayologist/windows-sshd-manager
|
4f78a0cdaa12fe3c2a785aca31066c3be886878b
|
[
"Apache-2.0"
] | 9 |
2022-02-09T09:09:43.000Z
|
2022-02-09T09:10:06.000Z
|
wsm/backend/asyncwhois/base.py
|
Rayologist/windows-sshd-manager
|
4f78a0cdaa12fe3c2a785aca31066c3be886878b
|
[
"Apache-2.0"
] | null | null | null |
wsm/backend/asyncwhois/base.py
|
Rayologist/windows-sshd-manager
|
4f78a0cdaa12fe3c2a785aca31066c3be886878b
|
[
"Apache-2.0"
] | null | null | null |
from abc import ABC, abstractmethod
from typing import List, Any
from ipaddress import IPv4Address
from dataclasses import dataclass, FrozenInstanceError
from types import SimpleNamespace
from enum import Enum, auto
class Kind(Enum):
CREATE_WHOIS = auto()
GET_WHOIS = auto()
GET_WHOIS_BY_IP = auto()
GET_IP_WITHOUT_WHOIS = auto()
GET_IP_WITH_WHOIS = auto()
UPDATE_WHOIS_BY_IP = auto()
GET_CACHE_BY_IP = auto()
class FrozenSimpleNamespace(SimpleNamespace):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def __setattr__(self, name: str, value: Any) -> None:
raise FrozenInstanceError(f"cannot assign to field '{name}'")
@dataclass(frozen=True)
class Action:
kind: Kind
payload: dict
def __post_init__(self):
super().__setattr__("payload", FrozenSimpleNamespace(**self.payload))
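# Illustrative behaviour of the frozen Action/payload pair above (the IP value
# is just an example):
#
#     action = Action(kind=Kind.GET_WHOIS_BY_IP, payload={"ip": "192.0.2.1"})
#     action.payload.ip          # -> "192.0.2.1", attribute access on the namespace
#     action.payload.ip = "x"    # raises FrozenInstanceError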
class BaseWhoisExtractor(ABC):
__slots__ = ()
@abstractmethod
def extract(self, whois_result):
pass
class BaseCacheHandler(ABC):
__slots__ = ()
@abstractmethod
async def create(self, action: Action):
raise NotImplementedError
@abstractmethod
async def read(self, action: Action):
raise NotImplementedError
@abstractmethod
async def update(self, action: Action):
raise NotImplementedError
@abstractmethod
async def delete(self, action: Action):
raise NotImplementedError
class BaseAsyncWhois(ABC):
__slots__ = ()
@property
@abstractmethod
def lookup_failed(self) -> List[IPv4Address]:
pass
@abstractmethod
async def async_whois(self, ips: List[IPv4Address], cache: bool = True):
pass
@property
@abstractmethod
def extractor(self) -> BaseWhoisExtractor:
raise NotImplementedError
@property
@abstractmethod
def cache(self) -> BaseCacheHandler:
raise NotImplementedError
| 22.302326 | 77 | 0.688738 | 1,660 | 0.865485 | 0 | 0 | 1,024 | 0.533889 | 375 | 0.195516 | 43 | 0.022419 |
fd0c1d5bae5b02c0610c8254bb0ed033a6e6d1e5
| 1,079 |
py
|
Python
|
optaux/helper_functions/check_nonvalidated_auxs.py
|
coltonlloyd/OptAux
|
3ee1f8cdfa32f1a732ad41d5f854659159694160
|
[
"MIT"
] | 1 |
2019-06-05T10:41:06.000Z
|
2019-06-05T10:41:06.000Z
|
optaux/helper_functions/check_nonvalidated_auxs.py
|
coltonlloyd/OptAux
|
3ee1f8cdfa32f1a732ad41d5f854659159694160
|
[
"MIT"
] | null | null | null |
optaux/helper_functions/check_nonvalidated_auxs.py
|
coltonlloyd/OptAux
|
3ee1f8cdfa32f1a732ad41d5f854659159694160
|
[
"MIT"
] | null | null | null |
import cobra
from optaux import resources
resource_dir = resources.__path__[0]
met_to_rs = {'EX_pydam_e': ['PDX5PS', 'PYDXK', 'PYDXNK'],
'EX_orot_e': ['DHORTS', 'UPPRT', 'URIK2'],
'EX_thr__L_e': ['PTHRpp', 'THRS'],
'EX_pro__L_e': ['AMPTASEPG', 'P5CR'],
'EX_skm_e': ['DHQTi'],
'EX_cys__L_e': ['AMPTASECG', 'CYSS']}
for m, rs in met_to_rs.items():
ijo = cobra.io.load_json_model('%s/iJO1366.json' % resource_dir)
ijo.reactions.EX_o2_e.lower_bound = -20
biomass_reaction = list(ijo.objective.keys())[0]
biomass_reaction.lower_bound = .1
biomass_reaction.upper_bound = .1
for r in rs:
for g in [i.id for i in ijo.reactions.get_by_id(r).genes]:
print(ijo.genes.get_by_id(g).name,
[i.id for i in ijo.genes.get_by_id(g).reactions])
ijo.genes.get_by_id(g).remove_from_model()
ijo.objective = m
ijo.reactions.get_by_id(m).lower_bound = -10
ijo.optimize()
print(m, ijo.solution.f)
ijo.reactions.get_by_id(m).lower_bound = 0
| 33.71875 | 68 | 0.615385 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 189 | 0.175162 |
fd0f8e0645346f82a2ff9bdf244ca7d9bf72405b
| 186 |
py
|
Python
|
xauto/common/futils.py
|
sababa11/xauto
|
107e59344b4624941387a4dff0d439719075ebf4
|
[
"Apache-2.0"
] | null | null | null |
xauto/common/futils.py
|
sababa11/xauto
|
107e59344b4624941387a4dff0d439719075ebf4
|
[
"Apache-2.0"
] | null | null | null |
xauto/common/futils.py
|
sababa11/xauto
|
107e59344b4624941387a4dff0d439719075ebf4
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
def get_workdir():
"""
get_workdir() -> workdir: [str]
Returns the current workdir.
"""
return os.path.realpath(os.path.dirname(sys.argv[0]))
| 15.5 | 57 | 0.629032 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 81 | 0.435484 |
fd105e9dfaa8a1cb5dda8aab7e3ed98167bf73e4
| 10,430 |
py
|
Python
|
csv-to-mysql.py
|
LongPhan1912/Youtube-Playlist-Extractor
|
80b10e0b459c2cb264113cfaff644f5f28650813
|
[
"CC0-1.0"
] | null | null | null |
csv-to-mysql.py
|
LongPhan1912/Youtube-Playlist-Extractor
|
80b10e0b459c2cb264113cfaff644f5f28650813
|
[
"CC0-1.0"
] | null | null | null |
csv-to-mysql.py
|
LongPhan1912/Youtube-Playlist-Extractor
|
80b10e0b459c2cb264113cfaff644f5f28650813
|
[
"CC0-1.0"
] | null | null | null |
import csv
import MySQLdb
# installing MySQL: https://dev.mysql.com/doc/refman/8.0/en/osx-installation-pkg.html
# how to start, watch: https://www.youtube.com/watch?v=3vsC05rxZ8c
# or read this (absolutely helpful) guide: https://www.datacamp.com/community/tutorials/mysql-python
# this is mainly created to get a database of all the songs in my Favorites playlist
# you can change the topic to 'FILM', 'SPORTS', or 'POLITICS' if you wish
# 1/ initially, set up the MySQL connection and craft a cursor
mydb = MySQLdb.connect(host='localhost', user='root', passwd='yourPasswordHere')
cursor = mydb.cursor()
# 2/ create a database:
cursor.execute("CREATE DATABASE mydb")
mydb.commit()
# 3/ after database is created, comment out steps 1/ and 2/ and uncomment step 3/
# mydb = MySQLdb.connect(host='localhost', user='root', passwd='', database="mydb")
# cursor = mydb.cursor()
# from here on out, whenever you call `cursor.execute()`, call `mydb.commit()` right afterwards
# 4/ create a table -- three options available to you
# the table's columns are hardcoded right now, so if they are changed the other functions that rely on them must be updated too
def initialise_main_music_table(table_name):
cursor.execute("CREATE TABLE " + table_name
+ " (songID INTEGER PRIMARY KEY AUTO_INCREMENT, \
songTitle VARCHAR(150) NOT NULL, \
artist VARCHAR(100) NOT NULL, \
genre VARCHAR(100) NOT NULL, \
videoLink VARCHAR(100) NOT NULL, \
viewCount BIGINT NOT NULL, \
likeToDislikeRatio decimal(5, 4) NOT NULL)")
mydb.commit()
# the main music table helps extract info to create sub tables for a specific music category
def initialise_custom_music_table(table_name, main_music_table_name):
cursor.execute("CREATE TABLE " + table_name
+ " (categorySongID INTEGER PRIMARY KEY AUTO_INCREMENT, \
mainSongID INTEGER NOT NULL DEFAULT 1, \
FOREIGN KEY(mainSongID) REFERENCES " + main_music_table_name + "(songID), \
songTitle VARCHAR(150) NOT NULL, \
artist VARCHAR(100) NOT NULL, \
genre VARCHAR(100) NOT NULL, \
videoLink VARCHAR(100) NOT NULL, \
viewCount BIGINT NOT NULL, \
likeToDislikeRatio decimal(5, 4) NOT NULL)")
mydb.commit()
# def create_custom_table(table_name):
# cursor.execute("CREATE TABLE " + table_name
# + " (tableID INTEGER PRIMARY KEY AUTO_INCREMENT, \
# videoTitle VARCHAR(150) NOT NULL, \
# author VARCHAR(100) NOT NULL, \
# category VARCHAR(100) NOT NULL, \
# videoLink VARCHAR(100) NOT NULL, \
# viewCount BIGINT NOT NULL, \
# likeToDislikeRatio decimal(5, 4) NOT NULL)")
# mydb.commit()
# 5/ from a list of wanted fields, the function searches for the index corresponding to each field on the list
# and stores the index inside a dict (easy to look up and flexible if the order of the columns in the csv file is changed)
def get_indices_of_csv_table_items(csv_file_name, wanted_items):
indices = {}
with open(csv_file_name) as csv_file:
csv_data = csv.reader(csv_file, delimiter=',')
csv_headings = next(csv_data)
for idx, heading in enumerate(csv_headings):
if heading in wanted_items:
indices[heading] = idx
csv_file.close()
return indices
wanted_items = ['song_name', 'artist', 'topics', 'video_link', 'view_count', 'like_to_dislike_ratio']
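# For example, if the CSV header happened to start with
#   song_name, artist, topics, video_link, view_count, like_to_dislike_ratio
# then get_indices_of_csv_table_items(csv_file_name, wanted_items) would return
#   {'song_name': 0, 'artist': 1, 'topics': 2, 'video_link': 3, 'view_count': 4, 'like_to_dislike_ratio': 5}
# (the column order above is hypothetical; being independent of it is the point of the dict)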
# 6/ fill up our main table with the relevant data
def populate_main_music_table_from_csv(csv_file_name, table_name):
indices = get_indices_of_csv_table_items(csv_file_name, wanted_items)
with open(csv_file_name) as csv_file:
csv_data = csv.reader(csv_file, delimiter=',')
csv_headings = next(csv_data)
for idx, row in enumerate(csv_data):
song_name = row[indices['song_name']]
artist = row[indices['artist']]
if ' - Topic' in artist:
artist = artist[:artist.index(' - Topic')]
genre = row[indices['topics']][1:-1]
video_link = row[indices['video_link']]
view_count = int(row[indices['view_count']])
ratio = 0
if row[indices['like_to_dislike_ratio']]:
ratio = float(row[indices['like_to_dislike_ratio']][:-1]) / 100
if 'MUSIC' in genre:
cursor.execute(f"INSERT INTO {table_name} (songTitle, artist, genre, videoLink, viewCount, likeToDislikeRatio)\
VALUES(%s, %s, %s, %s, %s, %s)", (song_name, artist, genre, video_link, view_count, ratio))
mydb.commit() # remember to commit after populating the table
csv_file.close()
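# example call (the file and table names are placeholders):
#   populate_main_music_table_from_csv('favorite-playlist.csv', 'MainMusic')
# note that only rows whose topics contain 'MUSIC' end up in the table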
# 7/ fill up our custom table using data from the main music table
def populate_custom_music_table(your_new_table_name, main_music_table_name, column, chosen_value):
cursor.execute(f"INSERT INTO {your_new_table_name} (mainSongID, songTitle, artist, genre, videoLink, viewCount, likeToDislikeRatio)\
SELECT songID, songTitle, artist, genre, videoLink, viewCount, likeToDislikeRatio \
FROM {main_music_table_name} WHERE {column} LIKE '%{chosen_value}%'")
mydb.commit()
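# example call with hypothetical values:
#   populate_custom_music_table('ElectronicMusic', 'MainMusic', 'genre', 'ELECTRONIC MUSIC')
# copies every MainMusic row whose genre contains 'ELECTRONIC MUSIC' into ElectronicMusic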
# -------------------------------------------------------------------
# -------------------SUPPLEMENTARY FUNCTIONS START-------------------
# -------------------------------------------------------------------
# add a field after table is created (new field placed after a specific column of a table)
def add_new_column(table_name, new_column_name, data_type, pivot_column):
cursor.execute(f"ALTER TABLE {table_name} ADD {new_column_name} {data_type} NOT NULL AFTER {pivot_column}")
mydb.commit()
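# e.g. (hypothetical column) add_new_column('MainMusic', 'albumName', 'VARCHAR(100)', 'artist')
# would slot an albumName column directly after the artist column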
# change data type for any given field
def modify_data_type(table_name, column_name, new_data_type):
cursor.execute(f"ALTER TABLE {table_name} MODIFY COLUMN {column_name} {new_data_type}")
mydb.commit()
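# e.g. modify_data_type('MainMusic', 'likeToDislikeRatio', 'decimal(6, 5)') -- a made-up tweak that
# simply widens the precision of the ratio column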
# delete all the data from a specified table
def delete_data_from_table(table_name):
cursor.execute(f"DELETE FROM {table_name}")
mydb.commit()
# delete every record whose given column matches the given value
def delete_selected_record_from_table(table_name, column, value):
    cursor.execute(f"DELETE FROM {table_name} WHERE {column} = %s", (value,))
    mydb.commit()
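# e.g. delete_selected_record_from_table('MainMusic', 'artist', 'Some Artist') -- hypothetical values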
# make a table disappear from existence :)
def drop_table(table_name):
cursor.execute(f"DROP TABLE {table_name}")
mydb.commit()
def print_table_plain(table_name):
cursor.execute(f"SELECT * FROM {table_name}")
result = cursor.fetchall()
for row in result:
print(row)
# print out all the songs in the playlist
# 'DESC' means descending order (most popular song on top) and 'ASC' is the opposite
def print_table_by_criteria(table_name, order_criteria, order):
if order_criteria != '' and order != '':
cursor.execute(f"SELECT * FROM {table_name} ORDER BY {order_criteria} {order}")
for item in cursor:
print(item)
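# e.g. print_table_by_criteria('MainMusic', 'viewCount', 'DESC') prints the most viewed songs first
# (the table and column names here are just examples)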
# show the name of all the tables present in the database
def show_tables():
cursor.execute("SHOW TABLES")
tables = cursor.fetchall()
print(tables)
# check if a table already exists
def check_table_exists(table_name):
cursor.execute("""
SELECT COUNT(*)
FROM information_schema.tables
WHERE table_name = '{0}'
""".format(table_name.replace('\'', '\'\'')))
    return cursor.fetchone()[0] == 1
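# e.g. check_table_exists('MainMusic') should return True once the main table has been created
# (assuming the name is unique across your MySQL schemas, since the query does not filter by schema)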
# optional / not required function: should you wish to look up the different video topics
# if you want to search for all topics, leave `selected_topic` as an empty string
def get_all_selected_topics(csv_file_name, selected_topic):
res = []
indices = get_indices_of_csv_table_items(csv_file_name, wanted_items)
with open(csv_file_name) as csv_file:
csv_data = csv.reader(csv_file, delimiter=',')
csv_headings = next(csv_data)
for idx, row in enumerate(csv_data):
topics = row[indices['topics']][1:-1]
topic_list = topics.split(', ')
for item in topic_list:
if selected_topic in item and item not in res:
res.append(item)
return res
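# illustrative only -- the exact output depends on your playlist, but a call such as
#   get_all_selected_topics('favorite-playlist.csv', 'MUSIC')
# might return something like ['MUSIC', 'ELECTRONIC MUSIC', 'POP MUSIC']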
# ------------------------------------------------------------------
# -------------------SUPPLEMENTARY FUNCTIONS ENDS-------------------
# ------------------------------------------------------------------
# 8/ Create main music table
def build_main_music_table(csv_file_name, main_music_table_name):
initialise_main_music_table(main_music_table_name)
populate_main_music_table_from_csv(csv_file_name, main_music_table_name)
# 9/ Build a new music table based on the genre you love
def build_your_custom_music_table(csv_file_name, your_new_table_name, main_music_table_name, column, chosen_value):
    if not check_table_exists(main_music_table_name):
        build_main_music_table(csv_file_name, main_music_table_name)
initialise_custom_music_table(your_new_table_name, main_music_table_name)
populate_custom_music_table(your_new_table_name, main_music_table_name, column, chosen_value)
def main(): # example; feel free to change the variable names to your choosing
csv_file_name = 'favorite-playlist.csv' # name of csv file (use `main-extractor.py` first to create a csv file)
your_new_table_name = 'ElectronicMusic' # name your table
main_music_table_name = 'MainMusic' # name the main music table
column = 'genre' # column choices: songTitle, artist, genre, videoLink, viewCount, likeToDislikeRatio
chosen_value = 'ELECTRONIC MUSIC' # what you'd like to query, e.g. artist name or song title or genre
# to get a list of all possible video topics or music genres, you can run the function get_all_selected_topics()
    # e.g. get_all_selected_topics('favorite-playlist.csv', '') lists every topic in the playlist,
    # while get_all_selected_topics('favorite-playlist.csv', 'MUSIC') narrows the list down to music genres
order_criteria = 'viewCount' # e.g. viewCount or likeToDislikeRatio or artist name in alphabetical order
ascending_order = False # change to true if you want to print the table in ascending order (i.e. lowest order at the top)
    order = 'ASC' if ascending_order else 'DESC'
    build_your_custom_music_table(csv_file_name, your_new_table_name, main_music_table_name, column, chosen_value)
print_table_by_criteria(your_new_table_name, order_criteria, order)
if __name__ == "__main__":
main()
| 48.287037 | 136 | 0.661266 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,935 | 0.569032 |