Dataset schema (one record per source file; the record fields below are separated by lines containing only "|"):

| column | type | range / values |
|---|---|---|
| seq_id | string | length 7–11 |
| text | string | length 156–1.7M (the file contents) |
| repo_name | string | length 7–125 |
| sub_path | string | length 4–132 |
| file_name | string | length 4–77 |
| file_ext | string | 6 distinct values |
| file_size_in_byte | int64 | 156–1.7M |
| program_lang | string | 1 distinct value |
| lang | string | 38 distinct values |
| doc_type | string | 1 distinct value |
| stars | int64 | 0–24.2k, may be null |
| dataset | string | 1 distinct value |
| pt | string | 1 distinct value |
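As a minimal sketch of how these columns might be used (hypothetical: it assumes the records have been loaded into a pandas DataFrame; the two example rows are copied from records further down in this dump), one could filter for Python files from well-starred repositories:

import pandas as pd

# Hypothetical rows using the schema above; values are copied from records in this dump.
df = pd.DataFrame([
    {"seq_id": "75051539708", "repo_name": "ZaydH/stratego",
     "program_lang": "python", "lang": "en", "file_size_in_byte": 14130, "stars": 0},
    {"seq_id": "10420754903", "repo_name": "randovania/randovania",
     "program_lang": "python", "lang": "en", "file_size_in_byte": 3321, "stars": 165},
])

# Keep only Python files from repositories with at least 100 stars
# (stars may be null, so missing values are filled with 0 first).
popular = df[(df["program_lang"] == "python") & (df["stars"].fillna(0) >= 100)]
print(popular[["seq_id", "repo_name", "file_size_in_byte"]])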
75051539708
|
# -*- coding: utf-8 -*-
r"""
tests.test_state
~~~~~~~~~~~~~~~~
Tests for the \p State class including movement mechanics and enumeration of the \p MoveSet
class.
:copyright: (c) 2019 by Zayd Hammoudeh.
:license: MIT, see LICENSE for more details.
"""
from typing import Tuple
import pytest
from stratego import Move
from stratego.location import Location
from stratego.move import MoveStack
from stratego.piece import Color
from stratego.player import Player
from stratego.state import State
from testing_utils import STATES_PATH, SMALL_BRD, STD_BRD
def _get_move_from_player(plyr: Player, _orig: Tuple[int, int], new: Tuple[int, int]) -> Move:
r"""
    Get the move from location \p _orig (row, col) to location \p new (row, col).
:param plyr: Player whose move will be extracted
:param _orig: Original location to move from
:param new: New location to move to
:return: Move corresponding to the move pair
"""
available_moves = plyr.move_set.avail
values = list(available_moves.values())
v = [v for v in values if v.orig == Location(*_orig) and v.new == Location(*new)]
assert v
return v[0]
def _verify_num_pieces_and_move_set_size(state: State, num_red_p: int, num_blue_p: int,
num_red_mv: int, num_blue_mv: int):
r"""
Verifies the number of pieces and size of the \p MoveSet
:param state: State of the game
:param num_red_p: Number of remaining RED pieces
:param num_blue_p: Number of remaining BLUE pieces
:param num_red_mv: Number of available moves for RED
:param num_blue_mv: Number of available moves for BLUE
"""
# Standardize assert tests
assert state.red.num_pieces == num_red_p
assert state.blue.num_pieces == num_blue_p
assert len(state.red.move_set) == num_red_mv
assert len(state.blue.move_set) == num_blue_mv
def test_duplicate_loc_in_state():
r""" Verify that a \p State file with two pieces in same location raises an error """
for dup_file in ["duplicate_loc_red.txt", "duplicate_loc_diff_color.txt"]:
duplicate_path = STATES_PATH / dup_file
assert duplicate_path.exists(), "Duplicate file path does not exist"
with pytest.raises(Exception):
State.importer(duplicate_path, STD_BRD)
def test_no_flag():
r""" Verify an error is raised if the file has no flag """
# Verify the "clean" passes
path = STATES_PATH / "no_flag_clean.txt"
assert path.exists(), "No flag test file does not exist"
State.importer(path, STD_BRD)
# Verify no flag checks are done for both players
for file in ["no_flag_red.txt", "no_flag_blue.txt"]:
path = STATES_PATH / file
assert path.exists(), "No flag test file does not exist"
with pytest.raises(Exception):
State.importer(path, STD_BRD)
# noinspection PyProtectedMember
def test_state_basic_moves():
r""" Verify the basic movement mechanics work without issue """
path = STATES_PATH / "state_move_verify.txt"
assert path.exists(), "Move verify file does not exist"
state = State.importer(path, STD_BRD)
# Verify initial state matches expectations
_verify_num_pieces_and_move_set_size(state, 7, 7, 4 + 3, 4 + 3)
move_stack = MoveStack()
# Define a series of moves. Entries in each tuple are:
# 0: Original piece location
# 1: Piece new location
# 2: Number of red pieces
# 3: Number of blue pieces
# 4: Size of the red move set
# 5: Size of the blue move set
move_list = [((0, 1), (1, 1), 7, 7, 12, 7),
((9, 1), (8, 1), 7, 7, 12, 12),
((1, 1), (2, 1), 7, 7, 12, 12),
((8, 1), (7, 1), 7, 7, 12, 12),
((2, 1), (3, 1), 7, 7, 12, 12),
((7, 1), (6, 1), 7, 7, 12, 12),
((3, 1), (4, 1), 7, 7, 11, 12), # One less due to blocked by (4, 2)
((6, 1), (5, 1), 7, 7, 11, 11), # One less due to blocked by (5, 2)
((4, 1), (5, 1), 6, 6, 8, 8), # Both lost piece in battle
((9, 3), (6, 3), 6, 6, 8, 18), # Move blue scout
((0, 3), (3, 3), 6, 6, 18, 18), # Move red scout
((6, 3), (6, 5), 6, 6, 18, 23), # Move blue scout
((3, 3), (3, 5), 6, 6, 20, 20), # Move red scout
((6, 5), (6, 4), 6, 6, 23, 23), # Move blue scout
((3, 5), (9, 5), 6, 5, 16, 22), # Red scout attack blue spy
((6, 4), (0, 4), 6, 4, 16, 5) # Blue scout attack red bomb
]
printer_out = []
for orig, new, num_red_p, num_blue_p, num_red_mv, num_blue_mv in move_list:
orig, new = Location(orig[0], orig[1]), Location(new[0], new[1])
p = state.next_player.get_piece_at_loc(orig)
assert p is not None
attacked = state.get_other_player(state.next_player).get_piece_at_loc(new)
move_stack.push(Move(p, orig, new, attacked))
assert state.update(move_stack.top())
assert state._printer._is_loc_empty(orig)
_verify_num_pieces_and_move_set_size(state, num_red_p, num_blue_p, num_red_mv, num_blue_mv)
printer_out.append(state.write_board())
# Try to move red bomb then the red flag
for orig in [Location(0, 4), Location(0, 6)]:
p = state.next_player.get_piece_at_loc(orig)
assert p is not None
        for new in [orig.left(), orig.right()]:
attacked = state.get_other_player(state.next_player).get_piece_at_loc(new)
with pytest.raises(Exception):
Move(p, orig, new, attacked)
# Verify Undo
for i in range(2, len(move_list) + 1):
_, _, num_red_p, num_blue_p, num_red_mv, num_blue_mv = move_list[-i]
state.undo()
assert state.write_board() == printer_out[-i], "Printer mismatch after do/undo"
_verify_num_pieces_and_move_set_size(state, num_red_p, num_blue_p, num_red_mv, num_blue_mv)
def test_small_direct_attack():
r""" Test making a direct attack """
move_list = [(None, None, None, None, 7, 7, 11, 11),
(Color.RED, Color.BLUE, (0, 3), (7, 3), 6, 6, 5, 5)
]
_helper_small_test(move_list)
def test_small_move_then_attack():
r""" Test making a single move with a scout then a direct attack """
move_list = [(None, None, None, None, 7, 7, 11, 11),
(Color.RED, Color.BLUE, (0, 3), (1, 3), 7, 7, 19, 10),
(Color.BLUE, Color.RED, (7, 3), (1, 3), 6, 6, 5, 5)
]
_helper_small_test(move_list)
def test_single_adjacent_scout():
r""" Test making a single move with a scout then a direct attack """
move_list = [(None, None, None, None, 2, 2, 11, 11),
(Color.BLUE, Color.BLUE, (2, 4), (2, 3), 1, 1, 0, 0)
]
_helper_small_test(move_list, state_file="moveset_two_scouts_adjacent.txt")
def test_scout_blocking_scout():
r""" Test making a single move with a scout then a direct attack """
move_list = [(None, None, None, None, 7, 7, 11, 11),
(Color.RED, Color.BLUE, (0, 5), (1, 5), 7, 7, 14, 11),
(Color.BLUE, Color.RED, (7, 3), (2, 3), 7, 7, 14, 19),
(Color.RED, Color.BLUE, (1, 5), (1, 4), 7, 7, 13, 19),
(Color.BLUE, Color.RED, (2, 3), (3, 3), 7, 7, 13, 13),
(Color.RED, Color.BLUE, (1, 4), (2, 4), 7, 7, 14, 13),
(Color.BLUE, Color.RED, (7, 2), (7, 3), 7, 7, 14, 13),
(Color.RED, Color.BLUE, (0, 0), (1, 0), 7, 7, 17, 13),
(Color.BLUE, Color.RED, (3, 3), (2, 3), 7, 7, 17, 16),
(Color.RED, Color.BLUE, (2, 4), (3, 4), 7, 7, 16, 19),
(Color.BLUE, Color.RED, (2, 3), (2, 4), 7, 7, 16, 16),
(Color.RED, Color.BLUE, (1, 0), (2, 0), 7, 7, 16, 16),
(Color.BLUE, Color.RED, (2, 4), (2, 1), 7, 7, 11, 19),
(Color.RED, Color.BLUE, (0, 2), (1, 2), 7, 7, 16, 19),
(Color.BLUE, Color.RED, (7, 5), (6, 5), 7, 7, 16, 22),
(Color.RED, Color.BLUE, (2, 0), (2, 1), 7, 6, 16, 9)
]
_helper_small_test(move_list, state_file="moveset_scout_block_scout.txt")
# noinspection PyProtectedMember
def _helper_small_test(move_info, state_file: str = "moveset_small_direct_attack.txt"):
r"""
Helper function for testing the movements on the small board
:param move_info: List of move information. For :math:`n` moves, the length of \p move_info
should be :math:`n+1`. The first element is the initial board configuration.
"""
path = STATES_PATH / state_file
assert path.exists(), "Small direct attack state file not found"
state = State.importer(path, SMALL_BRD)
_, _, _, _, num_red_p, num_blue_p, num_red_mv, num_blue_mv = move_info[0]
_verify_num_pieces_and_move_set_size(state, num_red_p, num_blue_p, num_red_mv, num_blue_mv)
# Test doing moves
moves, brd = [], [state.write_board()]
for col, other_col, l1, l2, num_red_p, num_blue_p, num_red_mv, num_blue_mv in move_info[1:]:
plyr, _ = state.get_player(col), state.get_player(other_col)
m = _get_move_from_player(plyr, l1, l2)
moves.append(m)
state.update(moves[-1])
brd.append(state.write_board())
_verify_num_pieces_and_move_set_size(state, num_red_p, num_blue_p,
num_red_mv, num_blue_mv)
# Test undoing the moves
for i in range(1, len(moves) - 1):
assert brd[-i] == state.write_board()
_, _, _, _, num_red_p, num_blue_p, num_red_mv, num_blue_mv = move_info[-i]
_verify_num_pieces_and_move_set_size(state, num_red_p, num_blue_p,
num_red_mv, num_blue_mv)
assert moves[-i] == state._stack.top() # pylint: disable=protected-access
state.rollback()
_, _, _, _, num_red_p, num_blue_p, num_red_mv, num_blue_mv = move_info[-i - 1]
_verify_num_pieces_and_move_set_size(state, num_red_p, num_blue_p,
num_red_mv, num_blue_mv)
assert brd[-i - 1] == state.write_board()
def test_if_has_move():
r""" Verify the \p piece_has_move method of the \p State class """
path = STATES_PATH / "deep_q_verify.txt"
state = State.importer(path, STD_BRD)
# Marshall cannot move
marshall_loc = Location(0, 0)
p = state.get_player(Color.RED).get_piece_at_loc(marshall_loc)
assert not state.piece_has_move(p)
# Rank3 can move
rank3_loc = Location(1, 0)
p = state.get_player(Color.RED).get_piece_at_loc(rank3_loc)
assert state.piece_has_move(p)
# Bomb cannot move
bomb_loc = Location(0, 4)
p = state.get_player(Color.RED).get_piece_at_loc(bomb_loc)
assert not state.piece_has_move(p)
# Flag cannot move
flag_loc = Location(0, 6)
p = state.get_player(Color.RED).get_piece_at_loc(flag_loc)
assert not state.piece_has_move(p)
# verify pieces with known moves
piece_col = (1, 2, 3, 5)
for col in piece_col:
flag_loc = Location(0, col)
p = state.get_player(Color.RED).get_piece_at_loc(flag_loc)
assert state.piece_has_move(p)
flag_loc = Location(state.board.num_rows - 1, col)
p = state.get_player(Color.BLUE).get_piece_at_loc(flag_loc)
assert state.piece_has_move(p)
# noinspection PyProtectedMember
def test_cyclic_move():
state = State.importer(STATES_PATH / "state_move_verify.txt", STD_BRD)
    assert len(state._stack) == 0, "Move stack should be empty to begin with"
num_moves = 0
orig, new = (0, 1), (1, 1)
m = _get_move_from_player(state.next_player, orig, new)
state.update(m)
num_moves += 1
assert len(state._stack) == num_moves
assert len(state.get_cyclic_move()) == 0, "No cyclic move"
# Dummy moves to ensure no premature cycle
orig, new = (9, 1), (8, 1)
m = _get_move_from_player(state.next_player, orig, new)
state.update(m)
num_moves += 1
assert len(state._stack) == num_moves
assert len(state.get_cyclic_move()) == 0, "No cyclic move"
# Start of cycle
orig, new = (0, 0), (1, 0)
m = _get_move_from_player(state.next_player, orig, new)
state.update(m)
num_moves += 1
assert len(state._stack) == num_moves
assert len(state.get_cyclic_move()) == 0, "No cyclic move"
orig, new = (9, 0), (8, 0)
m = _get_move_from_player(state.next_player, orig, new)
state.update(m)
num_moves += 1
assert len(state._stack) == num_moves
assert len(state.get_cyclic_move()) == 0, "No cyclic move"
orig, new = (1, 0), (0, 0)
m = _get_move_from_player(state.next_player, orig, new)
state.update(m)
num_moves += 1
assert len(state._stack) == num_moves
assert len(state.get_cyclic_move()) == 0, "No cyclic move"
orig, new = (8, 0), (9, 0)
m = _get_move_from_player(state.next_player, orig, new)
state.update(m)
num_moves += 1
assert len(state._stack) == num_moves
assert len(state.get_cyclic_move()) == 0, "No cyclic move"
orig, new = (0, 0), (1, 0)
m = _get_move_from_player(state.next_player, orig, new)
state.update(m)
num_moves += 1
assert len(state._stack) == num_moves
assert len(state.get_cyclic_move()) == 0, "No cyclic move"
orig, new = (9, 0), (8, 0)
m = _get_move_from_player(state.next_player, orig, new)
state.update(m)
num_moves += 1
assert len(state._stack) == num_moves
assert len(state.get_cyclic_move()) == 1, "Cyclic move now"
orig, new = (1, 1), (0, 1)
m = _get_move_from_player(state.next_player, orig, new)
state.update(m)
num_moves += 1
assert len(state._stack) == num_moves
assert len(state.get_cyclic_move()) == 1, "Cyclic move now"
orig, new = (8, 1), (9, 1)
m = _get_move_from_player(state.next_player, orig, new)
state.update(m)
num_moves += 1
assert len(state._stack) == num_moves
assert len(state.get_cyclic_move()) == 0, "Cycle removed"
|
ZaydH/stratego
|
src/tests/test_state.py
|
test_state.py
|
py
| 14,130 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22844256236
|
nstations = int(input())
nlines = int(input())
station_lines = {} # station -> lines
for i in range(nlines):
_, *stations = map(int, input().split())
for st in stations:
station_lines.setdefault(st, []).append(i)
start, end = map(int, input().split())
#===
from itertools import combinations
from collections import deque
def solve(station_lines, start, end):
line_stations = {}
for st, lines in station_lines.items():
for line in lines:
line_stations.setdefault(line, set()).add(st)
connections = {}
for l1,l2 in combinations(line_stations, 2):
if line_stations[l1] & line_stations[l2]: # common stations
connections.setdefault(l1,[]).append(l2)
connections.setdefault(l2,[]).append(l1)
start_lines = set(station_lines[start])
end_lines = set(station_lines[end])
dq = deque()
dq.extend(station_lines[start])
scores = [None] * nlines
for x in start_lines:
scores[x] = 0
if start_lines & end_lines:
return 0
while dq:
l = dq.popleft()
score = scores[l]
if l not in connections:
continue
        for lc in connections[l]:
            if scores[lc] is None:  # first BFS visit gives the minimal number of transfers
                scores[lc] = score + 1
                dq.append(lc)
                if lc in end_lines:
                    return scores[lc]
return None
ans = solve(station_lines, start, end)
if ans is None:
print(-1)
else:
print(ans)
|
sergey-ryzhikov/yandex-alogotrain-3.0B
|
t40.py
|
t40.py
|
py
| 1,563 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30434077830
|
#%%
# Load the recorded signal
with open('sample_20200601_pointfinger.txt', 'r') as openfile :
samples = openfile.readlines()
tmp_timests = [ samples[i][:-1] for i in range(len(samples)) if i%3==0 ]
tmp_samples = [ samples[i][:-1] for i in range(len(samples)) if i%3==1 ]
#%%
# Remove duplicated timestamps
timests, samples = list(), list()
deleted = list()
for sinx in range(len(tmp_timests)-1) :
if tmp_timests[sinx] != tmp_timests[sinx+1] :
samples.append(float(tmp_samples[sinx]))
timests.append(float(tmp_timests[sinx].replace('2020-06-01 09:', '')[3:]))
if tmp_timests[sinx].replace('2020-06-01 09:', '')[:2] == '26' : timests[-1] += 60
#%%
# Plot the signal
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
import numpy as np
fm.get_fontconfig_fonts()
matplotlib.rc('font', family=fm.FontProperties(fname='C:/Windows/Fonts/NanumSquarel.ttf').get_name())
def plot(t, s, title='근전도 신호 데이터', xlabel='시간(초)', ylabel='신호 세기', style='-') :
T = np.array(t)
Y = np.array(s)
mat = np.array([T, Y])
plt.figure(figsize=(18, 5))
plt.plot(T, Y, style, ms=15, lw=1)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title, fontsize=15, pad=20)
plt.show()
plot(timests, samples)
#%%
# Remove the initialization period
init_end_inx = 0
while True :
if samples[init_end_inx] < 190 :
break
init_end_inx += 1
timests, samples = timests[init_end_inx:], samples[init_end_inx:]
plot(timests[init_end_inx:], samples[init_end_inx:])
# # Compute gradient values
# grads = list()
# for i in range(1, len(timests)) :
# grads.append((samples[i-1]-samples[i])/(timests[i-1]-timests[i]))
# plot(timests[1:], np.abs(grads))
#%%
# Subtract the bias
bias_value = 173.5
plot(timests, np.abs(np.array(samples)-bias_value))
#%%
# Select a time span
timespan = [51, 53.5] # medium signal
timespan = [53.5, 56.5] # no signal
timespan = [75, 77] # short signal
timespan = [62, 66] # long signal
span_indice = [0, 0]
while span_indice[0] < len(timests) :
if timests[span_indice[0]] > timespan[0] : break
span_indice[0] += 1
while span_indice[1] < len(timests) :
if timests[span_indice[1]] > timespan[1] : break
span_indice[1] += 1
span_indice
#%%
# Fourier-style periodic product (convolution of the filtered signals)
bias_samples = abs(np.array(samples)-bias_value)[span_indice[0]:span_indice[1]]
low_samples, high_samples = np.copy(bias_samples), np.copy(bias_samples)
pass_filter_value = 4
high_samples[bias_samples <= pass_filter_value] = 0
low_samples[bias_samples > pass_filter_value] = 0
plt.figure(figsize=(18, 8))
plt.plot(np.convolve(low_samples, high_samples), '-', ms=1, lw=1)
# plt.xlabel("")
# plt.ylabel("")
# plt.title(title, fontsize=15, pad=20)
plt.show()
#%%
bias_samples = abs(np.array(samples)-bias_value)
low_samples, high_samples = np.copy(bias_samples), np.copy(bias_samples)
high_samples[bias_samples <= pass_filter_value] = 0
low_samples[bias_samples > pass_filter_value] = 0
step_num = 5
pack_num = 30
pinx = 0
pvalues = list()
while pinx+pack_num < len(samples) :
pvalues.append(np.sum(np.convolve(
low_samples[pinx:pinx+pack_num],
high_samples[pinx:pinx+pack_num]))/pack_num)
pinx += step_num
plot(timests, samples)
plot(timests[:len(pvalues)], pvalues)
#%%
plot(timests[:len(pvalues)], pvalues)
#%%
# Group the values
ginx = -1
group_area = list()
while ginx < len(pvalues) :
ginx += 1
while ginx < len(pvalues) and pvalues[ginx] < 1:
ginx += 1
tmp = ginx
while ginx < len(pvalues) and pvalues[ginx] > 0:
ginx += 1
group_area.append((tmp, ginx))
group_area
#%%
# Look at the maximum of each group
maximums = list()
for area in group_area :
if pvalues[area[0]:area[1]] :
maximums.append(np.max(pvalues[area[0]:area[1]]))
plot([i for i in range(len(group_area)-1)], maximums, style='.')
#%%
(timests[0] - timests[1]) * 50
#%%
# Build the input matrix for the CNN
# Remove outliers
group_area = group_area[1:-1]
#%%
# Build the training data
labels = []
for i in range(19) :
labels.append(i)
len(labels)
#%%
def make_train(groups, areas, max_depth=40) :
train_list = list()
for area in areas :
train_set = list()
label_set = list()
group_span = (area[1] - area[0])
# if group_span : continue
inx = 0
while inx < max_depth and inx < group_span :
a = labels[int(inx/group_span*len(labels))] # angle
v = groups[inx:group_span]+[0 for i in range(max_depth-(group_span-inx))]
train_set.insert(0, v)
label_set.insert(0, a)
inx += 1
if train_set :
train_list.append((train_set, label_set))
return train_list
trains = make_train(pvalues, group_area)
#%%
tmp_train_inputs = list(map(lambda l: l[0], trains))
tmp_label_inputs = list(map(lambda l: l[1], trains))
train_inputs = list()
label_inputs = list()
for i in range(len(tmp_train_inputs)) :
for j in range(len(tmp_train_inputs[i])) :
train_inputs.append(tmp_train_inputs[i][j])
label_inputs.append(tmp_label_inputs[i][j])
train_inputs = np.array(train_inputs)
label_inputs = np.array(label_inputs)
print(train_inputs.shape)
#%%
### Import modules for training the neural network
import tensorflow as tf
from tensorflow.keras import datasets, layers, Sequential
#%%
# Build the model
model = Sequential()
model.add(layers.Dense(40, activation='relu'))
model.add(layers.Dense(50, activation='relu'))
model.add(layers.Dense(40, activation='relu'))
model.add(layers.Dense(30, activation='relu'))
model.add(layers.Dense(len(labels), activation='softmax'))
model.build(input_shape=(None, 40))
model.summary()
#%%
# Start training
model.compile(
optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy']
)
model.fit(train_inputs, label_inputs, epochs=500)
#%%
# Evaluate the model
test_loss, test_acc = model.evaluate(train_inputs, label_inputs, verbose=2)
#%%
predicts = list()
for i in range(len(train_inputs)) :
p = model.predict(train_inputs[i:i+1]).tolist()[0]
predicts.append(p)
#%%
predicts = [ p.index(max(p)) for p in predicts]
predicts
#%%
plot([i for i in range(len(predicts))], predicts, xlabel="필터링된 그룹", ylabel="카테고리", title="예측 데이터")
plot([i for i in range(len(predicts))], label_inputs, xlabel="필터링된 그룹", ylabel="카테고리", title="검증 데이터")
#%%
model.save('./my_model.h5')
#%%
print(train_inputs[0:1])
#%%
# len(pvalues)
# #%%
# # Inspect the spectrum
# import scipy.signal
# f, P = scipy.signal.periodogram(np.array(samples), int(1/(timests[1]-timests[0])), nfft=len(samples))
# plt.subplot(211)
# plt.plot(f, P)
# plt.title("선형 스케일")
# plt.subplot(212)
# plt.semilogy(f, P)
# plt.title("로그 스케일")
# plt.tight_layout()
# plt.show()
|
oimq/DoTheEHands
|
SignalAnalyzer.py
|
SignalAnalyzer.py
|
py
| 6,880 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17968730699
|
# Databricks notebook source
# MAGIC %md
# MAGIC # Train Machine Learning Model
# MAGIC
# MAGIC This notebook aims to develop and register an MLFlow Model for deployment consisting of:
# MAGIC - a machine learning model to predict the liklihood of employee attrition.
# MAGIC
# MAGIC This example uses an adapted version of the [`IBM HR Analytics Employee Attrition & Performance` dataset](https://www.kaggle.com/pavansubhasht/ibm-hr-analytics-attrition-dataset) available from Kaggle.
# MAGIC
# MAGIC > Ensure you have created managed Delta tables in the Hive Metastore with the associated dataset. These [instructions](https://learn.microsoft.com/en-au/azure/databricks/ingestion/add-data/upload-data#upload-the-file) can be used to learn how to upload the dataset.
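# MAGIC
# MAGIC > As a minimal, hypothetical sketch (the CSV path below is illustrative and not part of this repository; `spark` is the session Databricks provides), the curated dataset could be registered as a managed Delta table like so:
# MAGIC
# MAGIC ```python
# MAGIC raw = spark.read.csv("/FileStore/employee_attrition_curated.csv", header=True, inferSchema=True)
# MAGIC raw.write.format("delta").saveAsTable("hive_metastore.default.employee_attrition_curated")
# MAGIC ```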
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC #### Import dependencies and define constants
# COMMAND ----------
import json
from typing import Dict, Tuple, Union
import mlflow
import pandas as pd
from hyperopt import STATUS_OK, fmin, hp, tpe
from mlflow.models.signature import infer_signature
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.metrics import (accuracy_score, f1_score, precision_score,
recall_score, roc_auc_score)
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
# define notebook parameters
dbutils.widgets.text("curated_dataset_table",
"hive_metastore.default.employee_attrition_curated")
# define target column
TARGET = ["Attrition"]
# define categorical feature columns
CATEGORICAL_FEATURES = [
"Gender",
"Education",
"EducationField",
"Department",
"JobRole",
"JobLevel",
"PerformanceRating",
"JobInvolvement",
"JobSatisfaction",
"RelationshipSatisfaction",
"EnvironmentSatisfaction",
"BusinessTravel",
"OverTime",
"WorkLifeBalance",
"MaritalStatus",
"StockOptionLevel"
]
# define numeric feature columns
NUMERIC_FEATURES = [
"Age",
"DistanceFromHome",
"MonthlyIncome",
"NumCompaniesWorked",
"PercentSalaryHike",
"TotalWorkingYears",
"TrainingTimesLastYear",
"YearsAtCompany",
"YearsInCurrentRole",
"YearsSinceLastPromotion",
"YearsWithCurrManager"
]
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC #### Define functions to build the model
# COMMAND ----------
def prepare_data(df: pd.DataFrame, random_state: int = 2023) -> Tuple[pd.DataFrame, pd.DataFrame]:
# change data types of target and features
df[TARGET] = df[TARGET].replace({"Yes": 1, "No": 0})
df[NUMERIC_FEATURES] = df[NUMERIC_FEATURES].astype("float")
df[CATEGORICAL_FEATURES] = df[CATEGORICAL_FEATURES].astype("str")
# split into train and test datasets
df_train, df_test = train_test_split(
df[CATEGORICAL_FEATURES + NUMERIC_FEATURES + TARGET],
test_size=0.20,
random_state=random_state
)
return df_train, df_test
# COMMAND ----------
def make_classifer_pipeline(params: Dict[str, Union[str, int]]) -> Pipeline:
"""Create sklearn pipeline to apply transforms and a final estimator"""
# categorical features transformations
categorical_transformer = Pipeline(steps=[
("imputer", SimpleImputer(strategy="constant", fill_value="missing")),
("ohe", OneHotEncoder())]
)
# numeric features transformations
numeric_transformer = Pipeline(steps=[
("imputer", SimpleImputer(strategy="median"))]
)
# preprocessing pipeline
preprocessor = ColumnTransformer(
transformers=[
("numeric", numeric_transformer, NUMERIC_FEATURES),
("categorical", categorical_transformer, CATEGORICAL_FEATURES)
]
)
# model training pipeline
classifer_pipeline = Pipeline([
("preprocessor", preprocessor),
("classifier", RandomForestClassifier(**params, n_jobs=-1))
])
return classifer_pipeline
# COMMAND ----------
# define objective function
def hyperparameter_tuning(params):
mlflow.sklearn.autolog(silent=True)
with mlflow.start_run(nested=True):
# read and process curated data
df = spark.read.table(dbutils.widgets.get(
"curated_dataset_table")).toPandas()
df_train, df_test = prepare_data(df)
        # separate features and target variables
x_train, y_train = df_train[CATEGORICAL_FEATURES +
NUMERIC_FEATURES], df_train[TARGET]
x_test, y_test = df_test[CATEGORICAL_FEATURES +
NUMERIC_FEATURES], df_test[TARGET]
        # train the model (built once; the pipeline applies preprocessing and the classifier)
        estimator = make_classifer_pipeline(params)
        estimator.fit(x_train, y_train.values.ravel())
# calculate evaluation metrics
y_pred = estimator.predict(x_test)
validation_accuracy_score = accuracy_score(
y_test.values.ravel(), y_pred)
validation_roc_auc_score = roc_auc_score(y_test.values.ravel(), y_pred)
validation_f1_score = f1_score(y_test.values.ravel(), y_pred)
validation_precision_score = precision_score(
y_test.values.ravel(), y_pred)
validation_recall_score = recall_score(y_test.values.ravel(), y_pred)
# log evaluation metrics
mlflow.log_metric("validation_accuracy_score",
validation_accuracy_score)
mlflow.log_metric("validation_roc_auc_score", validation_roc_auc_score)
mlflow.log_metric("validation_f1_score", validation_f1_score)
mlflow.log_metric("validation_precision_score",
validation_precision_score)
mlflow.log_metric("validation_recall_score", validation_recall_score)
# log model
input_example = x_test.iloc[0].to_dict()
signature = infer_signature(x_train, y_pred)
mlflow.sklearn.log_model(
estimator, "model", signature=signature, input_example=input_example)
return {"loss": -validation_roc_auc_score, "status": STATUS_OK}
# COMMAND ----------
def train_model():
# set mlflow tracking uri
mlflow_client = mlflow.tracking.MlflowClient(tracking_uri='databricks')
mlflow.set_tracking_uri("databricks")
# start model training run
mlflow.set_experiment("/employee-attrition-classifier")
with mlflow.start_run(run_name="employee-attrition-classifier") as run:
# define search space
search_space = {
"n_estimators": hp.choice("n_estimators", range(100, 1000)),
"max_depth": hp.choice("max_depth", range(1, 25)),
"criterion": hp.choice("criterion", ["gini", "entropy"]),
}
# hyperparameter tuning
best_params = fmin(
fn=hyperparameter_tuning,
space=search_space,
algo=tpe.suggest,
max_evals=10,
)
# end run
mlflow.end_run()
return run
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC #### Train and register the machine learning model
# COMMAND ----------
# Train model
run = train_model()
# Retrieve model from best run
best_run = mlflow.search_runs(filter_string=f"tags.mlflow.parentRunId='{run.info.run_id}'", order_by=[
"metrics.testing_auc DESC"]).iloc[0]
# Register model artifact
model_name = "employee-attrition"
result = mlflow.register_model(f"runs:/{best_run.run_id}/model", model_name)
# Return notebook output
json_output = json.dumps(
{"output": {"MODEL_NAME": result.name, "MODEL_VERSION": result.version}})
dbutils.notebook.exit(json_output)
# COMMAND ----------
|
nfmoore/azure-databricks-mlops-example-scenarios
|
core/notebooks/train_model.py
|
train_model.py
|
py
| 7,954 |
python
|
en
|
code
| 2 |
github-code
|
6
|
3360586236
|
"""
백준 1012 : 유기농 배추
"""
"""
BFS - Breath first Search
한번 방문한 지점은 절대로 다시 방문하지 않는다.
"""
from collections import deque
import sys
input=sys.stdin.readline
dx=[-1,1,0,0]
dy=[0,0,-1,1]
# ( -1, 0) ( 1,0) ( 0,-1) (0,1)
def BFS(graph,visit , x, y):
deq=deque()
deq.append([x,y])
visit[x][y]=True
while deq:
x,y=deq.popleft()
for i in range(4):
nx=x+dx[i] ; ny=y+dy[i]
            if 0<=nx<N and 0<=ny<M and not visit[nx][ny] and graph[nx][ny]==1: # inside the grid, unvisited, and contains a cabbage
                visit[nx][ny]=True # mark as visited exactly once
deq.append([nx,ny])
for i in range(int(input())):
    M,N,K=map(int,input().split()) # width, height, number of cabbages
graph=[ [0]*M for _ in range(N) ]
visit=[ [False]*M for _ in range(N) ]
for j in range(K):
a,b=map(int,input().split())
graph[b][a]=1
count=0
for j in range(N):
for k in range(M):
            if visit[j][k]==False and graph[j][k]==1: # not yet visited and contains a cabbage
BFS(graph , visit , j , k)
count+=1
print(count)
|
030831/2023-Winter_Vacation_GroupStudy
|
1012.py
|
1012.py
|
py
| 1,301 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
43469204471
|
from __future__ import annotations
import yaml
import os
import errno
__all__ = ["save_setup", "read_setup"]
def save_setup(setup: dict, path: str):
"""
Save Model initialization setup dictionary.
Parameters
----------
setup : dict
The setup dictionary to be saved to `YAML <https://yaml.org/spec/1.2.2/>`__ file.
path : str
        The file path. If the path does not end with ``.yaml``, the extension is automatically added to the file path.
See Also
--------
read_setup: Read Model initialization setup dictionary.
Examples
--------
>>> setup, mesh = smash.load_dataset("cance")
>>> setup
{'structure': 'gr-a', 'dt': 3600, 'start_time': '2014-09-15 00:00', ...}
Save setup
>>> smash.save_setup(setup, "setup.yaml")
Read setup (the reloaded setup keys will be alphabetically sorted)
>>> setup_rld = smash.read_setup("setup.yaml")
    >>> setup_rld
{'daily_interannual_pet': True, 'descriptor_name': ['slope', 'dd'], ...}
"""
if not path.endswith(".yaml"):
path = path + ".yaml"
with open(path, "w") as f:
yaml.dump(setup, f, default_flow_style=False)
def read_setup(path: str) -> dict:
"""
Read Model initialization setup dictionary.
Parameters
----------
path : str
The file path.
Returns
-------
dict :
A setup dictionary loaded from YAML file.
See Also
--------
save_setup: Save Model initialization setup dictionary.
Examples
--------
>>> setup, mesh = smash.load_dataset("cance")
>>> setup
{'structure': 'gr-a', 'dt': 3600, 'start_time': '2014-09-15 00:00', ...}
Save setup
>>> smash.save_setup(setup, "setup.yaml")
Read setup (the reloaded setup keys will be alphabetically sorted)
>>> setup_rld = smash.read_setup("setup.yaml")
    >>> setup_rld
{'daily_interannual_pet': True, 'descriptor_name': ['slope', 'dd'], ...}
"""
if os.path.isfile(path):
with open(path, "r") as f:
setup = yaml.safe_load(f)
else:
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), path)
return setup
|
DassHydro-dev/smash
|
smash/io/setup_io.py
|
setup_io.py
|
py
| 2,170 |
python
|
en
|
code
| 2 |
github-code
|
6
|
13289274017
|
import os
import platform
import sys
try:
from pip._internal.operations import freeze
except ImportError: # pip < 10.0
from pip.operations import freeze
py_version = sys.version.replace("\n", " ")
py_platform = platform.platform()
pkgs = freeze.freeze()
pip_pkgs = "\n".join(
pkg
for pkg in pkgs
if any(
name in pkg
for name in {
# runhouse
"runhouse",
# required installs
"wheel",
"rich",
"fsspec",
"pyarrow",
"sshtunnel",
"sshfs",
"typer",
"skypilot",
# aws
"awscli",
"boto3",
"pycryptodome",
"s3fs",
# azure
"azure-cli",
"azure-core",
# gcp
"google-api-python-client",
"google-cloud-storage",
"gcsfs",
# docker
"docker",
}
)
)
print(f"Python Platform: {py_platform}")
print(f"Python Version: {py_version}")
print()
print(f"Relevant packages: \n{pip_pkgs}")
print()
os.system("sky check")
os.system("sky status --refresh")
|
kalaracey/runhouse
|
collect_env.py
|
collect_env.py
|
py
| 1,178 |
python
|
en
|
code
| null |
github-code
|
6
|
74175163389
|
# -*- coding:utf-8 -*-
"""
Problem: Everyone knows the Fibonacci sequence. Given an integer n, output the n-th term of the sequence (0-indexed; the 0th term is 0).
"""
class Solution:
def Fibonacci(self, n):
# write code here
dp = [0,1]
if n>=2:
for i in range(2,n+1):
dp.append(dp[i-1]+dp[i-2])
return dp[n]
|
xxxsssyyy/offer-Goal
|
07斐波那契数列.py
|
07斐波那契数列.py
|
py
| 392 |
python
|
zh
|
code
| 7 |
github-code
|
6
|
10420754903
|
from __future__ import annotations
import asyncio
import os
import platform
import re
from asyncio import IncompleteReadError, StreamReader, StreamWriter
from pathlib import Path
from typing import TYPE_CHECKING
from randovania.patching.patchers.exceptions import UnableToExportError
if TYPE_CHECKING:
from collections.abc import Callable, Sequence
IO_LOOP: asyncio.AbstractEventLoop | None = None
def is_windows() -> bool:
return platform.system() == "Windows"
def is_mac() -> bool:
return platform.system() == "Darwin"
async def _write_data(stream: StreamWriter, data: str):
stream.write(data.encode("UTF-8"))
stream.close()
async def _read_data(stream: StreamReader, read_callback: Callable[[str], None]):
while True:
try:
line = await stream.readuntil(b"\r")
except IncompleteReadError as incomplete:
line = incomplete.partial
if line:
try:
decoded = line.decode()
except UnicodeDecodeError:
decoded = line.decode("latin1")
for x in re.split(r"[\r\n]", decoded.strip()):
if x:
read_callback(x)
else:
break
async def _process_command_async(
args: list[str], input_data: str, read_callback: Callable[[str], None], additional_path_entries: Sequence[str] = ()
):
environment_vars = os.environ.copy()
if len(additional_path_entries) > 0:
appending_paths = ":".join(additional_path_entries)
environment_vars["PATH"] = f"{environment_vars['PATH']}:{appending_paths}"
process = await asyncio.create_subprocess_exec(
*args,
stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.STDOUT,
env=environment_vars,
)
await asyncio.gather(
_write_data(process.stdin, input_data),
_read_data(process.stdout, read_callback),
)
await process.wait()
def process_command(
args: list[str], input_data: str, read_callback: Callable[[str], None], add_mono_if_needed: bool = True
):
if not Path(args[0]).is_file():
raise FileNotFoundError(f"{args[0]} not found")
needs_mono = add_mono_if_needed and not is_windows()
additional_paths = ()
if needs_mono:
args = ["mono", *args]
# Add common Mono paths to PATH, as they aren't there by default
if is_mac():
additional_paths = (
"/Library/Frameworks/Mono.framework/Versions/Current/Commands",
"/usr/local/bin",
"/opt/homebrew/bin",
)
work = _process_command_async(args, input_data, read_callback, additional_paths)
try:
if IO_LOOP is None:
if is_windows():
asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())
asyncio.run(work)
else:
asyncio.run_coroutine_threadsafe(work, IO_LOOP).result()
except FileNotFoundError:
if needs_mono:
raise UnableToExportError(
"Unable to find mono.<br /><br />"
"Please install it from the "
"<a href='https://www.mono-project.com/download/stable'>official website</a>."
)
else:
raise
|
randovania/randovania
|
randovania/games/prime2/patcher/csharp_subprocess.py
|
csharp_subprocess.py
|
py
| 3,321 |
python
|
en
|
code
| 165 |
github-code
|
6
|
21764328772
|
# Approach 1 - Breadth-First Search
# Time: O(N)
# Space: O(N)
from collections import deque
from typing import List


class Solution:
def orangesRotting(self, grid: List[List[int]]) -> int:
queue = deque()
# build the initial set of rotten oranges
fresh_oranges = 0
ROWS, COLS = len(grid), len(grid[0])
for r in range(ROWS):
for c in range(COLS):
if grid[r][c] == 2:
queue.append((r, c))
elif grid[r][c] == 1:
fresh_oranges += 1
queue.append((-1, -1))
        # Start at -1 because the loop counts one extra minute when it processes the final
        # sentinel after the last rotten orange; that round rots nothing, so it must not count.
        # If any fresh oranges remain unreachable at the end, -1 is returned instead.
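        # Example trace: one rotten orange with a single fresh neighbour starts the queue as
        # [rotten, sentinel]; the neighbour rots on the first pass, the first sentinel raises
        # minutes_elapsed to 0, a second sentinel raises it to 1, and 1 is returned.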
minutes_elapsed = -1
directions = [(-1, 0), (0, 1), (1, 0), (0, -1)]
while queue:
row, col = queue.popleft()
if row == -1: # processing of first round is complete
minutes_elapsed += 1
if queue:
queue.append((-1, -1))
else: # this is rotten orange
for d in directions:
neighbor_row, neighbor_col = row + d[0], col + d[1]
if ROWS > neighbor_row >= 0 and COLS > neighbor_col >= 0:
if grid[neighbor_row][neighbor_col] == 1:
grid[neighbor_row][neighbor_col] = 2
fresh_oranges -= 1
queue.append((neighbor_row, neighbor_col))
return minutes_elapsed if fresh_oranges == 0 else -1
|
jimit105/leetcode-submissions
|
problems/rotting_oranges/solution.py
|
solution.py
|
py
| 1,876 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72613074749
|
#Importing the Morse code Letters
from letter_morse import ENGLISH_TO_MORSE
#Looping through the letters
MC_TO_ENGLISH = {}
for key, value in ENGLISH_TO_MORSE.items():
MC_TO_ENGLISH[value] = key
#Function for converting Eng to Morse
def english_to_mc(message):
morse = []
for char in message:
if char in ENGLISH_TO_MORSE:
morse.append(ENGLISH_TO_MORSE[char])
return"".join(morse)
#function for converting Morse to Eng
def mc_to_english(message):
message = message.split(" ")
english = []
for code in message:
if code in MC_TO_ENGLISH:
english.append(MC_TO_ENGLISH[code])
return " ".join(english)
#The Input
def main():
while True:
response = input("Convert Morse to English(1) or English to Morse(2)?").upper()
if response == "1" or response =="2":
break
if response == "1":
print("Enter Morse code (with a space after each code): ")
morse = input("> ")
english = mc_to_english(morse)
print("### English version ###")
print(english)
elif response == "2":
print("Enter english text: ")
english = input("> ").upper()
morse = english_to_mc(english)
print("###Morse code Version###")
print(morse)
if __name__== "__main__":
main()
|
Bophelo11/Morse-Code-convertor
|
MorseCodeConverterPortfolio/main.py
|
main.py
|
py
| 1,375 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25363489701
|
import psycopg2
from users import *
from moderators import *
def menu():  # Menu for choosing between the different options.
while True:
print("Welcome to our program.")
menu_choice = int(input("1. Would you like to leave a message?\n"
"2. Would you like to log in as a moderator?\n"
"3. Would you like to register as a moderator?\n"
"4. Would you like to shut down?\n>"))
if menu_choice == 1:
insert_message()
return menu()
elif menu_choice == 2:
login_moderator()
return menu()
elif menu_choice == 3:
register_moderator()
return menu()
elif menu_choice == 4:
print("Thank you for using our program!")
break
menu()
|
DamianPlomp/stationszuil
|
main.py
|
main.py
|
py
| 850 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42857124490
|
import pytest
from django.conf import settings
from django.test import override_settings
from .compat import nullcontext
def pytest_configure():
settings.configure(
**dict(
SECRET_KEY="abcd",
INSTALLED_APPS=[
"django.contrib.auth",
"django.contrib.sessions",
"django.contrib.contenttypes",
"rest_framework",
"rest_framework_api_key",
],
            ROOT_URLCONF="urls",
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
},
)
)
@pytest.fixture
def view_with_permissions():
from rest_framework.decorators import api_view, permission_classes
from rest_framework.response import Response
def create_view(*classes):
@api_view()
@permission_classes(classes)
def view(*args):
return Response()
return view
return create_view
def _create_user():
from django.contrib.auth import get_user_model
User = get_user_model()
return User.objects.create_user(username="foo", password="bar")
@pytest.fixture(
name="backend",
params=[
{"header": "HTTP_AUTHORIZATION", "default": "Api-Key {key}"},
{
"header": "HTTP_X_API_KEY",
"default": "{key}",
"set_custom_header_setting": True,
},
],
)
def fixture_backend(request) -> dict:
backend = request.param
if backend.get("set_custom_header_setting"):
ctx = override_settings(API_KEY_CUSTOM_HEADER=backend["header"])
else:
ctx = nullcontext()
with ctx:
yield backend
@pytest.fixture
def create_request(backend):
from rest_framework.test import APIRequestFactory, force_authenticate
from rest_framework_api_key.models import APIKey
request_factory = APIRequestFactory()
_MISSING = object()
def create(
authenticated: bool = False, authorization: str = _MISSING, **kwargs
):
headers = {}
if authorization is not None:
kwargs.setdefault("name", "test")
_, key = APIKey.objects.create_key(**kwargs)
if callable(authorization):
authorization = authorization(key)
if authorization is _MISSING:
authorization = backend["default"]
headers[backend["header"]] = authorization.format(key=key)
request = request_factory.get("/test/", **headers)
if authenticated:
user = _create_user()
force_authenticate(request, user)
return request
return create
|
thaitl235/djangorestframework-api-key
|
tests/conftest.py
|
conftest.py
|
py
| 2,739 |
python
|
en
|
code
| null |
github-code
|
6
|
2338682136
|
import pandas as pd
import numpy as np
import json
from collections import defaultdict
from play_by_play import PlayByPlay
#define front end variables
DATE = '2015-12-25'
SEASON = '2015-16'
SEASON_TYPE = 'Regular+Season' # 'Regular+Season' or 'Playoffs'
HOME_TEAM = 'LAL'
def build_df(json):
rows = []
for frame_id in json:
game_clock = json[frame_id]['time']
quarter = json[frame_id]['quarter']
row = [frame_id, game_clock, quarter]
rows.append(row)
df = pd.DataFrame(rows, columns = ['frame_id', 'game_clock', 'quarter'])
return df
def encode_quarter(quarter):
if quarter == '1st':
return 1
elif quarter == '2nd':
return 2
elif quarter == '3rd':
return 3
elif quarter == '4th':
return 4
else:
#doublecheck: is all of OT just 5? 2OT, 3OT,...etc.
return 5
def new_json_format(my_dict):
''' better organzition by nesting players on the court info '''
new_dict = defaultdict(dict)
for frame_id in ocr_pbp_dict:
game_clock = my_dict[frame_id]['game_clock']
        quarter = my_dict[frame_id]['quarter']
team1_id = my_dict[frame_id]['TEAM1_ID']
team1_player1 = my_dict[frame_id]['TEAM1_PLAYER1']
team1_player2 = my_dict[frame_id]['TEAM1_PLAYER2']
team1_player3 = my_dict[frame_id]['TEAM1_PLAYER3']
team1_player4 = my_dict[frame_id]['TEAM1_PLAYER4']
team1_player5 = my_dict[frame_id]['TEAM1_PLAYER5']
team2_id = my_dict[frame_id]['TEAM2_ID']
team2_player1 = my_dict[frame_id]['TEAM2_PLAYER1']
team2_player2 = my_dict[frame_id]['TEAM2_PLAYER2']
team2_player3 = my_dict[frame_id]['TEAM2_PLAYER3']
team2_player4 = my_dict[frame_id]['TEAM2_PLAYER4']
team2_player5 = my_dict[frame_id]['TEAM2_PLAYER5']
#assign to new format json
new_dict[frame_id]['game_clock'] = game_clock
new_dict[frame_id]['quarter'] = quarter
#Team 1
        # for organization, nest another dictionary for the team id and player ids of each team
new_dict[frame_id]['team1'] = {}
new_dict[frame_id]['team1']['id'] = team1_id
new_dict[frame_id]['team1']['players'] = [team1_player1, team1_player2, team1_player3, team1_player4, team1_player5]
#Team 2
new_dict[frame_id]['team2'] = {}
new_dict[frame_id]['team2']['id'] = team2_id
new_dict[frame_id]['team2']['players'] = [team2_player1, team2_player2, team2_player3, team2_player4, team2_player5]
return new_dict
if __name__ == "__main__":
#read in the ocr results
with open('./data/ocr_results.json') as ocr:
ocr_json = json.load(ocr)
#extract play by play data for the game uploaded
pbp = PlayByPlay(DATE, SEASON, SEASON_TYPE, HOME_TEAM).get_pbp()
#convert from json to DataFrame
ocr_df = build_df(ocr_json)
#fill in missing frames quarter by taking the last known quarter
#future TODO: very small chance the last known quarter is incorrect if missing values occur at transition around 12:00 mark of new quarter
ocr_df['quarter'] = ocr_df['quarter'].fillna(method = 'ffill')
#convert game clock from string to seconds
pbp['TimeSecs'] = [int(a) * 60 + int(b) for a, b in pbp['PCTIMESTRING'].str.split(':')]
ocr_df['TimeSecs'] = [int(a) * 60 + int(b) for a, b in ocr_df['game_clock'].str.split(':')]
#same for the quarter
ocr_df['quarter'] = ocr_df['quarter'].apply(encode_quarter)
#using pandas merge_asof to match up the corresponding pbp record for each frame to figure out who is on the court at each frame
    ocr_pbp = pd.merge_asof(
        ocr_df.sort_values('TimeSecs'),
        pbp[['TimeSecs', 'PERIOD', 'TEAM1_ID', 'TEAM1_PLAYER1', 'TEAM1_PLAYER2', 'TEAM1_PLAYER3',
             'TEAM1_PLAYER4', 'TEAM1_PLAYER5', 'TEAM2_ID', 'TEAM2_PLAYER1', 'TEAM2_PLAYER2',
             'TEAM2_PLAYER3', 'TEAM2_PLAYER4', 'TEAM2_PLAYER5']].sort_values('TimeSecs'),
        on='TimeSecs', left_by='quarter', right_by='PERIOD', direction='forward'
    ).sort_values('frame_id').drop(columns=['TimeSecs', 'PERIOD'])
#set index for .to_dict method
ocr_pbp = ocr_pbp.set_index('frame_id')
#convert to dictionary
ocr_pbp_dict = ocr_pbp.to_dict(orient='index')
#transform to final output form
ocr_pbp_new = new_json_format(ocr_pbp_dict)
#export the final ocr json
with open('./ocr_w_players.json', 'w') as output:
json.dump(ocr_pbp_new, output)
|
nalin1096/DS5500_Player_Tracking_and_Identification_NBA
|
helpers/play_by_play/pbp_ocr.py
|
pbp_ocr.py
|
py
| 4,444 |
python
|
en
|
code
| 6 |
github-code
|
6
|
37961270756
|
import dash_html_components as html
import dash
from dash.dependencies import Input, Output
import dash_table
import pandas as pd
import dash_core_components as dcc
df = pd.read_csv('GraphVisualizationLearning/data.csv')
# print(df['seed'][2])
# print(df['seed'])
del df['seed']
# df = df.dropna()
dff = df[["Config","time_stamp","testcase","fail_count"]]
# print(dff)
# print(df.columns[0])
available_project = df['project'].unique()
available_date = df['date'].unique()
available_config = df['Config'].unique()
print(available_project)
app = dash.Dash(__name__)
PAGE_SIZE = 20
# app.layout = dash_table.DataTable(
# id='datatable-paging',
# columns=[
# {"name": i, "id": i} for i in df.columns
# ],
# page_current=0,
# page_size=PAGE_SIZE,
# page_action='custom'
# )
#
#
# @app.callback(
# Output('datatable-paging', 'data'),
# [Input('datatable-paging', "page_current"),
# Input('datatable-paging', "page_size")])
# def update_table(page_current,page_size):
# return df.iloc[
# page_current*page_size:(page_current+ 1)*page_size
# ].to_dict('records')
#
# app.layout = dash_table.DataTable(
# id='table-multicol-sorting',
# columns=[
# {"name": i, "id": i} for i in df.columns
# ],
# page_current=0,
# page_size=PAGE_SIZE,
# page_action='custom',
#
# sort_action='custom',
# sort_mode='multi',
# sort_by=[]
# )
#
# @app.callback(
# Output('table-multicol-sorting', "data"),
# [Input('table-multicol-sorting', "page_current"),
# Input('table-multicol-sorting', "page_size"),
# Input('table-multicol-sorting', "sort_by")])
# def update_table(page_current, page_size, sort_by):
# # print(sort_by)
# if len(sort_by):
# dff = df.sort_values(
# [col['column_id'] for col in sort_by],
# ascending=[
# col['direction'] == 'asc'
# for col in sort_by
# ],
# inplace=False
# )
# else:
# # No sort is applied
# dff = df
#
# return dff.iloc[
# page_current*page_size:(page_current+ 1)*page_size
# ].to_dict('records')
app.layout = html.Div(
# html.Div([
# html.H1('Post regression analysis')
# ]),
className="row",
children=[
html.Div([
html.Div([
dcc.Dropdown(
id='Project',
options=[{'label': proj, 'value': proj} for proj in available_project],
value=''
# value='TimbuktuMPNX',
# multi=True
)
], style={'width': '30%', 'display': 'inline-block'}),
html.Div([
dcc.Dropdown(
id='Date',
options=[{'label': date, 'value': date} for date in available_date],
# value='27/09/2018'
value=''
# multi=True
)
], style={'width': '30%', 'display': 'inline-block'}),
html.Div([
dcc.Dropdown(
id='Config',
options=[{'label': config, 'value': config} for config in available_config],
# value='27/09/2018'
value=''
# multi=True
)
], style={'width': '30%', 'display': 'inline-block'})
]),
html.Div(
dash_table.DataTable(
id='table-paging-with-graph',
columns=[
{"name" : i, "id":i} for i in dff.columns
],
page_current=0,
page_size=20,
page_action='custom',
filter_action='custom',
filter_query='',
sort_action='custom',
sort_mode='multi',
sort_by=[]
),
style={'height': 300, 'overflowY': 'scroll'},
className='six columns'
),
html.Div(
id='table-paging-with-graph-container',
className="five columns"
)
]
)
operators = [['ge ', '>='],
['le ', '<='],
['lt ', '<'],
['gt ', '>'],
['ne ', '!='],
['eq ', '='],
['contains '],
['datestartswith ']]
def split_filter_part(filter_part):
for operator_type in operators:
for operator in operator_type:
if operator in filter_part:
name_part, value_part = filter_part.split(operator, 1)
name = name_part[name_part.find('{') + 1: name_part.rfind('}')]
value_part = value_part.strip()
v0 = value_part[0]
if (v0 == value_part[-1] and v0 in ("'", '"', '`')):
value = value_part[1: -1].replace('\\' + v0, v0)
else:
try:
value = float(value_part)
except ValueError:
value = value_part
# word operators need spaces after them in the filter string,
# but we don't want these later
return name, operator_type[0].strip(), value
return [None] * 3
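# Illustrative examples of the parsing above (the column names come from dff):
#     split_filter_part('{Config} contains abc')  -> ('Config', 'contains', 'abc')
#     split_filter_part('{fail_count} >= 3')      -> ('fail_count', 'ge', 3.0)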
@app.callback(
Output('table-paging-with-graph', "data"),
[Input('Project', "value"),
Input('table-paging-with-graph', "page_current"),
Input('table-paging-with-graph', "page_size"),
Input('table-paging-with-graph', "sort_by"),
Input('table-paging-with-graph', "filter_query")
])
def update_table(select_proj, page_current, page_size, sort_by, filter):  # parameter order matches the callback Inputs above
filtering_expressions = filter.split(' && ')
dff_proj = df[df['project'] == select_proj]
for filter_part in filtering_expressions:
col_name, operator, filter_value = split_filter_part(filter_part)
if operator in ('eq', 'ne', 'lt', 'le', 'gt', 'ge'):
# these operators match pandas series operator method names
dff_proj = dff_proj.loc[getattr(dff_proj[col_name], operator)(filter_value)]
elif operator == 'contains':
dff_proj = dff_proj.loc[dff_proj[col_name].str.contains(filter_value)]
elif operator == 'datestartswith':
# this is a simplification of the front-end filtering logic,
# only works with complete fields in standard format
dff_proj = dff_proj.loc[dff_proj[col_name].str.startswith(filter_value)]
if len(sort_by):
dff_proj = dff_proj.sort_values(
[col['column_id'] for col in sort_by],
ascending=[
col['direction'] == 'asc'
for col in sort_by
],
inplace=False
)
return dff_proj.iloc[
page_current * page_size: (page_current + 1) * page_size
].to_dict('records')
if __name__ == '__main__':
app.run_server(debug=True)
|
shashank793/DataVisualisation
|
venv/simple_graph/create_tabl.py
|
create_tabl.py
|
py
| 6,981 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22666651676
|
def main():
# Upper limit is the highest 1_2_3_4_5_6_7_8_9_0 number
# while the lower limit is the lowest.
#
# Returns the first (only) number that matches
# the given pattern mathematically.
#
    # The variable i has to end in 0 so that its square also ends in 0,
    # hence the loop steps by 10.
upper = int(1929394959697989990**0.5)
lower = int(1020304050607080900**0.5)
nums = [0, 9, 8, 7, 6, 5, 4, 3, 2, 1] # Reverse order for while-loop
i = lower
while i <= upper:
x = i**2
num_index = 0
while x > 0:
num = x % 10
if num != nums[num_index]:
break
num_index += 1
x = x // 100
else:
return i
i += 10
return 0
if __name__ == '__main__':
import time
start = time.time()
print(main())
print(f"Time taken: {round(time.time() - start, 2)}s")
|
kakuttaja/project-euler
|
206.py
|
206.py
|
py
| 934 |
python
|
en
|
code
| 0 |
github-code
|
6
|
438092224
|
from django.http import HttpResponse
from django.shortcuts import redirect, reverse, render
from cart.models import Cart, Item, OrderItem, Basket
from product_listing.models import Product
import cart.forms
import datetime
from django.contrib.auth import authenticate
# Create your views here.
def index(request):
context={}
total = 0
if request.user.is_authenticated:
carts = Cart.objects.filter(user=request.user)
if carts is not None:
this_cart = carts.first()
context['cart'] = this_cart
context['items'] = Item.objects.filter(cart=this_cart)
else:
this_cart = Cart(user=request.user)
this_cart.save()
context['cart'] = this_cart
for item in context['items']:
total += item.unit_price
context['total'] = total
return render(request, 'cart/cart.html', context)
else:
return redirect('/account/login')
#def viewCart(request):
# context={}
# user = request.user
# cart = Cart.objects.get(user=user)
# context['cart'] = cart
# context['items'] = Item.objects.get(cart=cart)
# return render(request, "cart.html", context)
def modifyCart(request, action, product_id, quantity):
context = {}
if request.user.is_authenticated:
try:
user_cart = Cart.objects.get(user=request.user)
except:
user_cart = Cart.objects.create(user=request.user)
if (action=='add'):
add_to_cart(request, product_id, quantity)
elif (action=='remove'):
remove_from_cart(request, product_id, quantity)
elif (action=='clear'):
clear_cart(request)
else:
return HttpResponse("Error")
else:
return reverse(request, 'account/login.html', context)
return redirect('/')
def add_to_cart(request, product_id, quantity):
print('getting to cart"s add to cart')
user_cart = Cart.objects.get(user=request.user)
product = Product.objects.get(id=product_id)
itemsMatching = Item.objects.filter(product=product, cart=user_cart, unit_price=product.price_current)
this_item= itemsMatching.first()
if this_item is None:
item = Item(
product=product,
cart = user_cart,
quantity= quantity,
unit_price= product.price_current
)
item.save()
else:
print('ok')
new_q = this_item.quantity + quantity
itemsMatching.update(quantity=new_q)
def remove_from_cart(request, product_id, quantity):
this_cart = Cart.objects.get(user=request.user)
items = Item.objects.filter(product=product_id, cart=this_cart)
if items is not None:
item = items.first()
        if (item.quantity > quantity):
            item.quantity = item.quantity - quantity
            item.save()  # persist the reduced quantity
else:
item.delete()
def clear_cart(request):
this_cart = Cart.objects.get(user=request.user)
for item in this_cart.item_set.all():
item.delete()
this_cart.delete()
def checkout(request):
context={}
if request.user.is_authenticated == False:
return reverse(request, 'account/login.html',context)
try:
this_cart = Cart.objects.get(user=request.user)
except:
return redirect('/cart')
if this_cart.item_set.all() is None:
        form = cart.forms.ShippingForm()
        msg = "Your cart is empty"
        form.add_error(None, msg)
return render (request, 'cart/checkout.html', context)
user = request.user
baskets = []
items = this_cart.item_set.all()
if request.method=='POST':
form = cart.forms.ShippingForm(request.POST)
if form.is_valid():
try:
shipaddr = form.cleaned_data['street_address']+", "+form.cleaned_data['postcode']
for item in items:
total = 0
for basket in baskets:
if (basket.seller != item.seller):
total += 1
else:
thisBasket = basket
if (total == len(baskets)):
thisBasket = Basket(
seller = item.product.seller,
buyer = user,
time = datetime.datetime.now(),
shipping_to = shipaddr
)
thisBasket.save()
baskets.append(thisBasket)
orderItem = OrderItem(
product_string = item.__str__(),
unit_price = item.unit_price,
product = item.product,
buyer = user,
seller = item.product.seller,
basket = thisBasket,
quantity = item.quantity,
shipping_to = shipaddr
)
orderItem.save()
product = item.product
if product.stock > item.quantity:
product.stock -= item.quantity
else:
Product.objects.filter(pk=product.pk).update(stock=0)
item.delete()
this_cart.delete()
except:
print('except')
return HttpResponse("Please try again")
return render(request, 'cart/checkoutsuccess.html', context)
else:
print('form is invalid')
form = cart.forms.ShippingForm
context['form'] = form
return render (request, 'cart/checkout.html', context)
else:
form = cart.forms.ShippingForm
context['form'] = form
return render (request, 'cart/checkout.html', context)
def checkoutsuccess(request):
return render(request, 'cart/checkoutsuccess.html')
|
ftaoussi/COMP307---Marketplace
|
cart/views.py
|
views.py
|
py
| 4,769 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39702394709
|
from flask import Blueprint
from flask import render_template, url_for, request
from flask import make_response, send_from_directory
from werkzeug.utils import secure_filename
import os
from apps.xmind2caseapp import write2excel, xmind2case
x2c = Blueprint('x2c',__name__)
# workpath = os.getcwd()
workpath=os.path.dirname(os.path.realpath(__file__))
upload_dir = os.path.join(workpath, "apps/xmind2caseapp" ,"upload")
download_dir = os.path.join(workpath,"apps/xmind2caseapp" , "download")
@x2c.route("/index")
def x2ch():
return render_template("x2c/x2c.html")
@x2c.route("/x2conf")
def x2conf():
return render_template("x2c/x2c.html")
@x2c.route('/uploader', methods=['GET', 'POST'])
def uploader():
# print(os.path.join(workpath))
if request.method == 'POST':
f = request.files['file']
if f.filename[f.filename.find("."):]!=".xmind":
return "X101" # X101:上传的不是xmind格式
filename = f.filename[:f.filename.find(".")]+".xls"
uppath = os.path.join(upload_dir, secure_filename(f.filename))
dopath = os.path.join(download_dir, filename)
f.save(uppath)
p = xmind2case.xmind2dict(uppath)
h = xmind2case.handle_xmind_msg(p)
write2excel.writr_to_excel(dopath, h)
dpath = url_for("x2c.download_file", filename=filename)
print(dpath)
return "True+"+dpath
else:
return 'False+'
# return render_template('upload.html')
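# A minimal client-side sketch (hypothetical: the host, port, URL prefix and file name are
# illustrative) of how a caller might interpret the "X101" / "True+<path>" / "False+"
# responses returned by the uploader view above:
#
#     import requests
#     resp = requests.post("http://localhost:5000/uploader",
#                          files={"file": open("cases.xmind", "rb")})
#     body = resp.text
#     if body == "X101":               # not an .xmind file
#         print("please upload a .xmind file")
#     elif body.startswith("True+"):   # success; the remainder is the download URL path
#         print("download at", body[len("True+"):])
#     else:                            # "False+": the request was not a POST / upload failed
#         print("upload failed")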
@x2c.route("/download/<filename>", methods=['GET'])
def download_file(filename):
# directory=os.path.join(workpath,"download")
response = make_response(send_from_directory(
download_dir, filename, as_attachment=True))
response.headers["Content-Disposition"] = "attachment; filename={}".format(
filename.encode().decode('latin-1'))
return response
|
siqyka/QtestTool
|
x2c.py
|
x2c.py
|
py
| 1,860 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29127983258
|
from django.test import TestCase
from django.utils.translation import ugettext_lazy as _
from social_links import forms
class LinksFormTests(TestCase):
def test_clean_url(self):
    valid_urls = [['https://www.example.com', 'https://www.example.com'],
                  ['http://www.example.com', 'http://www.example.com'],
                  ['www.example.com', 'http://www.example.com'],
                  ['example.com', 'http://www.example.com'],
                  ]
for url in valid_urls:
cleaned_url = forms.clean_url(url[0])
self.assertEqual(cleaned_url, url[1])
def test_social_form(self):
invalid_data_dicts = [
{'data': {'facebook': 'composerscouch.com',
'google_plus': '',
'twitter': ''},
'error': ('facebook', [_(u'Must be a Facebook URL.')])},
{'data': {'facebook': '',
'google_plus': 'composerscouch.com',
'twitter': ''},
'error': ('google_plus', [_(u'Must be a Google Plus URL.')])},
{'data': {'facebook': '',
'google_plus': '',
'twitter': 'composerscouch.com'},
'error': ('twitter', [_(u'Must be a Twitter URL.')])},
]
for invalid_dict in invalid_data_dicts:
form = forms.SocialLinksForm(data=invalid_dict['data'])
self.failIf(form.is_valid())
self.assertEqual(form.errors[invalid_dict['error'][0]],
invalid_dict['error'][1])
valid_data_dicts = [
{'data': {'facebook': 'https://www.facebook.com/thekooksofficial',
'google_plus': 'https://plus.google.com/116651435444058665368/about',
'twitter': 'https://twitter.com/thekooksmusic'},},
]
for valid_dict in valid_data_dicts:
form = forms.SocialLinksForm(data=valid_dict['data'])
self.failUnless(form.is_valid())
def test_photo_form(self):
invalid_data_dicts = [
{'data': {'instagram': 'composerscouch.com',
'tumblr': ''},
'error': ('instagram', [_(u'Must be a Instagram URL.')])},
{'data': {'instagram': '',
'tumblr': 'composerscouch.com'},
'error': ('tumblr', [_(u'Must be a Tumblr URL.')])},
]
for invalid_dict in invalid_data_dicts:
form = forms.PhotoLinksForm(data=invalid_dict['data'])
self.failIf(form.is_valid())
self.assertEqual(form.errors[invalid_dict['error'][0]],
invalid_dict['error'][1])
valid_data_dicts = [
{'data': {'instagram': 'http://instagram.com/thekooksmusic/',
'tumblr': 'http://thekooksmusic.tumblr.com/'},},
]
for valid_dict in valid_data_dicts:
form = forms.PhotoLinksForm(data=valid_dict['data'])
self.failUnless(form.is_valid())
def test_video_form(self):
invalid_data_dicts = [
{'data': {'youtube': 'composerscouch.com',
'vimeo': ''},
'error': ('youtube', [_(u'Must be a Youtube URL.')])},
{'data': {'youtube': '',
'vimeo': 'composerscouch.com'},
'error': ('vimeo', [_(u'Must be a Vimeo URL.')])},
]
for invalid_dict in invalid_data_dicts:
form = forms.VideoLinksForm(data=invalid_dict['data'])
self.failIf(form.is_valid())
self.assertEqual(form.errors[invalid_dict['error'][0]],
invalid_dict['error'][1])
valid_data_dicts = [
{'data': {'youtube': 'https://www.youtube.com/user/thekooksofficial',
'vimeo': 'http://vimeo.com/davissilis'},},
]
for valid_dict in valid_data_dicts:
form = forms.VideoLinksForm(data=valid_dict['data'])
self.failUnless(form.is_valid())
def test_music_form(self):
invalid_data_dicts = [
{'data': {'bandcamp': 'composerscouch.com',
'itunes': '',
'spotify': '',
'soundcloud': ''},
'error': ('bandcamp', [_(u'Must be a Bandcamp URL.')])},
{'data': {'bandcamp': '',
'itunes': 'composerscouch.com',
'spotify': '',
'soundcloud': ''},
'error': ('itunes', [_(u'Must be a iTunes URL.')])},
{'data': {'bandcamp': '',
'itunes': '',
'spotify': 'composerscouch.com',
'soundcloud': ''},
'error': ('spotify', [_(u'Must be a Spotify URL.')])},
{'data': {'bandcamp': '',
'itunes': '',
'spotify': '',
'soundcloud': 'composerscouch.com'},
'error': ('soundcloud', [_(u'Must be a SoundCloud URL.')])},
]
for invalid_dict in invalid_data_dicts:
form = forms.MusicLinksForm(data=invalid_dict['data'])
self.failIf(form.is_valid())
self.assertEqual(form.errors[invalid_dict['error'][0]],
invalid_dict['error'][1])
valid_data_dicts = [
{'data': {'bandcamp': 'http://sekinzer.bandcamp.com/track/junk-of-the-heart-cover',
'itunes': 'https://itunes.apple.com/us/artist/the-kooks/id68448386',
'spotify': 'https://play.spotify.com/artist/1GLtl8uqKmnyCWxHmw9tL4',
'soundcloud': 'https://soundcloud.com/kooksmusic'},},
]
for valid_dict in valid_data_dicts:
form = forms.MusicLinksForm(data=valid_dict['data'])
self.failUnless(form.is_valid())
|
TimBest/ComposersCouch
|
social_links/tests/tests_forms.py
|
tests_forms.py
|
py
| 5,893 |
python
|
en
|
code
| 1 |
github-code
|
6
|
5384553044
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import os
__DIR__ = os.path.abspath(os.path.dirname(__file__))
from ply import lex, yacc
from .data import Domain, MsgId, MsgStr, MsgStrPlural, MsgStrList, Message
class ParserException(Exception):
pass
DEBUG = 0
tokens = (
'COMMENT',
'DOMAIN',
'PREV_START',
'PREV_MSGCTXT',
'PREV_MSGID',
'PREV_MSGID_PLURAL',
'PREV_STRING',
'MSGCTXT',
'MSGID',
'MSGID_PLURAL',
'MSGSTR',
'NUMBER',
'STRING'
)
t_DOMAIN = r'domain'
t_MSGID = r'msgid'
t_MSGID_PLURAL = r'msgid_plural'
t_MSGSTR = r'msgstr'
t_MSGCTXT = r'msgctxt'
t_ignore = ' \t'
t_prev_ignore = t_ignore
literals = '[]'
states = (
('prev', 'exclusive'),
)
def t_PREV_START(t):
r'\#\|'
t.lexer.begin('prev')
return t
def t_COMMENT(t):
r'\#.*\n'
t.value = t.value[:-1]
return t
def t_STRING(t):
r'\"(?P<content>([^\\\n]|(\\.))*?)\"'
stval = t.lexer.lexmatch.group("content")
t.value = stval if stval else ''
return t
def t_NUMBER(t):
r'[0-9]+'
t.value = int(t.value)
return t
t_prev_NUMBER = t_NUMBER
def t_newline(t):
r'\n+'
t.lexer.lineno += len(t.value)
def t_error(t):
raise SyntaxError("Illegal character %r on %d" % (t.value[0], t.lexer.lineno))
t_prev_error = t_error
def t_prev_MSGCTXT(t):
r'msgctxt'
t.type = 'PREV_MSGCTXT'
return t
def t_prev_MSGID(t):
r'msgid'
t.type = 'PREV_MSGID'
return t
def t_prev_MSGID_PLURAL(t):
r'msgid_plural'
t.type = 'PREV_MSGID_PLURAL'
return t
def t_prev_STRING(t):
r'\"(?P<content>([^\\\n]|(\\.))*?)\"'
t.type = 'PREV_STRING'
stval = t.lexer.lexmatch.group("content")
t.value = stval if stval else ''
return t
def t_prev_newline(t):
r'\n+'
t.lexer.begin('INITIAL')
t.lexer.lineno += len(t.value)
def p_empty(p):
"empty :"
pass
def p_error(p):
raise ParserException(str(p))
def p_po_file(p):
"""
po_file : po_file comment
| po_file domain
| po_file message
| po_file error
| empty
"""
if len(p) == 2:
p[0] = []
else:
p[0] = p[1] + [p[2]]
def p_comment(p):
"""
comment : COMMENT
"""
p[0] = p[1]
def p_domain(p):
"""
domain : DOMAIN STRING
"""
p[0] = Domain(p[2])
## -- message -- ##
def p_message(p):
"""
message : message_intro string_list MSGSTR string_list
"""
if p[1] and isinstance(p[1], tuple):
msgid = MsgId(p[2], ctxt=p[1][1])
prev = p[1][0]
else:
msgid = MsgId(p[2], ctxt=p[1])
prev = None
msgstr = MsgStr(p[4])
p[0] = Message(msgid, msgstr, prev=prev)
def p_message_plural(p):
"""
message : message_intro string_list msgid_pluralform pluralform_list
"""
if p[1] and isinstance(p[1], tuple):
msgid = MsgId(p[2], ctxt=p[1][1], pluralform=p[3])
prev = p[1][0]
else:
msgid = MsgId(p[2], ctxt=p[1], pluralform=p[3])
prev = None
msgstr = MsgStrList(p[4])
p[0] = Message(msgid, msgstr, prev=prev)
def p_message_no_msgstrplural(p):
"""
message : message_intro string_list msgid_pluralform
"""
raise PercerException("missing 'msgstr[0]' section")
def p_message_no_msgidplural(p):
"""
message : message_intro string_list pluralform_list
"""
raise PercerException("missing 'msgid_plural' section")
def p_message_no_msgstr(p):
"""
message : message_intro string_list
"""
raise PercerException("missing 'msgstr' section")
## -- message end -- ##
def p_message_intro(p):
"""
message_intro : msg_intro
| prev msg_intro
"""
if len(p)==3:
p[0] = (p[1], p[2])
else:
p[0] = p[1]
def p_prev(p):
"""
prev : prev_msg_intro prev_string_list
| prev_msg_intro prev_string_list prev_msgid_pluralform
"""
if len(p)==3:
p[0] = MsgId(p[2], ctxt=p[1])
else:
p[0] = MsgId(p[2], pluralform=p[3], ctxt=p[1])
def p_msg_intro(p):
"""
msg_intro : MSGID
| MSGCTXT string_list MSGID
"""
if len(p)==2:
return
else:
p[0] = p[2]
def p_prev_msg_intro(p):
"""
prev_msg_intro : PREV_START PREV_MSGID
| PREV_START PREV_MSGCTXT prev_string_list PREV_START PREV_MSGID
"""
if len(p)==3:
return
else:
p[0] = p[3]
def p_msgid_pluralform(p):
"""
msgid_pluralform : MSGID_PLURAL string_list
"""
p[0] = p[2]
def p_prev_msgid_pluralform(p):
"""
prev_msgid_pluralform : PREV_MSGID_PLURAL prev_string_list
"""
p[0] = p[2]
def p_pluralform_list(p):
"""
pluralform_list : pluralform
| pluralform_list pluralform
"""
if len(p)==2:
p[0] = [p[1]]
else:
p[0] = p[1] + [p[2]]
def p_pluralform(p):
"""
pluralform : MSGSTR '[' NUMBER ']' string_list
"""
p[0] = MsgStrPlural(number=p[3], value=p[5])
def p_string_list(p):
"""
string_list : STRING
| string_list STRING
"""
if len(p)==2:
p[0] = p[1]
else:
p[0] = p[1] + p[2]
def p_prev_string_list(p):
"""
prev_string_list : PREV_STRING
| prev_string_list PREV_STRING
"""
if len(p)==2:
p[0] = p[1]
else:
p[0] = p[1] + p[2]
start = str('po_file')
lexer = lex.lex(debug=DEBUG)
parser = yacc.yacc(outputdir=__DIR__, debug=DEBUG, write_tables=False)
def parse(f):
ret = parser.parse(f.read())
parser.restart()
return ret
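# --- Usage sketch (illustrative; the sample catalog below is hypothetical) ---
# This module uses relative imports, so call it through its package, e.g.:
#
#     import io
#     from ponda import parser as po_parser
#
#     catalog = io.StringIO('msgid "Hello"\nmsgstr "Bonjour"\n')
#     entries = po_parser.parse(catalog)  # list of Domain/Message objects built by the grammar above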
|
takada-at/ponda
|
ponda/parser.py
|
parser.py
|
py
| 5,662 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37269948830
|
# Unbounded knapsack (each item may be reused); memoized on (n, w) via the global dp table.
def unboundedKnapsack(n, w, items, weights):
if n == 0 or w == 0:
return 0
if dp[n][w] != -1:
return dp[n][w]
if weights[n-1] <= w:
dp[n][w] = max(items[n-1]+unboundedKnapsack(n, w-weights[n-1],
items, weights), unboundedKnapsack(n-1, w, items, weights))
return dp[n][w]
else:
dp[n][w] = unboundedKnapsack(n-1, w, items, weights)
return dp[n][w]
n = 4
items = [10, 40, 50, 70]
weights = [1, 3, 4, 5]
w = 8
dp = [[-1 for _ in range(w+1)]for _ in range(n+1)]
print(unboundedKnapsack(n, w, items, weights))
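# The same recurrence can also be filled bottom-up (tabulation), which avoids
# Python's recursion limit for large capacities. This variant is added for
# comparison and reuses the n, w, items and weights defined above.
def unboundedKnapsackTab(n, w, items, weights):
    table = [[0] * (w + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for cap in range(1, w + 1):
            if weights[i - 1] <= cap:
                # take item i-1 again (stay on row i) or skip it (row i-1)
                table[i][cap] = max(items[i - 1] + table[i][cap - weights[i - 1]],
                                    table[i - 1][cap])
            else:
                table[i][cap] = table[i - 1][cap]
    return table[n][w]

print(unboundedKnapsackTab(n, w, items, weights))  # should print the same value as the memoized version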
|
richiabhi/Self---Dp
|
unboundedKnapsack memoized.py
|
unboundedKnapsack memoized.py
|
py
| 593 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34371656679
|
############################################################################################################
from colorama import *
import os
import requests
import re
############################################################################################################
def search_words_in_file(file_path, words):
grabber_found = False
with open(file_path, 'r', encoding='utf-8') as file:
for line_number, line in enumerate(file):
for word in words:
if word in line:
grabber_found = True
os.system('title WARNING: GRABBER FOUND!')
print(f"{Fore.RED}[!]: {line.strip()}")
webhook_regex = r'(https?://(?:www\.)?discord(?:app)?\.com/api/webhooks/[^\s]+)'
webhook_match = re.search(webhook_regex, line)
if webhook_match:
webhook_url = webhook_match.group(1)
print(f"{Fore.LIGHTGREEN_EX}[SENDING]{Fore.WHITE} Sending a message to webhook :)")
title = '**Found Your Webhook LOL**'
description = 'Do not grab people. [>]\nIf we see your webhook again it will be deleted. This is a **WARNING**.'
color = 0xFF5733
send_embed_to_webhook(webhook_url, title, description, color)
if not grabber_found:
print(f"{Fore.LIGHTGREEN_EX}NO GRABBER FOUND! :)")
os.system('title NO GRABBER FOUND!')
############################################################################################################
def send_embed_to_webhook(webhook_url, title, description, color):
embed = {
"title": title,
"description": description,
"color": color
}
payload = {
"username": "RainBow Blooded",
"embeds": [embed]
}
try:
response = requests.post(webhook_url, json=payload)
response.raise_for_status()
print(f"{Fore.LIGHTBLUE_EX}[SENT] {Fore.WHITE}Sent a warning to user's webhook")
except requests.exceptions.RequestException as e:
print(f"{Fore.LIGHTBLUE_EX}[ERROR] {Fore.WHITE}Failed to send message to webhook: {e}")
############################################################################################################
namefile = input(f"{Fore.LIGHTBLUE_EX}[{Fore.WHITE}>{Fore.LIGHTBLUE_EX}]: {Fore.WHITE}Filename?: ")
file_path = f'{namefile}'
words = ['b64decode', 'exec', "https://discord.com/api/webhooks/", "__t3mp__", "grabber", "stealer", "Hyperion", "OrionGrabber", "LunarStealer", "__import__('base64')", "__import__('builtins')", ".exec", ";exec", "__import__('tempfile')", "paste.fo", "paste.website", "<string>"]
search_words_in_file(file_path, words)
rem = input(f"{Fore.YELLOW}[{Fore.WHITE}+{Fore.YELLOW}]: {Fore.WHITE}do you want to remove the file? [y/n]: ")
if rem == "y":
os.remove(namefile)
else:
pass
############################################################################################################
|
OMGmultitools/Anti-Grabber
|
Anti Grabbee.py
|
Anti Grabbee.py
|
py
| 3,123 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2052137958
|
# Program to try and work out the power spectrum
import numpy as np
import matplotlib.pyplot as plt
from scipy.fftpack import fft, fftfreq, ifft
n = 1024
Lx = 100
omg = 2.0*np.pi/Lx
x = np.linspace(0, Lx, n)
y1 = 1.0*np.cos( 5.0*omg*x)
y2 = 1.0*np.sin(10.0*omg*x)
y3 = 0.5*np.sin(20.0*omg*x)
y = y1 + y2 + y3
act = y1 + y2
yd_true = (omg)*( -5.0*1.0*np.sin(5.0*omg*x) + 10.0*1.0*np.cos(10.0*omg*x) + 20.0*0.5*np.cos(20.0*omg*x))
mean_y = np.mean(y)
std_y = np.std(y)
var_y = std_y**2.0
print(mean_y, std_y, var_y)
# Creates all the necessary frequencies
freqs = fftfreq(n)
# Arranges the frequencies in ascending order
idx = np.argsort(freqs)
# wave numbers
nwaves = freqs*n
nwaves_2pi = omg*nwaves
# mask array to be used for power spectra.
# ignoring half the values, as they are complex conjugates of the others
mask = freqs > 0
# fft values
fft_vals = fft(y)
# Fourier filtering
fft_new = np.copy(fft_vals)
fft_new[np.abs(nwaves)==20] = 0.0
# inverse fourier transform to reconstruct the filtered data
filt_data = np.real(ifft(fft_new))
# derivative of y in frequency spectrum
yd_fft = 1.0j*nwaves_2pi*fft_vals
yd_recon = np.real(ifft(yd_fft))
# this is the power spectra
ps = 2.0*np.abs(fft_vals/n)**2.0
# power by variance
pow_var = ps/var_y*100.0
# freq.power spectra - for variance preserving form
fps = ps*freqs
#print(fft_vals)
#print(np.abs(fft_vals*2.0/n))
print(np.sum(ps[mask]))
plt.figure(1)
plt.title('Original Signal')
plt.plot(x, y, color='xkcd:salmon', label='original')
plt.legend()
plt.figure(2)
plt.plot(nwaves[mask], ps[mask], label='wavenumber vs spectra')
plt.title('Power Spectrum Example - wavenumber vs spectra')
plt.legend()
plt.figure(3)
plt.title('Data Filtering example')
plt.plot(x, act, color='black', label='theoretical')
plt.plot(x, filt_data, color='cyan', label='via fourier filtering')
plt.legend()
plt.figure(4)
plt.title('Derivative of the signal')
plt.plot(x, yd_true, color='black', label='theoretical')
plt.plot(x, yd_recon, color='cyan', label='via spectral method')
plt.legend()
plt.show()
|
arunprasaad2711/Python_IISC_SIAM_2017
|
Programs_Session3/06_FFT_IFFT_example.py
|
06_FFT_IFFT_example.py
|
py
| 2,057 |
python
|
en
|
code
| 8 |
github-code
|
6
|
37555045718
|
# Low-Dose CT with a Residual Encoder-Decoder Convolutional Neural Network (RED-CNN)
# https://arxiv.org/ftp/arxiv/papers/1702/1702.00288.pdf
# reference https://github.com/SSinyu/RED-CNN
import os
import numpy as np
import torch.nn as nn
from model import common
def make_model(args, parent=False):
return REDCNN(args)
class REDCNN(nn.Module):
def __init__(self, args):
super(REDCNN, self).__init__()
self.args = args
out_ch = args.n_feats
in_ch = args.n_colors
self.conv1 = nn.Conv2d(in_ch, out_ch, kernel_size=5, stride=1, padding=0)
self.conv2 = nn.Conv2d(out_ch, out_ch, kernel_size=5, stride=1, padding=0)
self.conv3 = nn.Conv2d(out_ch, out_ch, kernel_size=5, stride=1, padding=0)
self.conv4 = nn.Conv2d(out_ch, out_ch, kernel_size=5, stride=1, padding=0)
self.conv5 = nn.Conv2d(out_ch, out_ch, kernel_size=5, stride=1, padding=0)
self.tconv1 = nn.ConvTranspose2d(out_ch, out_ch, kernel_size=5, stride=1, padding=0)
self.tconv2 = nn.ConvTranspose2d(out_ch, out_ch, kernel_size=5, stride=1, padding=0)
self.tconv3 = nn.ConvTranspose2d(out_ch, out_ch, kernel_size=5, stride=1, padding=0)
self.tconv4 = nn.ConvTranspose2d(out_ch, out_ch, kernel_size=5, stride=1, padding=0)
self.tconv5 = nn.ConvTranspose2d(out_ch, in_ch, kernel_size=5, stride=1, padding=0)
self.relu = nn.ReLU()
def forward(self, x):
# encoder
residual_1 = x
out = self.relu(self.conv1(x))
out = self.relu(self.conv2(out))
residual_2 = out
out = self.relu(self.conv3(out))
out = self.relu(self.conv4(out))
residual_3 = out
out = self.relu(self.conv5(out))
# decoder
out = self.tconv1(out)
out += residual_3
out = self.tconv2(self.relu(out))
out = self.tconv3(self.relu(out))
out += residual_2
out = self.tconv4(self.relu(out))
out = self.tconv5(self.relu(out))
out += residual_1
out = self.relu(out)
return out
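# --- Minimal smoke test (illustrative only; the n_feats/n_colors values below are
# assumptions, not the repository's actual training configuration, and the
# `from model import common` import at the top must resolve, e.g. run it as a
# module from inside the repository) ---
if __name__ == "__main__":
    import argparse
    import torch

    demo_args = argparse.Namespace(n_feats=96, n_colors=1)
    net = REDCNN(demo_args)
    patch = torch.randn(1, 1, 64, 64)  # one single-channel 64x64 patch
    print(net(patch).shape)            # expected: torch.Size([1, 1, 64, 64])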
|
stefenmax/pytorch-template-medical-image-restoration
|
src-v3/model/redcnn.py
|
redcnn.py
|
py
| 2,084 |
python
|
en
|
code
| 6 |
github-code
|
6
|
39868308641
|
from django.db import models
from main.model.playlist import Playlist
from main.model.track import Track
class PlaylistTracks(models.Model):
playlist = models.ForeignKey(
Playlist, on_delete=models.CASCADE
) # deleting a playlist also removes its rows from this cross table
track = models.ForeignKey(
Track, on_delete=models.CASCADE
) # deleting a track also removes its rows from this cross table
track_order = models.PositiveIntegerField(default=0)
def __str__(self):
return "%s.%02d~ %s // %s / %s -- %s -- " % (
self.track.pgm.num,
self.track.pos,
self.track.pgm.name,
self.track.artist,
self.track.album,
self.track.title,
) + "%02d:%02d" % divmod(self.track.duration, 60)
class Meta:
ordering = ["track_order"]
verbose_name_plural = "Треки"
verbose_name = "Трек"
|
artemgv/spacemusic
|
app/main/model/playlisttracks.py
|
playlisttracks.py
|
py
| 992 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39697377859
|
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
# index boundaries for time 3D plot
nStart = 140000
nEnd = 160000
with open("time_series_stochastic_old.txt", "r") as file:
lines = file.readlines()
time = []
intensity = []
E_real = []
E_imag = []
for line in lines:
time.append(float((line.split(' ')[0])))
intensity.append(float((line.split(' ')[1])))
E_real.append(float((line.split(' ')[2])))
E_imag.append(float((line.split(' ')[3])))
time = np.array(time)
intensity = np.array(intensity)
E_real = np.array(E_real)
E_imag = np.array(E_imag)
fig, ax = plt.subplots()
fig.set_size_inches(5.9, 4.8)
fig.subplots_adjust(top=0.99, bottom=0.15, left=0.16, right=0.95)
ax.plot(time, intensity, color="darkblue")
ax.set_xlabel(r"time $t$ / ns", fontsize=18.0)
ax.set_ylabel(r"intensity $|E|^2$", fontsize=18.0)
ax.set_xlim(140.0, 160.0)
ax.set_ylim(0.45, 2.05)
ax.set_yticks([0.5, 1.0, 1.5, 2.0])
ax.tick_params(axis="x", labelsize=18.0)
ax.tick_params(axis="y", labelsize=18.0)
ax.grid(color="lightgray")
fig, ax = plt.subplots()
fig.set_size_inches(5.9, 4.8)
plt.rcParams.update({"font.size": 18})
plt.subplots_adjust(top=1.06, bottom=0.05, left=-0.09, right=0.96)
ax = plt.axes(projection="3d")
ax.plot3D(time[nStart:nEnd], E_real[nStart:nEnd], E_imag[nStart:nEnd], color="darkblue")
ax.set_xlabel(r"time $t$ / ns")
ax.set_ylabel(r"Re($E$)")
ax.set_zlabel(r"Im($E$)")
ax.xaxis.labelpad=16
ax.yaxis.labelpad=11
ax.zaxis.labelpad=8
ax.set_xlim(140, 160.0)
ax.set_ylim(-1.5, 1.5)
ax.set_zlim(-1.1, 1.1)
ax.set_xticks([140.0, 145.0, 150.0, 155.0, 160.0])
ax.set_yticks([-1.0, 0.0, 1.0])
ax.set_zticks([-1.0, 0.0, 1.0])
plt.show()
|
sir-aak/microscopically-derived-rate-equations
|
plotscripts/mdre_plotscript_spiking_detail.py
|
mdre_plotscript_spiking_detail.py
|
py
| 1,701 |
python
|
en
|
code
| 1 |
github-code
|
6
|
72357813309
|
# ==============================================================================
# Main runner entry point for the project
# Implemented using SAGE math library
#
# Author: Malo RANZETTI
# Date: Spring 2023
# ==============================================================================
import os
import msidh
import sidh
import sage.all as sage
import time
import argparse
import numpy as np
parser = argparse.ArgumentParser(
prog='M-SIDH Demo Runner',
description='This program is a demo built using SAGE of proposed countermeasures to the SIDH scheme.',
epilog='Written by M.Ranzetti')
def test_SIDH(curve, n_rounds=10):
# ==============================================================================
# TEST SIDH
# NIST-LEVEL 5 (AES-256 security) 21s for the runtime
# ==============================================================================
print("Testing SIDH protocol...")
scheme = sidh.create_protocol(sidh.get_curve(curve))
results = []
for i in range(n_rounds):
print(f"Round {i+1}/{n_rounds}")
results.append(scheme.run())
print(f"Average time: {sum([r[1]for r in results])/n_rounds * 1e-9}s")
print(f"Failure count: {sum([1 for r in results if not r[0]])}")
average_time = sum([r[1]for r in results])/n_rounds * 1e-9
std = np.std([r[1]for r in results])
failure_count = sum([1 for r in results if not r[0]])
data = {
'settings': curve,
'average_time': average_time,
'std': std,
'failure_count': failure_count
}
return data
def test_MSIDH(filename, n_rounds=10):
# ==============================================================================
# TEST MSIDH
# Current maximum tested: t = 90 // 100 // 200
# GOAL -> t = 572 for AES-128 security
# Settings generation: 32.8s // 294.6s // 194.4s
# Protocol execution: 5.3s // 35.0s // 320.9s
# Currently the biggest bottlenecks are:
# - prime verification in EllipticCurve (OVERRIDDEN IN SAGE SOURCE CODE)
# - computing the generators of the curve (=> there might be a way to optimize this)
#
# ==============================================================================
print("Testing MSIDH protocol...")
scheme = msidh.create_protocol_from_file(filename)
results = []
for i in range(n_rounds):
print(f"Round {i+1}/{n_rounds}")
results.append(scheme.run())
print(f"Average time: {sum([r[1]for r in results])/n_rounds * 1e-9}s")
print(f"Failure count: {sum([1 for r in results if not r[0]])}")
average_time = sum([r[1]for r in results])/n_rounds * 1e-9
std = np.std([r[1]for r in results])
failure_count = sum([1 for r in results if not r[0]])
data = {
'settings': filename.split('AES-')[1].split('.')[0],
'average_time': average_time,
'std': std,
'failure_count': failure_count
}
return data
def gen_MSIDH128():
msidh.create_g128_protocol()
def create_msidh(lam):
msidh.create_protocol(msidh.MSIDHpArbitrary, lam)
def output_data(filename, data):
'''
Write the data given as an array into csv format
data: dict of the form {name: [data list]}
'''
# check if file exists
if not os.path.exists(filename):
with open(filename, 'w') as f:
f.write(','.join(data.keys()) + '\n')
with open(filename, 'a') as f:
# add a line
f.write(','.join([str(v) for v in data.values()]) + '\n')
print(f"Data written to {filename}")
if __name__ == "__main__":
parser.add_argument('-t', '--test', type=str, choices=['sidh', 'msidh'], help='Test to run (sidh, msidh)')
parser.add_argument('-c', '--curve', type=str, choices=list(sidh.available_curves.keys()) ,help='Curve to use for SIDH')
parser.add_argument('-f', '--file', type=str, help='File to use for MSIDH parameters')
parser.add_argument('-r', '--rounds', type=int, default=10, help='Number of rounds to run tests for')
parser.add_argument('-g', '--gen', type=int, help='generate MSIDH parameters for a given security level')
parser.add_argument('-g128', '--gen128', action='store_true', help='generate MSIDH-128 parameters')
args = parser.parse_args()
if args.gen:
create_msidh(args.gen)
elif args.gen128:
gen_MSIDH128()
elif args.test == 'sidh':
if not args.curve:
print("Please provide a curve to use for SIDH using -c")
exit(1)
data = test_SIDH(args.curve, args.rounds)
output_data("sidh_results.csv", data)
elif args.test == 'msidh':
if not args.file:
print("Please provide a file to use for MSIDH using -f")
print("You can generate a file using -g <security level>")
exit(1)
data = test_MSIDH(args.file, args.rounds)
output_data("msidh_results.csv", data)
else:
print("Invalid arguments, use -h for help")
exit(1)
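# --- Example invocations (illustrative; curve names come from sidh.available_curves and
# MSIDH parameter files are produced by the -g option, so the values below are placeholders).
# The script imports sage.all, so run it with a SAGE-enabled Python interpreter:
#
#     python run.py -g 128                        # generate MSIDH parameters for lambda = 128
#     python run.py -t msidh -f <param file> -r 5
#     python run.py -t sidh -c <curve name> -r 10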
|
mrztti/M-SIDH
|
run.py
|
run.py
|
py
| 5,043 |
python
|
en
|
code
| 1 |
github-code
|
6
|
25602972286
|
def sums(items):
    # Track every reachable subset sum; the dict is used purely as a set.
    luvut = {0: 1}
x = 0
for i in items:
new = {}
for j in luvut:
summa = i + j
new.update({j: 1})
if summa not in luvut:
new.update({summa: 1})
x += 1
luvut = new
return x
# Juho Heiskanen's solution:
"""
def sums(items):
seen_sums = set()
for x in items:
for u in list(seen_sums):
seen_sums.add(x + u)
seen_sums.add(x)
return len(seen_sums) """
# This is where I learned that sets exist :)
# My slower list-based solution:
"""
def sums(items):
items.sort()
luvut = [0]
x = 0
for i in items:
for j in range(len(luvut)):
summa = i + luvut[j]
if summa not in luvut:
luvut.append(summa)
x += 1
return x """
if __name__ == "__main__":
print(sums([1, 2, 3])) # 6
print(sums([2, 2, 3])) # 5
print(sums([1, 3, 5, 1, 3, 5])) # 18
print(sums([1, 15, 5, 23, 100, 55, 2])) # 121
|
Noppacase22/DSA-2022
|
sums.py
|
sums.py
|
py
| 1,076 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43168508245
|
import asyncio
import json
import logging
import typing
from pathlib import Path
import discord
from discord.ext import commands
from fuzzywuzzy import process
from roycemorebot.constants import (
Categories,
Channels,
Emoji,
Guild,
MOD_ROLES,
StaffRoles,
)
log = logging.getLogger(__name__)
class Subscriptions(commands.Cog):
"""User-assigned subscriptions to select announcements."""
def __init__(self, bot: commands.Bot):
self.bot = bot
self._announcement_roles = self.load_announcement_roles()
@commands.Cog.listener()
async def on_ready(self) -> None:
"""Load the announcement roles, but only once guilds are available."""
if self._announcement_roles != {}:
return
log.info("No announcement roles found, requesting to reload")
mod_bot_channel = self.bot.get_channel(Channels.mod_bot_commands)
guild = discord.utils.get(self.bot.guilds, id=Guild.guild_id)
mod_role = discord.utils.get(guild.roles, id=StaffRoles.mod_role)
msg = await mod_bot_channel.send(
f"{mod_role.mention}\nNo announcement roles are loaded. Reload?"
)
await msg.add_reaction("✅")
await msg.add_reaction("❌")
try:
reaction, user = await self.bot.wait_for(
"reaction_add",
timeout=300.0,
check=lambda r, u: str(r.emoji) in ["✅", "❌"]
and r.message == msg
and not u.bot,
)
except asyncio.TimeoutError:
log.info("Reload timed out")
await mod_bot_channel.send(
"Announcement role reload timeout. Use `?subscriptions reload` "
+ "to reload the announcement roles."
)
else:
if str(reaction.emoji) == "✅":
log.info(f"Announcement role reload started by {user}")
self._announcement_roles = self.reload_announcement_roles()
await mod_bot_channel.send(
f"{Emoji.ok} Successfully reloaded announcement roles!"
)
else:
log.info(f"Announcement role reload canceled by {user}")
await mod_bot_channel.send(
f"{Emoji.no} Announcement role reload canceled. Use "
+ "`?subscriptions reload` to reload the announcement roles."
)
@staticmethod
def load_announcement_roles() -> "dict[str, dict[str, typing.Union[int, bool]]]":
"""Load all the announcement roles from the save file."""
save_file = Path("data", "announcement_roles.json")
if save_file.is_file():
log.info("Loaded announcement roles from save file")
with save_file.open("r") as f:
roles = json.load(f)
log.trace(f"File contents: {roles}")
return roles
else:
return {} # Checked later in `on_ready` and loaded from guild.
def reload_announcement_roles(
self,
) -> "dict[str, dict[str, typing.Union[int, bool]]]":
"""Reload the list of all the announcement roles in the current guild."""
announcement_roles = {}
guild = discord.utils.get(self.bot.guilds, id=Guild.guild_id)
clubs_category = discord.utils.get(guild.categories, id=Categories.clubs)
log.trace("Starting role reload.")
# Get server and event announcements separately
announcement_roles["server"] = {
"id": discord.utils.get(guild.roles, name="Server Announcements").id,
"club": False,
}
announcement_roles["event"] = {
"id": discord.utils.get(guild.roles, name="Event Announcements").id,
"club": False,
}
for channel in clubs_category.channels:
announcement_role = discord.utils.find(
lambda role: "Announcements" in role.name
and role.name.lower().startswith(channel.name)
and role.name.index(" ") == len(channel.name), # prevents overlap
guild.roles,
)
log.trace(f"Channel: {channel.name}, role: {announcement_role}")
announcement_roles[channel.name] = {
"id": announcement_role.id,
"club": "club" in announcement_role.name.lower(),
}
log.trace("Saving announcement roles.")
save_file = Path("data", "announcement_roles.json")
save_file.parent.mkdir(exist_ok=True)
with save_file.open("w") as f:
json.dump(announcement_roles, f, indent=2)
log.info("Announcement role reload finished")
return announcement_roles
@commands.guild_only()
@commands.command(aliases=("sub",))
async def subscribe(self, ctx: commands.Context, announcement_name: str) -> None:
"""Subscribe to an announcement role on the server."""
all_roles = list(self._announcement_roles.keys())
log.trace(f"All roles: {all_roles}")
match_info = process.extractOne(
announcement_name,
all_roles,
score_cutoff=75,
)
log.trace(f"Match info: {match_info}")
author_ping = ctx.author.mention
if match_info:
role = discord.utils.get(
ctx.guild.roles, id=self._announcement_roles[match_info[0]]["id"]
)
log.trace(f"Matched role `{role}` with probability {match_info[1]}")
await ctx.author.add_roles(
role,
reason="User announcements subscription",
)
log.info(f"User {ctx.author} subscribed to {role}")
if ctx.message.channel.id == Channels.roles:
await ctx.send(
f"{author_ping}, you have successfully subscribed to {role}.",
delete_after=5.0,
)
await asyncio.sleep(5.0)
await ctx.message.delete()
else:
await ctx.send(
f"{author_ping}, you have successfully subscribed to {role}.",
)
else:
if ctx.message.channel.id == Channels.roles:
await ctx.send(
f"{author_ping}, there are no announcement roles with that name.",
delete_after=5.0,
)
await asyncio.sleep(5.0)
await ctx.message.delete()
else:
await ctx.send(
f"{author_ping}, there are no announcement roles with that name."
)
@commands.guild_only()
@commands.command(aliases=("unsub",))
async def unsubscribe(self, ctx: commands.Context, announcement_name: str) -> None:
"""Unsubscribe to an announcement role on the server."""
all_roles = list(self._announcement_roles.keys())
log.trace(f"All roles: {all_roles}")
match_info = process.extractOne(
announcement_name,
all_roles,
score_cutoff=75,
)
log.trace(f"Match info: {match_info}")
author_ping = ctx.author.mention
if match_info:
role = discord.utils.get(
ctx.guild.roles, id=self._announcement_roles[match_info[0]]["id"]
)
log.trace(f"Matched role `{role}` with probability {match_info[1]}")
await ctx.author.remove_roles(
role,
reason="User announcements unsubscription",
)
log.info(f"User {ctx.author} unsubscribed from {role}")
if ctx.message.channel.id == Channels.roles:
await ctx.send(
f"{author_ping}, you have successfully unsubscribed from {role}.",
delete_after=5.0,
)
await asyncio.sleep(5.0)
await ctx.message.delete()
else:
await ctx.send(
f"{author_ping}, you have successfully unsubscribed from {role}.",
)
else:
if ctx.message.channel.id == Channels.roles:
await ctx.send(
f"{author_ping}, there are no announcement roles with that name.",
delete_after=5.0,
)
await asyncio.sleep(5.0)
await ctx.message.delete()
else:
await ctx.send(
f"{author_ping}, there are no announcement roles with that name."
)
@commands.guild_only()
@commands.group(
name="subscriptions", aliases=("subs",), invoke_without_command=True
)
async def subscriptions_group(self, ctx: commands.Context) -> None:
"""Commands group for managing announcement subscriptions."""
await ctx.send_help(ctx.command)
@commands.guild_only()
@subscriptions_group.command(name="list", aliases=("l", "ls"))
async def list_subscriptions(self, ctx: commands.Context) -> None:
"""List all possible announcement subscriptions and their corresponding commands.""" # noqa: B950
embed = discord.Embed(
title="Announcement Subscriptions",
description="Here are all the possible announcement subscriptions and "
+ "their commands.",
color=discord.Colour.green(),
)
all_subs = list(self._announcement_roles.keys())
for subscription in all_subs:
club = self._announcement_roles[subscription]["club"]
embed.add_field(
name=f"{subscription.title()}{' Club' if club else ''} Announcements",
value=f"`?subscribe {subscription}`",
inline=True,
)
if ctx.channel.id == Channels.roles:
await ctx.send(
f"{ctx.author.mention}, please use a bot channel to run that command.",
delete_after=5.0,
)
await asyncio.sleep(5.0)
await ctx.message.delete()
else:
await ctx.send(embed=embed)
@commands.guild_only()
@commands.has_any_role(*MOD_ROLES)
@subscriptions_group.command(aliases=("r",))
async def reload(self, ctx: commands.Context) -> None:
"""Reload the announcement roles save."""
self._announcement_roles = self.reload_announcement_roles()
await ctx.send(f"{Emoji.ok} Successfully reloaded announcement roles!")
@commands.guild_only()
@commands.has_role(StaffRoles.admin_role)
@subscriptions_group.command(name="add-club", aliases=("add", "ac", "a-c", "a"))
async def add_club(
self,
ctx: commands.Context,
channel_name: str,
leaders: commands.Greedy[discord.Member] = None,
club: bool = True,
*,
leader_title: typing.Optional[str] = "Leader",
) -> None:
"""Create a new club channel with corresponding roles and leaders (if given)."""
guild = ctx.guild
name = channel_name.replace(" ", "-").lower() # Discord-safe channel names
log.info(f"Creating a new club channel at the request of {ctx.author}")
leader_names = (
list(map(lambda l: l.name + "#" + l.discriminator, leaders))
if leaders
else None
)
log.info(
f"Name: {name}, leaders: {leader_names}, club: {club}, "
+ f"leader title: {leader_title}"
)
# Create the roles and assign them
leader_role = await guild.create_role(
name=f"{name.title()}{' Club' if club else ''} {leader_title}",
mentionable=True,
reason="Club creation",
)
ann_role = await guild.create_role(
name=f"{name.title()}{' Club' if club else ''} Announcements",
mentionable=True,
reason="Club creation",
)
log.trace(f"Created {leader_role} and {ann_role} role")
if leaders:
for leader in leaders:
await leader.add_roles(leader_role, reason="Club creation")
log.trace("Assigned leaders their roles")
# Create the channel
clubs_category = discord.utils.get(guild.categories, id=Categories.clubs)
channel = await clubs_category.create_text_channel(
name,
overwrites={
discord.utils.get(
guild.roles, id=StaffRoles.mod_role
): discord.PermissionOverwrite(view_channel=True, send_messages=True),
discord.utils.get(
guild.roles, id=StaffRoles.muted_role
): discord.PermissionOverwrite(send_messages=False),
leader_role: discord.PermissionOverwrite(
view_channel=True,
manage_channels=True,
manage_permissions=True,
send_messages=True,
manage_messages=True,
),
},
reason="Club creation",
)
position = sorted(
clubs_category.text_channels, key=lambda channel: channel.name
).index(channel)
log.trace(f"Channel index: {position}")
await channel.edit(position=position, reason="Club creation")
log.trace(f"Created channel {channel} and moved to postition {position}")
# Load new announcement roles
log.info(
"Reloading announcement roles because of new announcement channel "
+ channel_name
)
self._announcement_roles = self.reload_announcement_roles()
# Completion message
await ctx.send(f"{Emoji.ok} Successfully added club channel!")
@commands.guild_only()
@commands.has_role(StaffRoles.admin_role)
@subscriptions_group.command(
name="remove-club", aliases=("remove", "rm-c", "rmc", "rm")
)
async def remove_club(
self, ctx: commands.Context, club_channel: discord.TextChannel
) -> None:
"""Delete a club channel and roles."""
log.info(
f"Deleteing club channel {club_channel} and roles at the request of "
+ f"{ctx.author}"
)
ann_role = discord.utils.get(
ctx.guild.roles, id=self._announcement_roles[club_channel.name]["id"]
)
await ann_role.delete(reason="Removing club from server")
log.trace("Deleted announcement role")
leader_role = discord.utils.find(
lambda role: role.name.lower().startswith(club_channel.name),
ctx.guild.roles,
)
await leader_role.delete(reason="Removing club from server")
log.trace("Deleted leader role")
await club_channel.delete(reason="Removing club from server")
log.trace("Deleted channel")
self._announcement_roles = self.reload_announcement_roles()
# Completion message
await ctx.send(f"{Emoji.ok} Successfully removed club channel!")
def setup(bot: commands.Bot) -> None:
"""Add the Subscriptions cog to the bot."""
bot.add_cog(Subscriptions(bot))
|
egelja/roycemorebot
|
roycemorebot/exts/subscriptions.py
|
subscriptions.py
|
py
| 15,209 |
python
|
en
|
code
| 1 |
github-code
|
6
|
24522571380
|
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
# Client definition
class Client:
def __init__(self, model, train_loader, test_loader, lr=0.1):
self.model = model
self.train_loader = train_loader
self.test_loader = test_loader
self.optimizer = optim.SGD(self.model.parameters(), lr=lr)
def train(self, epochs=1):
self.model.train()
for epoch in range(epochs):
for data, target in self.train_loader:
self.optimizer.zero_grad()
output = self.model(data)
loss = nn.functional.cross_entropy(output, target)
loss.backward()
self.optimizer.step()
def test(self):
self.model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in self.test_loader:
output = self.model(data)
test_loss += nn.functional.cross_entropy(output, target, reduction='sum').item()
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(self.test_loader.dataset)
accuracy = 100. * correct / len(self.test_loader.dataset)
return test_loss, accuracy
def get_topk_grads(self, k=0.01):
    # Keep only the largest-magnitude fraction k of each parameter's gradient
    # entries and return them as one flattened vector.
    grads = []
    for param in self.model.parameters():
        if param.grad is not None:
            k_ = max(1, int(param.grad.numel() * k))
            topk_values, _ = torch.topk(param.grad.abs().view(-1), k_)
            mask = torch.zeros_like(param.grad)
            mask[param.grad.abs() >= topk_values[-1]] = 1
            grads.append((mask * param.grad).view(-1))
    return torch.cat(grads)
# Server definition
class Server:
def __init__(self, clients):
self.clients = clients
def aggregate(self):
grads = None
num_grads = 0
for client in self.clients:
client_grads = client.get_topk_grads()
if grads is None:
grads = client_grads
else:
grads += client_grads
num_grads += 1
grads /= num_grads
return grads
# Main entry point
def main():
# Load the MNIST dataset
train_dataset = datasets.MNIST('../data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
]))
test_dataset = datasets.MNIST('../data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
]))
train_loaders = [torch.utils.data.DataLoader(train_dataset, batch_size=64, shuffle=True) for _ in range(10)]
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1000, shuffle=True)
# Initialize the clients and the server
clients = [Client(nn.Sequential(nn.Flatten(), nn.Linear(784, 128), nn.ReLU(), nn.Linear(128, 10)), train_loader, test_loader) for train_loader in train_loaders]  # Flatten added so 28x28 MNIST batches can feed the 784-unit linear layer
server = Server(clients)
# Training and aggregation
num_rounds = 10
criterion = nn.CrossEntropyLoss()
for round in range(num_rounds):
    print(f"\nRound {round + 1}")
    # Each client trains locally, then computes fresh gradients on one batch
    for client in clients:
        client.train()
        client.optimizer.zero_grad()
        # One batch of the client's local data
        inputs, labels = next(iter(client.train_loader))
        # Forward pass
        outputs = client.model(inputs)
        # Local loss
        loss = criterion(outputs, labels)
        # Backward pass
        loss.backward()
    # The server collects and averages the sparsified (top-k) gradients
    # (a sketch of applying them to a shared global model follows below)
    aggregated_grads = server.aggregate()
    # Evaluate and print each client's test loss
    for client in clients:
        test_loss, accuracy = client.test()
        print(f"Client test loss: {test_loss:.4f}, accuracy: {accuracy:.2f}%")
|
huguangs/NIID-Bench-main-master
|
top-k/main.py
|
main.py
|
py
| 4,358 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22868199973
|
from global_function import data_exist, rindex
import pickle
def Filter_Redundancy(type, query, replace_space, replace_percentage, replace_apostrophe, replace_plus):
with open('Resources/redundancy/media search redundancy filter.pck', 'rb') as file:
media_search_redundancy = pickle.load(file)
for i in range(len(media_search_redundancy)):
media_search_redundancy[i][0] = media_search_redundancy[i][0].replace('__social_media_name__', type)
media_search_redundancy.sort(key=lambda x : len(x[0]), reverse=True)
command = ''
for redundancy in media_search_redundancy:
if data_exist([redundancy[0]], query, 2):
question_word = redundancy[0].split()
command_word = query.split()
for x in redundancy[1]:
if x == len(question_word) - 1:
pos = command_word.index(question_word[x])
command = ' '.join(command_word[pos + 1:])
else:
pos = command_word.index(question_word[x])
pos1 = rindex(command_word, question_word[x + 1])
command = ' '.join(command_word[pos + 1: pos1])
if command:
break
if command:
query = command
break
if replace_percentage:
query = query.replace('%', '%25')
if replace_apostrophe:
query = query.replace("'", '%27')
if replace_plus:
query = query.replace('+', '%2B')
query = query.replace(' ', replace_space)
return query
def Get_Url(type, query, media):
url = ''
replace_space = ''
replace_percentage = False
replace_apostrophe = False
replace_plus = False
for x in media:
if type == x['tag']:
url = x['url']
replace_space = x['replace space']
replace_percentage = x['replace %']
replace_apostrophe = x["replace '"]
replace_plus = x['replace plus']
break
url += Filter_Redundancy(type, query, replace_space, replace_percentage, replace_apostrophe, replace_plus)
return url
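# --- Usage sketch (the media configuration below is hypothetical; the real entries
# are loaded elsewhere in the project, and Filter_Redundancy also needs the pickle
# file under Resources/redundancy to exist) ---
#
#     example_media = [{
#         'tag': 'youtube',
#         'url': 'https://www.youtube.com/results?search_query=',
#         'replace space': '+',
#         'replace %': True,
#         "replace '": False,
#         'replace plus': True,
#     }]
#     print(Get_Url('youtube', 'lo-fi beats', example_media))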
|
TroySigX/smartbot
|
mediaSearch.py
|
mediaSearch.py
|
py
| 2,195 |
python
|
en
|
code
| 2 |
github-code
|
6
|
36813380552
|
"""empty message
Revision ID: 073719702e2e
Revises: 23ecd00cae18
Create Date: 2020-03-29 13:31:19.799319
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '073719702e2e'
down_revision = '23ecd00cae18'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('task_owners',
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('task_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['task_id'], ['tasks.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('user_id', 'task_id')
)
op.drop_table('tags')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('tags',
sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=False),
sa.Column('task_id', sa.INTEGER(), autoincrement=False, nullable=False),
sa.ForeignKeyConstraint(['task_id'], ['tasks.id'], name='tags_task_id_fkey'),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], name='tags_user_id_fkey'),
sa.PrimaryKeyConstraint('user_id', 'task_id', name='tags_pkey')
)
op.drop_table('task_owners')
# ### end Alembic commands ###
|
koiic/project-tracker
|
migrations/versions/073719702e2e_.py
|
073719702e2e_.py
|
py
| 1,333 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15412087448
|
from django.shortcuts import render
from django.http import HttpResponse
from django.utils.translation import get_language
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.decorators import login_required
from django.conf import settings
from . import Checksum
from paytm.models import PaytmHistory
# Create your views here.
@login_required
def home(request):
return HttpResponse("<html><a href='"+ settings.HOST_URL +"/paytm/payment'>PayNow</html>")
def payment(request):
MERCHANT_KEY = settings.PAYTM_MERCHANT_KEY
MERCHANT_ID = settings.PAYTM_MERCHANT_ID
get_lang = "/" + get_language() if get_language() else ''
CALLBACK_URL = settings.HOST_URL + get_lang + settings.PAYTM_CALLBACK_URL
# Generating unique temporary ids
order_id = Checksum.__id_generator__()
bill_amount = 100
if bill_amount:
data_dict = {
'MID':MERCHANT_ID,
'ORDER_ID':order_id,
'TXN_AMOUNT': bill_amount,
'CUST_ID':'[email protected]',
'INDUSTRY_TYPE_ID':'Retail',
'WEBSITE': settings.PAYTM_WEBSITE,
'CHANNEL_ID':'WEB',
#'CALLBACK_URL':CALLBACK_URL,
}
param_dict = data_dict
param_dict['CHECKSUMHASH'] = Checksum.generate_checksum(data_dict, MERCHANT_KEY)
return render(request,"payment.html",{'paytmdict':param_dict})
return HttpResponse("Bill Amount Could not find. ?bill_amount=10")
@csrf_exempt
def response(request):
if request.method == "POST":
MERCHANT_KEY = settings.PAYTM_MERCHANT_KEY
data_dict = {}
for key in request.POST:
data_dict[key] = request.POST[key]
verify = Checksum.verify_checksum(data_dict, MERCHANT_KEY, data_dict['CHECKSUMHASH'])
if verify:
PaytmHistory.objects.create(user=request.user, **data_dict)
return render(request,"response.html",{"paytm":data_dict})
else:
return HttpResponse("checksum verify failed")
return HttpResponse(status=200)
|
harishbisht/paytm-django
|
payments/paytm/views.py
|
views.py
|
py
| 2,130 |
python
|
en
|
code
| 31 |
github-code
|
6
|
75066284348
|
import torch
import logging
import pickle
from core.utils import scale_image, save_layer_image
from data import image_corruption
def evaluate(model, loader, args, perturbation=False, pSize=0, **kwargs):
objective_function= kwargs.get('objective_function', None)
device = kwargs['device']
if 'epoch' in kwargs:
epoch = kwargs['epoch']
ibatch = kwargs["ibatch"]
else:
epoch = 'Test'
ibatch = 0
validloss = 0
for i, (images, _) in enumerate(loader):
if i == 200:
break
corrupt_img, _ = image_corruption(images, args, perturbation=perturbation, pSize=pSize) #blurring, inpainting, noising
corrupt_img_scale = scale_image(corrupt_img, args.nBits)
restored_img, outsx, _ = model(corrupt_img_scale.to(device), objective_function=objective_function, noisyOuts=args.noisyOuts)
validloss += (torch.norm(restored_img.detach().cpu() - images, p='fro')**2).item() #MSE
if i == 0:
if epoch == 'Test':
with open(f'./results/{args.dataset}_{args.constrained}_{pSize}.pkl', 'wb') as ObjFile:
pickle.dump((images, corrupt_img, restored_img, epoch, ibatch), ObjFile)
save_layer_image(images, corrupt_img, outsx, epoch, ibatch, args, perturbation=False)
break
del images, corrupt_img, restored_img, outsx, corrupt_img_scale
torch.cuda.empty_cache()
validloss /= (len(loader)*args.batchSize)
logging.debug("Epoch {} - Batch {}, Loss {:.4f}".format(epoch, ibatch, validloss))
return validloss
|
SMRhadou/UnrolledGlow
|
core/evaluation.py
|
evaluation.py
|
py
| 1,612 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31512974964
|
import json
import requests
import constants
import tokens
def make_api_call(access_token, url, method, **kwargs):
response = method(
url=url,
headers={
"Accept": "application/json",
"Content-Type": "application/json",
"Authorization": f"Bearer {access_token}",
},
**kwargs,
)
if not response.ok:
raise RuntimeError(f"Error making request to {url}: ", response.content)
return response
def get_user_id_and_name_from_refresh_token(refresh_token):
access_token = tokens.get_access_token(refresh_token)
user_response = make_api_call(access_token, constants.API_URL + "/me", requests.get)
return user_response.json()["id"], user_response.json()["display_name"]
def make_new_playlist(user):
response = make_api_call(
user.access_token,
url=f"{constants.API_URL}/users/{user.user_id}/playlists",
method=requests.post,
data=json.dumps({"name": constants.DEFAULT_PLAYLIST_NAME, "description": f"Here are your most recently added songs from the last {user.recently_added_delta_days} days"})
)
return response.json()["id"]
|
rjshearme/spotify_recently_added_playlist
|
api.py
|
api.py
|
py
| 1,168 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35608959841
|
#!/usr/bin/env python
try:
import orz
except ImportError:
import os
import sys
curr_path = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(curr_path, r"D:\Working\OpenRoleZoo\OpenRoleZoo\python"))
import orz
import os
def timestamp(filename):
if not os.path.exists(filename):
return 0
statinfo = os.stat(filename)
return statinfo.st_mtime
if __name__ == '__main__':
input_dir = 'rawmd'
output_dir = 'stamd'
filenames = os.listdir(input_dir)
count_keep = 0
count_modify = 0
for filename in filenames:
if filename[-5:] != '.json':
continue
name, ext = os.path.splitext(filename)
filename_sta = name + '.sta'
input_filename = os.path.join(input_dir, filename)
output_filename = os.path.join(output_dir, filename_sta)
if timestamp(output_filename) < timestamp(input_filename):
print('Converting %s' % input_filename)
orz.json2sta(input_filename, output_filename)
count_modify += 1
else:
print('Keeping %s' % input_filename)
count_keep += 1
count_total = count_modify + count_keep
print("Total: %d. Modified: %d, kept: %d" % (count_total, count_modify, count_keep))
|
SeetaFace6Open/OpenRoleZoo
|
python/model2model2.py
|
model2model2.py
|
py
| 1,296 |
python
|
en
|
code
| 1 |
github-code
|
6
|
70680835069
|
from flask import Flask
from flask import render_template
import ffmpeg_streaming
from ffmpeg_streaming import Formats
import sys
import datetime
app = Flask(__name__)
@app.route("/")
def streaming():
return render_template('streaming.html')
@app.route('/video')
def video_server():
video = ffmpeg_streaming.input('pexels_video.mp4')
hls = video.hls(Formats.h264())
hls.auto_generate_representations()
hls.save_master_playlist('/var/media/hls.m3u8')
return hls.output('/var/media/hls.m3u8')
def monitor(ffmpeg, duration, time_, time_left, process):
per = round(time_ / duration * 100)
sys.stdout.write(
"\rTranscoding...(%s%%) %s left [%s%s]" %
(per, datetime.timedelta(seconds=int(time_left)), '#' * per, '-' * (100 - per))
)
sys.stdout.flush()
|
ifcassianasl/python_test_rtsp
|
main.py
|
main.py
|
py
| 764 |
python
|
en
|
code
| 1 |
github-code
|
6
|
21629194042
|
import tensorflow as tf
from data_structures import UNK_label, PAD_label
def TEEmbeddingLayer(size_TE_label_vocab, size_TE_label_embed, TE_label_set):
if TE_label_set == 'none':
raise NotImplementedError('Can\'t embed TE labels if there are no TE labels')
return tf.keras.layers.Embedding(size_TE_label_vocab, size_TE_label_embed, input_shape=(None,))
def get_TE_labels_for_doc(sentence_count, sorted_nodes, word_in_doc_to_tokens_map, TE_label_vocab,
TE_label_set, max_sequence_length):
# Start by assuming all tokens are padding
token_count = sentence_count * max_sequence_length
TE_labels = [PAD_label] * token_count
# For all actual words, use UNK_label
for word_start_index, word_end_index in word_in_doc_to_tokens_map:
for k in range(word_start_index, word_end_index + 1):
TE_labels[k] = UNK_label
# For all words which are nodes, use the TE_label of the node
if TE_label_set != 'none':
for node in sorted_nodes:
if node.start_word_index_in_doc >= 0:
# Node has real words (not a meta node or a padding node)
start_index = word_in_doc_to_tokens_map[node.start_word_index_in_doc][0]
end_index = word_in_doc_to_tokens_map[node.end_word_index_in_doc][1]
for k in range(start_index, end_index + 1):
if TE_label_set == 'timex_event':
TE_labels[k] = node.TE_label
else:
TE_labels[k] = node.full_label
TE_label_ids = []
for TE_label in TE_labels:
if TE_label in TE_label_vocab:
TE_label_ids.append(TE_label_vocab.get(TE_label))
else:
print('WARNING: Label {} not in TE_label_vocab, using {}'.format(TE_label, UNK_label))
TE_label_ids.append(TE_label_vocab.get(UNK_label))
return TE_label_ids
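# --- Minimal usage sketch (the vocabulary size and embedding dimension below are
# arbitrary illustrative values; running it requires the project's data_structures
# module to be importable) ---
if __name__ == '__main__':
    layer = TEEmbeddingLayer(size_TE_label_vocab=8, size_TE_label_embed=4,
                             TE_label_set='timex_event')
    demo_ids = tf.constant([[0, 1, 2, 3]])  # one sequence of 4 TE label ids
    print(layer(demo_ids).shape)            # (1, 4, 4)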
|
bnmin/tdp_ranking
|
frozen_bert/te_embedding.py
|
te_embedding.py
|
py
| 1,927 |
python
|
en
|
code
| 1 |
github-code
|
6
|
779248836
|
from dataclasses import dataclass
from typing import Annotated, List
from fastapi import Depends
from fastapi_pagination import Page
from fastapi_pagination.ext.sqlalchemy import paginate
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from config.db import get_db_session
from readconnect.books.domain.dtos.books_query_params import BooksQueryParams
from readconnect.books.domain.models.book_model import Book
from readconnect.books.infrastructure.db.entities.book_entity import BookEntity
@dataclass()
class BooksRepository:
db: Annotated[AsyncSession, Depends(get_db_session)]
async def create_many(self, new_books: List[Book]):
books = [BookEntity(**book) for book in new_books]
self.db.add_all(books)
await self.db.commit()
return books
async def find_by_id(self, book_id: str) -> BookEntity:
query = select(BookEntity).where(BookEntity.id == book_id)
result = await self.db.execute(query)
return result.scalar()
async def find(self, query: BooksQueryParams) -> Page[Book]:
q = select(
BookEntity.id,
BookEntity.title,
BookEntity.isbn,
BookEntity.long_description,
BookEntity.short_description,
BookEntity.published_date,
BookEntity.thumbnail_url,
BookEntity.page_count,
BookEntity.status,
)
if query.include_extra_data:
q = select(BookEntity).join(BookEntity.authors).join(BookEntity.categories)
return await paginate(self.db, q)
return await paginate(self.db, q)
async def search(self, query: BooksQueryParams) -> Page[Book]:
q = select(
BookEntity.id,
BookEntity.title,
BookEntity.isbn,
BookEntity.long_description,
BookEntity.short_description,
BookEntity.published_date,
BookEntity.thumbnail_url,
BookEntity.page_count,
BookEntity.status,
).filter(
BookEntity.title.icontains(query.search)
| BookEntity.isbn.icontains(query.search)
)
if query.include_extra_data:
q = (
select(BookEntity)
.filter(
BookEntity.title.icontains(query.search)
| BookEntity.isbn.icontains(query.search)
)
.join(BookEntity.authors)
.join(BookEntity.categories)
)
return await paginate(self.db, q)
return await paginate(self.db, q)
|
YeisonKirax/readconnect-back
|
src/readconnect/books/infrastructure/db/repository/books_repository.py
|
books_repository.py
|
py
| 2,625 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11232494681
|
from collections import defaultdict
ENTER = "Enter"
LEAVE = "Leave"
CHANGE = "Change"
ENTER_MESSAGE = "님이 들어왔습니다."
LEAVE_MESSAGE = "님이 나갔습니다."
class ChatRoom:
def __init__(self):
super().__init__()
def operation(result, command, chatRoom, nicknames, uid="", name=""):
if command == ENTER:
chatRoom.append(uid)
nicknames[uid] = name
result.append((uid, ENTER_MESSAGE))
return
elif command == LEAVE:
chatRoom.remove(uid)
result.append((uid, LEAVE_MESSAGE))
return
elif command == CHANGE:
nicknames[uid] = name
return
return
def solution(record):
answer = []
nicknames = defaultdict(str)
chatRoom = []
result = []
command, uid, name = "", "", ""
for r in record:
splited = r.split()
if len(splited) == 2:
command, uid = splited
else:
command, uid, name = splited
operation(result, command, chatRoom, nicknames, uid, name)
answer = list(map(lambda x: nicknames[x[0]]+x[1], result))
return answer
if __name__ == "__main__":
test = 1
if test == 1:
record = ["Enter uid1234 Muzi", "Enter uid4567 Prodo", "Leave uid1234", "Enter uid1234 Prodo", "Change uid4567 Ryan"]
print(solution(record))
|
gatherheart/Solved_PS
|
KAKAO/2019_KAKAO_1.py
|
2019_KAKAO_1.py
|
py
| 1,378 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70276262907
|
import torch
import time
import argparse
from importlib import import_module
import numpy as np
import utils
import train
parser = argparse.ArgumentParser(description='BertClassifier')
# parser.add_argument('--model', type=str, default='BertFc', help='choose a model')
# parser.add_argument('--model', type=str, default='BertCNN', help='choose a model')
# parser.add_argument('--model', type=str, default='BertRNN', help='choose a model')
# parser.add_argument('--model', type=str, default='BertDPCNN', help='choose a model')
# parser.add_argument('--model', type=str, default='ERNIE', help='choose a model')
parser.add_argument('--model', type=str, default='ERNIEDPCNN', help='choose a model')
args = parser.parse_args()
if __name__ == '__main__':
print(torch.__version__)
# Dataset path
dataset = 'THUCNews'
model_name = args.model
x = import_module(
'models.' + model_name) # <module 'models.BertFc' from '/home/hadoop/PycharmProjects/BertClassifier/models/BertFc.py'>
config = x.Config(dataset)
print(config.model_name)
# print(config)
np.random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed_all(4)
torch.backends.cudnn.deterministic = True  # make results reproducible across runs
# Load the dataset
start_time = time.time()
print('Loading dataset')
train_data,dev_data,test_data=utils.build_dataset(config)
train_iter=utils.build_iterator(train_data,config)
test_iter=utils.build_iterator(test_data,config)
dev_iter=utils.build_iterator(dev_data,config)
time_dif=utils.get_time_dif(start_time)
print("模型开始之前,准备数据时间:", time_dif)
# for i,(train,label) in enumerate(dev_iter):
# if (i%10==0):
# print(i,label) # dev contains 10000 items,10000/128=78.125,residue=True,79 batches,the batch 79st only has 16 items
# Model training, evaluation and testing
model=x.Model(config).to(config.device)
train.train(config,model,train_iter,dev_iter,test_iter)
|
Jacquelin803/Transformers
|
BertClassifier/main.py
|
main.py
|
py
| 2,007 |
python
|
en
|
code
| 1 |
github-code
|
6
|
5489764282
|
"""
Module that provides different readers for trajectory files.
It also provides a common interface layer between the file IO packages,
namely pygmx and mdanalysis, and mdevaluate.
"""
from .checksum import checksum
from .logging import logger
from . import atoms
from functools import lru_cache
from collections import namedtuple
import os
from os import path
from array import array
from zipfile import BadZipFile
import builtins
import warnings
import numpy as np
from scipy import sparse
from dask import delayed, __version__ as DASK_VERSION
try:
import pygmx
from pygmx.errors import InvalidMagicException, InvalidIndexException, FileTypeError
PYGMX_AVAILABLE = True
except ImportError:
PYGMX_AVAILABLE = False
try:
with warnings.catch_warnings():
warnings.simplefilter('ignore', category=UserWarning)
import MDAnalysis as mdanalysis
MADANALYSIS_AVAILABLE = True
except ImportError:
MADANALYSIS_AVAILABLE = False
assert PYGMX_AVAILABLE or MADANALYSIS_AVAILABLE, 'Could not import any file IO package; make sure to install either pygmx or mdanalysis.'
class NojumpError(Exception):
pass
class NoReaderAvailabelError(Exception):
pass
def open_with_mdanalysis(topology, trajectory, cached=False):
    """Open the topology and trajectory with mdanalysis."""
uni = mdanalysis.Universe(topology, trajectory, convert_units=False)
if cached is not False:
if cached is True:
maxsize = 128
else:
maxsize = cached
reader = CachedReader(uni.trajectory, maxsize)
else:
reader = BaseReader(uni.trajectory)
reader.universe = uni
atms = atoms.Atoms(
np.stack((uni.atoms.resids, uni.atoms.resnames, uni.atoms.names), axis=1),
charges=uni.atoms.charges, masses=uni.atoms.masses
).subset()
return atms, reader
def open_with_pygmx(topology, trajectory, cached=False, reindex=False,
ignore_index_timestamps=False, index_file=None):
"""Open a topology and trajectory with pygmx."""
try:
rd = pygmx.open(trajectory, ignore_index_timestamps=ignore_index_timestamps)
except InvalidMagicException:
raise InvalidIndexException('This is not a valid index file: {}'.format(trajectory))
except InvalidIndexException:
if reindex:
try:
os.remove(pygmx.index_filename_for_xtc(trajectory))
except FileNotFoundError:
pass
rd = pygmx.open(trajectory)
else:
            raise InvalidIndexException('Index file is invalid, use reindex=True to regenerate.')
if cached is not False:
if isinstance(cached, bool):
maxsize = 128
else:
maxsize = cached
reader = CachedReader(rd, maxsize)
else:
reader = BaseReader(rd)
if topology.endswith('.tpr'):
atms = atoms.from_tprfile(topology, index_file=index_file)
elif topology.endswith('.gro'):
atms = atoms.from_grofile(topology, index_file=index_file)
return atms, reader
def open(topology, trajectory, cached=False, index_file=None, reindex=False, ignore_index_timestamps=False):
"""
    Open a trajectory file with the appropriate reader.
Args:
        topology (str):
            Topology file (.tpr or .gro) from which the atom information is read.
        trajectory (str):
            Trajectory file to open; the reader is chosen according to the file extension.
cached (opt.):
If Reader should be cached with lru_cache. If this is True, maxsize for
the cache is 128, otherwise the argument is passed as maxsize.
Use cached=None to get an unbound cache.
reindex (opt.): Regenerate the index of the xtc-file
nojump (opt.): If nojump matrixes should be generated.
"""
if PYGMX_AVAILABLE and trajectory.endswith('.xtc') and topology.endswith(('.tpr', '.gro')):
return open_with_pygmx(topology, trajectory, cached=cached, reindex=reindex,
ignore_index_timestamps=ignore_index_timestamps, index_file=index_file)
elif MADANALYSIS_AVAILABLE:
return open_with_mdanalysis(topology, trajectory, cached)
else:
raise NoReaderAvailabelError('No reader package found, install pygmx or mdanalysis.')
def is_writeable(fname):
"""Test if a directory is actually writeable, by writing a temporary file."""
fdir = os.path.dirname(fname)
ftmp = os.path.join(fdir, str(np.random.randint(999999999)))
while os.path.exists(ftmp):
ftmp = os.path.join(fdir, str(np.random.randint(999999999)))
if os.access(fdir, os.W_OK):
try:
with builtins.open(ftmp, 'w'):
pass
os.remove(ftmp)
return True
except PermissionError:
pass
return False
def nojump_filename(reader):
directory, fname = path.split(reader.filename)
fname = path.join(directory, '.{}.nojump.npz'.format(fname))
if os.path.exists(fname) or is_writeable(directory):
return fname
else:
fname = os.path.join(
os.path.join(os.environ['HOME'], '.mdevaluate/nojump'),
directory.lstrip('/'),
'.{}.nojump.npz'.format(fname)
)
logger.info('Saving nojump to {}, since original location is not writeable.'.format(fname))
os.makedirs(os.path.dirname(fname), exist_ok=True)
return fname
CSR_ATTRS = ('data', 'indices', 'indptr')
NOJUMP_MAGIC = 2016
def parse_jumps(trajectory):
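    # Added explanatory comment: for every frame and every spatial axis, record which atoms
    # moved by roughly a whole box length relative to the previous frame, i.e. which atoms
    # crossed a periodic boundary; the sparse (frame, atom) -> jump-count data is collected
    # in jump_data and later turned into CSR matrices.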
prev = trajectory[0].whole
box = prev.box.diagonal()
SparseData = namedtuple('SparseData', ['data', 'row', 'col'])
jump_data = (
SparseData(data=array('b'), row=array('l'), col=array('l')),
SparseData(data=array('b'), row=array('l'), col=array('l')),
SparseData(data=array('b'), row=array('l'), col=array('l'))
)
for i, curr in enumerate(trajectory):
if i % 500 == 0:
logger.debug('Parse jumps Step: %d', i)
delta = ((curr - prev) / box).round().astype(np.int8)
prev = curr
for d in range(3):
col, = np.where(delta[:, d] != 0)
jump_data[d].col.extend(col)
jump_data[d].row.extend([i] * len(col))
jump_data[d].data.extend(delta[col, d])
return jump_data
def generate_nojump_matrixes(trajectory):
"""
Create the matrixes with pbc jumps for a trajectory.
"""
logger.info('generate Nojump Matrixes for: {}'.format(trajectory))
jump_data = parse_jumps(trajectory)
N = len(trajectory)
M = len(trajectory[0])
trajectory.frames.nojump_matrixes = tuple(
sparse.csr_matrix((np.array(m.data), (m.row, m.col)), shape=(N, M)) for m in jump_data
)
save_nojump_matrixes(trajectory.frames)
def save_nojump_matrixes(reader, matrixes=None):
if matrixes is None:
matrixes = reader.nojump_matrixes
data = {'checksum': checksum(NOJUMP_MAGIC, checksum(reader))}
for d, mat in enumerate(matrixes):
data['shape'] = mat.shape
for attr in CSR_ATTRS:
data['{}_{}'.format(attr, d)] = getattr(mat, attr)
np.savez(nojump_filename(reader), **data)
def load_nojump_matrixes(reader):
zipname = nojump_filename(reader)
try:
data = np.load(zipname)
except (AttributeError, BadZipFile, OSError):
        # npz-files can be corrupted, probably a bug for big arrays saved with savez_compressed?
logger.info('Removing zip-File: %s', zipname)
os.remove(nojump_filename(reader))
return
try:
if data['checksum'] == checksum(NOJUMP_MAGIC, checksum(reader)):
reader.nojump_matrixes = tuple(
sparse.csr_matrix(
tuple(data['{}_{}'.format(attr, d)] for attr in CSR_ATTRS),
shape=data['shape']
)
for d in range(3)
)
logger.info('Loaded Nojump Matrixes: {}'.format(nojump_filename(reader)))
else:
            logger.info('Invalid Nojump Data: {}'.format(nojump_filename(reader)))
except KeyError:
logger.info('Removing zip-File: %s', zipname)
os.remove(nojump_filename(reader))
return
def correct_nojump_matrixes_for_whole(trajectory):
reader = trajectory.frames
frame = trajectory[0]
box = frame.box.diagonal()
cor = ((frame - frame.whole) / box).round().astype(np.int8)
for d in range(3):
reader.nojump_matrixes[d][0] = cor[:, d]
save_nojump_matrixes(reader)
class BaseReader:
"""Base class for trajectory readers."""
@property
def filename(self):
return self.rd.filename
@property
def nojump_matrixes(self):
if self._nojump_matrixes is None:
raise NojumpError('Nojump Data not available: {}'.format(self.filename))
return self._nojump_matrixes
@nojump_matrixes.setter
def nojump_matrixes(self, mats):
self._nojump_matrixes = mats
def __init__(self, rd):
"""
Args:
filename: Trajectory file to open.
reindex (bool, opt.): If True, regenerate the index file if necessary.
"""
self.rd = rd
self._nojump_matrixes = None
if path.exists(nojump_filename(self)):
load_nojump_matrixes(self)
def __getitem__(self, item):
return self.rd[item]
def __len__(self):
return len(self.rd)
def __checksum__(self):
if hasattr(self.rd, 'cache'):
            # Has a pygmx reader
return checksum(self.filename, str(self.rd.cache))
elif hasattr(self.rd, '_xdr'):
# Has an mdanalysis reader
cache = array('L', self.rd._xdr.offsets.tobytes())
return checksum(self.filename, str(cache))
class CachedReader(BaseReader):
"""A reader that has a least-recently-used cache for frames."""
@property
def cache_info(self):
"""Get Information about the lru cache."""
return self._get_item.cache_info()
def clear_cache(self):
"""Clear the cache of the frames."""
self._get_item.cache_clear()
def __init__(self, rd, maxsize):
"""
Args:
filename (str): Trajectory file that will be opened.
maxsize: Maximum size of the lru_cache or None for infinite cache.
"""
super().__init__(rd)
self._get_item = lru_cache(maxsize=maxsize)(self._get_item)
def _get_item(self, item):
"""Buffer function for lru_cache, since __getitem__ can not be cached."""
return super().__getitem__(item)
def __getitem__(self, item):
return self._get_item(item)
if PYGMX_AVAILABLE:
    # only define the delayed xtc-frame helper when pygmx is importable
    if DASK_VERSION >= '0.15.0':
        read_xtcframe_delayed = delayed(pure=True, traverse=False)(pygmx.read_xtcframe)
    else:
        read_xtcframe_delayed = delayed(pure=True)(pygmx.read_xtcframe)
class DelayedReader(BaseReader):
@property
def filename(self):
if self.rd is not None:
return self.rd.filename
else:
return self._filename
def __init__(self, filename, reindex=False, ignore_index_timestamps=False):
super().__init__(filename, reindex=False, ignore_index_timestamps=False)
self.natoms = len(self.rd[0].coordinates)
self.cache = self.rd.cache
self._filename = self.rd.filename
self.rd = None
def __len__(self):
return len(self.cache)
def _get_item(self, frame):
return read_xtcframe_delayed(self.filename, self.cache[frame], self.natoms)
def __getitem__(self, frame):
return self._get_item(frame)
class EnergyReader:
"""A reader for Gromacs energy files."""
def __init__(self, edrfile):
"""
Args:
edrfile: Filename of the energy file
topology (opt.): Filename of the topology, speeds up file io since the length of the energy file is known
"""
edr = pygmx.open(edrfile)
self.time, data = edr.read()
self.types, self.units = zip(*edr.types)
self.data = data.T
def __getitem__(self, type):
"""
Get time series of an energy type.
"""
if type in self.types:
return self.data[self.types.index(type)]
else:
raise KeyError('Energy type {} not found in Energy File.'.format(type))
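# Hedged usage sketch (added for illustration; the file names are hypothetical and must be
# replaced with real GROMACS output): open() picks pygmx or mdanalysis automatically, and
# EnergyReader (which requires pygmx) exposes the energy terms of an .edr file by name.
if __name__ == '__main__':
    atoms_subset, reader = open('topol.tpr', 'traj.xtc', cached=True)
    print(len(reader), 'frames in', reader.filename)
    energy = EnergyReader('energy.edr')
    print(energy.types[:5])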
| mdevaluate/mdevaluate | mdevaluate/reader.py | reader.py | py | 12,347 | python | en | code | 5 | github-code | 6 | 73492056828 |
#Name: Ishika Soni
#Email: [email protected]
#Date: October 4, 2021
#This program asks the user for a 6-digit hex number and uses it as the hex code
#to stamp 4 turtles of that color into a square.
import turtle
mess = input("Please enter a 6-digit Hexadecimal number: ")
wn = turtle.Screen()
alex = turtle.Turtle()
alex.shape("turtle")
alex.color("#"+mess)
for i in range(4):
alex.stamp()
alex.left(90)
alex.forward(100)
| issoni/Short-Turtle-Graphics | 14colored-square.py | 14colored-square.py | py | 454 | python | en | code | 0 | github-code | 6 | 5446974377 |
import numpy as np
import scipy as sp
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import cm
def func2(x):
return np.round(np.random.random())
def func(x,y,z,r):
l = np.linalg.norm(np.array([x,y,z]))
if(l < r):
return 1.0
else:
return 0.0
def normalize_signal_1d(signal):
nsignal = np.zeros(signal.shape[0], dtype=complex)
s0 = signal.max()
for i in range(signal.shape[0]):
nsignal[i] = signal[i]/s0
return nsignal
def normalize_signal_2d(signal):
nsignal = np.zeros([signal.shape[0], signal.shape[1]], dtype=complex)
s0 = signal.max()
for i in range(signal.shape[0]):
for j in range(signal.shape[1]):
nsignal[i,j] = signal[i,j]/s0
return nsignal
def normalize_signal_3d(signal):
nsignal = np.zeros([signal.shape[0], signal.shape[1], signal.shape[2]], dtype=complex)
s0 = signal.max()
for k in range(signal.shape[2]):
for i in range(signal.shape[0]):
for j in range(signal.shape[1]):
nsignal[i,j,k] = signal[i,j,k]/s0
return nsignal
def plot_fft_1d_results(vecX, signal, vecK, dft, fft):
points = vecX.shape[0]
fig, axs = plt.subplots(6, 1)
axs[0].plot(vecX, signal,'o')
axs[1].plot(vecK, np.real(dft),'-')
axs[1].plot(vecK, np.imag(dft),'--')
axs[2].plot(vecK, np.real(fft),'-')
axs[2].plot(vecK, np.imag(fft),'--')
axs[3].plot(vecK, np.abs(dft),'-')
axs[4].plot(vecK, np.abs(fft),'-')
axs[5].plot(vecK, np.abs(dft),'-')
axs[5].plot(vecK, np.abs(fft),'--')
plt.show()
return
def plot_fft_2d_results(signal, dft, fft):
diff_abs = np.abs(dft)-np.abs(fft)
diff_real = np.real(dft)-np.real(fft)
diff_imag = np.imag(dft)-np.imag(fft)
cmap = cm.PRGn
cmap=cm.get_cmap(cmap)
points = signal.shape[0]
fig, axs = plt.subplots(3, 2)
im00 = axs[0,0].imshow(np.abs(signal), cmap=cmap)
im10 = axs[1,0].imshow(np.abs(dft), cmap=cmap)
im20 = axs[2,0].imshow(np.abs(fft), cmap=cmap)
im01 = axs[0,1].imshow(diff_real, cmap=cmap)
im11 = axs[1,1].imshow(diff_imag, cmap=cmap)
im21 = axs[2,1].imshow(diff_abs, cmap=cmap)
axs[0,0].set_title("signal")
axs[1,0].set_title("dft")
axs[2,0].set_title("fft")
axs[0,1].set_title("real(diff)")
axs[1,1].set_title("imag(diff)")
axs[2,1].set_title("abs(diff)")
fig.colorbar(im00, ax=axs[0,0])
fig.colorbar(im10, ax=axs[1,0])
fig.colorbar(im20, ax=axs[2,0])
fig.colorbar(im01, ax=axs[0,1])
fig.colorbar(im11, ax=axs[1,1])
fig.colorbar(im21, ax=axs[2,1])
fig.tight_layout()
plt.show()
return
def plot_fft_3d_results(signal, dft, fft, nimgs=1):
diff_abs = np.abs(dft)-np.abs(fft)
diff_real = np.real(dft)-np.real(fft)
diff_imag = np.imag(dft)-np.imag(fft)
cmap = cm.PRGn
cmap=cm.get_cmap(cmap)
points = signal.shape[0]
fig, axs = plt.subplots(nimgs, 6)
img_list = []
for im in range(nimgs):
im00 = axs[im,0].imshow(np.abs(signal[im]), cmap=cmap)
im10 = axs[im,1].imshow(np.abs(dft[im]), cmap=cmap)
im20 = axs[im,2].imshow(np.abs(fft[im]), cmap=cmap)
im01 = axs[im,3].imshow(diff_real[im], cmap=cmap)
im11 = axs[im,4].imshow(diff_imag[im], cmap=cmap)
im21 = axs[im,5].imshow(diff_abs[im], cmap=cmap)
fig.colorbar(im00, ax=axs[im,0])
fig.colorbar(im10, ax=axs[im,1])
fig.colorbar(im20, ax=axs[im,2])
fig.colorbar(im01, ax=axs[im,3])
fig.colorbar(im11, ax=axs[im,4])
fig.colorbar(im21, ax=axs[im,5])
axs[0,0].set_title("signal")
axs[0,1].set_title("dft")
axs[0,2].set_title("fft")
axs[0,3].set_title("real(diff)")
axs[0,4].set_title("imag(diff)")
axs[0,5].set_title("abs(diff)")
for im in range(nimgs):
for col in range(6):
axs[im,col].grid(False)
# Hide axes ticks
axs[im,col].set_xticks([])
axs[im,col].set_yticks([])
# fig.tight_layout()
plt.show()
return
def apply_dft_1d(signal, vecx, veck, length, points):
kspec = np.zeros(points, dtype=complex)
dX = length / (points)
for i in range(points):
gsum = 0.0
for rx in range(points):
gsum += dX * signal[rx] * np.exp((-1.0j) * veck[i] * vecx[rx])
kspec[i] = (1.0 / points) * gsum
return kspec
def apply_dft_2d(signal, vecx, vecy, veckx, vecky, area, points):
kspec = np.zeros([points, points], dtype=complex)
dA = area / (points**2)
for i in range(points):
for j in range(points):
gsum = 0.0
for ry in range(points):
for rx in range(points):
gsum += dA * signal[ry,rx] * np.exp((-1.0j) * (veckx[j] * vecx[rx] + vecky[i] * vecy[ry]))
kspec[i,j] = (1.0 / area) * gsum
return kspec
def apply_dft_3d(signal, vecx, vecy, vecz, veckx, vecky, veckz, volume, points):
kspec = np.zeros([points, points, points], dtype=complex)
dV = volume / (points**3)
elems = points**3
count = 0
for k in range(points):
for i in range(points):
for j in range(points):
count += 1
print(":: {} fourier coefficient out of {}.".format(count, elems))
gsum = 0.0
for rz in range(points):
for ry in range(points):
for rx in range(points):
gsum += dV * signal[ry,rx,rz] * np.exp((-1.0j) * (veckx[j] * vecx[rx] + vecky[i] * vecy[ry] + veckz[k] * vecz[rz]))
kspec[i,j, k] = (1.0 / volume) * gsum
return kspec
def apply_fft_1d(signal):
kspec = np.fft.fft(signal, norm='ortho')
kspec = np.fft.fftshift(kspec)
return kspec
def apply_fft_2d(signal):
kspec = np.fft.fft2(signal, norm='ortho')
kspec = np.fft.fftshift(kspec)
return kspec
def apply_fft_3d(signal):
kspec = np.fft.fftn(signal, norm='ortho')
kspec = np.fft.fftshift(kspec)
return kspec
def test_fft1D():
N = 256
a = 1.0
size = 2*N + 1
signal = np.zeros(size)
Xfreq = 0.5*a / float(N)
# [x, y, z] = np.meshgrid(rfreq * Nrange, rfreq * Nrange, rfreq * Nrange)
vecX = np.linspace(-0.5*a, 0.5*a, 2*size + 1)[1:2*size:2]
Kfreq = 2*np.pi/a
[vecK] = np.meshgrid(Kfreq*np.arange(-N, N+1))
# for i in range(size):
# signal[i] = func(vecX[i])
for i in range(size//4):
signal[size//2-i] = 1.0
signal[size//2+i] = 1.0
dft_kspec = apply_dft_1d(signal, vecX, vecK, a, size)
fft_kspec = apply_fft_1d(signal)
plot_fft_1d_results(vecX, signal, vecK, dft_kspec, fft_kspec)
    norm_fft_kspec = normalize_signal_1d(fft_kspec)
    norm_dft_kspec = normalize_signal_1d(dft_kspec)
plot_fft_1d_results(vecX, signal, vecK, norm_dft_kspec, norm_fft_kspec)
return
def test_fft2D():
N = 16
a = 1.0
area = a**2
size = 2*N + 1
signal = np.zeros([size, size])
Xfreq = 0.5*a / float(N)
# [x, y, z] = np.meshgrid(rfreq * Nrange, rfreq * Nrange, rfreq * Nrange)
vecX = np.linspace(-0.5*a, 0.5*a, 2*size + 1)[1:2*size:2]
vecY = np.linspace(-0.5*a, 0.5*a, 2*size + 1)[1:2*size:2]
Kfreq = 2*np.pi/a
[vecKX] = np.meshgrid(Kfreq*np.arange(-N, N+1))
[vecKY] = np.meshgrid(Kfreq*np.arange(-N, N+1))
    # the call below did not match func's (x, y, z, r) signature; the square pulse is
    # assigned explicitly in the loop that follows, mirroring the 1D test above
    # for i in range(size):
    #     for j in range(size):
    #         signal[i,j] = func(vecX[i])
for i in range(size//4):
for j in range(size//4):
signal[size//2-i, size//2-j] = 1.0
signal[size//2+i, size//2-j] = 1.0
dft_kspec = apply_dft_2d(signal, vecX, vecY, vecKX, vecKY, area, size)
fft_kspec = apply_fft_2d(signal)
plot_fft_2d_results(signal, dft_kspec, fft_kspec)
norm_fft_kspec = normalize_signal_2d(fft_kspec)
norm_dft_kspec = normalize_signal_2d(dft_kspec)
plot_fft_2d_results(signal, norm_dft_kspec, norm_fft_kspec)
return
def test_fft3D():
N = 6
a = 1.0
volume = a**3
size = 2*N + 1
signal = np.zeros([size, size, size])
Xfreq = 0.5*a / float(N)
# [x, y, z] = np.meshgrid(rfreq * Nrange, rfreq * Nrange, rfreq * Nrange)
vecX = np.linspace(-0.5*a, 0.5*a, 2*size + 1)[1:2*size:2]
vecY = np.linspace(-0.5*a, 0.5*a, 2*size + 1)[1:2*size:2]
vecZ = np.linspace(-0.5*a, 0.5*a, 2*size + 1)[1:2*size:2]
Kfreq = 2*np.pi/a
[vecKX] = np.meshgrid(Kfreq*np.arange(-N, N+1))
[vecKY] = np.meshgrid(Kfreq*np.arange(-N, N+1))
[vecKZ] = np.meshgrid(Kfreq*np.arange(-N, N+1))
for k in range(size):
for i in range(size):
for j in range(size):
signal[i,j, k] = func(vecX[j], vecY[i], vecZ[k], a/2)
for k in range(size//4):
for i in range(size//4):
for j in range(size//4):
signal[size//2-i, size//2-j, size//2-k] = 1.0
signal[size//2+i, size//2-j, size//2+k] = 1.0
dft_kspec = apply_dft_3d(signal, vecX, vecY, vecZ, vecKX, vecKY, vecKZ, volume, size)
fft_kspec = apply_fft_3d(signal)
plot_fft_3d_results(signal, dft_kspec, fft_kspec, size)
norm_fft_kspec = normalize_signal_3d(fft_kspec)
norm_dft_kspec = normalize_signal_3d(dft_kspec)
plot_fft_3d_results(signal, norm_dft_kspec, norm_fft_kspec, size)
return
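# Minimal driver (added for illustration): none of the test functions above are invoked in
# the original file; the 2D case is the quickest one that still exercises both the brute-force
# DFT and numpy's FFT. test_fft1D() and test_fft3D() can be run the same way, but the 3D DFT is slow.
if __name__ == '__main__':
    test_fft2D()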
| mcastrorib/bergman_periodic_solution | python/fft_test.py | fft_test.py | py | 9,440 | python | en | code | 0 | github-code | 6 | 74796406586 |
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
import cv2
from PIL import Image
# 1. Use transforms.ToTensor
img_path = "../dataset/train/ants/0013035.jpg"
img_PIL = Image.open(img_path)
tensor_trans = transforms.ToTensor()
img_tensor = tensor_trans(img_PIL)
# 2. Tensor data type
writer = SummaryWriter("../logs")
writer.add_image("Tensor_img", img_tensor)
# 3、normalize
print(img_tensor[0, 0, 0])
trans_normal = transforms.Normalize([9, 0, 1], [1, 4, 6])
normal_img = trans_normal(img_tensor)
print(normal_img[0, 0, 0])
writer.add_image("normalize", normal_img, 2)
# 4、resize
print(img_PIL.size)
trans_resize = transforms.Resize((512, 512))
img_resize = trans_resize(img_PIL)
print(img_resize.size)
img_resize_tensor = tensor_trans(img_resize)
writer.add_image("resize", img_resize_tensor)
# 5、compose
trans_resize_2 = transforms.Resize(1080)
trans_compose = transforms.Compose([trans_resize_2, tensor_trans])
img_resize_2 = trans_compose(img_PIL)
print(img_resize_2.size())
writer.add_image("resize", img_resize_2, 2)
# 6、Randomcrop
trans_random_crop = transforms.RandomCrop([100, 200])
trans_compose_2 = transforms.Compose([trans_random_crop, tensor_trans])
for i in range(10):
img_crop = trans_compose_2(img_PIL)
writer.add_image("random_cropHW", img_crop, i)
writer.close()
| ccbit1997/pytorch_learning | src/learn_transform.py | learn_transform.py | py | 1,344 | python | en | code | 0 | github-code | 6 | 37562147384 |
"""helper
=============
Helper functions for inventory scripts.
"""
__author__ = "Balz Aschwanden"
__email__ = "[email protected]"
__copyright__ = "Copyright 2017, University of Basel"
__credits__ = ["Balz Aschwanden"]
__license__ = "GPL"
import json
import os
import socket
def get_hostname():
"""Return FQDN for this host."""
return socket.gethostname()
def get_simple_hostname(fqdn):
"""Convert FQDN and return simple host name."""
simple_hostname = fqdn.split(".")[0]
return simple_hostname
def format_output(output):
"""Return results in Ansible JSON syntax.
Ansible requirements are documented here:
http://docs.ansible.com/ansible/latest/dev_guide/developing_inventory.html
"""
return json.dumps(output, sort_keys=True, indent=4, separators=(",", ": "))
def write_cache(cache_file, output):
"""Format and write inventory cache to file."""
with open(cache_file, "w") as cache:
for line in format_output(output):
cache.write(line)
def read_cache(cache_file):
"""Read cache file and return content or False."""
if not os.path.isfile(cache_file):
return False
with open(cache_file, "r") as cache:
return cache.read()
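# Hedged example (added for illustration; the cache path below is hypothetical): a round trip
# through write_cache/read_cache should reproduce the inventory structure handed to Ansible.
def _demo_cache_roundtrip(cache_file="/tmp/ants_inventory_cache.json"):
    inventory = {"_meta": {"hostvars": {}}, "all": {"hosts": [get_simple_hostname(get_hostname())]}}
    write_cache(cache_file, inventory)
    return json.loads(read_cache(cache_file))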
if __name__ == "__main__":
pass
| ANTS-Framework/ants | antslib/inventory/helper.py | helper.py | py | 1,278 | python | en | code | 42 | github-code | 6 | 24981950749 |
fishlist = list(map(int, open('2021\D6\input.txt','r').read().split(',') ))
print(fishlist)
days = 256
numfish = []
numfish.append([0]*7) # old fish 0 -> 6
numfish.append([0]*2) # new fish 0 -> 1
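# Added explanatory comment: instead of simulating each lanternfish, only the count of fish per
# timer value is tracked; each day the 7-slot adult array is rotated and the fish spawned two
# days ago join the adult pool, keeping the work constant per day even over 256 days.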
out = 0
for fish in fishlist:
numfish[0][fish] += 1
for day in range(days):
temp = [0]*7
for i in range(7):
temp[i-1] = numfish[0][i]
temp[6] += numfish[1][0]
numfish[1][0] = numfish[1][1]
numfish[1][1] = numfish[0][0]
numfish[0] = temp
# update total number of fish
out = 0
for count in numfish[0]: out += count
for count in numfish[1]: out += count
print(f'D{day+1}: {out}')
| elliotcoy/AdventOfCode2021 | 2021/D6/P2.py | P2.py | py | 677 | python | en | code | 0 | github-code | 6 | 35413595898 |
# -*- coding:utf-8 -*-
import random
import pygame
class BoardManager:
WALL = 0
FOOD = 1
NONE = 2
HEAD = 3
BODY = 4
def __init__(self, x_blocks, y_blocks, block_width, origin_x, origin_y, caption):
self.x_blocks = x_blocks
self.y_blocks = y_blocks
        # blocks whose status is NONE
self.non_blocks = None
self.total_len = x_blocks * y_blocks
self.blocks_status = None
self.block_width = block_width
self.screen = None
self.origin_x = origin_x
self.origin_y = origin_y
self.caption = caption
pygame.init()
def init_board(self, bg_color=(0, 0, 0), caption=None):
pygame.display.set_caption(caption or self.caption)
board_x, board_y = self.x_blocks * (1 + self.block_width) + \
2 * self.origin_x, (self.y_blocks + 1) * (self.block_width + 1) + self.origin_y
self.screen = pygame.display.set_mode((board_x, board_y), 0, 32)
self.blocks_status = [[self.NONE for _ in range(self.y_blocks)] for _ in range(self.x_blocks)]
self.non_blocks = self._gen_non_blocks()
pygame.display.update()
self.set_bg_color(bg_color)
def set_bg_color(self, color=(0, 0, 0)):
self.screen.fill(color)
pygame.display.update()
def _gen_non_blocks(self):
non_blocks = []
for i in range(0, self.x_blocks):
for j in range(0, self.y_blocks):
non_blocks.append((i, j))
return non_blocks
    # draw the grid lines
def show_pods(self, color=(255, 255, 255)):
start_pos_x, start_pos_y = self.origin_x, self.origin_y
end_pos_x, end_pos_y = self.origin_x, (self.block_width + 1) * self.y_blocks + self.origin_y
        # draw the vertical lines first
for c_index in range(0, self.x_blocks + 1):
pygame.draw.line(self.screen, color, (start_pos_x, start_pos_y), (end_pos_x, end_pos_y), 1)
start_pos_x = end_pos_x = start_pos_x + 1 + self.block_width
start_pos_x, start_pos_y = self.origin_x, self.origin_y
end_pos_x, end_pos_y = self.origin_x + (self.block_width + 1) * self.x_blocks, self.origin_y
        # then the horizontal lines
for r_index in range(0, self.y_blocks + 1):
pygame.draw.line(self.screen, color, (start_pos_x, start_pos_y), (end_pos_x, end_pos_y), 1)
start_pos_y = end_pos_y = start_pos_y + 1 + self.block_width
pygame.display.flip()
def show_wall(self, color=(255, 0, 0)):
start_pos_x, start_pos_y = self.origin_x, self.origin_y
end_pos_x, end_pos_y = self.origin_x + (self.block_width + 1) * self.x_blocks, \
(self.block_width + 1) * self.y_blocks + self.origin_y
pygame.draw.line(self.screen, color, (start_pos_x, start_pos_y), (end_pos_x, start_pos_y))
pygame.draw.line(self.screen, color, (start_pos_x, start_pos_y), (start_pos_x, end_pos_y))
pygame.draw.line(self.screen, color, (end_pos_x, start_pos_y), (end_pos_x, end_pos_y))
pygame.draw.line(self.screen, color, (start_pos_x, end_pos_y), (end_pos_x, end_pos_y))
pygame.display.flip()
def draw_block(self, x, y, color=(111, 111, 111)):
pos_x = self.origin_x + x * (self.block_width + 1) + 1
pos_y = self.origin_y + y * (self.block_width + 1) + 1
pygame.draw.rect(self.screen, color, (pos_x, pos_y, self.block_width, self.block_width), 0)
pygame.display.update((pos_x, pos_y, self.block_width, self.block_width))
def set_block(self, pos, status):
old_status = self.blocks_status[pos[0]][pos[1]]
if old_status == status:
return
self.blocks_status[pos[0]][pos[1]] = status
if old_status == self.NONE:
self.non_blocks.remove(pos)
if status == self.NONE:
self.non_blocks.append(pos)
def get_status(self, pos):
x, y = pos[0], pos[1]
        if x < 0 or x >= self.x_blocks or y < 0 or y >= self.y_blocks:
return self.WALL
return self.blocks_status[x][y]
def gen_food(self, color=(255, 0, 0)):
index = random.randint(0, len(self.non_blocks) - 1)
block_pos = self.non_blocks[index]
pos_x = self.origin_x + block_pos[0] * (self.block_width + 1) + 1
pos_y = self.origin_y + block_pos[1] * (self.block_width + 1) + 1
rect = (pos_x, pos_y, self.block_width, self.block_width)
self.set_block(block_pos, self.FOOD)
pygame.draw.rect(self.screen, color, rect, 0)
pygame.display.update(rect)
def show_score_and_speed(self):
pass
def show_game_over(self):
pass
def show_pause(self):
pass
def show_start(self):
pass
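# Hedged usage sketch (added for illustration; the numbers are arbitrary): create a 20x20 board
# with 20-pixel blocks, draw the grid and the wall, place one food block, then keep the window
# open briefly so the result stays visible.
if __name__ == '__main__':
    board = BoardManager(20, 20, 20, 10, 10, 'glutsnake')
    board.init_board()
    board.show_pods()
    board.show_wall()
    board.gen_food()
    pygame.time.wait(2000)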
| coderwf/pygames | glutsnake/board.py | board.py | py | 4,748 | python | en | code | 0 | github-code | 6 | 6606127726 |
dx = [0, 0, 1, -1]
dy = [-1, 1, 0, 0]
def solution(game_board, table):
def rotate():
tmp = []
for elm in block_piece:
start_x, start_y = elm[0]
space = [(0, 0)]
for x, y in elm[1:]:
space.append((start_y-y, x-start_x))
space.sort(key=lambda x: (-x[1], x[0]))
tmp.append(space)
return tmp
def check_can_insert():
cnt = 0
for idx1, space in enumerate(empty_space):
for idx2, piece in enumerate(block_piece):
if filled[idx1] or used[idx2]:
continue
if space == piece:
cnt += len(space)
filled[idx1] = 1
used[idx2] = 1
return cnt
def search_space(target, start_x, start_y, flag):
space = [(0, 0)]
stack = [(start_x, start_y)]
target[start_x][start_y] = flag
while stack:
x, y = stack.pop()
for i in range(4):
nx = x + dx[i]
ny = y + dy[i]
if 0 <= nx < n and 0 <= ny < n:
if (flag and not target[nx][ny]) or (not flag and target[nx][ny]):
space.append((nx-start_x, ny-start_y))
stack.append((nx, ny))
target[nx][ny] = flag
space.sort(key=lambda x: (-x[1], x[0]))
return space
answer = 0
n = len(game_board)
    empty_space = []  # empty regions of the game board
    block_piece = []  # available puzzle pieces
for x in range(n):
for y in range(n):
if not game_board[x][y]:
empty_space.append(search_space(game_board, x, y, 1))
if table[x][y]:
block_piece.append(search_space(table, x, y, 0))
filled = [0 for _ in range(len(empty_space))]
used = [0 for _ in range(len(block_piece))]
for _ in range(4):
answer += check_can_insert()
block_piece = rotate()
return answer
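# Tiny sanity check (added for illustration, not part of the original submission): the single
# empty cell at (1, 1) is filled by the single-cell piece at (0, 0), so one cell is covered.
if __name__ == '__main__':
    print(solution([[1, 1], [1, 0]], [[1, 0], [0, 0]]))  # expected output: 1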
| JeongGod/Algo-study | hyeonjun/18week/p84021.py | p84021.py | py | 2,031 | python | en | code | 7 | github-code | 6 | 34787406936 |
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from wordcloud_process import wordcloud_img
from article_loader import ArticleLoader
article_loader = ArticleLoader('english_corpora.yaml')
articles = article_loader.load()
def feature_and_matrix(articles, **kwargs):
tfid_vectorizer = TfidfVectorizer(**kwargs)
matrix = tfid_vectorizer.fit_transform(articles)
return tfid_vectorizer.get_feature_names(), matrix.A
def produce_wordcloud(features, vector, file_name):
wordcloud_img(dict((k, v) for k, v in zip(features, vector) if v != 0.),
file_name)
features, matrix = feature_and_matrix(articles, stop_words='english')
for i, vector in enumerate(matrix):
produce_wordcloud(features, vector, f'image/English_News_{i}.png')
print(f'1-gram TF-IDF cosine similarity: {cosine_similarity(matrix)[0, 1]}')
features, matrix = feature_and_matrix(articles, stop_words='english',
ngram_range=(2, 3))
for i, vector in enumerate(matrix):
produce_wordcloud(features, vector, f'image/English_News_{i}_2+3-gram.png')
print(f'2-gram + 3-gram TF-IDF cosine similarity: '\
f'{cosine_similarity(matrix)[0, 1]}')
| is3ka1/NLP-Practice | week1/english_news_analyse.py | english_news_analyse.py | py | 1,252 | python | en | code | 0 | github-code | 6 | 21836192189 |
import sys
from pprint import pprint
sys.stdin = open('../input.txt', 'r')
N = int(input())
dp = [[0] * 10 for _ in range(N)]
dp[0] = [1] * 10
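# Added explanatory comment: dp[i][j] counts the non-decreasing digit strings of length i+1
# that end with digit j; a string ending in j can be extended by any digit k >= j, which is
# exactly what the nested loops below accumulate.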
for i in range(1, N): # row (number length)
    for j in range(10): # column (last digit)
for k in range(j, 10):
dp[i][k] += dp[i-1][j]
print(sum(dp[N-1]) % 10007)
| liza0525/algorithm-study | BOJ/boj_11057_increasing_number.py | boj_11057_increasing_number.py | py | 298 | python | en | code | 0 | github-code | 6 | 25172905892 |
#%%
import pandas as pd
import altair as alt
import numpy as np
from sklearn.neighbors import NearestNeighbors
alt.data_transformers.disable_max_rows()
#%%
RADICL_file="/home/vipink/Documents/FANTOM6/HDBSCAN_RADICL_peak/data/processed/chr16_filter_df.csv"
#%%
radicl_df = pd.read_csv(RADICL_file,delimiter="\t")
# Observe likely PCR duplicates (tiny minority)
#%%
dedup_radicl_df = (radicl_df.loc[:,['chrom','start','end','strand','DNA_start','DNA_end','DNA_strand']]
.drop_duplicates())
# %%
plus_strand_space_df = (dedup_radicl_df
.query("strand == '+'")
.loc[:,['start','DNA_start']]
.reset_index()
)
#%%
nbrs = NearestNeighbors(n_neighbors=2, metric='euclidean',radius=25).fit(plus_strand_space_df.to_numpy())
distances, indices = nbrs.kneighbors(plus_strand_space_df.to_numpy())
# %%
read_neighbour_df = (plus_strand_space_df
.assign(closest_DNA=np.abs(plus_strand_space_df.loc[indices[:,0],'DNA_start'].to_numpy() - plus_strand_space_df.loc[indices[:,1],'DNA_start'].to_numpy()),
closest_RNA=np.abs(plus_strand_space_df.loc[indices[:,0],'start'].to_numpy() - plus_strand_space_df.loc[indices[:,1],'start'].to_numpy())))
# %%
dna_dist_cdf = (read_neighbour_df
.sort_values('closest_DNA')
.groupby('closest_DNA')
.agg(read_count=('start','count'))
.reset_index()
.assign(cread=lambda df_:df_.read_count.cumsum()/plus_strand_space_df.shape[0])
.rename(columns={'closest_DNA':'distance'})
.assign(end='DNA'))
rna_dist_cdf = (read_neighbour_df
.sort_values('closest_RNA')
.groupby('closest_RNA')
.agg(read_count=('start','count'))
.reset_index()
.assign(cread=lambda df_:df_.read_count.cumsum()/plus_strand_space_df.shape[0])
.rename(columns={'closest_RNA':'distance'})
.assign(end='RNA'))
tot_df = pd.concat([rna_dist_cdf,dna_dist_cdf])
# %%
(alt.Chart(tot_df.assign(log_val=lambda df_:np.log10(df_.distance + 1)))
.mark_line(opacity=0.6).encode(
x="log_val:Q",
y='cread:Q',
color="end"
))
# %%
| princeps091-binf/HDBSCAN_RADICL_peak | scripts/RADICL_read_neighbourhood.py | RADICL_read_neighbourhood.py | py | 2,053 | python | en | code | 0 | github-code | 6 | 6597568149 |
from numpy.polynomial import Polynomial as P
from numpy import polynomial
print("Interpolação de Newton")
pontos = []
k = 0
while True:
k += 1
while True:
print("Digite as coordenadas do ", k, "º ponto separados por espaço:", end="\nf para finalizar \n")
l = input()
if l == "f".lower():
break
l = l.split()
if len(l) == 2:
break
else:
print("Entrada inválida, tente novamente!")
if l == "f".lower():
break
for i in range(len(l)):
l[i] = float(l[i])
pontos.append(l)
l = []
for i in range(len(pontos)):
l.append(pontos[i][1])
tabela = []
tabela.append(l)
for i in range(len(pontos) - 1):
l = []
for j in range(len(pontos) - i - 1):
dif = (tabela[i][j + 1] - tabela[i][j]) / (pontos[j + 1 + i][0] - pontos[j][0])
l.append(dif)
tabela.append(l)
difdiv = []
for i in range(len(tabela)):
difdiv.append(tabela[i][0])
somatorio = 0
for i in range(1, len(pontos)):
produtorio = 1
for k in range(i):
produtorio *= (P([-pontos[k][0], 1]))
somatorio += difdiv[i] * produtorio
Pn = difdiv[0] + somatorio
funcao = list(Pn)
texto = ""
for i in range(len(funcao)):
if funcao[i] == 0:
continue
elif i == 0:
texto += str(funcao[i])
else:
texto += " + " + str(funcao[i])
texto += ("*x^%o" % (i))
print("Pn(x) :")
print(texto)
s = input(
"Deseja calcular Pn(x) dado um valor de x? \n [S/N]").lower()
if s == "s":
print("Pn(x) em qual ponto?")
p = float(input())
print("Pn(%a) é igual a %a" % (p, Pn(p)))
print("Raízes de Pn(x): ", polynomial.polynomial.polyroots(list(Pn)))
s = input(
"Encontrar os valores de x dado um valor de Pn(x)? \n [S/N]")
if s == "s":
print("Valores de x para qual valor de Pn(x)")
p = float(input())
print("para Pn(x)=%a temos: " % p, polynomial.polynomial.polyroots(list(Pn - P([p]))),
"Obs: Se houver, j = raiz de menos 1")
print("(x,Pn(x))")
for i in range(len(pontos)):
print("(%a,%a)" % (pontos[i][0], round(Pn(pontos[i][0]), 4)))
| fernandoajn/calculo-numerico | metodos/newton.py | newton.py | py | 2,151 | python | pt | code | 1 | github-code | 6 | 70622403067 |
class Car :
    addr = '서울'  # acts like a static (class-level) variable
    __slots__ = ['name', 'price', 'company']  # __slots__: pre-declares the only instance attribute names this class may have
def __init__(self, **args):
if 'name' in args:
            # self.add(value)  # attributes cannot be added this way; it would be meaningless
self.name = args['name']
if 'price' in args:
self.price = args.get('price')
if 'company' in args:
self.company = args.get('company')
def info(self):
if 'name' in self.__dict__:
print(f'자동차명: {self.name} ', end="\t")
if 'price' in self.__dict__:
print(f'가격: {self.price}', end="\t")
if 'company' in self.__dict__:
print(f'회사: {self.company}', end="\t")
c = Car(name='그랜저', price='4000', company='현대')
c2 = Car(name='모닝', price='2100')
c.info()
c2.info()
print(c.addr)
print(c2.addr)
# c2.addr = '부산'  # the correct way is to access it through the class name (Car.addr)
| Yang-Seungjae/Python | test.py | test.py | py | 1,048 | python | ko | code | 0 | github-code | 6 | 3721769844 |
import io
import time
from openpyxl import Workbook
import openpyxl as O
import speedtest
import re
import sys
import psycopg2
from psycopg2 import Error
from datetime import datetime
import hashlib
from .connection import Connection
cn=Connection()
class SpeedPage():
def asking_name(self):
print("Введите ваше ИМЯ:")
author=input()
return author
def asking_serial_number(self):
print("Введите ваш Серийный номер:")
s=input()
serialno=s.replace("S/N: ","")
return serialno
def id_of_test(self):
try:
connection = psycopg2.connect(user="dssadmin",
password="dssadmin",
host="10.10.2.180",
port="5432",
database="devices")
cursor = connection.cursor()
cursor.execute(""" SELECT MAX(testId) FROM testhistory """)
result = cursor.fetchall()
for i in result:
testId = i[0] + 1
except (Exception, Error) as error:
print("Ошибка при работе с PostgreSQL", error)
finally:
if connection:
cursor.close()
connection.close()
return testId
def port_number(self):
print("Какой это порт:")
port_number=input()
return port_number
def speedtest_database(self,author,serialno,testId,pretty_serialno,port_number,j):
for i in range(1,3):
current_date = datetime.now().strftime("%Y-%m-%d")
current_time = datetime.now().strftime("%H:%M:%S")
start_time=datetime.now()
st=speedtest.Speedtest(secure=True)
st.get_best_server()
download_number=st.download()
print("Ваша входящяя скорость:", end=' ')
download_speed=self.test_download_test(download_number)
print(download_speed)
upload_number=st.upload()
print("Ваша исходящяя скорость:", end=' ')
upload_speed=self.test_upload_test(upload_number)
print(upload_speed)
stop_time=datetime.now()
duration=str((stop_time - start_time).seconds)+' сек'
self.excel_uploading(download_speed,upload_speed,j,pretty_serialno,port_number)
print("Количество времени потраченная на тест:", end=' ')
print(duration)
print("Тест номер:", end=' ')
print(testId)
cn.connect_to_database(author,serialno,testId,download_speed,upload_speed,
duration,current_date,current_time,port_number)
def pretty_speed(self,speed):
unit = 'bps'
kmg = ['', 'K', 'M', 'G']
i = 0
while speed >= 1000:
speed /= 1000
i += 1
return "{:.2f}".format(speed) + ' ' + kmg[i] + unit
def speed_measure(self,speed):
i=0
while speed >= 1000:
speed /= 1000
i+=1
return speed
def pretty_file_format(self,serialno):
encoding='.xlsx'
return serialno+encoding
def test_download_test(self,download_number):
download_speed=self.speed_measure(download_number)
download_beauty_speed = self.pretty_speed(download_number)
if(download_speed<100):
print("FAIL", end=' ')
return download_beauty_speed
def test_upload_test(self,upload_number):
upload_speed=self.speed_measure(upload_number)
upload_beauty_speed = self.pretty_speed(upload_number)
if(upload_speed<100):
print("FAIL", end=' ')
return upload_beauty_speed
def create_excel_file(self,serialno):
wb=Workbook()
ws=wb.active
wb.save(filename=serialno)
def excel_uploading(self,download_speed,upload_speed,i,serialno,port_number):
Excel_file=serialno
Excel_worksheet="Sheet"
i=i+1
wb=O.load_workbook(Excel_file)
ws=wb[Excel_worksheet]
ws.cell(i,1).value=port_number
ws.cell(1,1).value='Port Number'
ws.cell(1,2).value='Download Speed'
ws.cell(1,3).value='Upload Speed'
ws.cell(i,2).value=download_speed
ws.cell(i,3).value=upload_speed
wb.save(Excel_file)
wb.close()
| Astarota/SpeedTestCLI | pages/speed_page.py | speed_page.py | py | 3,737 | python | en | code | 0 | github-code | 6 | 29192430322 |
# Hack 1: InfoDB lists. Build your own/personalized InfoDb with a list length > 3, create list within a list as illustrated with Owns_Cars
InfoDb = []
# List with dictionary records placed in a list
InfoDb.append({
"FirstName": "Joan",
"LastName": "Mir",
"Number": 36,
"Team": "Suzuki Ecstar",
"Age": "24",
"Results":["3","2","4","DNF", "2"]
})
InfoDb.append({
"FirstName": "Marc",
"LastName": "Marquez",
"Number": 93,
"Team": "Repsol Honda",
"Age": "29",
"Results":["1","DNF","DNF","2", "1"]
})
InfoDb.append({
"FirstName": "Valentino",
"LastName": "Rossi",
"Number": 46,
"Team": "Monster Energy Yamaha",
"Age": "43",
"Results":["10","DNF","9","11", "13"]
})
InfoDb.append({
"FirstName": "Jorge",
"LastName": "Martin",
"Number": 89,
"Team": "Ducati Lenovo",
"Age": "24",
"Results":["4","3","1","DNF", "5"]
})
def printe(f):
    print(f["FirstName"], f["LastName"])  # print the FirstName and LastName of the record
    print("\t Number:", f["Number"])  # f is the dictionary record passed in
    print("\t Team:", f["Team"])  # remaining fields are printed with a leading tab
print("\t Age:", f["Age"])
print("\t Results:", f["Results"])
def for_loop():
for f in InfoDb:
print(10*"~", "For Loops", 10*"~")
printe(f)
def while_loop():
f = 0
while f < len(InfoDb):
print(10*"~", "While Loops", 10*"~")
printe(InfoDb[f])
f = f+1
def recursive_loop(f):
if f == len(InfoDb): # this makes sure that the f is equal to the length of InfoDb
return f # this puts f outside, so the second if statement can use it
if f == 0:
print("~"*10 + "Recursive Loops" + "~"*10)
printe(InfoDb[f])
print("~"*10 + "Recursive Loops" + "~"*10)
recursive_loop(f + 1)
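# Hedged driver (added for illustration): the three traversal functions above are defined but
# never called in the original file; this runs each of them once over the same InfoDb records.
if __name__ == "__main__":
    for_loop()
    while_loop()
    recursive_loop(0)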
| kkwan0/Tri-3-Kurtis-Kwan | week1/infoDB.py | infoDB.py | py | 2,157 | python | en | code | 0 | github-code | 6 | 8909257357 |
import numpy as np
import cv2
import os
from PIL import Image
X = 10 # 0
Y = 105 # 95
WIDTH = 215 # 356
HEIGHT = 440 # 440
def process_img(original_img):
processed_img = cv2.cvtColor(original_img, cv2.COLOR_BGR2GRAY)
processed_img = cv2.resize(processed_img, (10, 20))
return processed_img
def grab_screen():
if not os.path.isfile('./image_data/FIFO'):
os.mkfifo('./image_data/FIFO')
os.system('screencapture -x -tjpg -R{},{},{},{} ./image_data/FIFO'.format(X, Y, WIDTH, HEIGHT))
    with Image.open('./image_data/FIFO') as fifo:
        # convert while the file handle is still open; the PIL image is closed on exit
        screen = np.array(fifo)
    processed_screen = process_img(screen)
    return processed_screen
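# Hedged usage sketch (added for illustration): grab_screen() shells out to the macOS
# `screencapture` utility, so this only works on macOS with screen-recording permission.
if __name__ == '__main__':
    frame = grab_screen()
    print('captured frame shape:', frame.shape)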
| sebastianandreasson/tetris_tensorflow | grab_screen.py | grab_screen.py | py | 671 | python | en | code | 0 | github-code | 6 | 6748902164 |
import cv2
import numpy as np
import face_recognition
import os
from datetime import datetime
import keyboard
import pandas as pd
train_data_path = os.path.join('artifacts', "attendance.csv")
os.makedirs(os.path.dirname(train_data_path), exist_ok=True)
columns =['Name','Time']
test = []
train = pd.DataFrame(test, columns=columns)
train.to_csv(train_data_path, index=False)
def take_attendance():
path = 'uploads'
images = []
class_names = []
my_list = os.listdir(path)
print(my_list)
for cl in my_list:
cur_image = cv2.imread(f'{path}/{cl}')
images.append(cur_image)
class_names.append(os.path.splitext(cl)[0])
print(class_names)
def find_encodings(images):
encode_list = []
for img in images:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
encode = face_recognition.face_encodings(img)[0]
encode_list.append(encode)
return encode_list
def mark_attendance(name):
with open('artifacts/attendance.csv','r+') as f:
my_data_list = f.readlines()
name_list = []
for line in my_data_list:
entry = line.split(',')
name_list.append(entry[0])
if name not in name_list:
now = datetime.now()
dt_string = now.strftime('%H:%M:%S')
f.writelines(f'\n{name}, {dt_string}')
encode_list_known = find_encodings(images)
print("Encoding complete")
cap = cv2.VideoCapture(0)
web=True
while web:
success, img = cap.read()
img_s = cv2.resize(img, (0,0), None, 0.25,0.25)
img_s = cv2.cvtColor(img_s, cv2.COLOR_BGR2RGB)
faces_cur_frame = face_recognition.face_locations(img_s)
encode_cur_frame = face_recognition.face_encodings(img_s, faces_cur_frame)
for encode_face, face_loc in zip(encode_cur_frame, faces_cur_frame):
matches = face_recognition.compare_faces(encode_list_known, encode_face)
face_dis = face_recognition.face_distance(encode_list_known, encode_face)
print(face_dis)
match_index = np.argmin(face_dis)
if matches[match_index]:
name = class_names[match_index].upper()
print(name)
y1,x2,y2,x1 = face_loc
y1, x2, y2, x1 = y1*4, x2*4, y2*4, x1*4
cv2.rectangle(img, (x1,y1),(x2,y2),(0,255,0),2)
cv2.rectangle(img, (x1,y2-35),(x2,y2),(0,255,0), cv2.FILLED)
cv2.putText(img, name, (x1+6, y2-6), cv2.FONT_HERSHEY_COMPLEX,1,(255,255,255),2)
mark_attendance(name)
cv2.imshow('Webcam',img)
cv2.waitKey(1)
if keyboard.is_pressed('q'):
web=False # if key 'q' is pressed
cv2.destroyAllWindows()
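# Hedged driver (added for illustration): take_attendance() expects reference face images in
# the 'uploads' directory and an available webcam; it runs until the 'q' key is pressed.
if __name__ == '__main__':
    take_attendance()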
| aruneer007/attendance | face.py | face.py | py | 2,859 | python | en | code | 0 | github-code | 6 | 9580721700 |
"""
Tomasulo main module.
"""
import argparse
import logger
from machine import Machine
def main():
"""
Main entry point.
Parses command line argument and begins execution.
:return: None
"""
parser = argparse.ArgumentParser(description='Simulate execution of DLX code on a Tomasulo processor.')
parser.add_argument('-f', type=str, dest='filename', required=True, help='The input file. Must be a .hex file.')
parser.add_argument('-v', '--verbose', action='store_true', help='Increase output verbosity.')
args = parser.parse_args()
logger.setLogLevel(1 if args.verbose else 0)
# Run
machine = Machine()
machine.loadProgram(args.filename)
machine.run()
if __name__ == '__main__':
main()
| kaledj/TomasuloSim | tomasulo.py | tomasulo.py | py | 751 | python | en | code | 0 | github-code | 6 | 7946627355 |
from cs50 import get_float
# The coins
quarter = 25
dime = 10
nickel = 5
penny = 1
# Counter for change owed
counter = 0
# To get the number from user
while True:
dollars = get_float("Change owed: ")
if dollars >= 0:
break
# Converting to cents
cents = int(round(dollars * 100))
# Calculating the result
while cents != 0:
if cents >= quarter:
cents -= quarter
counter += 1
continue
elif cents >= dime:
cents -= dime
counter += 1
continue
elif cents >= nickel:
cents -= nickel
counter += 1
continue
elif cents >= penny:
cents -= penny
counter += 1
continue
# Printing the result
print(counter)
| eiliaJafari/CS50X-2021 | ProblemSets/6/cash.py | cash.py | py | 774 | python | en | code | 0 | github-code | 6 | 73951878269 |
"""
faster
"""
class Solution:
def countArrangement(self, N: int) -> int:
li=list(range(0,N+1)) # 1-indexing # 0 is useless
self.ans=0
def backtrack(right): # swap from RHS
# from RHS is better since larger numbers are less likely to be Beautiful
# if we start from left to right there will be too many false hopes
if right==0: # reach the left bound
self.ans+=1
for left in range(right,0,-1):
li[left],li[right]=li[right],li[left]
if li[right] % right == 0 or right % li[right] == 0:
# if Beautiful for this pos `right` then go deeper
backtrack(right-1)
li[left],li[right]=li[right],li[left]
backtrack(N)
return self.ans
"""
slower
"""
class Solution:
def countArrangement(self, N: int) -> int:
def beautiful(num,pos):
return num % pos == 0 or pos % num == 0
nums=list(range(N,0,-1)) # n ~ 1
counter=[1]*(N+1) # 0th is useless
self.ans=0
def backtrack(pos):
if pos == 0: # means all pos are filled
self.ans+=1
for num in nums: # try on all nums # all numbers N~1 # try on large num first
if counter[num]>0 and beautiful(num,pos):
# if still not used up then we can use it # check if beautiful
counter[num]-=1
backtrack(pos-1) # go fill the next one
counter[num]+=1
backtrack(N) # start from the last pos
return self.ans
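# Tiny sanity check (added for illustration): for N = 2 both permutations [1, 2] and [2, 1]
# are beautiful arrangements, so either implementation above should return 2.
if __name__ == "__main__":
    print(Solution().countArrangement(2))  # expected: 2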
| y56/leetcode | 526. Beautiful Arrangement/solns.py | solns.py | py | 1,643 | python | en | code | 0 | github-code | 6 | 24436690438 |
from __future__ import annotations
import idaapi
import pyphrank.utils as utils
from pyphrank.ast_analyzer import CTreeAnalyzer, TFG
from pyphrank.cfunction_factory import CFunctionFactory
from pyphrank.type_flow_graph_parts import Node, UNKNOWN_SEXPR
def get_funcname(func_ea: int) -> str:
return idaapi.get_name(func_ea)
class FunctionManager:
def __init__(self, cfunc_factory=None):
if cfunc_factory is None:
cfunc_factory = CFunctionFactory()
self.func_factory = cfunc_factory
def get_tfg(self, func_ea:int) -> TFG:
if not utils.is_func_start(func_ea):
utils.log_warn(f"{hex(func_ea)} is not a function")
cfunc = self.get_cfunc(func_ea)
if cfunc is None:
nop_node = Node(Node.EXPR, UNKNOWN_SEXPR)
analysis = TFG(nop_node)
else:
analysis = CTreeAnalyzer(cfunc).lift_cfunc()
return analysis
def get_cfunc(self, func_ea:int) -> idaapi.cfunc_t|None:
return self.func_factory.get_cfunc(func_ea)
def get_func_details(self, func_ea: int):
func_tinfo = self.get_func_tinfo(func_ea)
if func_tinfo is None:
return None
func_details = idaapi.func_type_data_t()
rv = func_tinfo.get_func_details(func_details)
if not rv:
utils.log_warn(f"failed to get func details in {get_funcname(func_ea)}")
return None
return func_details
def get_args_count(self, func_ea:int) -> int:
cfunc = self.get_cfunc(func_ea)
if cfunc is None:
return 0
return len(cfunc.arguments)
def get_cfunc_lvar_type(self, func_ea:int, var_id:int) -> idaapi.tinfo_t:
func_tif = self.get_func_tinfo(func_ea)
if func_tif is not None and var_id > func_tif.get_nargs():
arg_type = func_tif.get_nth_arg(var_id)
if not utils.is_tif_correct(arg_type):
arg_type = utils.UNKNOWN_TYPE
if arg_type is not utils.UNKNOWN_TYPE:
return arg_type
arg_type = self.get_arg_type(func_ea, var_id)
if not utils.is_tif_correct(arg_type):
arg_type = utils.UNKNOWN_TYPE
if arg_type is not utils.UNKNOWN_TYPE:
return arg_type
cfunc = self.get_cfunc(func_ea)
if cfunc is None:
utils.log_warn(f"failed to get variable type, because of decompilation failure in {get_funcname(func_ea)}")
return utils.UNKNOWN_TYPE
if len(cfunc.lvars) <= var_id:
print("ERROR:", "var id is too big.")
return utils.UNKNOWN_TYPE
var = cfunc.lvars[var_id]
arg_type = var.type().copy()
if not utils.is_tif_correct(arg_type):
arg_type = utils.UNKNOWN_TYPE
return arg_type
def set_lvar_tinfo(self, func_ea:int, var_id:int, var_type:idaapi.tinfo_t):
cfunc = self.get_cfunc(func_ea)
if cfunc is None:
utils.log_warn(f"failed to change variable type, because of decompilation failure in {get_funcname(func_ea)}")
return
var = cfunc.lvars[var_id]
# var.set_user_type()
# var.set_final_lvar_type(var_type)
info = idaapi.lvar_saved_info_t()
info.ll = var
info.type = var_type
info.name = var.name
rv = idaapi.modify_user_lvar_info(func_ea, idaapi.MLI_TYPE, info)
assert rv, "Failed to modify lvar"
self.func_factory.clear_cfunc(func_ea)
def get_cfunc_lvar(self, func_ea: int, lvar_id:int):
cfunc = self.get_cfunc(func_ea)
if cfunc is None:
return None
return cfunc.lvars[lvar_id]
def get_arg_type(self, func_ea:int, arg_id:int) -> idaapi.tinfo_t:
# XXX do not refactor this into one liner,
# XXX because ida will lose arg type somewhere along the way
fdet = self.get_func_details(func_ea)
if fdet is None:
utils.log_warn(f"failed to get func details in {get_funcname(func_ea)}")
return utils.UNKNOWN_TYPE
if len(fdet) <= arg_id:
return utils.UNKNOWN_TYPE
return fdet[arg_id].type.copy()
def set_arg_type(self, func_ea:int, arg_id:int, arg_type:idaapi.tinfo_t):
if isinstance(arg_type, str):
arg_type = utils.str2tif(arg_type)
func_details = self.get_func_details(func_ea)
if func_details is None:
utils.log_warn(f"failed to change argument type (no func details) in {get_funcname(func_ea)}")
return
func_details[arg_id].type = arg_type.copy()
new_func_tinfo = idaapi.tinfo_t()
rv = new_func_tinfo.create_func(func_details)
assert rv, "Failed to create func tinfo from details"
rv = idaapi.apply_tinfo(func_ea, new_func_tinfo, 0)
assert rv, "Failed to apply new tinfo to function"
self.func_factory.clear_cfunc(func_ea)
def get_func_tinfo(self, func_ea:int) -> idaapi.tinfo_t:
tif = idaapi.tinfo_t()
if idaapi.get_tinfo(tif, func_ea) and tif.is_correct():
return tif
cfunc = self.get_cfunc(func_ea)
if cfunc is not None:
cfunc.get_func_type(tif)
if tif.is_correct():
return tif
if utils.is_movrax_ret(func_ea):
rv = utils.str2tif("__int64 (*)()")
return rv.copy()
utils.log_warn(f"failed to get tinfo for {hex(func_ea)} {get_funcname(func_ea)}")
return utils.UNKNOWN_TYPE
def get_funcptr_tinfo(self, func_ea:int) -> idaapi.tinfo_t:
tif = self.get_func_tinfo(func_ea)
if tif is utils.UNKNOWN_TYPE:
return utils.UNKNOWN_TYPE
rv = tif.create_ptr(tif)
if rv is False:
utils.log_warn(f"failed to change tinfo of {str(tif)}")
return utils.UNKNOWN_TYPE
return tif
def get_nargs(self, func_ea:int) -> int:
tif = self.get_func_tinfo(func_ea)
if tif is None:
return 0
return tif.get_nargs()
def get_lvars_counter(self, func_ea:int) -> int:
cfunc = self.get_cfunc(func_ea)
if cfunc is None:
return 0
counter = 0
for lv in cfunc.get_lvars():
if lv.name == '':
continue
counter += 1
return counter
def get_lvar_name(self, func_ea:int, lvar_id:int) -> str:
lvar = self.get_cfunc_lvar(func_ea, lvar_id)
if lvar is None:
return ""
return lvar.name
| Mizari/phrank | pyphrank/function_manager.py | function_manager.py | py | 5,588 | python | en | code | 51 | github-code | 6 | 35492004084 |
from ninja import Router
from ninja import NinjaAPI, File
from ninja.files import UploadedFile
from django.http import HttpResponse
from RECOGNIZE.text_reader import OCR_Reader
import io
import PIL.Image as Image
import cv2
import os
import time
import json
import uuid
import requests
router = Router()
path = __file__
splited = path.split("/")
path=""
for i in splited[1:-1]:
path += "/"+i
@router.post("/recognize")
def Recognize_Plate(request,file: UploadedFile = File(...)):
try:
# print(url)
# experiment_id = str(time.strftime("%Y-%m-%d_%H-%M-%S"))
# os.system("wget "+url+" -O /root/LACTURE/PLATNOMER_RECOGNIZE/RECOGNIZE/img/"+experiment_id+".jpg")
# image = cv2.imread("/root/LACTURE/PLATNOMER_RECOGNIZE/RECOGNIZE/img/"+experiment_id+".jpg")
data = file.read()
image = Image.open(io.BytesIO(data))
uuids = str(uuid.uuid4())
image.save(path+"/img/"+uuids+".png")
############## SAVE #################
img = Image.open(path+"/img/"+uuids+".png")
box = (600, 300, 1100, 700)
img2 = img.crop(box)
img2.save(path+"/croping/"+uuids+".png")
############## CROP #################
imageread = cv2.imread(path+"/croping/"+uuids+".png")
reader = OCR_Reader(False)
image, text, boxes = reader.read_text(imageread)
return {
"message":"success",
"data" : text,
"name" : uuids+".png"
}
except BaseException as err:
print(str(err))
return {
"message" : "error"
}
@router.get("/img/nocrop/{name}")
def ImageNoCrop(request, name: str):
try:
with open(path+"/img/"+name, 'rb') as image_file:
# Read the image content
image_data = image_file.read()
# Set the content type header
response = HttpResponse(content_type='image/jpeg')
# Set the content of the response to the image data
response.write(image_data)
return response
except BaseException as err:
return {
"message" : "Internal server error"
}
@router.get("/img/crop/{name}")
def ImageCrop(request, name: str):
try:
with open(path+"/croping/"+name, 'rb') as image_file:
# Read the image content
image_data = image_file.read()
# Set the content type header
response = HttpResponse(content_type='image/jpeg')
# Set the content of the response to the image data
response.write(image_data)
return response
except BaseException as err:
return {
"message" : "Internal server error"
}
| fakhrilak/image_recognize | RECOGNIZE/index.py | index.py | py | 2,687 | python | en | code | 0 | github-code | 6 | 35010781963 |
from grafo.Grafo import *
from collections import deque
class EdmondsKarp:
def __init__(self, grafo: Grafo) -> None:
self.grafo = grafo
        self.fluxo = {}
for arco in self.grafo.arcos.values():
self.fluxo[(arco.vertice1.indice, arco.vertice2.indice)] = 0
self.fluxo[(arco.vertice2.indice, arco.vertice1.indice)] = 0
for arco in list(self.grafo.arcos.values()):
if (arco.vertice2.indice, arco.vertice1.indice) not in self.grafo.arcos:
self.grafo.arcos[(arco.vertice2.indice, arco.vertice1.indice)] = Arco(arco.vertice2, arco.vertice1, 0)
arco.vertice2.vizinhos_saintes.append(arco.vertice1)
def buscaLargura(self, s: int, t: int) -> dict:
parent = {s: None}
queue = deque([s])
while queue:
u = queue.popleft()
for v in self.grafo.vizinhos_saintes(u):
if v.indice not in parent and self.grafo.arcos[(u, v.indice)].peso - self.fluxo[(u, v.indice)] > 0:
parent[v.indice] = u
if v.indice == t:
return parent
queue.append(v.indice)
return None
def execute(self) -> None:
self.processarAlgoritmo()
self.imprimir()
def processarAlgoritmo(self) -> None:
s = 1
t = len(self.grafo.vertices)
max_flow = 0
while True:
parent = self.buscaLargura(s, t)
if parent is None:
break
path_flow = float("Inf")
v = t
while v != s:
u = parent[v]
path_flow = min(path_flow, self.grafo.arcos[(u, v)].peso - self.fluxo[(u, v)])
v = u
max_flow += path_flow
v = t
while v != s:
u = parent[v]
self.fluxo[(u, v)] += path_flow
self.fluxo[(v, u)] -= path_flow
v = u
self.max_flow = max_flow
def imprimir(self) -> None:
print("O fluxo máximo possível é %d" % self.max_flow)
| jdanprad0/INE5413-Grafos | Atividade-03-Grafos/algoritmos_t3/edmondsKarp/EdmondsKarp.py | EdmondsKarp.py | py | 2,190 | python | en | code | 0 | github-code | 6 | 6159961636 |
import json
a = {
"name": "ivo",
"age": "22"
}
def serialize_to(path, data):
    # serialize the given data (not the module-level `a`) to the given path
    json_string = json.dumps(data, indent=4)
    with open(path, "w") as f:
        f.write(json_string)
def unserialize_from(path):
with open(path, "r") as f:
contents = f.read()
return json.loads(contents)
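# Hedged round-trip example (added for illustration; the temporary path is hypothetical):
if __name__ == "__main__":
    serialize_to("/tmp/panda.json", a)
    print(unserialize_from("/tmp/panda.json"))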
| Vencislav-Dzhukelov/101-3 | week3/3-Panda-Social-Network/panda_json.py | panda_json.py | py | 316 | python | en | code | 0 | github-code | 6 | 18101264424 |
from typing import List, Tuple
from unittest import TestCase, main
class Solution:
def longestPalindrome(self, s: str) -> str:
def func(left: int, right: int, longest: str) -> str:
"""returns the longest palindromic substring using left and right index"""
longest_length = len(longest)
while 0 <= left and right < l and s[left] == s[right]:
length = right - left + 1
if longest_length < length:
longest = s[left : right + 1]
longest_length = length
left -= 1
right += 1
return longest
l = len(s)
longest = s[0]
for i in range(l):
longest = func(i - 1, i + 1, longest)
longest = func(i, i + 1, longest)
return longest
class Test(TestCase):
s = Solution()
data: List[Tuple[str, str]] = [
("cbbd", "bb"),
("aaaa", "aaaa"),
("babad", "bab"),
]
def test_solution(self):
for input, expected in self.data:
self.assertEqual(self.s.longestPalindrome(input), expected)
if __name__ == "__main__":
main()
| hirotake111/leetcode_diary | leetcode/longest_palindromic_substring/solution.py | solution.py | py | 1,187 | python | en | code | 0 | github-code | 6 | 25408409971 |
from FemFrameTool import read_fun, read_vel
from RPData import RPData
# from IntegratorST import integrate_by_st_vert, integrate_by_st_vem
import numpy as np
import pandas as pd
def calc_area_field(relative_path):
area = read_fun(relative_path + 'field_1.fun')
return area
def calc_height_field(relative_path):
height = read_fun(relative_path + 'h.fun')
return height
def calc_volume_field(relative_path):
volume = calc_height_field(relative_path)
return volume
def calc_sandiness_field(relative_path):
sandiness = read_fun(relative_path + 'e.fun')
return sandiness
def calc_perm_field(relative_path):
perm = read_fun(relative_path + 'perm.fun')
return perm
def calc_collector_field(relative_path):
e = calc_sandiness_field(relative_path)
h = calc_height_field(relative_path)
collector = e * h
return collector
def calc_poro_field(relative_path):
poro = read_fun(relative_path + 'm.fun')
return poro
def calc_poro_volume_field(relative_path):
e = calc_sandiness_field(relative_path)
h = calc_height_field(relative_path)
m = calc_poro_field(relative_path)
poro_volume = e * m * h
return poro_volume
def calc_init_water_saturation_field(relative_path):
s0 = read_fun(relative_path + 's0.fun')
return s0
def calc_init_store_oil_field(relative_path):
e = calc_sandiness_field(relative_path)
h = calc_height_field(relative_path)
m = calc_poro_field(relative_path)
s0 = calc_init_water_saturation_field(relative_path)
stock_oil = e * h * m * (1 - s0)
return stock_oil
def calc_current_water_saturation_field(relative_path):
s = read_fun(relative_path + 's.fun')
return s
def calc_current_store_oil_field(relative_path):
e = calc_sandiness_field(relative_path)
h = calc_height_field(relative_path)
m = calc_poro_field(relative_path)
s = calc_current_water_saturation_field(relative_path)
stock_oil = e * h * m * (1 - s)
return stock_oil
def calc_velocity_field(relative_path):
velocity = read_vel(relative_path + 'resvelo.vel')
return velocity
def calc_press_field(relative_path):
press = read_fun(relative_path + 'result.fun')
return press
def calc_ph_field(relative_path):
p = calc_press_field(relative_path)
h = calc_height_field(relative_path)
ph = p * h
return ph
def calc_mh_field(relative_path):
m = calc_poro_field(relative_path)
h = calc_height_field(relative_path)
mh = m * h
return mh
def calc_conductivity_water(relative_path, mu_water):
k = calc_perm_field(relative_path)
h = calc_height_field(relative_path)
return k * h / mu_water
def calc_conductivity_mixture(relative_path, mu_water, mu_oil):
k = calc_perm_field(relative_path)
h = calc_height_field(relative_path)
s = calc_current_water_saturation_field(relative_path)
k_mu = mu_water / mu_oil
ofp = RPData(relative_path + 'relpermFVM.rp')
return k * h * phi(s, ofp) / mu_water
def phi(s, ofp):
    n = len(s)
    ph = np.zeros(n, dtype=float)
for i in range(n):
ph[i] = ofp.fi(s[i])
return ph
def calc_t_field(relative_path):
t = read_fun(relative_path + 't.fun')
return t
def integral_jacob(grid):
j = np.zeros(grid.Nelem)
for i in range(grid.Nelem):
x1 = grid.vert[grid.elem_vert[i][0]][0]
y1 = grid.vert[grid.elem_vert[i][0]][1]
x2 = grid.vert[grid.elem_vert[i][1]][0]
y2 = grid.vert[grid.elem_vert[i][1]][1]
x3 = grid.vert[grid.elem_vert[i][2]][0]
y3 = grid.vert[grid.elem_vert[i][2]][1]
j[i] = ((x2 - x1) * (y3 - y1) - (x3 - x1) * (y2 - y1)) / 2
return j
def integrate_by_st_vert(grid, color, jacob, field):
integral_color = np.zeros(len(color))
for i in range(grid.Nelem):
for j in range(grid.elem_nvert[i]):
for k in range(len(color)):
if grid.vert_color[grid.elem_vert[i][j]] == list(color[k].color):
f = (field[grid.elem_vert[i][0]] + field[grid.elem_vert[i][1]] + field[grid.elem_vert[i][2]]) / 3
integral_color[k] = integral_color[k] + (jacob[i] * f) / 3
return integral_color
def integrate_by_st_vert_for_t(grid, color, jacob, t, tau, field):
integral_color = np.zeros(len(color))
for i in range(grid.Nelem):
for j in range(grid.elem_nvert[i]):
for k in range(len(color)):
if tau - t[grid.elem_vert[i][j]] > 0:
if grid.vert_color[grid.elem_vert[i][j]] == list(color[k].color):
f = (field[grid.elem_vert[i][0]] + field[grid.elem_vert[i][1]] + field[grid.elem_vert[i][2]]) / 3
integral_color[k] = integral_color[k] + (jacob[i] * f) / 3
else:
continue
return integral_color
def integrate_by_st_vem(grid, color, jacob, field, field_u, ku):
integral_color = np.zeros(len(color))
u = ku * np.median(field_u)
for i in range(grid.Nelem):
if field_u[i] - u > 0:
for j in range(grid.elem_nvert[i]):
for k in range(len(color)):
if grid.vert_color[grid.elem_vert[i][j]] == list(color[k].color):
f = (field[grid.elem_vert[i][0]] + field[grid.elem_vert[i][1]] + field[grid.elem_vert[i][2]]) / 3
integral_color[k] = integral_color[k] + (jacob[i] * f) / 3
else:
continue
return integral_color
def calc_all_params(relative_path, grid, color, config):
jacob = integral_jacob(grid)
ku = config.ku
mu_water = config.mu_water
mu_oil = config.mu_oil
path_rel_phase_perm = config.path_relative_phase_permeability
output_values = []
output_values.append(
('Area', integrate_by_st_vert(grid, color, jacob, calc_area_field(relative_path))))
output_values.append(
('Volume_ST', integrate_by_st_vert(grid, color, jacob, calc_volume_field(relative_path))))
output_values.append(
('Volume_collector', integrate_by_st_vert(grid, color, jacob, calc_collector_field(relative_path))))
output_values.append(
('Volume_poro', integrate_by_st_vert(grid, color, jacob, calc_poro_volume_field(relative_path))))
output_values.append(
('Stock_oil', integrate_by_st_vert(grid, color, jacob, calc_init_store_oil_field(relative_path))))
output_values.append(
('Stock_current_oil', integrate_by_st_vert(grid, color, jacob, calc_current_store_oil_field(relative_path))))
output_values.append(
('Stock_6', integrate_by_st_vem(
grid, color, jacob, calc_init_store_oil_field(relative_path), calc_velocity_field(relative_path), ku)))
output_values.append(('p*h', integrate_by_st_vert(grid, color, jacob, calc_ph_field(relative_path))))
output_values.append(('m*h', integrate_by_st_vert(grid, color, jacob, calc_mh_field(relative_path))))
output_values.append(
('m_i_water', integrate_by_st_vert(
grid, color, jacob, calc_conductivity_water(relative_path, mu_water))/output_values[1][1]))
output_values.append(
('m_i', integrate_by_st_vert(
grid, color, jacob, calc_conductivity_mixture(relative_path, mu_water, mu_oil))/output_values[1][1]))
return output_values
def calc_t_param(relative_path, grid, color, config):
jacob = integral_jacob(grid)
# tau = config.tau
    # sor = config.sor  # irreducible (residual) oil saturation
t = calc_t_field(relative_path)
m = calc_poro_field(relative_path)
h = calc_height_field(relative_path)
    so = 1 - calc_current_water_saturation_field(relative_path)  # (1 - s) oil saturation
    tau = 2
    sor = 0.0001  # irreducible (residual) oil saturation
# tau_arr = np.linspace(np.min(t), np.max(t), 10)
    t[t == np.inf] = np.nan
df = pd.DataFrame(t)
tau_arr = np.linspace(df.min(), 0.0046, 2)
# tau_arr = np.linspace(df.min(), df.max(), 10)
general_field = m * h * (so - sor)
arr_st_t_tau = []
    for i in range(len(tau_arr)):
arr_st_t_tau.append(integrate_by_st_vert_for_t(grid, color, jacob, t, tau_arr[i], general_field))
output_values = []
    for i in range(len(arr_st_t_tau)):
output_values.append((str(tau_arr[i]), arr_st_t_tau[i]))
return output_values
if __name__ == '__main__':
raise SystemExit("CalcParamClass.py это не основное приложение!")
else:
    print('CalcParamClass.py is used as a library!')
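# Hedged illustration (not part of the original file): integral_jacob stores the signed area
# of each triangle, ((x2 - x1)*(y3 - y1) - (x3 - x1)*(y2 - y1)) / 2. A minimal stand-in grid
# with a single right triangle of legs 1 and 1 should therefore give an area of 0.5.
def _jacobian_self_check():
    class _Grid:
        Nelem = 1
        vert = [(0.0, 0.0), (1.0, 0.0), (0.0, 1.0)]
        elem_vert = [[0, 1, 2]]
    return integral_jacob(_Grid())  # expected: array([0.5])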
|
brilliantik/Color_ST_Integrate
|
CalcParam.py
|
CalcParam.py
|
py
| 7,887 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13974817829
|
from pydantic import BaseModel, Field
from typing import List, Union
import pydantic
from .validators import validate_polygon, validate_is_plane_orthogonal_to_polygon, validate_plane_normal_is_not_zero
class Point3DModel(BaseModel):
__root__: List[float] = Field(..., min_items=3, max_items=3)
class PlaneModel(BaseModel):
point: Point3DModel = Field()
normal: Point3DModel = Field()
@pydantic.validator('normal', always=True)
@classmethod
def validate_plane_normal_is_not_zero(cls, value):
validate_plane_normal_is_not_zero(value.__root__)
return value
class Point2DModel(BaseModel):
__root__: List[float] = Field(..., min_items=2, max_items=2)
def to_3D(self) -> Point3DModel:
return Point3DModel(__root__=[*self.__root__, 0])
class EmptyPolygonModel(BaseModel):
__root__: List[Point2DModel] = Field(..., min_items=0, max_items=0)
class PolygonModel(BaseModel):
__root__: List[Point2DModel] = Field(..., min_items=3)
@classmethod
def get_polygon_normal(cls) -> Point3DModel:
return Point3DModel(__root__=[0, 0, 1])
@classmethod
def get_polygon_plane(cls) -> PlaneModel:
return PlaneModel(
point=Point3DModel(__root__=[0, 0, 0]),
normal=cls.get_polygon_normal()
)
@pydantic.validator('__root__', always=True)
@classmethod
def validate_is_polygon_convex(cls, value):
points = [v.__root__ for v in value]
validate_polygon(points)
return value
class SplittingRequestData(BaseModel):
polygon: PolygonModel = Field()
plane: PlaneModel = Field()
@pydantic.root_validator(skip_on_failure=True)
@classmethod
def validate_is_plane_orthogonal_to_polygon(cls, values):
plane_normal = values['plane'].normal
polygon_normal = PolygonModel.get_polygon_normal()
validate_is_plane_orthogonal_to_polygon(
plane_normal.__root__,
polygon_normal.__root__,
)
return values
class SplittingResponseData(BaseModel):
polygon1: PolygonModel = Field()
polygon2: Union[PolygonModel, EmptyPolygonModel] = Field()
|
mikheev-dev/polygon_splitter
|
src/data_model.py
|
data_model.py
|
py
| 2,164 |
python
|
en
|
code
| 1 |
github-code
|
6
|
33551925024
|
import requests as rq
from dotenv import load_dotenv
import os
import smtplib
import sys
class FPL:
URL = 'https://fantasy.premierleague.com/api/bootstrap-static/'
def __init__(self):
self.response_raw = rq.get(FPL.URL)
load_dotenv()
self.email_sent = os.getenv('EMAIL_SENT')
self.app_pw = os.getenv('APP_PW')
self.email = os.getenv('EMAIL')
if self.response_raw.status_code != 200:
self.send_email('error', str(self.response_raw.status_code) + ";" + self.response_raw.text)
self.shit_hit_the_fan = True
return
self.shit_hit_the_fan = False
self.response = self.response_raw.json()
self.tot_players = int(self.response['total_players'])
def send_email(self, setting: str = 'normal', error_message: str = ''):
if setting == 'normal':
message = 'Subject: {}\n\n{}'.format("FPL REGISTRATION IS OPEN", "GO GET THEM")
elif setting == 'ping':
message = 'Subject: {}\n\n{}'.format("PING FPL", "Script is working")
else:
message = 'Subject: {}\n\n{}'.format("ERROR FPL", error_message)
try:
s = smtplib.SMTP('smtp.gmail.com', 587)
s.starttls()
s.login(self.email, self.app_pw)
s.sendmail(self.email, self.email, message)
s.quit()
return True
except Exception as e:
print(e)
return False
def run(self):
if 0 < self.tot_players < 1_000_000:
if self.email_sent is None:
if self.send_email():
with open(".env", "a") as f:
f.write("EMAIL_SENT=1")
def ping(self):
if self.send_email('ping'):
pass
if __name__ == '__main__':
fpl = FPL()
if not fpl.shit_hit_the_fan:
if len(sys.argv) == 1:
fpl.run()
else:
fpl.ping()
|
FilleDille/fpl_reg_chaser
|
main.py
|
main.py
|
py
| 1,966 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35416632908
|
import logging
import battle.main
import memory.main
import screen
import xbox
FFXC = xbox.controller_handle()
logger = logging.getLogger(__name__)
def yojimbo(gil_value: int = 263000):
logger.info("Yojimbo overdrive")
screen.await_turn()
memory.main.wait_frames(6)
if not screen.turn_aeon():
return
while memory.main.battle_menu_cursor() != 35:
xbox.menu_up()
memory.main.wait_frames(6)
xbox.menu_b()
logger.info("Selecting amount")
memory.main.wait_frames(15)
battle.main.calculate_spare_change_movement(gil_value)
logger.info(f"Amount selected: {gil_value}")
xbox.tap_b()
xbox.tap_b()
xbox.tap_b()
xbox.tap_b()
xbox.tap_b()
return
|
coderwilson/FFX_TAS_Python
|
battle/overdrive.py
|
overdrive.py
|
py
| 723 |
python
|
en
|
code
| 14 |
github-code
|
6
|
5480419217
|
import os
import requests
from bs4 import BeautifulSoup
import re
import time
import sys
user_agent = 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.82 Safari/537.36'
def get_music_data(url):
"""
    Fetch the song information from a playlist page
"""
headers = {'User-Agent':user_agent}
webData = requests.get(url,headers=headers).text
soup = BeautifulSoup(webData,'lxml')
find_list = soup.find('ul',class_="f-hide").find_all('a')
tempArr = []
for a in find_list:
music_id = a['href'].replace('/song?id=','')
music_name = a.text
tempArr.append({'id':music_id,'name':music_name})
return tempArr
def get(values,output_path):
"""
    Download the songs
"""
downNum = 0
rstr = r"[\/\\\:\*\?\"\<\>\|]" # '/ \ : * ? " < > |'
for x in values:
        x['name'] = re.sub(rstr, "_", x['name'])  # replace characters that are illegal in file names
if not os.path.exists(output_path + os.sep + x['name'] + '.mp3'):
print('[*] '+ x['name'] + '.mp3 下载中...')
url = 'http://music.163.com/song/media/outer/url?id=' + x['id'] + '.mp3'
try:
save_file(url , output_path + os.sep + x['name'] + '.mp3')
downNum = downNum + 1
print('[+] '+ x['name'] + '.mp3 下载完成 !')
except:
print('[+] '+ x['name'] + '.mp3 下载失败 !')
print('[+] 共计下载完成歌曲 ' + str(downNum) + ' 首 !')
def save_file(url,path):
"""
    Save a song file to disk
"""
headers = {'User-Agent':user_agent,'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8','Upgrade-Insecure-Requests':'1'}
response = requests.get(url,headers=headers)
f = open(path, 'wb')
f.write(response.content)
f.flush()
def poc_head():
print("""
__ _______.___._____.___. __________ __ .__.__
/ \ / \__ | |\__ | | \______ \ ____ _______/ |_|__| | ____
\ \/\/ // | | / | | | _// __ \\____ \ __\ | | _/ __ \
\ / \____ | \____ | | | \ ___/| |_> > | | | |_\ ___/
\__/\ / / ______| / ______|____|____|_ /\___ > __/|__| |__|____/\___ >
\/ \/ \/ /_____/ \/ \/|__| \/
author 昊辰
博客: www.haochen1204.com
公众号: 霜刃信安
""")
def main():
url = ''
output_path = sys.argv[0][0:len(sys.argv[0])-len(os.path.basename(sys.argv[0]))]+'music_'+time.strftime('%Y%m%d%H%M', time.localtime())
poc_head()
url = input('请输入歌单的网址:').replace("#/","")
if not os.path.exists(output_path):
os.makedirs(output_path)
music_list = get_music_data(url)
print('[+] 歌单获取成功! 共计',len(music_list),'首歌曲!')
get(music_list,output_path)
print('[+] 歌曲存放目录为 '+output_path+' 文件')
print('[+] 程序运行结束 10秒后自动退出')
time.sleep(10)
main()
|
haochen1204/Reptile_WYYmusic
|
网易云爬虫.py
|
网易云爬虫.py
|
py
| 3,336 |
python
|
en
|
code
| 1 |
github-code
|
6
|
35177658753
|
import numpy as np
from sklearn.model_selection import train_test_split
class ToyDataset:
def __init__(self, min_len, max_len):
self.SOS = "<s>"
self.EOS = "/<s>"
self.characters = list("abcd")
self.int2char = self.characters
# 1 for SOS, 1 for EOS, 1 for padding
self.char2int = {c: i+3 for i, c in enumerate(self.characters)}
self.min_str_len = min_len
self.max_str_len = max_len
self.max_seq_len = max_len + 2
self.vocab_size = len(self.characters) + 3
def get_dataset(self, num_samples):
inp_set = []
tar_set = []
for _ in range(num_samples):
i, t = self._sample()
inp_set.append(i)
tar_set.append(t)
return inp_set, tar_set
def split_dataset(self, inp_set, tar_set, test_ratio=0.2):
return train_test_split(inp_set, tar_set, test_size=test_ratio)
def _sample(self):
random_len = np.random.randint(self.min_str_len, self.max_str_len+1)
random_char = np.random.choice(self.characters, random_len)
inp = [self.char2int.get(c) for c in random_char]
tar = inp[::-1]
inp = [1] + inp + [2]
tar = [1] + tar + [2]
        inp = np.pad(inp, (0, self.max_str_len + 2 - len(inp)), 'constant', constant_values=0)
        tar = np.pad(tar, (0, self.max_str_len + 2 - len(tar)), 'constant', constant_values=0)
return inp, tar
def char_index(self, char):
if char == self.SOS:
return 1
elif char == self.EOS:
return 2
else:
return self.char2int[char]
def index_char(self, index):
if index == 0:
return ":"
elif index == 1:
return self.SOS
elif index == 2:
return self.EOS
else:
return self.characters[index-3]
if __name__ == '__main__':
toy = ToyDataset(5, 10)
inp_set, tar_set = toy.get_dataset(10)
input_train, input_val, target_train, target_val = toy.split_dataset(inp_set, tar_set, 0.2)
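    # Hedged illustration (not in the original file): decode one padded pair back to characters
    # with index_char; ":" marks padding, <s> and /<s> mark the sequence boundaries.
    decoded_inp = "".join(toy.index_char(i) for i in inp_set[0])
    decoded_tar = "".join(toy.index_char(i) for i in tar_set[0])
    print(decoded_inp, decoded_tar)  # the target should be the reversed input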
|
xuzhiyuan1528/tf2basic
|
Seq2Seq/Utils.py
|
Utils.py
|
py
| 2,080 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25205799104
|
# -*- coding: utf-8 -*-
"""
Abstract class for detectors
"""
import abc
class Embedder(abc.ABC):
@abc.abstractmethod
def embed(self):
'Return embed features'
return NotImplemented
@abc.abstractmethod
def get_input_shape(self):
'Return input shape'
return NotImplemented
from easydict import EasyDict as edict
import mxnet as mx
import numpy as np
import cv2
from skimage import transform as trans
class ArcFace_Embedder(Embedder):
def get_input_shape(self):
pass
def do_flip(self, data):
for idx in range(data.shape[0]):
data[idx,:,:] = np.fliplr(data[idx,:,:])
return data
def __init__(self):
modeldir = './model/insight_face/model-r50-am-lfw/model'
gpuid = 0
ctx = mx.gpu(gpuid)
self.nets = []
image_shape = [3, 112, 112]
modeldir_=modeldir+',0'
for model in modeldir_.split('|'):
vec = model.split(',')
assert len(vec)>1
prefix = vec[0]
epoch = int(vec[1])
print('loading',prefix, epoch)
net = edict()
net.ctx = ctx
net.sym, net.arg_params, net.aux_params = mx.model.load_checkpoint(prefix, epoch)
#net.arg_params, net.aux_params = ch_dev(net.arg_params, net.aux_params, net.ctx)
all_layers = net.sym.get_internals()
net.sym = all_layers['fc1_output']
net.model = mx.mod.Module(symbol=net.sym, context=net.ctx, label_names = None)
net.model.bind(data_shapes=[('data', (1, 3, image_shape[1], image_shape[2]))])
net.model.set_params(net.arg_params, net.aux_params)
#_pp = prefix.rfind('p')+1
#_pp = prefix[_pp:]
#net.patch = [int(x) for x in _pp.split('_')]
#assert len(net.patch)==5
#print('patch', net.patch)
self.nets.append(net)
def align(self, detections):
warped_images=[]
for det in detections:
raw_face_image = det['face_img']
#plt.imshow(raw_face_image)
#plt.show()
image_size = [112,112]
src = np.array([
[30.2946, 51.6963],
[65.5318, 51.5014],
[48.0252, 71.7366],
[33.5493, 92.3655],
[62.7299, 92.2041] ], dtype=np.float32 )
if image_size[1]==112:
src[:,0] += 8.0
offset = ([
[det['face_bbox'][0],det['face_bbox'][1]],
[det['face_bbox'][0],det['face_bbox'][1]],
[det['face_bbox'][0],det['face_bbox'][1]],
[det['face_bbox'][0],det['face_bbox'][1]],
[det['face_bbox'][0],det['face_bbox'][1]]
])
npoint= np.array(det['face_keypoint']) - np.array(offset)
dst = npoint#.reshape( (2,5) ).T
tform = trans.SimilarityTransform()
tform.estimate(dst, src)
M = tform.params[0:2,:]
warped = cv2.warpAffine(raw_face_image,M,(image_size[1],image_size[0]), borderValue = 0.0)
#plt.imshow(warped)
warped_images.append(warped)
return warped_images
def embed(self, detections):
det_with_face = [ det for det in detections if det['face_img'] is not None]
if len(det_with_face)==0:
return detections
aligned_face_images = self.align(det_with_face)
embeds =[]
# Image_based Detection time per face : 0.018270
# for image in aligned_face_images:
# image = np.transpose( image, (2,0,1) )
# F = None
# for net in self.nets:
# embedding = None
# #ppatch = net.patch
# for flipid in [0,1]:
# _img = np.copy(image)
# if flipid==1:
# #plt.imshow(np.transpose( _img, (1,2,0) )[:,:,::-1])
# #plt.show()
# _img = self.do_flip(_img)
# #plt.imshow(np.transpose( _img, (1,2,0) )[:,:,::-1])
# #plt.show()
# input_blob = np.expand_dims(_img, axis=0)
# data = mx.nd.array(input_blob)
# db = mx.io.DataBatch(data=(data,))
# net.model.forward(db, is_train=False)
# _embedding = net.model.get_outputs()[0].asnumpy().flatten()
# #print(_embedding.shape)
# if embedding is None:
# embedding = _embedding
# else:
# embedding += _embedding
# _norm=np.linalg.norm(embedding)
# embedding /= _norm
# if F is None:
# F = embedding
# else:
# F += embedding
# #F = np.concatenate((F,embedding), axis=0)
# _norm=np.linalg.norm(F)
# F /= _norm
# embeds.append(F)
# Batch_based Detection time per face : 0.004155
batch_images = []
for image in aligned_face_images:
image = np.transpose( image, (2,0,1) )
for flipid in [0,1]:
_img = np.copy(image)
if flipid==1:
_img = self.do_flip(_img)
batch_images.append(_img)
input_blob = np.array(batch_images)
data = mx.nd.array(input_blob)
db = mx.io.DataBatch(data=(data,))
for net in self.nets:
net.model.forward(db, is_train=False)
_embedding = net.model.get_outputs()[0].asnumpy()#.flatten()
tmp = []
for i in range(0,len(_embedding),2):
mean_flip = (_embedding[i]+_embedding[i+1])/2
_norm=np.linalg.norm(mean_flip)
mean_flip/= _norm
tmp.append( mean_flip )
embeds.append(tmp)
        # Instead of adding up, we temporarily replace it with the mean
embeds = np.mean(embeds,axis=0)
for det, emb in zip(det_with_face, embeds):
det['face_embed'] = emb
return detections
def embed_imgs(self, images):
aligned_face_images = images
embeds =[]
# Batch_based Detection time per face : 0.004155
batch_images = []
for image in aligned_face_images:
image = np.transpose( image, (2,0,1) )
for flipid in [0,1]:
_img = np.copy(image)
if flipid==1:
_img = self.do_flip(_img)
batch_images.append(_img)
input_blob = np.array(batch_images)
data = mx.nd.array(input_blob)
db = mx.io.DataBatch(data=(data,))
for net in self.nets:
net.model.forward(db, is_train=False)
_embedding = net.model.get_outputs()[0].asnumpy()#.flatten()
tmp = []
for i in range(0,len(_embedding),2):
mean_flip = (_embedding[i]+_embedding[i+1])/2
_norm=np.linalg.norm(mean_flip)
mean_flip/= _norm
tmp.append( mean_flip )
embeds.append(tmp)
return embeds
# TEST CODES
if __name__ == '__main__':
import time
import matplotlib.pyplot as plt
import glob
import os
os.chdir('../../')
import src.detector.detectors as detectors
# Detector and Embedder
Y_MTCNN = detectors.Yolov2_MTCNN()
embed=ArcFace_Embedder()
# Load Images
paths = glob.glob('./src/face_reid/test_images/*.jpg')
paths.sort()
dets = []
for img_path in paths:
test_img=cv2.imread(img_path)
s = time.time()
result_Y = Y_MTCNN.predict(test_img)
dets.append(result_Y[0])
e = time.time()
print('Detection time per frame : %f'%(e-s))
vis_img = test_img.copy()
for track in result_Y:
x1, y1, x2, y2 = track['person_bbox']
color = np.random.randint(low=0,high=255,size=3)
color = (int(color[0]),int(color[1]),int(color[2]))
cv2.rectangle(vis_img,(x1, y1), (x2, y2),color,5)
fx1, fy1, fx2, fy2 = track['face_bbox']
cv2.rectangle(vis_img, (x1+fx1, y1+fy1), (x1+fx2, y1+fy2), color, 5)
for pt in track['face_keypoint']:
cv2.circle(vis_img, (x1+pt[0], y1+pt[1]), 5, color,5 ,1)
plt.imshow(vis_img[:,:,::-1])
plt.show()
# Test Code
s = time.time()
dets = embed.embed(dets)
embed_features = [det['face_embed'] for det in dets]
e = time.time()
print('Detection time per face : %f'%((e-s)/len(dets)))
dis_chart = np.zeros((len(embed_features),len(embed_features)))
for i in range(len(embed_features)):
for j in range(len(embed_features)):
dis_chart[i,j]= np.sqrt( np.sum( np.square(embed_features[i] - embed_features[j]))+1e-12 )
sim_chart = np.zeros((len(embed_features),len(embed_features)))
for i in range(len(embed_features)):
for j in range(len(embed_features)):
sim_chart[i,j]= np.dot( embed_features[i], embed_features[j].T )
'''
if len(detections)>0:
have_face_indexs =[]
input_dets =[]
for idx,det in enumerate(detections):
if det['face_img'] is not None:
have_face_indexs.append(idx)
input_dets.append(det)
if len(input_dets)>0:
emb_results = self.FACE_EMBEDDER.embed(input_dets)
for i,e in zip(have_face_indexs,emb_results):
detections[i]['face_embed'] = e
'''
|
chunhanl/ElanGuard_Public
|
src/face_reid/embedders.py
|
embedders.py
|
py
| 10,556 |
python
|
en
|
code
| 13 |
github-code
|
6
|
17176402024
|
import datetime
import h5py
import librosa
import numpy as np
import os
import pandas as pd
import soundfile as sf
import sys
import time
import localmodule
# Define constants.
data_dir = localmodule.get_data_dir()
dataset_name = localmodule.get_dataset_name()
orig_sr = localmodule.get_sample_rate()
negative_labels = localmodule.get_negative_labels()
clip_length = int(0.500 * orig_sr) # a clip lasts 500 ms
args = sys.argv[1:]
unit_str = args[0]
units = localmodule.get_units()
# Print header.
start_time = int(time.time())
print(str(datetime.datetime.now()) + " Start.")
print("Generating " + dataset_name + " clips for " + unit_str + ".")
print("h5py version: {:s}".format(h5py.__version__))
print("librosa version: {:s}".format(librosa.__version__))
print("numpy version: {:s}".format(np.__version__))
print("pandas version: {:s}".format(pd.__version__))
print("soundfile version: {:s}".format(sf.__version__))
print("")
# Create directory for original (i.e. non-augmented) clips.
predictions_name = "_".join([dataset_name, "baseline-predictions"])
predictions_dir = os.path.join(data_dir, predictions_name)
recordings_name = "_".join([dataset_name, "full-audio"])
recordings_dir = os.path.join(data_dir, recordings_name)
annotations_name = "_".join([dataset_name, "annotations"])
annotations_dir = os.path.join(data_dir, annotations_name)
dataset_wav_name = "_".join([dataset_name, "audio-clips"])
dataset_wav_dir = os.path.join(data_dir, dataset_wav_name)
os.makedirs(dataset_wav_dir, exist_ok=True)
original_dataset_wav_dir = os.path.join(dataset_wav_dir, "original")
os.makedirs(original_dataset_wav_dir, exist_ok=True)
# Create directory corresponding to the recording unit.
unit_dir = os.path.join(original_dataset_wav_dir, unit_str)
os.makedirs(unit_dir, exist_ok=True)
# Open full night recording.
samples = []
annotation_name = unit_str + ".txt"
annotation_path = os.path.join(annotations_dir, annotation_name)
df = pd.read_csv(annotation_path, sep='\t')
recording_name = unit_str + ".flac"
recording_path = os.path.join(recordings_dir, recording_name)
full_night = sf.SoundFile(recording_path)
n_positive_samples = 0
n_negative_samples = 0
# Export every annotation either as positive (flight call) or negative (alarm).
for index, row in df.iterrows():
# Compute center time of the annotation bounding box.
begin_time = float(row["Begin Time (s)"])
end_time = float(row["End Time (s)"])
mid_time = 0.5 * (begin_time + end_time)
sample = int(orig_sr * mid_time)
sample_str = str(sample).zfill(9)
# Compute center frequency of the annotation bounding box.
low_freq = float(row["Low Freq (Hz)"])
high_freq = float(row["High Freq (Hz)"])
mid_freq = 0.5 * (low_freq + high_freq)
freq_str = str(int(mid_freq)).zfill(5)
if "Calls" in row and row["Calls"] in negative_labels:
label_str = "0"
n_negative_samples = n_negative_samples + 1
else:
label_str = "1"
n_positive_samples = n_positive_samples + 1
clip_list = [unit_str, sample_str, freq_str, label_str, "original.wav"]
clip_str = "_".join(clip_list)
# Read.
sample_start = sample - int(0.5 * clip_length)
full_night.seek(sample_start)
data = full_night.read(clip_length)
# Export.
clip_path = os.path.join(unit_dir, clip_str)
sf.write(clip_path, data, orig_sr)
samples.append(sample)
# The number of false positives to be added to the dataset is equal to the
# difference between the number of annotated positives and
# the number of annotated negatives.
n_false_positives = n_positive_samples - n_negative_samples
print("Number of positives: " + str(n_positive_samples) + ".")
print("Number of negatives: " + str(n_negative_samples) + ".")
print("Number of false positives (clips fooling baseline detector): "
+ str(n_false_positives) + ".")
print("Total number of clips: " + str(2*n_positive_samples) + ".")
print("")
# Load probabilities of the baseline prediction model.
prediction_name = unit_str + ".npy"
prediction_path = os.path.join(predictions_dir, prediction_name)
prob_matrix = np.load(prediction_path)
# Retrieve timestamps corresponding to decreasing confidences.
prob_samples = (prob_matrix[:, 0] * orig_sr).astype('int')
probs = prob_matrix[:, 1]
sorting_indices = np.argsort(probs)[::-1]
sorted_probs = probs[sorting_indices]
sorted_prob_samples = prob_samples[sorting_indices]
sorted_prob_samples = sorted_prob_samples
# The exported false positives correspond to the timestamps with highest
# confidences under the condition that they are 12000 samples (500 ms) apart
# from all previously exported clips.
prob_counter = 0
false_positive_counter = 0
while false_positive_counter < n_false_positives:
prob_sample = sorted_prob_samples[prob_counter]
dists = [np.abs(sample-prob_sample) for sample in samples]
min_dist = np.min(dists)
if min_dist > clip_length:
# Append sample to growing list.
samples.append(prob_sample)
sample_str = str(prob_sample).zfill(9)
# By convention, the frequency of a false positive example is 0 Hz.
freq_str = str(0).zfill(5)
clip_list = [unit_str, sample_str, freq_str, "0", "original.wav"]
false_positive_counter = false_positive_counter + 1
clip_str = "_".join(clip_list)
# Read.
sample_start = prob_sample - int(0.5 * clip_length)
full_night.seek(sample_start)
data = full_night.read(clip_length)
# Export.
clip_path = os.path.join(unit_dir, clip_str)
sf.write(clip_path, data, orig_sr)
prob_counter = prob_counter + 1
# Print elapsed time.
print(str(datetime.datetime.now()) + " Finish.")
elapsed_time = time.time() - int(start_time)
elapsed_hours = int(elapsed_time / (60 * 60))
elapsed_minutes = int((elapsed_time % (60 * 60)) / 60)
elapsed_seconds = elapsed_time % 60.
elapsed_str = "{:>02}:{:>02}:{:>05.2f}".format(elapsed_hours,
elapsed_minutes,
elapsed_seconds)
print("Total elapsed time: " + elapsed_str + ".")
|
BirdVox/bv_context_adaptation
|
src/001_generate-audio-clips.py
|
001_generate-audio-clips.py
|
py
| 6,126 |
python
|
en
|
code
| 8 |
github-code
|
6
|
31854187897
|
# Lyric (LRC) parsing program
# Lyric manager <> lyric lines
import time
import os
# Data model storing the information for a single lyric line
class LrcItem:
    time = 0.0  # time in seconds
    seg = ""  # one line of lyrics
def __init__(self, time, seg):
self.time = time
self.seg = seg
# Lyric manager, responsible for fetching the lyric line for a given time
class LrcManager:
lrcList = []
    # Set the lyric file path
    def set_lrc_path(self, path):
        self.lrcList = []  # reset the list
        # Open the lyric file
        lines_list = self.open_lrc_file(path)
        # Parse the lyrics, create lyric objects and store the data
        self.anazy_lines(lines_list)
        # Sort by time
self.sort_lrc_items()
def open_lrc_file(self, path):
file = open(path, "r")
return file.readlines()
def anazy_lines(self, lines_list):
for line in lines_list:
self.anazy_line(line)
def anazy_line(self, line):
l = line.split("]") # [00:00.00] 用右括号进行拆分
if l[1] == "": # 没有I[1],I[1]不存在则直接返回
return
# 建立模型
ret = self.get_time_from_string(l[0]) # l[0]表示时间,获取之
if ret: # 或者写成 if ret!= None,表示如果ret为真
lrc_item = LrcItem(ret, l[1]) # LrcItem不在本类里,所有没有self.
self.lrcList.append(lrc_item)
def get_time_from_string(self, string):
# [03:20.08 [ti
        m = string[1:3]  # minutes
        s = string[4:]  # seconds
        if not m.isdigit():  # not all digits (e.g. the [ti:...] tags), so bail out
            return None
        return int(m) * 60 + float(s)  # total number of seconds
    # Sort the lyrics by time in descending order (bubble sort)
def sort_lrc_items(self):
for i in range(0, len(self.lrcList)):
for j in range(0, len(self.lrcList) - i - 1):
                if self.lrcList[j].time < self.lrcList[j + 1].time:  # always push the smaller time towards the back
item = self.lrcList[j]
self.lrcList[j] = self.lrcList[j + 1]
self.lrcList[j + 1] = item
    def show_lrc_items(self):  # print every item
for item in self.lrcList:
print(item.time, item.seg)
    # Return the lyric line for the given time
    def get_lrc_for_time(self, t):  # note: times are sorted in descending order
for item in self.lrcList:
if item.time < t:
                return item.seg  # end the function and return the lyric line
        return ""  # the given time is before every line, so return nothing
lrc = LrcManager()
lrc.set_lrc_path("周杰伦双截棍(Live).lrc")
# lrc.show_lrc_items()
t = 0
while True:
time.sleep(1)
x = os.system('cls')
t += 1
print(lrc.get_lrc_for_time(t))
|
920630yzx/python_course
|
pycharm学习-进阶篇/面向对象的程序设计/4.1歌词(项目).py
|
4.1歌词(项目).py
|
py
| 2,916 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
8385108161
|
from __future__ import absolute_import
from __future__ import print_function
import argparse
from lxml import etree
parser = argparse.ArgumentParser(
description='Create tls links from sumo net as needed by tls_csv2SUMO.py. You have to edit the link number ' +
'field (preset with g). The comment gives the link number shown on demand in SUMO-GUI')
parser.add_argument('net', help='Input file name')
args = parser.parse_args()
doc = etree.parse(args.net)
connections = {}
for conn in doc.xpath('//connection'):
if 'linkIndex' in conn.attrib:
# use traffic light id and right adjusted number for sorting and as
# comment
numIndex = conn.attrib['linkIndex']
index = conn.attrib['tl'] + ';' + numIndex.zfill(3)
connections[index] = conn.attrib['from'] + '_' + conn.attrib['fromLane'] + \
';' + conn.attrib['to'] + '_' + conn.attrib['toLane']
# print record
# print conn.attrib['from'], conn.attrib['to'],
# conn.attrib['linkIndex']
for conn in sorted(connections):
# print conn, connections[conn]
print("link;g;{};0".format(connections[conn]).ljust(50) + '#' + str(conn).rjust(3))
|
ngctnnnn/DRL_Traffic-Signal-Control
|
sumo-rl/sumo/tools/tls/createTlsCsv.py
|
createTlsCsv.py
|
py
| 1,181 |
python
|
en
|
code
| 17 |
github-code
|
6
|
35279730803
|
from flask import Flask, url_for, render_template, request, flash, redirect, session, abort, jsonify
import RPi.GPIO as GPIO
import subprocess, os, logging
import ipdb
from config import Config
from time import sleep
'''initial VAR'''
# Light GPIO
RELAIS_4_GPIO = 2
# Water GPIO
RELAIS_WATER_GPIO = 22
logging.basicConfig(
filename='server.log',
level=logging.DEBUG,
format='[%(asctime)s] %(levelname)s:%(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p'
)
app = Flask(__name__)
app.config.from_object(Config)
TOKEN = app.config['TOKEN']
'''functions'''
# Turn the light on
@app.route('/accendilucicortile', methods=['POST'])
def lights_on():
token = request.json.get('token', None)
if token != TOKEN:
logging.debug('not authorized access')
return jsonify({"msg": "Unauthorized"}), 400
elif token == TOKEN:
logging.debug('Turn the lights on')
GPIO.output(RELAIS_4_GPIO, GPIO.LOW)
logging.debug('Lights are on')
return jsonify({"msg": "Lights on"}), 200
else:
return jsonify({"msg": "This should never happen"}), 200
# lights off
@app.route('/spegnilucicortile', methods=['POST'])
def lights_off():
token = request.json.get('token', None)
if token != TOKEN:
logging.debug('not authorized access')
return jsonify({"msg": "Unauthorized"}), 400
elif token == TOKEN:
logging.debug('Turn the lights off')
GPIO.output(RELAIS_4_GPIO, GPIO.HIGH)
logging.debug('Lights are off')
return jsonify({"msg": "Lights off"}), 200
else:
return jsonify({"msg": "This should never happen"}), 200
# water on
@app.route('/accendiacqua', methods=['POST'])
def water_on():
token = request.json.get('token', None)
if token != TOKEN:
logging.debug('not authorized access')
return jsonify({"msg": "Unauthorized"}), 400
elif token == TOKEN:
GPIO.output(RELAIS_WATER_GPIO, GPIO.LOW)
logging.debug('Starting irrigation')
sleep(5)
if GPIO.input(RELAIS_WATER_GPIO):
logging.error('Irrigation not started')
else:
logging.debug('Irrigation correctly started')
return "<h1>Irrigation is on</h1>"
else:
return jsonify({"msg": "This should never happen"}), 200
# water off
@app.route('/spegniacqua', methods=['POST'])
def water_off():
token = request.json.get('token', None)
if token != TOKEN:
logging.debug('not authorized access')
return jsonify({"msg": "Unauthorized"}), 400
elif token == TOKEN:
GPIO.output(RELAIS_WATER_GPIO, GPIO.HIGH)
logging.debug('Stopping Irrigation')
sleep(5)
if GPIO.input(RELAIS_WATER_GPIO):
logging.debug('Irrigation correctly stopped')
else:
logging.error('Irrigation not stopped')
return "<h1>Irrigation is off</h1>"
else:
return jsonify({"msg": "This should never happen"}), 200
if __name__ == '__main__':
logging.info('starting up')
GPIO.setmode(GPIO.BCM)
GPIO.setup(RELAIS_4_GPIO,GPIO.OUT, initial=GPIO.HIGH) #lights off
if GPIO.input(RELAIS_4_GPIO):
logging.debug('Luce spenta')
else:
logging.debug('Luce accesa')
GPIO.setup(RELAIS_WATER_GPIO, GPIO.OUT, initial=GPIO.HIGH) #water off
if GPIO.input(RELAIS_WATER_GPIO):
logging.debug('Irrigazione spenta')
else:
logging.debug('Irrigazione accesa')
app.secret_key = os.urandom(12)
try:
app.run(
debug=True,
host='0.0.0.0',
port=5000
)
except:
logging.info('exception')
finally:
GPIO.output(RELAIS_WATER_GPIO, GPIO.HIGH)
GPIO.cleanup()
|
oldgiova/python-api-webservice-lightscontrol
|
main.py
|
main.py
|
py
| 3,747 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31211285971
|
''' TAREA5 (Homework 5)
Determine the sample size required for each decimal place of precision of the
estimate obtained for the integral, comparing against Wolfram Alpha for at
least one through seven decimal places; present the result as a single plot,
either box-and-whisker or a violin diagram.
'''
from math import exp, pi
import numpy as np
def g(x):
return (2 / (pi * (exp(x) + exp(-x))))
wolf = "0.0488340"  # with 7 decimals (str(0.0488340) would drop the trailing zero)
vg = np.vectorize(g)
X = np.arange(-8, 8, 0.001)  # widen and refine the range
Y = vg(X)  # vectorised for efficiency
from GeneralRandom import GeneralRandom
generador = GeneralRandom(np.asarray(X), np.asarray(Y))
desde = 3
hasta = 7
pedazo = 10  # from 1 up to 1000000
cuantos = 200 # 200
def parte(replica):
V = generador.random(pedazo)[0]
return ((V >= desde) & (V <= hasta)).sum()
import multiprocessing
if __name__ == "__main__":
state = 0
with multiprocessing.Pool(2) as pool:
while (True):
montecarlo = pool.map(parte, range(cuantos))
integral = sum(montecarlo) / (cuantos * pedazo)
num = str((pi / 2) * integral)
pedazo = pedazo + 100
if num[0] == wolf[0] and num[1] == wolf[1] and num[2] == wolf[2] and state == 0: # 1er Decimal
print("Se logra el primer decimal con:", pedazo, "pedazos. Dado que:", num[2], " es igual a", wolf[2])
state = 1
if num[0] == wolf[0] and num[1] == wolf[1] and num[2] == wolf[2] and num[3] == wolf[3] and state == 1: # 2do Decimal
print("Se logra el segunda decimal con:", pedazo, "pedazos. Dado que:", num[3], " es igual a", wolf[3])
state = 2
if num[0] == wolf[0] and num[1] == wolf[1] and num[2] == wolf[2] and num[3] == wolf[3] and num[4] == wolf[4] and state == 2: # 3er Decimal
print("Se logra el tercer decimal con:", pedazo, "pedazos. Dado que:", num[4], " es igual a", wolf[4])
state = 3
if num[0] == wolf[0] and num[1] == wolf[1] and num[2] == wolf[2] and num[3] == wolf[3] and num[4] == wolf[4] and num[5] == wolf[5] and state == 3: # 4to Decimal
print("Se logra el cuarto decimal con:", pedazo, "pedazos. Dado que:", num[5], " es igual a", wolf[5])
state = 4
if num[0] == wolf[0] and num[1] == wolf[1] and num[2] == wolf[2] and num[3] == wolf[3] and num[4] == wolf[4] and num[5] == wolf[5] and num[6] == wolf[6] and state == 4: # 5to Decimal
print("Se logra el quinto decimal con:", pedazo, "pedazos. Dado que:", num[6], " es igual a", wolf[6])
state = 5
if num[0] == wolf[0] and num[1] == wolf[1] and num[2] == wolf[2] and num[3] == wolf[3] and num[4] == wolf[4] and num[5] == wolf[5] and num[6] == wolf[6] and num[7] == wolf[7] and state == 5: # 6to Decimal
print("Se logra el sexto decimal con:", pedazo, "pedazos. Dado que:", num[7], " es igual a", wolf[7])
state = 6
if num[0] == wolf[0] and num[1] == wolf[1] and num[2] == wolf[2] and num[3] == wolf[3] and num[4] == wolf[4] and num[5] == wolf[5] and num[6] == wolf[6] and num[7] == wolf[7] and num[8] == wolf[8] and state == 6: # 7mo Decimal
print("Se logra el septimo decimal con:", pedazo, "pedazos. Dado que:", num[8], " es igual a", wolf[8])
break
print(pedazo, num)
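# Hedged note (not part of the original file): the digit-by-digit comparisons above are
# equivalent to a string-prefix check. With wolf = "0.0488340", the first k decimal places
# of num match the Wolfram Alpha value exactly when the leading "0." plus k digits agree.
def decimals_match(num, wolf, k):
    """True when the first k decimal places of num and wolf are identical."""
    return num[:k + 2] == wolf[:k + 2]
# Example: decimals_match("0.0488912", "0.0488340", 3) is True, but with k=5 it is False.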
|
Elitemaster97/Simulacion
|
Tarea5/Tarea5.1.py
|
Tarea5.1.py
|
py
| 3,447 |
python
|
es
|
code
| 0 |
github-code
|
6
|
40140601153
|
import streamlit as st
import pandas as pd
st.set_page_config(layout="wide")
col1, col2 = st.columns([3, 1])
option = None
df = pd.read_csv('reuters_summaries.csv')
with col2:
st.write("")
st.write("")
st.write("")
st.write("")
st.write("")
option = st.selectbox('', ['Crude Oil', 'Biofuel'])
with col1:
st.title('REUTERS')
st.divider()
if option == 'Crude Oil':
#df = pd.read_csv('crude_oil_summary_new.csv')
df_crude = df[df['keyword']=='crude oil']
for index, series in df_crude.iterrows():
st.markdown("### [{}]({})".format(series['title'], series['link']))
st.markdown("*{}*".format(series['published']))
st.markdown('**Keywords: {}**'.format(series['keywords']))
st.write(series['summary'])
st.divider()
if option == 'Biofuel':
#df = pd.read_csv('biofuel_summary_new.csv')
df_bio = df[df['keyword']=='biofuel']
for index, series in df_bio.iterrows():
st.markdown("### [{}]({})".format(series['title'], series['link']))
st.markdown("*{}*".format(series['published']))
st.markdown('**Keywords: {}**'.format(series['keywords']))
st.write(series['summary'])
st.divider()
|
Jayanth-Shanmugam/news-articles-summarization
|
pages/Reuters.py
|
Reuters.py
|
py
| 1,302 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21107944564
|
from classification.image_to_array import imageTonpv
from classification.cnn_class import cnn_class
import csv
from os import listdir
from os.path import isfile, join
import cv2
import numpy as np
def finalReport(label_ids = [], cids_test = [], class_count=[], class_viability =[], path = '', model='' ):
viability_ratio = []
num_counting = []
num_viability = []
label_set = sorted(set(label_ids))
for img_ids in label_set:
count = 0
viability = 0
for index, ids in enumerate(label_ids):
if ids == img_ids:
if class_count[index] < class_viability[index]:
class_viability[index] = class_count[index]
count = count + class_count[index]
viability = viability + class_viability[index]
if count < viability:
viability = count
# fix bug
if count == 0:
viability_ratio.append(0)
else:
viability_ratio.append(float(viability/count))
num_counting.append(count)
num_viability.append(viability)
label_format = []
for index, ids in enumerate(label_set):
label_format.append(str(format(ids, '05d')) + '_')
import time
timestr = time.strftime("%Y%m%d-%H%M%S")
out = open(path + '/classification/' + str(timestr) + 'FINAL_REPORT' + model + '_' + 'CNN_csv.csv', 'a',
newline='')
csv_write = csv.writer(out, dialect='excel')
csv_write.writerow(label_format)
csv_write.writerow(viability_ratio)
csv_write.writerow(num_counting)
csv_write.writerow(num_viability)
def saveasSpectrum(label_ids = [], cids_test = [], class_count=[], class_viability =[],path = '', model='' ):
onlyfiles = [f for f in listdir(path) if isfile(join(path, f))]
save_path = path + '/classification/'
DETECTION_test = np.load('classification/npv/VIABILITY/' + model + '/detection.npy')
label_set = sorted(set(label_ids))
for index, folder in enumerate(onlyfiles):
raw = cv2.imread(path + '\\' + folder)
ind = folder.split('RGB')[0]
new_imageBGR = raw
for index_ids, ids in enumerate(label_ids):
if list(label_set)[index] == ids and not len(DETECTION_test[index_ids]) == 0:
count = class_count[index_ids]
viability = class_viability[index_ids]
if count < viability:
viability = count
if count == 0:
green_level = 0
red_level = 255
blue_level = 255
else:
green_level = int(255 * viability / count)
red_level = int(255 * (1 - viability / count))
blue_level = 0
color_spectrum = (blue_level, green_level, red_level)
for position in DETECTION_test[index_ids]:
new_imageBGR[position[0], position[1]] = color_spectrum
cv2.imwrite(save_path + ind + model + '.png', new_imageBGR)
def classifyMain(folder_test = '', folder_train = '', analysis_type = dict()):
# counting model and viability model
if not analysis_type["predict_type"] == 0:
if analysis_type["model_type"] == 1:
# step 1.0 background preprocess and npv convert
imageTonpv(folder_test, 'UNET')
# step 2.0 load model weight and predict
label_ids, cids_test, class_count = cnn_class(folder_test, 'UNET', 'COUNTING')
label_ids, cids_test, class_viability = cnn_class(folder_test, 'UNET', 'VIABILITY')
# step 3.0 save final csv results and live-dead markers
finalReport(label_ids, cids_test, class_count, class_viability, folder_test, 'UNET')
saveasSpectrum(label_ids, cids_test, class_count, class_viability, folder_test, 'UNET')
print("U-NET complete")
elif analysis_type["model_type"] == 0:
# step 1.0 background preprocess and npv convert
imageTonpv(folder_test, 'watershed')
# step 2.0 load model weight and predict
label_ids, cids_test, class_count = cnn_class(folder_test, 'watershed', 'COUNTING')
label_ids, cids_test, class_viability = cnn_class(folder_test, 'watershed', 'VIABILITY')
# step 3.0 save final csv results and live-dead markers
finalReport(label_ids, cids_test, class_count, class_viability, folder_test, 'watershed')
saveasSpectrum(label_ids, cids_test, class_count, class_viability, folder_test, 'WATERSHED')
print("watershed complete")
elif analysis_type["model_type"] == 2:
# step 1.0 background preprocess and npv convert
imageTonpv(folder_test, 'UW')
# step 2.0 load model weight and predict
label_ids, cids_test, class_count = cnn_class(folder_test, 'UW', 'COUNTING')
label_ids, cids_test, class_viability = cnn_class(folder_test, 'UW', 'VIABILITY')
# step 3.0 save final csv results and live-dead markers
finalReport(label_ids, cids_test, class_count, class_viability, folder_test, 'UW')
saveasSpectrum(label_ids, cids_test, class_count, class_viability, folder_test, 'UW')
print("U-NET watershed complete")
print("classify complete")
|
chenxun511happy/Cartilage-Net
|
classification/ClassifyMain.py
|
ClassifyMain.py
|
py
| 5,545 |
python
|
en
|
code
| 1 |
github-code
|
6
|
73573186747
|
import requests
addresses = {
"TXA2WjFc5f86deJcZZCdbdpkpUTKTA3VDM": "energyRateModelContract",
"TSe1pcCnU1tLdg69JvbFmQirjKwTbxbPrG": "sTRXImpl",
"TU3kjFuhtEo42tsCBtfYUAZxoqQ4yuSLQ5": "sTRXProxy",
"TNoHbPuBQrVanVf9qxUsSvHdB2eDkeDAKD": "marketImpl",
"TU2MJ5Veik1LRAgjeSzEdvmDYx7mefJZvd": "marketProxy",
}
json_ori = {
"contractAddress": "",
"contractName": "",
"projectId": 2,
"remark": "",
"accessToken": "tronsmart"
}
for address, name in addresses.items():
json = json_ori.copy()
json['contractAddress'] = address
json['contractName'] = name
resp = requests.post("https://mining.ablesdxd.link" + "/admin/upsertContractIntoWhiteList", json=json)
print(f"{address} {name} {resp.text}")
|
dpneko/pyutil
|
contract_whitelist.py
|
contract_whitelist.py
|
py
| 733 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74306420987
|
from dataclasses import dataclass
from sqlalchemy import (Boolean, Column, DateTime, ForeignKey, Integer,
MetaData, Numeric, String, Table, create_engine)
metadata = MetaData()
@dataclass
class IOLModel:
sql_path: str
def __post_init__(self):
self.metadata = MetaData()
self.model_tables()
self.create_engine()
self.create_database()
def model_tables(self):
"""Create table models"""
self.asset_class_country = Table(
'asset_class_country', self.metadata,
Column('id', Integer(), primary_key=True, autoincrement = True),
Column('asset_class', String(20), nullable=False),
Column('country', String(20), nullable=False)
)
self.fci_info = Table(
'fci_info', self.metadata,
Column('symbol', String(20), primary_key=True, unique=True, nullable=False),
Column('desc', String(50)),
Column('type', String(20)),
Column('adm_type', String(20)),
Column('horizon', String(20)),
Column('profile', String(20)),
Column('yearly_var', Numeric(5,5)),
Column('monthly_var', Numeric(5,5)),
Column('investment', String(300)),
Column('term', String(2)),
Column('rescue', String(2)),
Column('report', String(250)),
Column('regulation', String(250)),
Column('currency', String(20)),
Column('country', String(20)),
Column('market', String(20)),
Column('bloomberg', String(20)),
)
self.screen_last_price = Table(
'screen_last_price', self.metadata,
Column('country', String(20), nullable=False),
Column('asset_class', String(20), nullable=False),
Column('screen', String(20), nullable=False),
Column('symbol', String(20), primary_key=True, unique=True, nullable=False),
Column('desc', String(50)),
Column('date_time', DateTime()),
Column('open', Numeric(12,2)),
Column('high', Numeric(12,2)),
Column('low', Numeric(12,2)),
Column('close', Numeric(12,2)),
Column('bid_q', Numeric(12,2)),
Column('bid_price', Numeric(12,2)),
Column('ask_price', Numeric(12,2)),
Column('ask_q', Numeric(12,2)),
Column('vol', Numeric(12,2)),
)
self.screens_country_instrument = Table(
'screens_country_instrument', self.metadata,
Column('id', Integer(), primary_key=True, autoincrement = True),
Column('country', String(20), nullable=False),
Column('asset_class', String(20), nullable=False),
Column('screen', String(20), nullable=False),
)
self.symbol_daily = Table(
'symbol_daily', self.metadata,
Column('id', Integer(), primary_key=True, autoincrement = True),
Column('symbol', String(20), nullable=False),
Column('market', String(20), nullable=False),
Column('date', DateTime()),
Column('open', Numeric(12,2)),
Column('high', Numeric(12,2)),
Column('low', Numeric(12,2)),
Column('close', Numeric(12,2)),
Column('vol', Numeric(12,2)),
)
self.symbol_info = Table(
'symbol_info', self.metadata,
Column('symbol', String(20), primary_key=True, unique=True, nullable=False),
Column('market', String(20)),
Column('desc', String(50)),
Column('country', String(20)),
Column('type', String(20)),
Column('term', String(2)),
Column('currency', String(20)),
)
self.symbol_last_price = Table(
'symbol_last_price', self.metadata,
Column('id', Integer(), primary_key=True, autoincrement = True),
Column('symbol', String(20)),
Column('type', String(20)),
Column('date_time', DateTime()),
Column('open', Numeric(12,2)),
Column('high', Numeric(12,2)),
Column('low', Numeric(12,2)),
Column('close', Numeric(12,2)),
Column('bid_q', Numeric(12,2)),
Column('bid_price', Numeric(12,2)),
Column('ask_price', Numeric(12,2)),
Column('ask_q', Numeric(12,2)),
Column('vol', Numeric(12,2)),
Column('desc', String(50)),
Column('market', String(20)),
Column('currency', String(20)),
Column('country', String(20)),
Column('term', String(2)),
Column('lote', Numeric(12,2)),
Column('lamina_min', Numeric(12,2)),
Column('q_min', Numeric(12,2)),
Column('shown', Boolean()),
Column('buyable', Boolean()),
Column('sellable', Boolean()),
)
self.symbol_options = Table(
'symbol_options', self.metadata,
Column('underlying', String(20)),
Column('date_time', DateTime()),
Column('symbol', String(20), primary_key=True, unique=True, nullable=False),
Column('type', String(20)),
Column('expire', DateTime()),
Column('days_expire', Numeric(3)),
Column('desc', String(50)),
Column('strike', Numeric(12,2)),
Column('open', Numeric(12,2)),
Column('high', Numeric(12,2)),
Column('low', Numeric(12,2)),
Column('close', Numeric(12,2)),
Column('bid_ask', Numeric(12,2)),
# Column('bid_price', Numeric(12,2)),
# Column('ask_price', Numeric(12,2)),
# Column('ask_q', Numeric(12,2)),
Column('vol', Numeric(12,2)),
Column('var', Numeric(12,2)),
# Column('market', String(20)),
# Column('currency', String(20)),
Column('country', String(20)),
# Column('term', String(2)),
# Column('lote', Numeric(12,2)),
# Column('lamina_min', Numeric(12,2)),
# Column('q_min', Numeric(12,2)),
# Column('shown', Boolean()),
# Column('buyable', Boolean()),
# Column('sellable', Boolean()),
)
def create_engine(self):
"""Create an SQLite DB engine"""
self.engine = create_engine(f'sqlite:///{self.sql_path}')
def create_database(self):
"""Create DataBase from engine"""
self.metadata.create_all(self.engine)
# cookies = Table('cookies', metadata,
# Column('cookie_id', Integer(), primary_key=True),
# Column('cookie_name', String(50), index=True),
# Column('cookie_recipe_url', String(255)),
# Column('cookie_sku', String(55)),
# Column('quantity', Integer()),
# Column('unit_cost', Numeric(12, 2))
# )
# users = Table('users', metadata,
# Column('user_id', Integer(), primary_key=True),
# Column('customer_number', Integer(), autoincrement=True),
# Column('username', String(15), nullable=False, unique=True),
# Column('email_address', String(255), nullable=False),
# Column('phone', String(20), nullable=False),
# Column('password', String(25), nullable=False),
# Column('created_on', DateTime(), default=datetime.now),
# Column('updated_on', DateTime(), default=datetime.now, onupdate=datetime.now)
# )
# orders = Table('orders', metadata,
# Column('order_id', Integer(), primary_key=True),
# Column('user_id', ForeignKey('users.user_id'))
# )
# line_items = Table('line_items', metadata,
# Column('line_items_id', Integer(), primary_key=True),
# Column('order_id', ForeignKey('orders.order_id')),
# Column('cookie_id', ForeignKey('cookies.cookie_id')),
# Column('quantity', Integer()),
# Column('extended_cost', Numeric(12, 2))
# )
# engine = create_engine('sqlite:///:memory:')
# metadata.create_all(engine)
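# Hedged usage sketch (not part of the original file): instantiating the dataclass runs
# __post_init__, which models the tables, builds the engine and creates the SQLite file in
# one step; the "iol_example.db" path below is only a placeholder.
if __name__ == "__main__":
    model = IOLModel(sql_path="iol_example.db")
    print(sorted(model.metadata.tables))  # names of the tables defined above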
|
fscorrales/apys
|
src/apys/models/iol_model.py
|
iol_model.py
|
py
| 7,935 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39386351985
|
from board import Board
from piece import Grid_Point
#variable controlling board size
size = 8
current_player = 'O'
def main():
#create board and current player string
selection = 0
global size
global current_player
#loop until player starts game
while(selection != '1'):
#print welcome message and menu options
print("Welcome to Reversi")
print("Please select a menu option")
print("\t1: Play Game")
print("\t2: Show Rules")
print("\t3: Change Settings \n")
selection = input("Please select: ")
if(selection == '1'):
print("Game Starting... \n")
elif(selection == '2'):
#print rules
print("RULES: ")
print("\tPlayers O and X take turns placing their respective pieces on the board.")
print("\tPieces must be placed on open cell that sandwhiches at least one opponent piece either vertically, horizontally, or diagonally.")
print("\tAll opposing pieces that are sandwhiched flip are flipped over and become friendly pieces.")
print("\tIf a player has no legal moves, their turn is skipped.")
print("\tThe game continues until a player has no pieces left, there are no empty tiles left, or neither player has a legal move.")
print("\tThe player with more pieces on the board at the end wins. \n")
elif(selection == '3'):
#get player choice on starting player and grid size
print("SETTINGS:")
inp = input("Please enter the grid size (default 8): ")
#catch illegal inputs and set it to default if it is
try:
size = int(inp)
if(size <= 4):
raise ValueError
except ValueError:
print("Invalid size entered, set to default of 8")
size = 8
inp = input("Please enter which player starts first O or X (default O): ").upper()
#catch illegal inputs and set it to default if it is
            if(inp != 'X' and inp != 'O'):
print("Invalid starting player, set to default of O")
current_player = 'O'
else:
current_player = inp
else:
print("Unknown selection, please choose again \n")
pass
input("Input any key to continue: ")
#create the board
global brd
brd = Board(size)
#enter the main game loop
gameLoop()
def gameLoop():
global current_player
global brd
#variable for checking if both players have skipped their turns in order to terminate if neither player has moves
turn_skipped = False
while(1):
#print the board
print(brd)
#get the current piece count
o_num, x_num = brd.get_piece_count()
#get all possible moves for the current player
possible_moves = brd.get_available_moves(current_player)
#if a player has no possible moves and the previous player did not skip their turn, skip the current_players turn
if(not possible_moves and not turn_skipped):
turn_skipped = True
print("\nTurn Skipped!\n")
if current_player == 'X':
current_player = 'O'
else:
current_player = 'X'
continue
#if the previous player has skipped their turn or either play has 0 pieces, the game is over so check who wins
elif((not possible_moves and turn_skipped) or o_num == 0 or x_num == 0):
print(f'Current Score: {o_num} O\'s, {x_num} X\'s \n')
if(o_num < x_num):
print('Player X wins!')
quit()
elif(o_num > x_num):
print('Player O wins')
quit()
else:
print('Its a tie!')
quit()
        #reset turn_skipped to false if a player has a legal move
turn_skipped = False
#print out the player, current score, and possible moves
print(f'Current Score: {o_num} O\'s, {x_num} X\'s \n')
print(f'Player {current_player}\'s turn: ')
print(f'Possible Moves: {possible_moves}\n')
desired_position = ()
allowed = False
#get a legal input for a position
while(not allowed):
inp = input("Please enter which position you'd like (row, col) or 'exit' to terminate program:")
#terminate program on exit
if(inp == 'exit'):
print('program terminating...')
quit()
#otherwise split the input
vals = [x.strip() for x in inp.split(',')]
#a proper input is a list with length 2, where each element is a numeric string
if (not (isinstance(vals, list) and len(vals) == 2 and vals[0].isdigit() and vals[1].isdigit())):
print('Please enter a valid input!')
continue
#if the input is legal, convert it to a grid_point
            #grid point auto increments its input for human printing so need to subtract one from the human input which is already in that form
desired_position = Grid_Point(int(vals[0]) - 1, int(vals[1]) - 1)
#if the desired position is in the possible moves, its legal, otherwise they must select a new point
allowed = desired_position in possible_moves
if(not allowed):
print("Please enter a legal move\n")
#add the current player's piece to their desired position. Also handles flipping pieces
brd.add_piece(desired_position, current_player)
#switch the current player
if current_player == 'X':
current_player = 'O'
else:
current_player = 'X'
if __name__ == "__main__":
main()
|
dank-dan-k/Reversi
|
main.py
|
main.py
|
py
| 6,101 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30039234138
|
from django.views.decorators.http import require_http_methods
from django.http import JsonResponse
from django.http import HttpResponse
from .service.login import Login
from .service.report import uploadData
from .service.getdata import getRoadMap
import json
# Create your views here.
@require_http_methods(["GET", "POST"])
def wx_test(request):
response = {}
try:
# json_result = json.loads(request.body)
# print(request.body)
# process.decode_json(json_result)
response['ok'] = 1
except Exception as e:
response['msg'] = str(e)
response['ok'] = 0
return JsonResponse(response)
# Login function
@require_http_methods(["GET"])
def login(request):
response = {}
try:
code = request.GET.get('code')
# print(code)
response = Login(code)
except Exception as e:
response['msg'] = str(e)
response['ok'] = 0
return JsonResponse(response)
# Data upload function
@require_http_methods(["POST"])
def uploaddata(request):
response = {}
try:
json_result = json.loads(request.body)
response = uploadData(json_result['data'])
except Exception as e:
response['msg'] = str(e)
response['ok'] = 0
return JsonResponse(response)
# Road map retrieval function
@require_http_methods(["GET"])
def getroadmap(request):
response = {}
try:
response = getRoadMap()
except Exception as e:
response['msg'] = str(e)
response['ok'] = 0
return JsonResponse(response)
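# Hedged usage sketch (not part of the original file): these views would typically be
# registered in a urls.py similar to the following; the route strings here are assumptions.
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('wx_test/', views.wx_test),
#       path('login/', views.login),
#       path('uploaddata/', views.uploaddata),
#       path('getroadmap/', views.getroadmap),
#   ]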
|
luzy99/road_smoothness_detection
|
road_detect_server/my_server/wx/views.py
|
views.py
|
py
| 1,542 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70102817469
|
from RedditClient import RedditClient
class RedditGold(RedditClient):
def __init__(self):
super().__init__()
self.full_name = None
self.months = None
self.username = None
def generate_body(self):
body = {}
        if self.full_name is not None:
            body['fullname'] = self.full_name
        if self.months is not None:
            body['months'] = self.months
        if self.username is not None:
            body['username'] = self.username
return body
def delete_body(self):
self.full_name = None
self.months = None
self.username = None
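# Hedged usage sketch (assumed; only illustrates the methods defined above):
#   gold = RedditGold()
#   gold.username = 'some_user'
#   gold.months = 1
#   gold.generate_body()   # -> {'months': 1, 'username': 'some_user'}
#   gold.delete_body()     # clears all staged fields again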
|
cthacker-udel/Python-Reddit-API
|
RedditGold.py
|
RedditGold.py
|
py
| 625 |
python
|
en
|
code
| 1 |
github-code
|
6
|
14279541024
|
from collections import defaultdict
from intcode.intcode import read_program, VM
import matplotlib.pyplot as plt
DIRECTION_LEFT = (-1, 0)
DIRECTION_RIGHT = (1, 0)
DIRECTION_UP = (0, -1)
DIRECTION_DOWN = (0, 1)
TURN_LEFT = 0
TURN_RIGHT = 1
COLOR_BLACK = 0
COLOR_WHITE = 1
next_direction_left = {
DIRECTION_UP: DIRECTION_LEFT,
DIRECTION_LEFT: DIRECTION_DOWN,
DIRECTION_DOWN: DIRECTION_RIGHT,
DIRECTION_RIGHT: DIRECTION_UP
}
next_direction_right = {
DIRECTION_UP: DIRECTION_RIGHT,
DIRECTION_RIGHT: DIRECTION_DOWN,
DIRECTION_DOWN: DIRECTION_LEFT,
DIRECTION_LEFT: DIRECTION_UP
}
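# compute_position_to_color runs the Intcode painting robot: at every step it feeds the
# VM the color of the panel under the robot, reads back a paint color and a turn
# instruction, paints the panel, turns, and moves one step; it stops when the program
# halts (signalled here by StopIteration) and returns the mapping of panel -> color.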
def compute_position_to_color(initial_state, initial_color):
    position_to_color = defaultdict(lambda: COLOR_BLACK)
    position = (0, 0)
    direction = DIRECTION_UP
    position_to_color[position] = initial_color
vm = VM(initial_state)
running = True
while running:
try:
current_color = position_to_color[position]
vm.send_input(current_color)
color = vm.get_output()
turn = vm.get_output()
position_to_color[position] = color
direction = next_direction(direction, turn)
position = next_position(position, direction)
except StopIteration:
running = False
return position_to_color
def next_direction(current_direction, turn):
if turn == TURN_LEFT:
return next_direction_left[current_direction]
else:
return next_direction_right[current_direction]
def next_position(position, direction):
dx, dy = direction
x, y = position
return (x + dx, y + dy)
def draw(position_to_color):
white_positions = [position for position, color in position_to_color.items() if color == COLOR_WHITE]
x, y = zip(*white_positions)
plt.scatter(x, y, s=20)
plt.ylim((10, -20))
plt.show()
if __name__ == "__main__":
initial_state = read_program("input.txt")
position_to_color = compute_position_to_color(initial_state, COLOR_BLACK)
print("Part 1:", len(position_to_color))
position_to_color = compute_position_to_color(initial_state, COLOR_WHITE)
draw(position_to_color)
|
bwdvolde/advent-of-code-2019
|
day11/solution.py
|
solution.py
|
py
| 2,178 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36942823350
|
import turtle
window = turtle.Screen()
window.bgcolor("black")
shapeA = turtle.Turtle()
shapeB = turtle.Turtle()
shapeC = turtle.Turtle()
shapeD = turtle.Turtle()
shapeA.sety(100)
shapeA.pencolor("red")
for i in range(3):
shapeA.pensize(3)
shapeA.fd(50)
shapeA.left(120)
shapeB.setx(-100)
shapeB.pencolor("blue")
for i in [1, 2, 3, 4]:
shapeB.pensize(3)
shapeB.fd(50)
shapeB.right(90)
shapeC.pencolor("purple")
shapeC.pensize(3)
shapeC.circle(30)
shapeD.setx(25)
shapeD.sety(-25)
shapeD.pencolor("yellow")
shapeD.pensize(3)
shapeD.fd(100)
window.exitonclick()
|
Sir-Lance/CS1400
|
shit.py
|
shit.py
|
py
| 590 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26969763906
|
import os
from utils.util import make_dir_under_root, read_dirnames_under_root
OUTPUT_ROOT_DIR_NAMES = [
'masked_frames',
'result_frames',
'optical_flows'
]
class RootInputDirectories:
def __init__(
self,
root_videos_dir,
root_masks_dir,
video_names_filename=None
):
self.root_videos_dir = root_videos_dir
self.root_masks_dir = root_masks_dir
if video_names_filename is not None:
with open(video_names_filename, 'r') as fin:
self.video_dirnames = [
os.path.join(root_videos_dir, line.split()[0])
for line in fin.readlines()
]
else:
self.video_dirnames = read_dirnames_under_root(root_videos_dir)
self.mask_dirnames = read_dirnames_under_root(root_masks_dir)
def __len__(self):
return len(self.video_dirnames)
class RootOutputDirectories:
def __init__(
self, root_outputs_dir,
):
self.output_root_dirs = {}
for name in OUTPUT_ROOT_DIR_NAMES:
self.output_root_dirs[name] = \
make_dir_under_root(root_outputs_dir, name)
def __getattr__(self, attr):
if attr in self.output_root_dirs:
return self.output_root_dirs[attr]
else:
raise KeyError(
f"{attr} not in root_dir_names {self.output_root_dirs}")
class VideoDirectories:
def __init__(
self, root_inputs_dirs, root_outputs_dirs, video_name, mask_name
):
self.name = f"video_{video_name}_mask_{mask_name}"
rid = root_inputs_dirs
rod = root_outputs_dirs
self.frames_dir = os.path.join(rid.root_videos_dir, video_name)
self.mask_dir = os.path.join(rid.root_masks_dir, mask_name)
self.masked_frames_dir = os.path.join(rod.masked_frames, self.name)
self.results_dir = os.path.join(rod.result_frames, self.name)
self.flows_dir = os.path.join(rod.optical_flows, video_name)
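# Hedged usage sketch (paths are made up; assumes make_dir_under_root returns the created path):
#   rid = RootInputDirectories('data/videos', 'data/masks')
#   rod = RootOutputDirectories('outputs')
#   vd = VideoDirectories(rid, rod, 'video_0001', 'mask_0001')
#   vd.masked_frames_dir  # -> <outputs>/masked_frames/video_video_0001_mask_mask_0001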
|
amjltc295/Free-Form-Video-Inpainting
|
src/utils/directory_IO.py
|
directory_IO.py
|
py
| 2,029 |
python
|
en
|
code
| 323 |
github-code
|
6
|
70779220668
|
import math
import os
import re
from ast import literal_eval
from dataclasses import dataclass
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from accelerate.logging import get_logger
from accelerate.utils import is_tpu_available
from sklearn.metrics import accuracy_score, average_precision_score, roc_auc_score
logger = get_logger(__name__, "INFO")
@dataclass
class Task:
name: str
num_classes: int
property: str
def get_task(task_name):
if re.findall("mortality|readmission|los", task_name):
return Task(task_name, 1, "binary")
elif re.findall("diagnosis", task_name):
return Task(task_name, 17, "multilabel")
elif re.findall("creatinine|platelets", task_name):
return Task(task_name, 5, "multiclass")
elif re.findall("wbc|bicarbonate|sodium", task_name):
return Task(task_name, 3, "multiclass")
elif re.findall("hb", task_name):
return Task(task_name, 4, "multiclass")
# To Load & Save n_epoch
class N_Epoch:
def __init__(self):
self.epoch = 0
def __call__(self):
return self.epoch
def increment(self):
self.epoch += 1
def state_dict(self):
return {"epoch": self.epoch}
def load_state_dict(self, state_dict):
self.epoch = state_dict["epoch"]
def load_model(path, model):
state_dict = torch.load(path, map_location="cpu")
if "pred_model.model.embed_positions.weight" in state_dict:
del state_dict["pred_model.model.embed_positions.weight"]
model.load_state_dict(state_dict, strict=False)
return model
def get_max_seq_len(args):
df = pd.read_csv(
os.path.join(
args.input_path, f"{args.pred_time}h", f"{args.src_data}_cohort.csv"
),
usecols=["time", "hi_start"],
)
if args.time >= 0:
df["hi_start"] = df["hi_start"].map(literal_eval).map(lambda x: x[args.time])
else:
df["hi_start"] = 0
max_seq_len = df.apply(
lambda x: x["time"].count(",") + 1 - x["hi_start"], axis=1
).max()
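    # round the longest sequence length up to the next multiple of 128 (presumably a padding/efficiency choice)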
max_seq_len = math.ceil(max_seq_len / 128) * 128
return max_seq_len
class EarlyStopping:
"""Early stops the training if validation loss doesn't improve after a given patience."""
def __init__(
self, patience=7, verbose=True, delta=0, compare="increase", metric="avg_auroc"
):
"""
Args:
patience (int): How long to wait after last time validation loss improved.
Default: 7
verbose (bool): If True, prints a message for each validation loss improvement.
Default: False
delta (float): Minimum change in the monitored quantity to qualify as an improvement.
Default: 0
path (str): Path for the checkpoint to be saved to.
Default: 'checkpoint.pt'
"""
self.patience = patience
self.verbose = verbose
self.counter = 0
self.best_score = None
self.early_stop = False
self.target_metric_min = 0
self.delta = delta
self.compare_score = self.increase if compare == "increase" else self.decrease
self.metric = metric
def __call__(self, target_metric):
update_token = False
score = target_metric
if self.best_score is None:
self.best_score = score
if self.compare_score(score):
self.counter += 1
logger.info(
f"EarlyStopping counter: {self.counter} out of {self.patience} ({target_metric:.6f})",
main_process_only=True,
)
if self.counter >= self.patience:
self.early_stop = True
else:
self.best_score = score
if self.verbose:
logger.info(
f"Validation {self.metric} {self.compare_score.__name__}d {self.target_metric_min:.6f} --> {target_metric:.6f})",
main_process_only=True,
)
self.target_metric_min = target_metric
self.counter = 0
update_token = True
return update_token
def increase(self, score):
if score < self.best_score + self.delta:
return True
else:
return False
def decrease(self, score):
if score > self.best_score + self.delta:
return True
else:
return False
def state_dict(self):
return {
"best_score": self.best_score,
"counter": self.counter,
"early_stop": self.early_stop,
"target_metric_min": self.target_metric_min,
}
def load_state_dict(self, state_dict):
self.best_score = state_dict["best_score"]
self.counter = state_dict["counter"]
self.early_stop = state_dict["early_stop"]
self.target_metric_min = state_dict["target_metric_min"]
def reset(self):
self.counter = 0
self.early_stop = False
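# Hedged usage sketch for EarlyStopping (assumed training loop, not from the original code):
#   stopper = EarlyStopping(patience=7, compare="increase", metric="avg_auroc")
#   for epoch in range(max_epochs):
#       ...train and validate...
#       if stopper(val_metrics["avg_auroc"]):  # returns True when the metric improved
#           save_checkpoint()
#       if stopper.early_stop:
#           break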
def log_from_dict(metric_dict, split, n_epoch):
log_dict = {"epoch": n_epoch, split: metric_dict}
return log_dict
class PredLoss:
def __init__(self, args):
self.args = args
# How to drop na in binary??
self.bce = nn.BCELoss(reduction="sum")
self.ce = nn.NLLLoss(reduction="sum", ignore_index=-1)
self.sim = nn.CosineSimilarity(dim=-1)
def __call__(self, output, reprs):
# NOTE: If null label is too many in binary/multilabel, it will be cause a nan loss.
losses, preds, truths, masks = {}, {}, {}, {}
loss_total = 0
        # To support the RAG retriever
tasks = [i for i in self.args.tasks if i.name in output["target"].keys()]
for task in tasks:
pred = output["pred"][task.name]
target = output["target"][task.name]
if task.property == "binary":
# Calculate mask for -1(NaN)
mask = (target != -1).bool()
pred = mask * pred
target = mask * target
loss = self.bce(pred, target)
elif task.property == "multilabel":
# Calculate mask for -1(NaN)
mask = (target.sum(axis=-1) > 0).bool().unsqueeze(-1)
pred = mask * pred
target = mask * target
loss = self.bce(pred, target) / task.num_classes
elif task.property == "multiclass":
mask = (target.sum(axis=-1) > 0).bool().unsqueeze(-1)
nl = (pred + 1e-10).log() # For numerical Stability
pred = mask * pred
nl = mask * nl
target = mask * target
loss = self.ce(nl, target.argmax(dim=1))
else:
raise NotImplementedError()
losses[task.name] = loss / self.args.local_batch_size
preds[task.name] = pred
truths[task.name] = target
masks[task.name] = mask
loss_total += loss
logging_outputs = {
            # Should these be detached or not?
"loss_total": loss_total,
"preds": preds,
"truths": truths,
"losses": losses,
"masks": masks,
}
return loss_total, logging_outputs
class BaseMetric:
def __init__(self, args, target):
self.args = args
self._update_target = target
self.is_tpu = is_tpu_available()
self.reset()
def reset(self):
raise NotImplementedError()
def __call__(self, out, accelerator=None):
raise NotImplementedError()
def get_metrics(self):
raise NotImplementedError()
def gather(self, accelerator, *args):
if accelerator is not None:
args = accelerator.gather_for_metrics(args)
args = [(i if i.shape else i.unsqueeze(0)) for i in args]
if len(args) == 1:
return args[0]
else:
return args
@property
def compare(self):
return "decrease" if "loss" in self.update_target else "increase"
@property
def update_target(self):
return self._update_target
class PredMetric(BaseMetric):
def __init__(self, args, target="avg_auroc"):
self.tasks = args.tasks
super().__init__(args, target)
def reset(self):
self.losses = {k.name: [] for k in self.tasks}
self.truths = {k.name: [] for k in self.tasks}
self.preds = {k.name: [] for k in self.tasks}
self.masks = {k.name: [] for k in self.tasks}
def __call__(self, out, accelerator=None):
# NOTE: On train step, only compute metrics for the master process
tasks = [i for i in self.tasks if i.name in out["preds"].keys()]
for task in tasks:
mask = out["masks"][task.name]
if task.property != "binary":
mask = mask.squeeze(-1)
truth = out["truths"][task.name]
pred = out["preds"][task.name]
loss = out["losses"][task.name]
truth, pred, mask, loss = self.gather(accelerator, truth, pred, mask, loss)
self.truths[task.name].append(truth.detach().cpu().float().numpy())
self.preds[task.name].append(pred.detach().cpu().float().numpy())
self.losses[task.name].append(loss.detach().cpu().float().numpy())
self.masks[task.name].append(mask.detach().cpu().numpy())
def get_metrics(self):
# For REMed
tasks = [i for i in self.tasks if len(self.preds[i.name]) != 0]
for task in tasks:
self.losses[task.name] = np.concatenate(self.losses[task.name], 0)
self.truths[task.name] = np.concatenate(self.truths[task.name], 0)
self.preds[task.name] = np.concatenate(self.preds[task.name], 0)
self.masks[task.name] = np.concatenate(self.masks[task.name], 0)
self.truths[task.name] = self.truths[task.name][self.masks[task.name]]
self.preds[task.name] = self.preds[task.name][self.masks[task.name]]
self.epoch_dict = {}
for task in tasks:
self.epoch_dict[task.name + "_loss"] = np.mean(self.losses[task.name])
self.epoch_dict[task.name + "_auprc"] = self.auprc(task)
self.epoch_dict[task.name + "_auroc"] = self.auroc(task)
self.epoch_dict[task.name + "_acc"] = self.acc(task)
self.epoch_dict["avg_loss"] = np.mean(
[self.epoch_dict[k] for k in self.epoch_dict.keys() if "loss" in k]
)
self.epoch_dict["avg_auprc"] = np.mean(
[self.epoch_dict[k] for k in self.epoch_dict.keys() if "auprc" in k]
)
self.epoch_dict["avg_auroc"] = np.mean(
[self.epoch_dict[k] for k in self.epoch_dict.keys() if "auroc" in k]
)
self.epoch_dict["avg_acc"] = np.mean(
[self.epoch_dict[k] for k in self.epoch_dict.keys() if "acc" in k]
)
self.reset()
return self.epoch_dict
def auroc(self, task):
return roc_auc_score(
self.truths[task.name],
self.preds[task.name],
average="micro",
multi_class="ovr",
)
def auprc(self, task):
return average_precision_score(
self.truths[task.name],
self.preds[task.name],
average="micro",
)
def acc(self, task):
if task.property in ["binary", "multilabel"]:
return accuracy_score(
self.truths[task.name].round(), self.preds[task.name].round()
)
elif task.property == "multiclass":
return accuracy_score(
self.truths[task.name].argmax(axis=1),
self.preds[task.name].argmax(axis=1),
)
else:
raise NotImplementedError()
|
starmpcc/REMed
|
src/utils/trainer_utils.py
|
trainer_utils.py
|
py
| 11,900 |
python
|
en
|
code
| 8 |
github-code
|
6
|
3337680824
|
def multiply(num1: str, num2: str) -> str:
print(int(num1)*int(num2))
if num1 == '0' or num2 == '0':
return '0'
jin_nums = 0
    nums_list = [] # stores all the partial products to be summed
# nn1=list(num1)
# nn1.reverse()
# nn2=list(num2)
# nn2.reverse()
    for i in range(len(num1)-1,-1,-1):
        print('-------')
        nums = []
        for j in range(len(num2)-1,-1,-1):
ss = int(num1[i]) * int(num2[j]) + jin_nums
jin_nums=0
if ss >= 10:
jin_nums = ss // 10
ss = ss % 10
nums.append(ss)
if jin_nums!=0:
nums.append(jin_nums)
jin_nums=0
nums.reverse()
nums_list.append(nums)
print(nums)
nums_sum='0'
count=0
for nums in nums_list:
nums=nums+['0']*count
nums_sum=str_add(nums_sum,nums)
count+=1
return nums_sum
def str_add(s1,s2):
# s1=list(s1)
# s2=list(s2)
# s1.reverse()
# s2.reverse()
if len(s1)>len(s2):
min_s=s2
max_s=s1
else:
min_s = s1
max_s = s2
new_s=[]
jin=0
    #right-align the shorter number against the longer one
    offset=len(max_s)-len(min_s)
    for i in range(len(max_s)-1,-1,-1):
        if i-offset<0:
            mi = 0
        else:
            mi = min_s[i-offset]
ma=max_s[i]
he=int(mi)+int(ma)+jin
if he>=10:
jin=1
he-=10
else:
jin=0
new_s.append(str(he))
if jin!=0:
new_s.append(str(jin))
new_s.reverse()
return ''.join(new_s)
s1="123123"
s2="123"
#print(multiply(s1,s2),'-')
str_add('1024','998')
|
zml1996/learn_record
|
leet_code/字符串相乘.py
|
字符串相乘.py
|
py
| 1,594 |
python
|
en
|
code
| 2 |
github-code
|
6
|
29272947400
|
import pymongo
import json
from pymongo import MongoClient
from bson import json_util
def StatImages():
client = MongoClient('mongodb://0.0.0.0:27017/')
db = client['diplom_mongo_1']
posts = db.posts
    data = list(posts.find({"type": "image"}))  # materialize the cursor so it can be iterated more than once below
count = 0
weight = 0
copies = 0
copiesId = {}
copiesIdList = []
imgFormat = {}
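    # First pass: count the images, accumulate their total weight, and build a per-format
    # histogram; the nested pass groups duplicates (same md5) into copiesId / copiesIdList.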
for item in data:
count += 1
weight += item['weight']
        if (imgFormat.get(item['format'])):
            imgFormat[item['format']] += 1
        else:
            imgFormat[item['format']] = 1
flag = False
for item1 in data:
if item != item1:
if (item['md5'] == item1['md5']) and (item1['id'] not in copiesIdList):
if(flag):
copies += 1
copiesId[item['id']].append(item1['id'])
copiesIdList.append(item1['id'])
else:
copies += 2
copiesId[item['id']] = []
copiesId[item['id']].append(item1['id'])
flag = True
copiesIdList.append(item['id'])
copiesIdList.append(item1['id'])
print(copiesIdList)
|
dethdiez/viditory_analyzer
|
api/stat.py
|
stat.py
|
py
| 964 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39763237198
|
s1=input()
s2=input()
s1=s1.lower()
s2=s2.lower()
b=[]
for i in s1:
if i in s2:
if i==' ':
continue
if ord(i) not in b:
b.append(ord(i))
print(len(b))
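# Example: inputs "Hello" and "World" share the distinct letters {'l', 'o'}, so this prints 2.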
|
gokinahemalatha/codemind-python
|
common_characters_-II.py
|
common_characters_-II.py
|
py
| 193 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5435410186
|
import turtle
import time
import random
abstand = 0.1  # delay between frames in seconds
fenster = turtle.Screen()
fenster.title("Israa @snake spiel")
fenster.setup(width=500, height=500)
fenster.bgcolor("black")
Kopf = turtle.Turtle()
Kopf.color("red")
Kopf.penup()
Kopf.goto(0,0)
Kopf.shape("square")
Kopf.direction="stop"
tortue = turtle.Turtle()
tortue.color("yellow")
tortue.penup()
tortue.goto(0,100)
tortue.shape("turtle")
def move():
if Kopf.direction == "up":
y = Kopf.ycor()
Kopf.sety(y+1)
if Kopf.direction == "right":
x = Kopf.xcor()
Kopf.setx(x+1)
if Kopf.direction == "left":
x = Kopf.xcor()
Kopf.setx(x-1)
if Kopf.direction == "down":
y = Kopf.ycor()
Kopf.sety(y-1)
def up():
Kopf.direction = "up"
def down():
Kopf.direction = "down"
def left():
Kopf.direction = "left"
def right():
Kopf.direction = "right"
fenster.listen()
fenster.onkeypress(up , "Up")
fenster.onkeypress(right , "Right")
fenster.onkeypress(left , "Left")
fenster.onkeypress(down , "Down")
erweiterung = []
while True:
fenster.update()
move()
if Kopf.distance(tortue) < 20:
y = random.randint(-255 , 255)
x = random.randint(-255 , 255)
tortue.goto(x,y)
tortueNew = turtle.Turtle()
tortueNew.color("green")
tortueNew.penup()
tortueNew.shape("square")
erweiterung.append(tortueNew)
for i in range(len(erweiterung) -1, 0 , -1):
x = erweiterung[i-1].xcor()
y = erweiterung[i-1].ycor()
erweiterung[i].goto(x,y)
if len(erweiterung) > 0:
x = Kopf.xcor()
y = Kopf.ycor()
erweiterung[0].goto(x,y)
    time.sleep(abstand)
fenster.mainloop()
|
Israti/MeineProjekte
|
Snack.py
|
Snack.py
|
py
| 1,816 |
python
|
de
|
code
| 0 |
github-code
|
6
|
42432941077
|
import pandas as pd
import numpy as np
import math
class Node:
def __init__(self, fkey=None, fval=None, output=None, children=None):
        self.fkey = fkey # feature name
        self.fval = fval # feature value
        self.output = output # output value at this node
        self.children = {} if children is None else children # child nodes
class ID3:
def __init__(self, eps=0.01):
        self.eps = eps # minimum information gain threshold
        self.tree = None # the decision tree
    # compute the entropy of a label set
def entropy(self, y):
n = len(y)
unique, counts = np.unique(y, return_counts=True)
entropy = 0
for i in range(len(unique)):
pi = counts[i] / n
entropy += -pi * math.log2(pi)
return entropy
    # compute the entropy of the subset where the given feature equals fval
def conditional_entropy(self, X, y, fkey, fval):
instances = X[fkey]
indices = np.where(instances == fval)
y_subset = y[indices]
entropy_subset = self.entropy(y_subset)
return entropy_subset
    # compute the information gain of splitting on a feature
def information_gain(self, X, y, fkey):
n = len(y)
entropy_before = self.entropy(y)
entropy_after = 0
unique, counts = np.unique(X[fkey], return_counts=True)
for i in range(len(unique)):
pi = counts[i] / n
entropy_after += pi * self.conditional_entropy(X, y, fkey, unique[i])
gain = entropy_before - entropy_after
return gain
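    # Information gain computed above: Gain(D, A) = H(D) - sum_v p_v * H(D | A = v),
    # where p_v is the empirical frequency of feature value v in the dataset.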
    # build the decision tree
def fit(self, X, y, feature_names):
self.tree = self._build_tree(X, y, feature_names)
    # recursively build the decision tree
    def _build_tree(self, X, y, feature_names):
        # create the current node
        node = Node()
        # all labels in the current dataset are identical -> leaf node
if len(np.unique(y)) == 1:
node.output = y[0]
return node
        # no features left to split on -> majority-vote leaf
if len(feature_names) == 0:
node.output = np.bincount(y).argmax()
return node
        # choose the feature with the largest information gain
gain_max = -1
for fkey in feature_names:
gain = self.information_gain(X, y, fkey)
if gain > gain_max:
gain_max = gain
best_fkey = fkey
        # stop splitting when the best information gain falls below the threshold
if gain_max < self.eps:
node.output = np.bincount(y).argmax()
return node
        # partition the dataset by the values of the best feature
unique, counts = np.unique(X[best_fkey], return_counts=True)
for i in range(len(unique)):
fval = unique[i]
indices = np.where(X[best_fkey] == fval)
X_subset = X.iloc[indices].drop(columns=[best_fkey])
y_subset = y[indices]
if len(y_subset) == 0:
child_node = Node()
child_node.output = np.bincount(y).argmax()
else:
feature_names_subset = feature_names.copy()
feature_names_subset.remove(best_fkey)
child_node = self._build_tree(X_subset, y_subset, feature_names_subset)
child_node.fkey = best_fkey
child_node.fval = fval
node.children[fval] = child_node
return node
    # predict labels for a set of samples
def predict(self, X_test):
y_pred = []
for i in range(len(X_test)):
node = self.tree
while node.children:
fval = X_test[node.fkey][i]
if fval not in node.children:
break
node = node.children[fval]
y_pred.append(node.output)
return np.array(y_pred)
df = pd.read_csv('data.csv')
X = df.drop(columns=['PlayTennis'])
y = df['PlayTennis']
feature_names = X.columns.tolist()
# train the model
model = ID3()
model.fit(X, y, feature_names)
# predict labels for the test samples
X_test = pd.DataFrame({'Outlook': ['Sunny', 'Rainy', 'Overcast'],
'Temperature': ['Cool', 'Mild', 'Hot'],
'Humidity': ['Normal', 'High', 'High'],
'Wind': ['Weak', 'Strong', 'Weak']})
y_pred = model.predict(X_test)
print(y_pred)
|
mygithub-gyq/gyq2023.github.io
|
聚类算法/ID3.py
|
ID3.py
|
py
| 4,300 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34892249806
|
import IsPrime
def str_to_int(l):
k = []
for i in l:
i = int(i)
k.append(i)
return k
def IsPrimeUserList():
user_input = input("Enter a list of numbers: ")
l = user_input.split(" ")
l = str_to_int(l)
k = []
for i in l:
if IsPrime.IsPrime(i):
k.append(i)
return k
#print(str_to_int([1, 2, 3]))
#print(IsPrimeUserList())
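# Hedged example (assumes IsPrime.IsPrime returns True exactly for prime numbers):
# entering "2 3 4 5 6 7" would make IsPrimeUserList() return [2, 3, 5, 7].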
|
ekloberdanz/python
|
isPrimeUserList.py
|
isPrimeUserList.py
|
py
| 395 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12015658480
|
import numpy as np
import collections # it is optional to use collections
from operator import itemgetter, attrgetter
# prediction function is to predict label of one sample using k-NN
def predict(X_train, y_train, one_sample, k, lambda_value = 1):
one_sample = np.array(one_sample)
X_train = np.array(X_train)
y_distance = []
for vector in X_train:
y_distance.append(distancesquare(vector, one_sample))
index = 0
y_distance = np.array(y_distance)
# np.argsort returns the index of sorted ndarray
index = np.argsort(y_distance)
# by plugging in the index, we sort the distance and the labels by the distance.
y = np.array(y_train[index])
y_distance = y_distance[index]
label = [0 for y in range(10)]
# Simple KNN gives wrong answer. Use Weighted KNN instead
# Normalize the distances of 1~kth NN by dividing the distances with k+1th NN's distance
# https://epub.ub.uni-muenchen.de/1769/1/paper_399.pdf page 7
for i in range(k):
label[y[i]] += weight(y_distance[i]/y_distance[k], lambda_value)
prediction = label.index(max(label))
############################
return prediction
def distancesquare(pos1, pos2):
d = np.sum(np.square(pos1 - pos2))
return d
def weight(distance, lambda_value):
w = np.exp(- distance / lambda_value)
return w
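# Example of the weighting (hedged, lambda_value = 1): a neighbor at normalized distance 0.5
# gets weight exp(-0.5) ~= 0.61 while one at distance 1.0 gets exp(-1.0) ~= 0.37, so closer
# neighbors contribute more strongly to the vote.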
# accuracy function is to return average accuracy for test or validation sets
def accuracy(X_train, y_train, X_test, y_test, k, lambda_value = 1): # You can use def predict above.
## Fill In Your Code Here ##
acc = 0
for test_x, test_y in zip(X_test, y_test):
acc += test_y == predict(X_train, y_train, test_x, k, lambda_value)
acc = acc / len(y_test)
############################
return acc
# stack_accuracy_over_k is to stack accuracy over k. You can use def accuracy above.
def stack_accuracy_over_k(X_train, y_train, X_val, y_val):
accuracies = []
## Fill In Your Code Here ##
for k in range(1, 21):
accuracies.append(accuracy(X_train, y_train, X_val, y_val, k))
############################
assert len(accuracies) == 20
return accuracies
def stack_accuracy_over_lambda(X_train, y_train, X_val, y_val):
accuracies = []
k = 3
lambdas = list(100/(2**i) for i in range(20))
## Fill In Your Code Here ##
for lambda_value in lambdas:
accuracies.append(accuracy(X_train, y_train, X_val, y_val, k, lambda_value = lambda_value))
############################
assert len(accuracies) == 20
return accuracies
def stack_accuracy_on_k_and_lambda(X_train, y_train, X_val, y_val):
accuracies = []
lambdas = list(100/(2**i) for i in range(10))
## Fill In Your Code Here ##
for k in range(1, 21):
for lambda_value in lambdas:
accuracies.append(accuracy(X_train, y_train, X_val, y_val, k, lambda_value = lambda_value))
accuracies = np.array(accuracies)
accuracies = accuracies.reshape((20,10))
############################
assert accuracies.shape == (20, 10)
return accuracies
|
arkincognito/EEE3314-02Assignments
|
P04_2.py
|
P04_2.py
|
py
| 3,080 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72612614909
|
"""trt_face_detection.py
This script demonstrates how to do real-time face detection with
TensorRT optimized retinaface engine.
"""
import os
import cv2
import time
import argparse
import pycuda.autoinit # This is needed for initializing CUDA driver
from utils.camera import add_camera_args, Camera
from utils.display import open_window, set_display, show_fps
from utils.face_detection import TRT_RetinaFace
from utils.prior_box import PriorBox
from data import cfg_mnet
WINDOW_NAME = 'Face_detection'
def parse_args():
"""Parse input arguments."""
desc = ('Capture and display live camera video, while doing '
'real-time face detection with TensorRT optimized '
'retinaface model on Jetson')
parser = argparse.ArgumentParser(description=desc)
parser = add_camera_args(parser)
parser.add_argument(
'-m', '--model', type=str, required=True,
help=('[retinaface]-'
'[{dimension}], where dimension could be a single '
'number (e.g. 320, 640)'))
args = parser.parse_args()
return args
def loop_and_detect(cam, trt_retinaface, priors, cfg):
"""Continuously capture images from camera and do face detection.
# Arguments
cam: the camera instance (video source).
trt_retinaface: the TRT_RetinaFace face detector instance.
priors: priors boxes with retinaface model
cfg: retinaface model parameter configure
"""
full_scrn = False
fps = 0.0
tic = time.time()
while True:
if cv2.getWindowProperty(WINDOW_NAME, 0) < 0:
break
img = cam.read()
if img is None:
break
facePositions, landmarks = trt_retinaface.detect(priors, cfg, img)
for (x1, y1, x2, y2), landmark in zip(facePositions, landmarks):
cv2.rectangle(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
cv2.circle(img, (landmark[0], landmark[1]), 1, (0, 0, 255), 2)
cv2.circle(img, (landmark[2], landmark[3]), 1, (0, 255, 255), 2)
cv2.circle(img, (landmark[4], landmark[5]), 1, (255, 0, 255), 2)
cv2.circle(img, (landmark[6], landmark[7]), 1, (0, 255, 0), 2)
cv2.circle(img, (landmark[8], landmark[9]), 1, (255, 0, 0), 2)
img = show_fps(img, fps)
cv2.imshow(WINDOW_NAME, img)
toc = time.time()
curr_fps = 1.0 / (toc - tic)
# calculate an exponentially decaying average of fps number
fps = curr_fps if fps == 0.0 else (fps*0.95 + curr_fps*0.05)
tic = toc
key = cv2.waitKey(1)
if key == 27: # ESC key: quit program
break
elif key == ord('F') or key == ord('f'): # Toggle fullscreen
full_scrn = not full_scrn
set_display(WINDOW_NAME, full_scrn)
def main():
args = parse_args()
if not os.path.isfile('retinaface/%s.trt' % args.model):
raise SystemExit('ERROR: file (retinaface/%s.trt) not found!' % args.model)
cam = Camera(args)
if not cam.isOpened():
raise SystemExit('ERROR: failed to open camera!')
cfg = cfg_mnet
input_size = args.model.split('-')[-1]
input_shape = (int(input_size), int(input_size))
priorbox = PriorBox(cfg, input_shape)
priors = priorbox.forward()
trt_retinaface = TRT_RetinaFace(args.model, input_shape)
open_window(
WINDOW_NAME, 'Camera TensorRT Face Detection Demo',
cam.img_width, cam.img_height)
loop_and_detect(cam, trt_retinaface, priors, cfg)
cam.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
|
d246810g2000/tensorrt
|
face_recognition/trt_face_detection.py
|
trt_face_detection.py
|
py
| 3,588 |
python
|
en
|
code
| 35 |
github-code
|
6
|
24670591384
|
from AcceptNumbers import *
def main():
num = int(input("Enter number of elements: "))
if(num <= 0):
print("Enter positive number")
return
numList = acceptNNumbers(num)
print("Maximum of given numbers is:", max(numList))
if(__name__ == "__main__"):
main()
|
SnehalKaranje/python
|
list/MaxFromList.py
|
MaxFromList.py
|
py
| 297 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7239186990
|
import cv2 as cv
import numpy as np
import imutils
path = "/home/pks/Downloads/Assignment/IVP/mini project/"
def orientation(image):
'''
Rotate the image before any operation
based on the pos. of roll no. box w.r.t number table
'''
row, col = image.shape[:2]
thresh = cv.Canny(image, 40, 90)
thresh = cv.dilate(thresh, None, iterations=1)
'''Find max (Number table) and 2nd max (Roll no. box) contour'''
cnts = cv.findContours(thresh.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
cnts = sorted(cnts, key=lambda x: cv.contourArea(x), reverse=True)
c1, c2 = cnts[:2]
rect1, rect2 = cv.minAreaRect(c1), cv.minAreaRect(c2)
box1, box2 = cv.boxPoints(rect1), cv.boxPoints(rect2)
# Max
box1 = sorted(box1, key=lambda x: x[0])
r_most1, l_most1 = box1[-1], box1[0]
# 2nd Max
box2 = sorted(box2, key=lambda x: x[0])
r_most2, l_most2 = box2[-1], box2[0]
C1, C2 = min(col, row), max(col, row)
x,y = 600, 800
pts1 = np.float32([[0,row], [0,0], [col,row], [col,0]])
'''Roll no box is at right of number table, rotate left'''
if l_most2[0] >= r_most1[0]:
pts2 = np.float32([[x,y], [0,y], [x,0], [0,0]])
elif r_most2[0] <= l_most1[0]:
'''Opposite, rotate right'''
pts2 = np.float32([[0,0], [x,0], [0,y], [x,y]])
else:
return image
M = cv.getPerspectiveTransform(pts1,pts2)
image = cv.warpPerspective(image,M,(x,y))
return image
'''END'''
def intersection_bw_2_lines(l1, l2):
'''
Returns point of intersection between 2 lines
Parameters:
l1 : line1
l2 : line2
Returns:
x and y coordinate of point of intersection of l1 and l2
'''
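    # Each Hough line in normal form satisfies rho = x*cos(theta) + y*sin(theta); stacking the
    # two equations gives the 2x2 linear system A @ [x, y]^T = B that is solved below.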
rho1, theta1 = l1
rho2, theta2 = l2
A = np.array([
[np.cos(theta1), np.sin(theta1)],
[np.cos(theta2), np.sin(theta2)]
])
B = np.array([[rho1], [rho2]])
x0, y0 = np.linalg.solve(A, B)
x0, y0 = int(np.round(x0)), int(np.round(y0))
return [x0, y0]
def remove_mult_lines(set_of_lines, dist):
'''
Replaces all close lines within some threshold distance with a single one
Parameters:
set_of_lines : rho, theta value of all the lines
dist : maximum allowed distance b/w two seperate lines
Returns:
Well-seperated set of lines (in rho, theta form)
'''
temp, temp_lines = [], []
set_of_lines = sorted(set_of_lines, key=lambda x: (abs(x[0]), x[1]))
temp.append(set_of_lines[0])
for index,point in enumerate(set_of_lines):
if abs(abs(point[0])-abs(temp[-1][0])) <= dist:
temp.append(point)
if index == len(set_of_lines)-1:
temp_lines.append(temp[len(temp)//2])
# temp_lines.append(np.median(temp, axis=0))
else:
temp_lines.append(temp[len(temp)//2])
# temp_lines.append(np.median(temp, axis=0))
temp = [point]
if index == len(set_of_lines)-1:
temp_lines.append(point)
return temp_lines
def extract_roi(image):
'''
Extract the marks-table from the image and divide it into cells
    Parameters:
image : Given image
Returns:
extracted table and four points of each rectangular cell
'''
image = orientation(image.copy())
image = cv.resize(image.copy(), (600, 800))
cv.imshow("org", image)
cv.waitKey(0)
# Convert to gray image
gr_image = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
# Thresholding
thresh = cv.Canny(gr_image, 40, 120)
# Closing
kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (2, 3))
thresh = cv.morphologyEx(thresh, cv.MORPH_CLOSE, kernel)
row, col = image.shape[:2]
cv.imshow("thresh", thresh)
cv.waitKey(0)
# ROI Detection <--start-->
cnts = cv.findContours(thresh.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
c = max(cnts, key=cv.contourArea)
cnt = [list(el[0]) for el in c]
    '''Remove some areas that are not needed'''
b_r = max(cnt, key=lambda x: x[0]+x[1])
b_l = min(cnt, key=lambda x: x[0]-x[1])
b_r[1] = b_r[1] - 35
b_l[1] = b_l[1] - 35
m = (b_l[1]-b_r[1]) / (b_l[0]-b_r[0])
a, b, c = 1, (-1)*m, m*b_l[0] - b_l[1]
org_sign = a*0 + b*0 + c
thresh_r = np.array([np.array([(a*i + b*j + c) for j in range(col)]) for i in range(row)])
if org_sign > 0:
thresh[thresh_r < 0] = 0
else:
thresh[thresh_r > 0] = 0
'''END'''
'''Contour detection for extract the ROI'''
cnts = cv.findContours(thresh.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
c = max(cnts, key=cv.contourArea)
cnt = [list(el[0]) for el in c]
    '''Four corners of the ROI'''
b_r = max(cnt, key=lambda x: x[0]+x[1])
t_l = min(cnt, key=lambda x: x[0]+x[1])
t_r = max(cnt, key=lambda x: x[0]-x[1])
b_l = min(cnt, key=lambda x: x[0]-x[1])
b_r[0], b_r[1] = b_r[0] + 2, b_r[1] + 0
b_l[0], b_l[1] = b_l[0] - 2, b_l[1] + 0
t_r[0], t_r[1] = t_r[0] + 2, t_r[1] - 2
t_l[0], t_l[1] = t_l[0] - 2, t_l[1] - 2
'''Extract only the ROI'''
w,h = 800, 600
# pts1 = np.float32(crop)
pts1 = np.float32([t_l, t_r, b_l, b_r])
# w,h = image.shape
pts2 = np.float32([[0,0], [h,0], [0,w], [h,w]])
M = cv.getPerspectiveTransform(pts1,pts2)
image = cv.warpPerspective(image,M,(h,w))
# ROI Detection <--end-->
cv.imshow("org", image)
cv.waitKey(0)
gr_image = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
# TODO : Canny edge detection parameters
edges = cv.Canny(gr_image, 45, 90)
cv.imshow("edges", edges)
cv.waitKey(0)
# Hough Line Detection
lines = cv.HoughLines(edges,1,np.pi/180,150)
# Removing multiple ambiguous Lines <--start-->
points = np.array([[line[0][0], line[0][1]] for line in lines])
pi_val = np.pi
v1 = list(filter(lambda x: x[1]>=0 and x[1]<pi_val/4, points))
v2 = list(filter(lambda x: x[1]>=(3*pi_val)/4 and x[1]<(5*pi_val)/4, points))
v3 = list(filter(lambda x: x[1]>=(7*pi_val)/4 and x[1]<=pi_val*2, points))
vertical = v1 + v2 + v3
h1 = list(filter(lambda x: x[1]>=pi_val/4 and x[1]<(3*pi_val)/4, points))
h2 = list(filter(lambda x: x[1]>=(5*pi_val)/4 and x[1]<(7*pi_val)/4, points))
horizontal = h1 + h2
h_lines = remove_mult_lines(horizontal, 15)
v_lines = remove_mult_lines(vertical, 15)
lines = h_lines + v_lines
# # Removing multiple ambiguous Lines <--end-->
# Drawing the lines
line_image = image.copy()
for rho, theta in lines:
# rho, theta = line[0]
a = np.cos(theta)
b = np.sin(theta)
x0 = a*rho
y0 = b*rho
x1 = int(x0 + 1000*(-b))
y1 = int(y0 + 1000*(a))
x2 = int(x0 - 1000*(-b))
y2 = int(y0 - 1000*(a))
cv.line(line_image,(x1,y1),(x2,y2),(0,0,255),1)
cv.imshow("lines", line_image)
cv.waitKey(0)
ret_cell = []
# Detecting cells
counter = 1
if len(h_lines) >= 14:
start = 1
else:
start = 0
for i in range(start,len(h_lines)-1):
for j in range(1,len(v_lines)-1):
hl1, hl2 = h_lines[i], h_lines[i+1]
vl1, vl2 = v_lines[j], v_lines[j+1]
p1 = intersection_bw_2_lines(hl1, vl1)
p2 = intersection_bw_2_lines(hl1, vl2)
p3 = intersection_bw_2_lines(hl2, vl1)
p4 = intersection_bw_2_lines(hl2, vl2)
ret_cell.append([p1, p2, p3, p4])
# cell = image[p1[1]:p3[1]+1, p1[0]:p2[0]+1]
# cv.imwrite(path + "img" + str(counter) + ".jpg", cell)
# counter = counter + 1
cv.destroyAllWindows()
return image, ret_cell
|
pritamksahoo/III-IV-YEAR-Assignments
|
IVP/extract_ROI.py
|
extract_ROI.py
|
py
| 8,042 |
python
|
en
|
code
| 2 |
github-code
|
6
|
21071664643
|
from dataclasses import dataclass
from typing import Union
import numpy as np
from matplotlib import pyplot as plt
@dataclass
class SpeakerSegment:
start: int = 0
end: Union[int, None] = None
@dataclass
class SplitStuff4Tw:
threshold_value: float
split_index: int
class CustomSegmentationStrategy:
def __init__(self, analyze_only_seconds: int = 30):
self.analyze_only_seconds = analyze_only_seconds
self.min_seconds_preferred = 6
self.max_seconds_preferred = 15
self.sampling_rate = 16000
self.step_size = 200
self.step_size_seconds = self.step_size / 16000
self.number_steps = int(self.analyze_only_seconds / self.step_size_seconds)
self.median_probs = None
self.trig_sum = None
self.silence_seconds = 0.3
self.silence_window_nr_steps = int(self.silence_seconds / self.step_size_seconds)
self.trigger_window_seconds = 4.0
self.trigger_window_nr_steps = int(self.trigger_window_seconds / self.step_size_seconds)
def is_silence(self, buffer_window):
if np.mean(buffer_window) < self.trig_sum:
return True
return False
def is_above_threshold(self, buffer_window):
if np.mean(buffer_window) > self.trig_sum:
return True
return False
def convert_steps_to_samples(self, steps):
# 1 step is 200 samples or self.step_size
return steps * self.step_size
def create_better_split_long_length(self, buffer):
mid_of_clip = int(len(buffer) / 2)
# 2 seconds each side
two_seconds = 2 * 16000 / self.step_size
thresholds = []
for step_range in range(int(mid_of_clip - two_seconds), int(mid_of_clip + two_seconds),
self.silence_window_nr_steps):
            threshold_value = np.mean(buffer[step_range:step_range + self.silence_window_nr_steps])
thresholds.append(SplitStuff4Tw(split_index=int(step_range + self.silence_window_nr_steps / 2),
threshold_value=threshold_value))
best_split = sorted(thresholds, key=lambda x: x.threshold_value, reverse=False)[0].split_index
return best_split
def create_better_split_short_length(self):
pass
def segment(self, speaker_vads: np.ndarray):
self.median = np.median(speaker_vads)
self.trig_sum = 0.89 * self.median + 0.08
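        # adaptive speech threshold derived from the median VAD probability; the 0.89 / 0.08
        # constants appear to be hand-tuned (they resemble thresholds used in common VAD post-processing)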
final_segments = []
is_speech = False
current_buffer = []
temp_speaker_values = None
for i in range(len(speaker_vads)):
current_activation = speaker_vads[i]
current_buffer.append(current_activation)
if not len(current_buffer) >= self.trigger_window_nr_steps:
continue
if not is_speech and self.is_above_threshold(current_buffer):
is_speech = True
temp_speaker_values = SpeakerSegment(start=self.convert_steps_to_samples(i - len(current_buffer) + 1))
elif is_speech:
                # We are in speech: check whether the most recent steps have fallen to silence
                if self.is_silence(buffer_window=current_buffer[-self.silence_window_nr_steps:]):
if len(current_buffer) > self.sampling_rate * self.max_seconds_preferred / self.step_size:
# find_better split
# Todo: Do this recursively
split_index = self.create_better_split_long_length(buffer=current_buffer)
temp_speaker_values.end = self.convert_steps_to_samples(
i - (len(current_buffer) - split_index) - 1)
final_segments.append(temp_speaker_values)
temp_speaker_values = SpeakerSegment(
start=self.convert_steps_to_samples(i - (len(current_buffer) - split_index) + 1),
end=self.convert_steps_to_samples(i))
final_segments.append(temp_speaker_values)
temp_speaker_values = None
is_speech = False
current_buffer = []
elif len(current_buffer) < self.sampling_rate * self.min_seconds_preferred / self.step_size:
pass #Since we want at least x seconds, we continue here
else:
temp_speaker_values.end = self.convert_steps_to_samples(i)
final_segments.append(temp_speaker_values)
temp_speaker_values = None
is_speech = False
current_buffer = []
else:
# If not above threshold, then keep window constant
current_buffer.pop(0)
return final_segments
def plot_VAD(self, array_yo):
x = [self.step_size_seconds * i for i in range(self.number_steps)]
plt.plot(x, array_yo[:self.number_steps])
plt.show()
|
centre-for-humanities-computing/Gjallarhorn
|
data_processing/custom_segmentation.py
|
custom_segmentation.py
|
py
| 5,069 |
python
|
en
|
code
| 1 |
github-code
|
6
|