seq_id (string, 7-11 chars) | text (string, 156-1.7M chars) | repo_name (string, 7-125 chars) | sub_path (string, 4-132 chars) | file_name (string, 4-77 chars) | file_ext (string, 6 classes) | file_size_in_byte (int64, 156-1.7M) | program_lang (string, 1 class) | lang (string, 38 classes) | doc_type (string, 1 class) | stars (int64, 0-24.2k, nullable) | dataset (string, 1 class) | pt (string, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---
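The records below follow this schema, one field per column. As a minimal sketch (not part of the dataset itself) of how such a dump could be filtered on these columns, assuming it has been exported as JSON Lines (the file name github_code_dump.jsonl is hypothetical):

import pandas as pd

# one record per line; column names match the schema above
rows = pd.read_json("github_code_dump.jsonl", lines=True)

# keep small Python files from repositories with at least one star
small_py = rows[
    (rows["file_ext"] == "py")
    & (rows["file_size_in_byte"] < 1000)
    & (rows["stars"].fillna(0) >= 1)
]
print(small_py[["repo_name", "sub_path", "file_size_in_byte", "stars"]])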
74798420027 |
import matplotlib.pyplot as plt
import pandas as pd
from matplotlib.cm import get_cmap

# Load the per-hop traceroute measurements (read_csv already returns a DataFrame)
data = pd.read_csv("./output_geo.csv")
df = pd.DataFrame(data)
fig, ax = plt.subplots()
# get a color map with one distinct color per bar
cmap = get_cmap("tab20", 28)  # type: matplotlib.colors.ListedColormap
colors = cmap.colors  # type: list
ips = df['ip']
mean_rtt = df['mean_rtt']
# one bar per hop IP, bar height = mean round-trip time
ax.bar(ips, mean_rtt, color=colors)
ax.set_ylabel('Mean RTT in ms')
ax.set_title('Mean RTT for each hop between ips')
plt.xticks(rotation=15)
plt.show()
| LisandroDiMeo/tp_traceroute | create_graph_rtt.py | create_graph_rtt.py | py | 499 | python | en | code | 0 | github-code | 6 |
14405820391 |
# coding=utf-8
from django.contrib.admin import ModelAdmin, site

from models import News


class NewsAdmin(ModelAdmin):
    # columns shown in the admin change list for scraped match records
    list_display = ('id', 'match_type', 'game_start_time', 'end_score', 'middle_score', 'status', 'team1', 'score',
                    'team2', 'yapan', 'yapanSB', 'daxiaopan', 'daxiaopanSB', 'findex', 'create_time')
    ordering = ('id',)
    list_per_page = 300
    list_filter = ('create_time',)
    search_fields = ['team1', 'team2', 'findex', 'score']


site.register(News, NewsAdmin)
| xiaoqiu206/football | spider/admin.py | admin.py | py | 507 | python | en | code | 36 | github-code | 6 |
25911864922 |
import math

from Solution import Solution


class P003(Solution):
    def is_prime(self, number):
        if number % 2 == 0:
            return False
        upper_limit = math.floor(math.sqrt(number))
        if upper_limit % 2 == 0:
            upper_limit -= 1
        # only odd candidate divisors need to be checked
        for i in range(upper_limit, 1, -2):
            if number % i == 0:
                return False
        return True

    def solve(self):
        self.problem_number = 3
        number = 600851475143
        upper_limit = math.floor(math.sqrt(number))
        if upper_limit % 2 == 0:
            upper_limit -= 1
        possible_factor = upper_limit
        result = number
        # walk down over odd candidates; the first prime divisor found is the largest prime factor
        while possible_factor > 1:
            if (number % possible_factor == 0) and self.is_prime(possible_factor):
                result = possible_factor
                break
            possible_factor -= 2
        return result


def main():
    P003().run()


if __name__ == "__main__":
    main()
| TalaatHarb/project-euler-100 | python-project-euler-100/p003.py | p003.py | py | 947 | python | en | code | 2 | github-code | 6 |
26349073306 |
import functools
from enum import Enum
import json
import re
class SCJDEntry:
def __init__(self):
self.data = {}
def set_title(self, title):
self.data['title'] = title
def set_id(self, idx):
self.data['idx'] = idx
def glue_pronounce(self, prn):
if 'prn' in self.data:
self.data['prn'] += prn
else:
self.data['prn'] = prn
def push_kanji(self, kanji):
if 'kanjis' in self.data:
self.data['kanjis'].append(kanji)
else:
self.data['kanjis'] = [kanji]
def push_void_definition(self):
if 'defs' in self.data:
self.data['defs'].append({})
else:
self.data['defs'] = [{}]
def set_definition_order(self, order):
self.data['defs'][-1]['order'] = order
def glue_definition_indicator(self, ind):
if 'defs' in self.data:
if 'ind' in self.data['defs'][-1]:
self.data['defs'][-1]['ind'] += ind
else:
self.data['defs'][-1]['ind'] = ind
def push_definition_chinese_translation(self, trans):
if 'chi_transs' in self.data['defs'][-1]:
self.data['defs'][-1]['chi_transs'].append(trans)
else:
self.data['defs'][-1]['chi_transs'] = [trans]
def push_definition_english_translation(self, trans):
if 'eng_transs' in self.data['defs'][-1]:
self.data['defs'][-1]['eng_transs'].append(trans)
else:
self.data['defs'][-1]['eng_transs'] = [trans]
def push_void_definition_sentence_example(self):
if 'sent_exs' in self.data['defs'][-1]:
self.data['defs'][-1]['sent_exs'].append({})
else:
self.data['defs'][-1]['sent_exs'] = [{}]
def glue_definition_sentence_example(self, jpn_sent, chi_sent):
if jpn_sent is not None:
if 'jpn_sent' in self.data['defs'][-1]['sent_exs'][-1]:
self.data['defs'][-1]['sent_exs'][-1]['jpn_sent'] += jpn_sent
else:
self.data['defs'][-1]['sent_exs'][-1]['jpn_sent'] = jpn_sent
if chi_sent is not None:
if 'chi_sent' in self.data['defs'][-1]['sent_exs'][-1]:
self.data['defs'][-1]['sent_exs'][-1]['chi_sent'] += chi_sent
else:
self.data['defs'][-1]['sent_exs'][-1]['chi_sent'] = chi_sent
def push_void_phrase(self):
if 'phrs' in self.data:
self.data['phrs'].append({})
else:
self.data['phrs'] = [{}]
def set_phrase_idx(self, idx):
self.data['phrs'][-1]['idx'] = idx
def glue_phrase_first_title(self, title):
if 'titles' in self.data['phrs'][-1]:
self.data['phrs'][-1]['titles'][0] += title
else:
self.data['phrs'][-1]['titles'] = [title]
def push_phrase_title(self, title):
self.data['phrs'][-1]['titles'].append(title)
def push_void_phrase_section(self):
if 'secs' in self.data['phrs'][-1]:
self.data['phrs'][-1]['secs'].append({})
else:
self.data['phrs'][-1]['secs'] = [{}]
def push_phrase_section_translation(self, trans):
if 'transs' in self.data['phrs'][-1]['secs'][-1]:
self.data['phrs'][-1]['secs'][-1]['transs'].append(trans)
else:
self.data['phrs'][-1]['secs'][-1]['transs'] = [trans]
def push_void_phrase_section_sentence_example(self):
if 'sent_exs' in self.data['phrs'][-1]['secs'][-1]:
self.data['phrs'][-1]['secs'][-1]['sent_exs'].append({})
else:
self.data['phrs'][-1]['secs'][-1]['sent_exs'] = [{}]
def glue_phrase_section_sentence_example(self, jpn_sent, chi_sent):
if jpn_sent is not None:
if 'jpn_sent' in self.data['phrs'][-1]['secs'][-1]['sent_exs'][-1]:
self.data['phrs'][-1]['secs'][-1]['sent_exs'][-1]['jpn_sent'] += jpn_sent
else:
self.data['phrs'][-1]['secs'][-1]['sent_exs'][-1]['jpn_sent'] = jpn_sent
if chi_sent is not None:
if 'chi_sent' in self.data['phrs'][-1]['secs'][-1]['sent_exs'][-1]:
self.data['phrs'][-1]['secs'][-1]['sent_exs'][-1]['chi_sent'] += chi_sent
else:
self.data['phrs'][-1]['secs'][-1]['sent_exs'][-1]['chi_sent'] = chi_sent
def get_data(self):
return self.data
class SCJDStateMachine:
class Node(Enum):
IGNORE = 0
ROOT = 1
D_ENTRY = 2
D_PRN = 3
D_DEF = 4
A = 5
HWG_SPAN = 10
HW_SPAN = 11
HV_SPAN = 12
GRAMB_SPAN = 13
SEMB_SPAN = 14
TRG_SPAN = 15
OUP_LABEL_SPAN = 16
TRANS_SPAN = 17
IDMB_SPAN = 18
IDMSEC_SPAN = 19
IDM_SPAN = 20
EXG_SPAN = 21
EX_SPAN = 22
IND_SPAN = 23
CB_SPAN = 24
CSEC_SPAN = 25
CW_SPAN = 26
CV_SPAN = 27
class State(Enum):
NUMBNESS = 0
BEGIN = 1
GRAMB = 10
GRAMB_SEMB = 11
GRAMB_OUP_LABEL = 12
GRAMB_TRG_AFTER_OUP_LABEL = 13
GRAMB_EXG = 14
IDMB = 20
IDMB_FIRST_IDM = 21
IDMB_NOT_FIRST_IDM = 22
IDMB_SEMB = 23
IDMB_EXG = 24
CB = 30
CB_GRAMB = 31
CB_SEMB = 32
CB_EXG = 33
def get_gramb_cluster():
return [SCJDStateMachine.State.GRAMB, SCJDStateMachine.State.GRAMB_SEMB, SCJDStateMachine.State.GRAMB_OUP_LABEL, SCJDStateMachine.State.GRAMB_EXG, SCJDStateMachine.State.GRAMB_TRG_AFTER_OUP_LABEL]
def get_idmb_cluster():
return [SCJDStateMachine.State.IDMB, SCJDStateMachine.State.IDMB_FIRST_IDM, SCJDStateMachine.State.IDMB_NOT_FIRST_IDM, SCJDStateMachine.State.IDMB_SEMB, SCJDStateMachine.State.IDMB_EXG]
def get_cb_cluster():
return [SCJDStateMachine.State.CB, SCJDStateMachine.State.CB_GRAMB, SCJDStateMachine.State.CB_SEMB, SCJDStateMachine.State.CB_EXG]
IGNORE_SPAN = {'hvg', 'gp', 'x_xoh', 'ty_pinyin', 'x_xdh', 'sn', 'gl', 'cwg', 'cvg', 'tail', 'ty_日中比較', 'x_xopt', 'pr', 'ty_参考', 'ty_参考参照', 'ty_項目参照', 'ty_注意', 'gr', 'ty_文化', 'ph', 'xr', 'xrlabelGroup', 'xrlabel', 'underline'}
INHERIT_SPAN = {'rf', 'tg_ind', 't_fld', 'subEnt'}
def __init__(self):
self.reinit()
def reinit(self):
self.stk = [SCJDStateMachine.Node.ROOT]
self.sta = SCJDStateMachine.State.BEGIN
def get_node(self):
return self.stk[-1]
def get_state(self):
return self.sta
def push_node(self, node):
self.stk.append(node)
def pop_node(self):
return self.stk.pop()
def numb(self):
self.sta = SCJDStateMachine.State.NUMBNESS
def is_numb(self):
return self.get_state() == SCJDStateMachine.State.NUMBNESS
def startelement_move(self, tag, attrs):
if tag == 'd:entry':
self.push_node(SCJDStateMachine.Node.D_ENTRY)
elif tag == 'span':
attrs_keys = attrs.getQNames()
if 'class' in attrs_keys:
attrs_class_values = attrs['class'].split(' ')
if not SCJDStateMachine.INHERIT_SPAN.isdisjoint(attrs_class_values):
self.push_node(self.get_node())
elif not SCJDStateMachine.IGNORE_SPAN.isdisjoint(attrs_class_values):
self.push_node(SCJDStateMachine.Node.IGNORE)
elif 'hwg' in attrs_class_values:
self.push_node(SCJDStateMachine.Node.HWG_SPAN)
elif 'hw' in attrs_class_values:
self.push_node(SCJDStateMachine.Node.HW_SPAN)
elif 'hv' in attrs_class_values:
self.push_node(SCJDStateMachine.Node.HV_SPAN)
elif 'gramb' in attrs_class_values:
self.push_node(SCJDStateMachine.Node.GRAMB_SPAN)
if self.get_state() == SCJDStateMachine.State.BEGIN:
self.sta = SCJDStateMachine.State.GRAMB
elif self.get_state() in SCJDStateMachine.State.get_cb_cluster():
self.sta = SCJDStateMachine.State.CB_GRAMB
else:
raise RuntimeError(f'{self.get_state()}')
elif 'semb' in attrs_class_values:
self.push_node(SCJDStateMachine.Node.SEMB_SPAN)
if self.get_state() in SCJDStateMachine.State.get_gramb_cluster():
self.sta = SCJDStateMachine.State.GRAMB_SEMB
elif self.get_state() in SCJDStateMachine.State.get_idmb_cluster():
self.sta = SCJDStateMachine.State.IDMB_SEMB
elif self.get_state() in SCJDStateMachine.State.get_cb_cluster():
self.sta = SCJDStateMachine.State.CB_SEMB
else:
raise RuntimeError(f'{self.get_state()}')
elif 'trg' in attrs_class_values:
"""
Generally, there is only one "oup_label" span in "gramb-semb" span, except in some rare cases.
This happens when there is more than one kinds of translation in one "gramb-semb" span.
An example is where id = j_CRJC000115, and title = 相手役.
That "[芝居など] 配角" and "[ダンス] 舞伴" show up here is weird.
And this is the reason why I put "tg_ind" span into INHERIT_SPAN instead of IGNORE_SPAN.
Otherwise, key of ind in entry will become "芝居などダンス" instead of "[芝居など][ダンス]".
"""
if 'x_xd2' in attrs_class_values:
if self.get_state() == SCJDStateMachine.State.GRAMB_OUP_LABEL:
self.sta = SCJDStateMachine.State.GRAMB_TRG_AFTER_OUP_LABEL
elif self.get_state() == SCJDStateMachine.State.GRAMB_EXG:
pass
elif self.get_state() == SCJDStateMachine.State.GRAMB_SEMB:
pass
elif self.get_state() == SCJDStateMachine.State.GRAMB_TRG_AFTER_OUP_LABEL:
pass
else:
raise RuntimeError(f'{self.get_state()}')
self.push_node(SCJDStateMachine.Node.TRG_SPAN)
elif 'oup_label' in attrs_class_values:
self.push_node(SCJDStateMachine.Node.OUP_LABEL_SPAN)
if self.get_state() == SCJDStateMachine.State.GRAMB_SEMB:
self.sta = SCJDStateMachine.State.GRAMB_OUP_LABEL
elif self.get_state() == SCJDStateMachine.State.GRAMB_TRG_AFTER_OUP_LABEL:
self.sta = SCJDStateMachine.State.GRAMB_OUP_LABEL
"""
Generally, there is no "oup_label" span in "idmb" span, except in some rare cases.
An example is where id = j_CRJC010600, and title = 塞翁が馬.
"""
elif self.get_state() == SCJDStateMachine.State.IDMB_SEMB:
pass
else:
raise RuntimeError(f'{self.get_state()}')
elif 'trans' in attrs_class_values:
self.push_node(SCJDStateMachine.Node.TRANS_SPAN)
elif 'idmb' in attrs_class_values:
self.push_node(SCJDStateMachine.Node.IDMB_SPAN)
self.sta = SCJDStateMachine.State.IDMB
elif 'idmsec' in attrs_class_values:
self.push_node(SCJDStateMachine.Node.IDMSEC_SPAN)
elif 'idm' in attrs_class_values:
self.push_node(SCJDStateMachine.Node.IDM_SPAN)
if self.get_state() == SCJDStateMachine.State.IDMB or self.get_state() == SCJDStateMachine.State.IDMB_EXG or self.get_state() == SCJDStateMachine.State.IDMB_SEMB:
self.sta = SCJDStateMachine.State.IDMB_FIRST_IDM
elif self.get_state() == SCJDStateMachine.State.IDMB_FIRST_IDM:
self.sta = SCJDStateMachine.State.IDMB_NOT_FIRST_IDM
elif 'exg' in attrs_class_values:
self.push_node(SCJDStateMachine.Node.EXG_SPAN)
if self.get_state() in SCJDStateMachine.State.get_gramb_cluster():
self.sta = SCJDStateMachine.State.GRAMB_EXG
elif self.get_state() in SCJDStateMachine.State.get_idmb_cluster():
self.sta = SCJDStateMachine.State.IDMB_EXG
elif self.get_state() in SCJDStateMachine.State.get_cb_cluster():
self.sta = SCJDStateMachine.State.CB_EXG
else:
raise RuntimeError(f'{self.get_state()}')
elif 'ex' in attrs_class_values:
self.push_node(SCJDStateMachine.Node.EX_SPAN)
elif 'ind' in attrs_class_values:
self.push_node(SCJDStateMachine.Node.IND_SPAN)
elif 'fld' in attrs_class_values:
self.push_node(SCJDStateMachine.Node.IND_SPAN)
elif 'cb' in attrs_class_values:
self.push_node(SCJDStateMachine.Node.CB_SPAN)
self.sta = SCJDStateMachine.State.CB
elif 'csec' in attrs_class_values:
self.push_node(SCJDStateMachine.Node.CSEC_SPAN)
elif 'cw' in attrs_class_values:
self.push_node(SCJDStateMachine.Node.CW_SPAN)
elif 'cv' in attrs_class_values:
self.push_node(SCJDStateMachine.Node.CV_SPAN)
else:
raise RuntimeError(f"SPAN with {attrs_class_values} in class key is not defined")
else:
raise RuntimeError(f"SPAN with {attrs_keys} key is not defined")
elif tag == 'd:prn':
self.push_node(SCJDStateMachine.Node.D_PRN)
elif tag == 'd:def':
self.push_node(SCJDStateMachine.Node.D_DEF)
elif tag == 'a':
self.push_node(SCJDStateMachine.Node.A)
else:
raise RuntimeError(f"TAG {tag} is not defined")
def endelement_move(self, tag):
self.pop_node()
def startdocument_move(self):
self.reinit()
def enddocument_move(self):
pass
def log(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if args[0].debug:
if func.__name__ == 'startElement':
print(f'SM: {args[0].sm.stk}, State: {args[0].sm.sta}')
print(f'Tag: {args[1]}, class: {args[2].getValueByQName("class") if "class" in args[2].getQNames() else None}')
print(f'{args[0].entry.get_data()}')
print(f'==========')
elif func.__name__ == 'endElement':
print(f'Tag: {args[1]}')
print(f'==========')
elif func.__name__ == 'characters':
print(f'Chars: {args[1]}')
print(f'==========')
return func(*args, **kwargs)
return wrapper
class SCJDController:
def __init__(self, debug=False):
self.reinit()
self.debug = debug
def reinit(self):
self.sm = SCJDStateMachine()
self.entry = SCJDEntry()
def get_entry(self):
return self.entry.data
@log
def startElement(self, tag, attrs):
if self.sm.is_numb():
return
self.sm.startelement_move(tag, attrs)
if self.sm.get_node() == SCJDStateMachine.Node.D_ENTRY:
if re.search("^j_CRJC.*", attrs['id']) is not None:
self.entry.set_title(attrs['d:title'])
self.entry.set_id(attrs['id'])
else:
self.sm.numb()
elif self.sm.get_node() == SCJDStateMachine.Node.SEMB_SPAN:
if self.sm.get_state() in SCJDStateMachine.State.get_gramb_cluster():
self.entry.push_void_definition()
if 'ord' in attrs.getQNames():
self.entry.set_definition_order(attrs['ord'])
else:
self.entry.set_definition_order('1')
elif self.sm.get_state() in SCJDStateMachine.State.get_idmb_cluster():
self.entry.push_void_phrase_section()
elif self.sm.get_state() in SCJDStateMachine.State.get_cb_cluster():
self.entry.push_void_phrase_section()
else:
raise RuntimeError(f"Node {self.sm.get_node()} with State {self.sm.get_state()} startElement function is not defined")
elif self.sm.get_node() == SCJDStateMachine.Node.IDMSEC_SPAN:
self.entry.push_void_phrase()
self.entry.set_phrase_idx(attrs['id'])
elif self.sm.get_node() == SCJDStateMachine.Node.EXG_SPAN:
if self.sm.get_state() in SCJDStateMachine.State.get_idmb_cluster():
self.entry.push_void_phrase_section_sentence_example()
elif self.sm.get_state() in SCJDStateMachine.State.get_gramb_cluster():
self.entry.push_void_definition_sentence_example()
elif self.sm.get_state() in SCJDStateMachine.State.get_cb_cluster():
self.entry.push_void_phrase_section_sentence_example()
else:
raise RuntimeError(f"Node {self.sm.get_node()} with State {self.sm.get_state()} startElement function is not defined")
elif self.sm.get_node() == SCJDStateMachine.Node.CSEC_SPAN:
self.entry.push_void_phrase()
self.entry.set_phrase_idx(attrs['id'])
@log
def endElement(self, tag):
if self.sm.is_numb():
return
self.sm.endelement_move(tag)
@log
def characters(self, chars):
if self.sm.is_numb():
return
def process_chars(chars):
return chars.strip()
if self.sm.get_node() == SCJDStateMachine.Node.HW_SPAN:
self.entry.glue_pronounce(process_chars(chars))
elif self.sm.get_node() == SCJDStateMachine.Node.HV_SPAN:
self.entry.push_kanji(process_chars(chars))
elif self.sm.get_node() == SCJDStateMachine.Node.TRANS_SPAN:
if self.sm.get_state() == SCJDStateMachine.State.GRAMB_SEMB:
self.entry.push_definition_chinese_translation(process_chars(chars))
elif self.sm.get_state() == SCJDStateMachine.State.GRAMB_TRG_AFTER_OUP_LABEL:
self.entry.push_definition_chinese_translation(process_chars(chars))
elif self.sm.get_state() == SCJDStateMachine.State.GRAMB_OUP_LABEL:
self.entry.push_definition_english_translation(process_chars(chars))
elif self.sm.get_state() == SCJDStateMachine.State.GRAMB_EXG:
self.entry.glue_definition_sentence_example(None, process_chars(chars))
elif self.sm.get_state() == SCJDStateMachine.State.IDMB_SEMB:
self.entry.push_phrase_section_translation(chars.strip())
elif self.sm.get_state() == SCJDStateMachine.State.IDMB_EXG:
self.entry.glue_phrase_section_sentence_example(None, chars.strip())
elif self.sm.get_state() == SCJDStateMachine.State.CB_SEMB:
self.entry.push_phrase_section_translation(chars.strip())
elif self.sm.get_state() == SCJDStateMachine.State.CB_EXG:
self.entry.glue_phrase_section_sentence_example(None, chars.strip())
else:
raise RuntimeError(f"Node {self.sm.get_node()} with State {self.sm.get_state()} characters function is not defined")
elif self.sm.get_node() == SCJDStateMachine.Node.IDM_SPAN:
if self.sm.get_state() == SCJDStateMachine.State.IDMB_FIRST_IDM:
self.entry.glue_phrase_first_title(process_chars(chars))
elif self.sm.get_state() == SCJDStateMachine.State.IDMB_NOT_FIRST_IDM:
self.entry.push_phrase_title(process_chars(chars))
else:
raise RuntimeError(f"Node {self.sm.get_node()} with State {self.sm.get_state()} characters function is not defined")
elif self.sm.get_node() == SCJDStateMachine.Node.EX_SPAN:
if self.sm.get_state() == SCJDStateMachine.State.IDMB_EXG:
self.entry.glue_phrase_section_sentence_example(process_chars(chars), None)
elif self.sm.get_state() == SCJDStateMachine.State.CB_EXG:
self.entry.glue_phrase_section_sentence_example(process_chars(chars), None)
elif self.sm.get_state() == SCJDStateMachine.State.GRAMB_EXG:
self.entry.glue_definition_sentence_example(process_chars(chars), None)
else:
raise RuntimeError(f"Node {self.sm.get_node()} with State {self.sm.get_state()} characters function is not defined")
elif self.sm.get_node() == SCJDStateMachine.Node.IND_SPAN:
self.entry.glue_definition_indicator(process_chars(chars))
elif self.sm.get_node() == SCJDStateMachine.Node.CW_SPAN:
self.entry.glue_phrase_first_title(process_chars(chars))
elif self.sm.get_node() == SCJDStateMachine.Node.CV_SPAN:
self.entry.push_phrase_title(process_chars(chars))
def startDocument(self):
self.sm.startdocument_move()
self.reinit()
def endDocument(self):
if self.sm.is_numb():
return
self.sm.enddocument_move()
# print(self.entry.data['title'])
| Leundo/apple-dictionary-extractor | ADParser/scjd_controller.py | scjd_controller.py | py | 17,976 | python | en | code | 0 | github-code | 6 |
73266752189 |
import cv2
import numpy as np
from scipy import signal
import math
import matplotlib.pyplot as plt

if __name__ == "__main__":
    # 3x3 Gaussian blur kernel (built here but not actually applied below; cv2.blur uses a box filter)
    gauss_blur_filter = [[0 for x in range(3)] for y in range(3)]
    gauss_blur_filter[0][0] = 1/16
    gauss_blur_filter[0][1] = 1/8
    gauss_blur_filter[0][2] = 1/16
    gauss_blur_filter[1][0] = 1/8
    gauss_blur_filter[1][1] = 1/4
    gauss_blur_filter[1][2] = 1/8
    gauss_blur_filter[2][0] = 1/16
    gauss_blur_filter[2][1] = 1/8
    gauss_blur_filter[2][2] = 1/16

    image = cv2.imread('point.jpg', 0)
    # Laplacian-style point-detection mask
    kernel = np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]], dtype=np.float64)
    replicate = cv2.copyMakeBorder(image, 20, 20, 20, 20, cv2.BORDER_REPLICATE)
    resultant_image = cv2.blur(replicate, (5, 5))
    cv2.imwrite('gauss-blue.jpg', resultant_image)

    # convolve with the point-detection mask and take the absolute response
    resultant_image_1 = signal.convolve2d(image, kernel, 'same')
    rows, columns = resultant_image_1.shape
    for i in range(rows):
        for j in range(columns):
            resultant_image_1[i][j] = abs(resultant_image_1[i][j])
    cv2.imwrite('mask-application.jpg', resultant_image_1)
    print(resultant_image_1.max())

    # keep only responses above the threshold and print their coordinates
    for i in range(rows):
        for j in range(columns):
            if resultant_image_1[i][j] >= 2024:
                print(i, j)
            else:
                resultant_image_1[i][j] = 0
    cv2.imwrite('point-detection.jpg', resultant_image_1)

    image_segment = cv2.imread('segment.jpg', 0)
    rows, columns = image_segment.shape
    '''x = np.zeros(255)
    y = np.arange(0,255,1)
    for i in range(rows):
        for j in range(columns):
            if image_segment[i][j] != 0:
                x[image_segment[i][j]] += 1
    hist, bins = np.histogram(x, bins=y)
    width = 1.0 * (bins[1] - bins[0])
    center = (bins[:-1] + bins[1:]) / 2
    plt.bar(center, hist, align='center', width=width)
    #plt.plot(y,x)
    #plt.bar(np.arange(len(y)),y)
    plt.show()'''

    # keep only pixels in the intensity band [200, 208]; zero out everything else
    for i in range(rows):
        for j in range(columns):
            if image_segment[i][j] > 208 or image_segment[i][j] < 200:
                image_segment[i][j] = 0
    cv2.imwrite('segemented.jpg', image_segment)
| Srivenkat1995/Image-Segmentation-and-Point-Detection | task2.py | task2.py | py | 2,305 | python | en | code | 0 | github-code | 6 |
72784253307 |
# https://www.codewars.com/kata/58558673b6b0e5a16b000028
def fight_resolve(defender, attacker):
    # pieces of the same case are on the same side, so there is no fight
    if (defender.lower() == defender) == (attacker.lower() == attacker):
        return -1
    # defender_win[d] is the attacker type that defender type d beats
    defender_win = {'a': 's', 'k': 'a', 'p': 'k', 's': 'p'}
    a = attacker.lower()
    d = defender.lower()
    if defender_win[d] == a:
        return defender
    else:
        return attacker
| blzzua/codewars | 7-kyu/boardgame_fight_resolve.py | boardgame_fight_resolve.py | py | 386 | python | en | code | 0 | github-code | 6 |
37681820103 |
import pygame

from configs import ColorConfig


class Button(object):
    def __init__(self, x_coordinate: int, y_coordinate: int, button_width: int,
                 button_height: int, text_font: str, text_size: str,
                 button_name: str, onclick_function=None):
        self.x = x_coordinate
        self.y = y_coordinate
        self.width = button_width
        self.height = button_height
        self.function_by_click = onclick_function
        # surface the button is drawn on and the rect used for mouse hit-testing
        self.buttonSurface = pygame.Surface((self.width, self.height))
        self.buttonRect = pygame.Rect(self.x, self.y, self.width, self.height)
        self.buttonSurf = pygame.font.SysFont(
            text_font, int(text_size)).render(button_name, True, (20, 20, 20))

    def process(self, game_window: pygame.display, parameter: str):
        self.buttonSurface.fill(ColorConfig.WHITE)
        if self.buttonRect.collidepoint(pygame.mouse.get_pos()):
            # highlight on hover, and run the callback on a left click
            self.buttonSurface.fill(ColorConfig.GREY)
            if pygame.mouse.get_pressed(num_buttons=3)[0]:
                self.buttonSurface.fill(ColorConfig.GREEN)
                self.function_by_click(parameter)
                return True
        # center the label on the button and draw the button onto the game window
        self.buttonSurface.blit(self.buttonSurf, [
            self.buttonRect.width / 2 - self.buttonSurf.get_rect().width / 2,
            self.buttonRect.height / 2 - self.buttonSurf.get_rect().height / 2
        ])
        game_window.blit(self.buttonSurface, self.buttonRect)
| pavst23/project_game | elements/button.py | button.py | py | 1,452 | python | en | code | 0 | github-code | 6 |
19773909067 |
# -*- coding: utf-8 -*-
"""
IBEIS CORE
Defines the core dependency cache supported by the image analysis api
Extracts annotation chips from images and applies optional image
normalizations.
TODO:
* interactive callback functions
* detection interface
* identification interface
NOTES:
HOW TO DESIGN INTERACTIVE PLOTS:
decorate as interactive
depc.get_property(recompute=True)
instead of calling preproc as a generator and then adding,
calls preproc and passes in a callback function.
preproc spawns interaction and must call callback function when finished.
callback function adds the rowids to the table.
Needed Tables:
Chip
NormChip
Feats
Keypoints
Descriptors
ProbChip
IdentifyQuery
NeighborIndex
QualityClassifier
ViewpointClassifier
CommandLine:
python -m ibeis.control.IBEISControl --test-show_depc_annot_graph --show
Setup:
>>> from ibeis.core_annots import * # NOQA
>>> import ibeis
>>> import plottool as pt
>>> ibs = ibeis.opendb('testdb1')
>>> depc = ibs.depc_annot
>>> aid_list = ibs.get_valid_aids()[0:2]
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from six.moves import zip
import dtool
import utool as ut
import vtool as vt
import numpy as np
import cv2
from ibeis.control.controller_inject import register_preprocs, register_subprops
from ibeis.algo.hots.chip_match import ChipMatch
from ibeis.algo.hots import neighbor_index
(print, rrr, profile) = ut.inject2(__name__, '[core_annots]')
derived_attribute = register_preprocs['annot']
register_subprop = register_subprops['annot']
# dtool.Config.register_func = derived_attribute
def testdata_core(defaultdb='testdb1', size=2):
import ibeis
# import plottool as pt
ibs = ibeis.opendb(defaultdb=defaultdb)
depc = ibs.depc_annot
aid_list = ut.get_argval(('--aids', '--aid'), type_=list,
default=ibs.get_valid_aids()[0:size])
return ibs, depc, aid_list
class ChipConfig(dtool.Config):
_param_info_list = [
#ut.ParamInfo('dim_size', 128, 'sz', hideif=None),
#ut.ParamInfo('dim_size', 960, 'sz', hideif=None),
ut.ParamInfo('dim_size', 700, 'sz', hideif=None), # TODO: allow types to vary
ut.ParamInfo(
'resize_dim', 'width', '',
#'resize_dim', 'area', '',
valid_values=['area', 'width', 'height', 'diag', 'maxwh', 'wh'],
hideif=lambda cfg: cfg['dim_size'] is None),
ut.ParamInfo('dim_tol', 0, 'tol', hideif=0),
ut.ParamInfo('preserve_aspect', True, hideif=True),
ut.ParamInfo('histeq', False, hideif=False),
ut.ParamInfo('adapteq', False, hideif=False),
ut.ParamInfo('histeq_thresh', False, hideif=False),
ut.ParamInfo('pad', 0, hideif=0),
ut.ParamInfo('ext', '.png', hideif='.png'),
]
ChipImgType = dtool.ExternType(vt.imread, vt.imwrite, extkey='ext')
@derived_attribute(
tablename='chips', parents=['annotations'],
colnames=['img', 'width', 'height', 'M'],
coltypes=[ChipImgType, int, int, np.ndarray],
configclass=ChipConfig,
#depprops=['image_uuid', 'verts', 'theta'],
fname='chipcache4',
rm_extern_on_delete=True,
chunksize=256,
)
def compute_chip(depc, aid_list, config=None):
r"""
Extracts the annotation chip from the bounding box
Args:
depc (ibeis.depends_cache.DependencyCache):
aid_list (list): list of annotation rowids
config (dict): (default = None)
Yields:
(uri, int, int): tup
CommandLine:
ibeis --tf compute_chip --show
ibeis --tf compute_chip --show --pad=64 --dim_size=256 --db PZ_MTEST
ibeis --tf compute_chip --show --pad=64 --dim_size=None --db PZ_MTEST
ibeis --tf compute_chip --show --db humpbacks
Example:
>>> # ENABLE_DOCTEST
>>> from ibeis.core_annots import * # NOQA
>>> import ibeis
>>> defaultdb = 'testdb1'
>>> ibs = ibeis.opendb(defaultdb=defaultdb)
>>> depc = ibs.depc_annot
>>> config = ChipConfig.from_argv_dict(dim_size=None)
>>> aid_list = ibs.get_valid_aids()[0:8]
>>> chips = depc.get_property('chips', aid_list, 'img', config={'dim_size': 256})
>>> ut.quit_if_noshow()
>>> import plottool as pt
>>> #interact_obj = pt.interact_multi_image.MultiImageInteraction(chips, nPerPage=4)
>>> import ibeis.viz.interact.interact_chip
>>> interact_obj = ibeis.viz.interact.interact_chip.interact_multichips(ibs, aid_list, config2_=config)
>>> interact_obj.start()
>>> pt.show_if_requested()
"""
print('Preprocess Chips')
print('config = %r' % (config,))
ibs = depc.controller
chip_dpath = ibs.get_chipdir() + '2'
ut.ensuredir(chip_dpath)
#ext = config['ext']
pad = config['pad']
dim_size = config['dim_size']
dim_tol = config['dim_tol']
resize_dim = config['resize_dim']
#cfghashid = config.get_hashid()
#avuuid_list = ibs.get_annot_visual_uuids(aid_list)
# TODO: just hash everything together
#_fmt = 'chip_aid_{aid}_avuuid_{avuuid}_{cfghashid}{ext}'
#cfname_list = [_fmt.format(aid=aid, avuuid=avuuid, ext=ext, cfghashid=cfghashid)
# for aid, avuuid in zip(aid_list, avuuid_list)]
#cfpath_list = [ut.unixjoin(chip_dpath, chip_fname)
# for chip_fname in cfname_list]
#gfpath_list = ibs.get_annot_image_paths(aid_list)
gid_list = ibs.get_annot_gids(aid_list)
# TODO: use verts instead
bbox_list = ibs.get_annot_bboxes(aid_list)
theta_list = ibs.get_annot_thetas(aid_list)
bbox_size_list = ut.take_column(bbox_list, [2, 3])
# Checks
invalid_flags = [w == 0 or h == 0 for (w, h) in bbox_size_list]
invalid_aids = ut.compress(aid_list, invalid_flags)
assert len(invalid_aids) == 0, 'invalid aids=%r' % (invalid_aids,)
if resize_dim == 'wh':
assert isinstance(dim_size, tuple), (
'must specify both width and height in dim_size when resize_dim=wh')
# Aspect ratio is not preserved. Use exact specifications.
newsize_list = [dim_size for _ in range(len(bbox_size_list))]
else:
scale_func_dict = {
'width': vt.get_scaled_size_with_width,
'area': vt.get_scaled_size_with_area, # actually root area
}
scale_func = scale_func_dict[resize_dim]
if dim_size is None:
newsize_list = bbox_size_list
else:
if resize_dim == 'area':
dim_size = dim_size ** 2
dim_tol = dim_tol ** 2
newsize_list = [scale_func(dim_size, w, h, dim_tol) for (w, h) in bbox_size_list]
if pad > 0:
halfoffset_ms = (pad, pad)
extras_list = [vt.get_extramargin_measures(bbox, new_size, halfoffset_ms)
for bbox, new_size in zip(bbox_list, newsize_list)]
# Overwrite bbox and new size with margined versions
bbox_list = ut.take_column(extras_list, 0)
newsize_list = ut.take_column(extras_list, 1)
# Build transformation from image to chip
M_list = [vt.get_image_to_chip_transform(bbox, new_size, theta) for
bbox, theta, new_size in zip(bbox_list, theta_list, newsize_list)]
#arg_iter = zip(cfpath_list, gid_list, newsize_list, M_list)
arg_iter = zip(gid_list, newsize_list, M_list)
arg_list = list(arg_iter)
filterfn_list = []
from vtool import image_filters
if config['histeq']:
filterfn_list.append(image_filters.histeq_fn)
if config['adapteq']:
filterfn_list.append(image_filters.adapteq_fn)
warpkw = dict(flags=cv2.INTER_LANCZOS4, borderMode=cv2.BORDER_CONSTANT)
last_gid = None
for tup in ut.ProgIter(arg_list, lbl='computing chips', backspace=True):
# FIXME: THE GPATH SHOULD BE PASSED HERE WITH AN ORIENTATION FLAG
#cfpath, gid, new_size, M = tup
gid, new_size, M = tup
# Read parent image # TODO: buffer this
if gid != last_gid:  # assumes gids are ordered, so each parent image is only loaded once
imgBGR = ibs.get_images(gid)
last_gid = gid
# Warp chip
chipBGR = cv2.warpAffine(imgBGR, M[0:2], tuple(new_size), **warpkw)
for filtfn in filterfn_list:
chipBGR = filtfn(chipBGR)
width, height = vt.get_size(chipBGR)
yield (chipBGR, width, height, M)
# Write chip to disk
#vt.imwrite(cfpath, chipBGR)
#yield (cfpath, width, height, M)
@register_subprop('chips', 'dlen_sqrd')
def compute_dlen_sqrd(depc, aid_list, config=None):
size_list = np.array(depc.get('chips', aid_list, ('width', 'height'), config))
dlen_sqrt_list = (size_list ** 2).sum(axis=1).tolist()
return dlen_sqrt_list
class AnnotMaskConfig(dtool.Config):
_param_info_list = [
ut.ParamInfo('manual', True)
]
_sub_config_list = [
ChipConfig
]
@derived_attribute(
tablename='annotmask', parents=['annotations'],
colnames=['img', 'width', 'height'],
coltypes=[('extern', vt.imread), int, int],
configclass=AnnotMaskConfig,
fname='../maskcache2',
# isinteractive=True,
)
def compute_annotmask(depc, aid_list, config=None):
r"""
Interaction dispatcher for annotation masks.
Args:
depc (ibeis.depends_cache.DependencyCache):
aid_list (list): list of annotation rowids
config (AnnotMaskConfig): (default = None)
Yields:
(uri, int, int): tup
CommandLine:
python -m ibeis.core_annots --exec-compute_annotmask --show
python -m ibeis.core_annots --exec-compute_annotmask --show --edit
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.core_annots import * # NOQA
>>> ibs, depc, aid_list = testdata_core()
>>> config = AnnotMaskConfig(dim_size=None)
>>> chip_config = config.chip_cfg
>>> edit = ut.get_argflag('--edit')
>>> mask = depc.get_property('annotmask', aid_list, 'img', config, recompute=edit)[0]
>>> chip = depc.get_property('chips', aid_list, 'img', config=chip_config)[0]
>>> ut.quit_if_noshow()
>>> import plottool as pt
>>> resized = vt.resize_mask(mask, chip)
>>> blended = vt.blend_images_multiply(chip, resized)
>>> pt.imshow(blended, title='mask')
>>> pt.show_if_requested()
"""
from plottool import interact_impaint
# TODO: Ensure interactive required cache words
# Keep manual things above the cache dir
mask_dpath = ut.unixjoin(depc.cache_dpath, '../ManualChipMask')
ut.ensuredir(mask_dpath)
ibs = depc.controller
chip_config = config.chip_cfg
chip_imgs = depc.get('chips', aid_list, 'img', config=chip_config)
cfghashid = config.get_hashid()
avuuid_list = ibs.get_annot_visual_uuids(aid_list)
# TODO: just hash everything together
ext = '.png'
_fmt = 'mask_aid_{aid}_avuuid_{avuuid}_{cfghashid}{ext}'
fname_list = [_fmt.format(aid=aid, avuuid=avuuid, ext=ext, cfghashid=cfghashid)
for aid, avuuid in zip(aid_list, avuuid_list)]
for img, fname, aid in zip(chip_imgs, fname_list, aid_list):
mask_fpath = ut.unixjoin(mask_dpath, fname)
if ut.checkpath(mask_fpath):
# Allow for editing on recompute
init_mask = vt.imread(mask_fpath)
else:
init_mask = None
mask = interact_impaint.impaint_mask2(img, init_mask=init_mask)
vt.imwrite(mask_fpath, mask)
print('imwrite')
w, h = vt.get_size(mask)
yield mask_fpath, w, h
# Remove the old chips
#ibs.delete_annot_chips([aid])
#ibs.delete_annot_chip_thumbs([aid])
class ProbchipConfig(dtool.Config):
# TODO: incorporate into base
_named_defaults = {
'rf': {
'fw_detector': 'rf',
'smooth_thresh': None,
'smooth_ksize': None,
}
}
_param_info_list = [
#ut.ParamInfo('preserve_aspect', True, hideif=True),
ut.ParamInfo('fw_detector', 'cnn', 'detector='),
ut.ParamInfo('fw_dim_size', 256, 'sz'),
ut.ParamInfo('smooth_thresh', 20, 'thresh='),
ut.ParamInfo('smooth_ksize', 20, 'ksz=', hideif=lambda cfg: cfg['smooth_thresh'] is None),
#ut.ParamInfo('ext', '.png'),
]
#_sub_config_list = [
# ChipConfig
#]
ProbchipImgType = dtool.ExternType(ut.partial(vt.imread, grayscale=True),
vt.imwrite, extern_ext='.png')
@derived_attribute(
tablename='probchip', parents=['annotations'],
colnames=['img'],
coltypes=[ProbchipImgType],
configclass=ProbchipConfig,
fname='chipcache4',
# isinteractive=True,
)
def compute_probchip(depc, aid_list, config=None):
""" Computes probability chips using pyrf
CommandLine:
python -m ibeis.core_annots --test-compute_probchip --nocnn --show --db PZ_MTEST
python -m ibeis.core_annots --test-compute_probchip --show --fw_detector=cnn
python -m ibeis.core_annots --test-compute_probchip --show --fw_detector=rf --smooth_thresh=None
Example1:
>>> # DISABLE_DOCTEST
>>> from ibeis.core_annots import * # NOQA
>>> import ibeis
>>> ibs, depc, aid_list = testdata_core()
>>> aid_list = ibs.get_valid_aids(species='zebra_plains')[0:10]
>>> config = ProbchipConfig.from_argv_dict(fw_detector='rf', smooth_thresh=None)
>>> #probchip_fpath_list_ = ut.take_column(list(compute_probchip(depc, aid_list, config)), 0)
>>> probchip_list_ = ut.take_column(list(compute_probchip(depc, aid_list, config)), 0)
>>> #result = ut.list_str(probchip_fpath_list_)
>>> #print(result)
>>> ut.quit_if_noshow()
>>> import plottool as pt
>>> #xlabel_list = list(map(str, [vt.image.open_image_size(p) for p in probchip_fpath_list_]))
>>> #iteract_obj = pt.interact_multi_image.MultiImageInteraction(probchip_fpath_list_, nPerPage=4, xlabel_list=xlabel_list)
>>> xlabel_list = [str(vt.get_size(img)) for img in probchip_list_]
>>> iteract_obj = pt.interact_multi_image.MultiImageInteraction(probchip_list_, nPerPage=4, xlabel_list=xlabel_list)
>>> iteract_obj.start()
>>> ut.show_if_requested()
"""
print('[core] COMPUTING FEATWEIGHTS')
print('config = %r' % (config,))
import vtool as vt
ibs = depc.controller
# Use the labeled species for the fw_detector
species_list = ibs.get_annot_species_texts(aid_list)
fw_detector = config['fw_detector']
dim_size = config['fw_dim_size']
smooth_thresh = config['smooth_thresh']
smooth_ksize = config['smooth_ksize']
if fw_detector == 'rf':
pad = 64
else:
pad = 0
probchip_dir = ibs.get_probchip_dir() + '2'
cfghashid = config.get_hashid()
# TODO: just hash everything together
ut.ensuredir(probchip_dir)
_fmt = 'probchip_avuuid_{avuuid}_' + cfghashid + '.png'
annot_visual_uuid_list = ibs.get_annot_visual_uuids(aid_list)
probchip_fpath_list = [ut.unixjoin(probchip_dir, _fmt.format(avuuid=avuuid))
for avuuid in annot_visual_uuid_list]
chip_config = ChipConfig(pad=pad, dim_size=dim_size)
mchip_path_list = depc.get('chips', aid_list, 'img', config=chip_config, read_extern=False)
aid_list = np.array(aid_list)
species_list = np.array(species_list)
species_rowid = np.array(ibs.get_species_rowids_from_text(species_list))
# Group by species
unique_species_rowids, groupxs = vt.group_indices(species_rowid)
grouped_aids = vt.apply_grouping(aid_list, groupxs)
grouped_species = vt.apply_grouping(species_list, groupxs)
grouped_mpaths = ut.apply_grouping(mchip_path_list, groupxs)
grouped_ppaths = ut.apply_grouping(probchip_fpath_list, groupxs)
unique_species = ut.get_list_column(grouped_species, 0)
if ut.VERBOSE:
print('[preproc_probchip] +--------------------')
print(('[preproc_probchip.compute_and_write_probchip] '
'Preparing to compute %d probchips of %d species')
% (len(aid_list), len(unique_species)))
print(config)
#grouped_probchip_fpath_list = []
grouped_probchips = []
_iter = zip(grouped_aids, unique_species, grouped_ppaths, grouped_mpaths)
_iter = ut.ProgIter(_iter, nTotal=len(grouped_aids),
lbl='probchip for species', enabled=ut.VERBOSE, backspace=True)
if fw_detector == 'rf':
for aids, species, probchip_fpaths, inputchip_fpaths in _iter:
if len(aids) == 0:
continue
gen = rf_probchips(ibs, aids, species, probchip_fpaths, inputchip_fpaths, pad,
smooth_thresh, smooth_ksize)
#grouped_probchip_fpath_list.append(probchip_fpaths)
grouped_probchips.append(list(gen))
elif fw_detector == 'cnn':
for aids, species, probchip_fpaths, inputchip_fpaths in _iter:
if len(aids) == 0:
continue
gen = cnn_probchips(ibs, species, probchip_fpath_list, inputchip_fpaths,
smooth_thresh, smooth_ksize)
#grouped_probchip_fpath_list.append(probchip_fpaths)
grouped_probchips.append(list(gen))
else:
raise NotImplementedError('unknown fw_detector=%r' % (fw_detector,))
if ut.VERBOSE:
print('[preproc_probchip] Done computing probability images')
print('[preproc_probchip] L_______________________')
#probchip_fpath_list = vt.invert_apply_grouping2(
# grouped_probchip_fpath_list, groupxs, dtype=object)
#for fpath in probchip_fpath_list:
# yield (fpath,)
probchip_result_list = vt.invert_apply_grouping2(
grouped_probchips, groupxs, dtype=object)
for probchip in probchip_result_list:
yield (probchip,)
def cnn_probchips(ibs, species, probchip_fpath_list, inputchip_fpaths, smooth_thresh, smooth_ksize):
# don't use extramargin here (for now)
mask_gen = ibs.generate_species_background_mask(inputchip_fpaths, species)
_iter = zip(probchip_fpath_list, mask_gen)
for chunk in ut.ichunks(_iter, 256):
_progiter = ut.ProgIter(chunk, lbl='compute probchip chunk', adjust=True, time_thresh=30.0, backspace=True)
for probchip_fpath, probchip in _progiter:
if smooth_thresh is not None and smooth_ksize is not None:
probchip = postprocess_mask(probchip, smooth_thresh, smooth_ksize)
yield probchip
#vt.imwrite(probchip_fpath, probchip)
def rf_probchips(ibs, aids, species, probchip_fpaths, inputchip_fpaths, pad,
smooth_thresh, smooth_ksize):
from ibeis.algo.detect import randomforest
extramargin_probchip_fpaths = [ut.augpath(path, '_margin')
for path in probchip_fpaths]
rfconfig = {'scale_list': [1.0], 'mode': 1,
'output_gpath_list': extramargin_probchip_fpaths}
probchip_generator = randomforest.detect_gpath_list_with_species(
ibs, inputchip_fpaths, species, **rfconfig)
# Evaluate generator until completion
ut.evaluate_generator(probchip_generator)
extramargin_mask_gen = (vt.imread(fpath, grayscale=True)
for fpath in extramargin_probchip_fpaths)
# Crop the extra margin off of the new probchips
_iter = zip(probchip_fpaths, extramargin_mask_gen)
for (probchip_fpath, extramargin_probchip) in _iter:
half_w, half_h = (pad, pad)
probchip = extramargin_probchip[half_h:-half_h, half_w:-half_w]
if smooth_thresh is not None and smooth_ksize is not None:
probchip = postprocess_mask(probchip, smooth_thresh, smooth_ksize)
yield probchip
#vt.imwrite(probchip_fpath, probchip)
def postprocess_mask(mask, thresh=20, kernel_size=20):
r"""
Args:
mask (ndarray):
Returns:
ndarray: mask2
CommandLine:
python -m ibeis.core_annots --exec-postprocess_mask --cnn --show --aid=1 --db PZ_MTEST
python -m ibeis --tf postprocess_mask --cnn --show --db PZ_MTEST --adapteq=True
SeeAlso:
python -m ibeis_cnn --tf generate_species_background_mask --show --db PZ_Master1 --aid 9970
Ignore:
input_tuple = aid_list
tablename = 'probchip'
config = full_config
rowid_kw = dict(config=config)
Example:
>>> # ENABLE_DOCTEST
>>> from ibeis.core_annots import * # NOQA
>>> import plottool as pt
>>> ibs, depc, aid_list = testdata_core()
>>> config = ChipConfig.from_argv_dict()
>>> probchip_config = ProbchipConfig(smooth_thresh=None)
>>> chip = ibs.depc_annot.get('chips', aid_list, 'img', config)[0]
>>> mask = ibs.depc_annot.get('probchip', aid_list, 'img', probchip_config)[0]
>>> mask2 = postprocess_mask(mask)
>>> ut.quit_if_noshow()
>>> fnum = 1
>>> pt.imshow(chip, pnum=(1, 3, 1), fnum=fnum, xlabel=str(chip.shape))
>>> pt.imshow(mask, pnum=(1, 3, 2), fnum=fnum, title='before', xlabel=str(mask.shape))
>>> pt.imshow(mask2, pnum=(1, 3, 3), fnum=fnum, title='after', xlabel=str(mask2.shape))
>>> ut.show_if_requested()
"""
import cv2
# NOTE: these hard-coded values shadow the thresh / kernel_size arguments passed in
thresh = 20
kernel_size = 20
mask2 = mask.copy()
# light threshold
mask2[mask2 < thresh] = 0
# open and close
kernel = np.ones((kernel_size, kernel_size), np.uint8)
mask2 = cv2.morphologyEx(mask2, cv2.MORPH_CLOSE, kernel)
mask2 = cv2.morphologyEx(mask2, cv2.MORPH_OPEN, kernel)
mask2 = cv2.morphologyEx(mask2, cv2.MORPH_CLOSE, kernel)
return mask2
class FeatConfig(dtool.Config):
r"""
Example:
>>> from ibeis.core_annots import * # NOQA
>>> feat_cfg = FeatConfig()
>>> result = str(feat_cfg)
>>> print(result)
<FeatConfig(hesaff+sift)>
"""
# TODO: FIXME
#_parents = [ChipConfig]
def get_param_info_list(self):
import pyhesaff
default_keys = list(pyhesaff.get_hesaff_default_params().keys())
default_items = list(pyhesaff.get_hesaff_default_params().items())
param_info_list = [
ut.ParamInfo('feat_type', 'hesaff+sift', ''),
ut.ParamInfo('maskmethod', None, hideif=None)
]
param_info_dict = {
name: ut.ParamInfo(name, default, hideif=default)
for name, default in default_items
}
#param_info_dict['scale_max'].default = -1
#param_info_dict['scale_max'].default = 50
param_info_list += ut.dict_take(param_info_dict, default_keys)
return param_info_list
def get_hesaff_params(self):
# Get subset of these params that correspond to hesaff
import pyhesaff
default_keys = list(pyhesaff.get_hesaff_default_params().keys())
hesaff_param_dict = ut.dict_subset(self, default_keys)
return hesaff_param_dict
@derived_attribute(
tablename='feat', parents=['chips'],
colnames=['num_feats', 'kpts', 'vecs'],
coltypes=[int, np.ndarray, np.ndarray],
configclass=FeatConfig,
fname='featcache', chunksize=1024,
)
def compute_feats(depc, cid_list, config=None):
r"""
Computes features and yields results asynchronously: TODO: Remove IBEIS from
this equation. Move the firewall towards the controller
Args:
depc (dtool.DependencyCache):
cid_list (list):
config (None):
Returns:
generator : generates param tups
SeeAlso:
~/code/ibeis_cnn/ibeis_cnn/_plugin.py
CommandLine:
python -m ibeis.core_annots --test-compute_feats:0 --show
python -m ibeis.core_annots --test-compute_feats:1
Example:
>>> # ENABLE_DOCTEST
>>> from ibeis.core_annots import * # NOQA
>>> ibs, depc, aid_list = testdata_core()
>>> chip_config = {}
>>> config = FeatConfig()
>>> cid_list = depc.get_rowids('chips', aid_list, config=chip_config)
>>> featgen = compute_feats(depc, cid_list, config)
>>> feat_list = list(featgen)
>>> assert len(feat_list) == len(aid_list)
>>> (nFeat, kpts, vecs) = feat_list[0]
>>> assert nFeat == len(kpts) and nFeat == len(vecs)
>>> assert kpts.shape[1] == 6
>>> assert vecs.shape[1] == 128
>>> ut.quit_if_noshow()
>>> import plottool as pt
>>> chip = depc.get_native('chips', cid_list[0:1], 'img')[0]
>>> pt.interact_keypoints.KeypointInteraction(chip, kpts, vecs, autostart=True)
>>> ut.show_if_requested()
Example:
>>> # TIMING
>>> from ibeis.core_annots import * # NOQA
>>> ibs, depc, aid_list = testdata_core('PZ_MTEST', 100)
>>> config = {'dim_size': 450}
>>> num_feats = depc.get('feat', aid_list, 'num_feats', config=config, recompute=True)
ibs.delete_annot_feats(aid_list)
ibs.get_annot_feat_rowids(aid_list)
"""
nInput = len(cid_list)
hesaff_params = config.get_hesaff_params()
feat_type = config['feat_type']
maskmethod = config['maskmethod']
ut.assert_all_not_None(cid_list, 'cid_list')
chip_fpath_list = depc.get_native('chips', cid_list, 'img', read_extern=False)
if maskmethod is not None:
assert False
#aid_list = ibs.get_chip_aids(cid_list)
#probchip_fpath_list = ibs.get_annot_probchip_fpath(aid_list)
else:
probchip_fpath_list = (None for _ in range(nInput))
if ut.NOT_QUIET:
print('[preproc_feat] config = %s' % config)
if ut.VERYVERBOSE:
print('full_params = ' + ut.dict_str())
ibs = depc.controller
if feat_type == 'hesaff+sift':
# Multiprocessing parallelization
dictargs_iter = (hesaff_params for _ in range(nInput))
arg_iter = zip(chip_fpath_list, probchip_fpath_list, dictargs_iter)
# eager evaluation.
# TODO: Check if there is any benefit to just passing in the iterator.
arg_list = list(arg_iter)
featgen = ut.generate(gen_feat_worker, arg_list, nTasks=nInput, freq=10,
ordered=True, force_serial=ibs.force_serial)
elif feat_type == 'hesaff+siam128':
from ibeis_cnn import _plugin
assert maskmethod is None, 'not implemented'
assert False, 'not implemented'
featgen = _plugin.generate_siam_l2_128_feats(ibs, cid_list, config=config)
else:
raise AssertionError('unknown feat_type=%r' % (feat_type,))
for nFeat, kpts, vecs in featgen:
yield (nFeat, kpts, vecs,)
def gen_feat_worker(tup):
r"""
Function to be parallelized by multiprocessing / joblib / whatever.
Must take in one argument to be used by multiprocessing.map_async
Args:
tup (tuple):
Returns:
tuple: (None, kpts, vecs)
CommandLine:
python -m ibeis.core_annots --exec-gen_feat_worker --show
python -m ibeis.core_annots --exec-gen_feat_worker --show --aid 1988 --db GZ_Master1 --affine-invariance=False --scale_max=30
python -m ibeis.core_annots --exec-gen_feat_worker --show --aid 1988 --db GZ_Master1 --affine-invariance=False --maskmethod=None --scale_max=30
Example:
>>> # ENABLE_DOCTEST
>>> from ibeis.core_annots import * # NOQA
>>> ibs, depc, aid_list = testdata_core()
>>> aid = aid_list[0]
>>> config = {}
>>> feat_config = FeatConfig.from_argv_dict()
>>> chip_fpath = ibs.depc_annot.get('chips', aid_list[0], 'img', config=config, read_extern=False)
>>> maskmethod = ut.get_argval('--maskmethod', type_=str, default='cnn')
>>> probchip_fpath = ibs.depc_annot.get('probchip', aid_list[0], 'img', config=config, read_extern=False) if feat_config['maskmethod'] == 'cnn' else None
>>> hesaff_params = feat_config.asdict()
>>> # Exec function source
>>> tup = (chip_fpath, probchip_fpath, hesaff_params)
>>> masked_chip, num_kpts, kpts, vecs = ut.exec_func_src(
>>> gen_feat_worker, key_list=['masked_chip', 'num_kpts', 'kpts', 'vecs'],
>>> sentinal='num_kpts = kpts.shape[0]')
>>> result = ('(num_kpts, kpts, vecs) = %s' % (ut.repr2((num_kpts, kpts, vecs)),))
>>> print(result)
>>> ut.quit_if_noshow()
>>> import plottool as pt
>>> from plottool.interactions import ExpandableInteraction
>>> interact = ExpandableInteraction()
>>> interact.append_plot(pt.interact_keypoints.KeypointInteraction(masked_chip, kpts, vecs))
>>> interact.append_plot(lambda **kwargs: pt.plot_score_histograms([vt.get_scales(kpts)], **kwargs))
>>> interact.start()
>>> ut.show_if_requested()
"""
import pyhesaff
#import numpy as np
#import vtool as vt
chip_fpath, probchip_fpath, hesaff_params = tup
chip = vt.imread(chip_fpath)
if probchip_fpath is not None:
probchip = vt.imread(probchip_fpath, grayscale=True)
probchip = vt.resize_mask(probchip, chip)
#vt.blend_images_multiply(chip, probchip)
masked_chip = (chip * (probchip[:, :, None].astype(np.float32) / 255)).astype(np.uint8)
else:
masked_chip = chip
kpts, vecs = pyhesaff.detect_feats_in_image(masked_chip, **hesaff_params)
num_kpts = kpts.shape[0]
return (num_kpts, kpts, vecs)
class FeatWeightConfig(dtool.Config):
_param_info_list = [
ut.ParamInfo('featweight_enabled', True, 'enabled='),
]
# FIXME: incorporate config dependencies in dtool
#_parents = [FeatConfig, ProbchipConfig]
@derived_attribute(
tablename='featweight', parents=['feat', 'probchip'],
colnames=['fwg'],
coltypes=[np.ndarray],
configclass=FeatWeightConfig,
fname='featcache', chunksize=512,
)
def compute_fgweights(depc, fid_list, pcid_list, config=None):
"""
Args:
depc (dtool.DependencyCache): depc
fid_list (list):
config (None): (default = None)
Example:
>>> # ENABLE_DOCTEST
>>> from ibeis.core_annots import * # NOQA
>>> ibs, depc, aid_list = testdata_core()
>>> full_config = {}
>>> config = FeatConfig()
>>> fid_list = depc.get_rowids('feat', aid_list, config=full_config)
>>> pcid_list = depc.get_rowids('probchip', aid_list, config=full_config)
>>> prop_list = list(compute_fgweights(depc, fid_list, pcid_list))
>>> featweight_list = ut.take_column(prop_list, 0)
>>> result = np.array_str(featweight_list[0][0:3], precision=3)
>>> print(result)
"""
ibs = depc.controller
nTasks = len(fid_list)
print('[compute_fgweights] Computing %d fgweights' % (nTasks,))
#aid_list = depc.get_ancestor_rowids('feat', fid_list, 'annotations')
#probchip_fpath_list = depc.get(aid_list, 'img', config={}, read_extern=False)
probchip_list = depc.get_native('probchip', pcid_list, 'img')
cid_list = depc.get_ancestor_rowids('feat', fid_list, 'chips')
chipsize_list = depc.get_native('chips', cid_list, ('width', 'height'))
kpts_list = depc.get_native('feat', fid_list, 'kpts')
# Force grayscale reading of chips
arg_iter = zip(kpts_list, probchip_list, chipsize_list)
# ibs = depc.controller
# featweight_gen = ut.generate(gen_featweight_worker, arg_iter,
# nTasks=nTasks, ordered=True, freq=10,
# force_serial=ibs.force_serial)
featweight_gen = ut.generate(gen_featweight_worker, arg_iter,
nTasks=nTasks, ordered=True, freq=10,
force_serial=ibs.force_serial
)
featweight_list = list(featweight_gen)
print('[compute_fgweights] Done computing %d fgweights' % (nTasks,))
for fw in featweight_list:
yield (fw,)
def gen_featweight_worker(tup):
"""
Function to be parallelized by multiprocessing / joblib / whatever.
Must take in one argument to be used by multiprocessing.map_async
Args:
tup (aid, tuple(kpts(ndarray), probchip_fpath )): keypoints and
probability chip file path aid, kpts, probchip_fpath
CommandLine:
python -m ibeis.core_annots --test-gen_featweight_worker --show
python -m ibeis.core_annots --test-gen_featweight_worker --show --dpath figures --save ~/latex/crall-candidacy-2015/figures/gen_featweight.jpg
python -m ibeis.core_annots --test-gen_featweight_worker --show --db PZ_MTEST --qaid_list=1,2,3,4,5,6,7,8,9
Example:
>>> # ENABLE_DOCTEST
>>> from ibeis.core_annots import * # NOQA
>>> #test_featweight_worker()
>>> ibs, depc, aid_list = testdata_core()
>>> aid_list = aid_list[0:1]
>>> config = {'dim_size': 450, 'resize_dim': 'area', 'smooth_thresh': 0, 'smooth_ksize': 0}
>>> probchip = depc.get('probchip', aid_list, 'img', config=config)[0]
>>> chipsize = depc.get('chips', aid_list, ('width', 'height'), config=config)[0]
>>> kpts = depc.get('feat', aid_list, 'kpts', config=config)[0]
>>> tup = (kpts, probchip, chipsize)
>>> weights = gen_featweight_worker(tup)
>>> assert np.all(weights <= 1.0), 'weights cannot be greater than 1'
>>> chip = depc.get('chips', aid_list, 'img', config=config)[0]
>>> ut.quit_if_noshow()
>>> import plottool as pt
>>> fnum = 1
>>> pnum_ = pt.make_pnum_nextgen(1, 3)
>>> pt.figure(fnum=fnum, doclf=True)
>>> pt.imshow(chip, pnum=pnum_(0), fnum=fnum)
>>> pt.imshow(probchip, pnum=pnum_(2), fnum=fnum)
>>> pt.imshow(chip, pnum=pnum_(1), fnum=fnum)
>>> color_list = pt.draw_kpts2(kpts, weights=weights, ell_alpha=.3)
>>> cb = pt.colorbar(weights, color_list)
>>> cb.set_label('featweights')
>>> pt.show_if_requested()
"""
(kpts, probchip, chipsize) = tup
if probchip is None:
# hack for undetected chips. SETS ALL FEATWEIGHTS TO .25 = 1/4
assert False, 'should not be in this state'
weights = np.full(len(kpts), .25, dtype=np.float32)
else:
sfx, sfy = (probchip.shape[1] / chipsize[0], probchip.shape[0] / chipsize[1])
kpts_ = vt.offset_kpts(kpts, (0, 0), (sfx, sfy))
#vtpatch.get_warped_patches()
if False:
# VERY SLOW
patch_list1 = [vt.get_warped_patch(probchip, kp)[0].astype(np.float32) / 255.0 for kp in kpts_]
weight_list = [vt.gaussian_average_patch(patch1) for patch1 in patch_list1]
#weight_list = [patch.sum() / (patch.size) for patch in patch_list]
else:
# New way
weight_list = vt.patch_gaussian_weighted_average_intensities(probchip, kpts_)
weights = np.array(weight_list, dtype=np.float32)
return weights
class VsOneRequest(dtool.base.VsOneSimilarityRequest):
_tablename = 'vsone'
def postprocess_execute(request, parent_rowids, result_list):
import ibeis
depc = request.depc
ibs = depc.controller
qaid_list, daid_list = list(zip(*parent_rowids))
unique_qaids, groupxs = ut.group_indices(qaid_list)
grouped_daids = ut.apply_grouping(daid_list, groupxs)
unique_qnids = ibs.get_annot_nids(unique_qaids)
single_cm_list = ut.take_column(result_list, 1)
grouped_cms = ut.apply_grouping(single_cm_list, groupxs)
_iter = zip(unique_qaids, unique_qnids, grouped_daids, grouped_cms)
cm_list = []
for qaid, qnid, daids, cms in _iter:
# Hacked in version of creating an annot match object
chip_match = ibeis.ChipMatch.combine_cms(cms)
chip_match.score_maxcsum(request)
cm_list.append(chip_match)
#import utool
#utool.embed()
#cm = cm_list[0]
#cm.print_inspect_str(request)
#cm.assert_self(request, assert_feats=False)
return cm_list
class VsOneConfig(dtool.Config):
"""
Example:
>>> from ibeis.core_annots import * # NOQA
>>> cfg = VsOneConfig()
>>> result = str(cfg)
>>> print(result)
"""
_param_info_list = [
#ut.ParamInfo('sver_xy_thresh', .01),
ut.ParamInfo('sver_xy_thresh', .001),
ut.ParamInfo('ratio_thresh', .625),
ut.ParamInfo('refine_method', 'homog'),
ut.ParamInfo('symmetric', False),
ut.ParamInfo('K', 1),
ut.ParamInfo('Knorm', 1),
ut.ParamInfo('version', 0),
ut.ParamInfo('augment_queryside_hack', False),
]
_sub_config_list = [
FeatConfig,
ChipConfig, # TODO: infer chip config from feat config
FeatWeightConfig,
]
def test_cut(ibs, parent_rowids_T, score_list2):
unique_aids = ut.unique(ut.flatten(parent_rowids_T))
#for view in set(ibs.get_annot_yaw_texts(unique_aids)):
# aid2_idx = ut.make_index_lookup(unique_aids)
# #idx2_aid = ut.invert_dict(aid2_idx)
# idx_pairs = np.array(ut.unflat_take(aid2_idx, zip(*parent_rowids_T)))
# num = len(aid2_idx)
# flat_idx = np.ravel_multi_index(idx_pairs.T, (num, num))
# score_list2 = np.array(score_list2)
# cost_matrix = np.zeros(num * num)
# cost_matrix[flat_idx] = score_list2
# cost_matrix = cost_matrix.reshape((num, num))
# thresh = np.median(cost_matrix)
# thresh = 20
# labels = vt.unsupervised_multicut_labeling(cost_matrix, thresh)
# grouping = ut.group_items(unique_aids, labels)
if True:
#vp2_name2_aids = ibs.group_annots_by_multi_prop(unique_aids, [ibs.get_annot_yaw_texts, ibs.get_annot_name_texts])
aid2_idx = ut.make_index_lookup(unique_aids)
num = len(aid2_idx)
idx_pairs = np.array(ut.unflat_take(aid2_idx, zip(*parent_rowids_T)))
flat_idx = np.ravel_multi_index(idx_pairs.T, (num, num))
score_list2 = np.array(score_list2)
cost_matrix = np.zeros(num * num)
cost_matrix[flat_idx] = score_list2
cost_matrix = cost_matrix.reshape((num, num))
vp2_aids = ibs.group_annots_by_multi_prop(unique_aids, [ibs.get_annot_yaw_texts])
for view, aids in vp2_aids.items():
print('---')
print('view = %r' % (view,))
print('len(aids) = %r' % (len(aids),))
idxs = ut.take(aid2_idx, aids)
if len(idxs) == 1:
continue
real_group = ibs.group_annots_by_name(aids)[0]
sub_cost_matrix = cost_matrix[idxs].T[idxs].T
#ibs = ut.search_stack_for_localvar('ibs')
for thresh in [5, 7, 10, 15, 25, 50]:
labels = vt.unsupervised_multicut_labeling(sub_cost_matrix, thresh)
grouping = ut.group_items(aids, labels)
diff = ut.compare_groupings(real_group, grouping.values())
print('thresh = %r, diff=%r' % (thresh, diff))
#print('--')
if False:
# synthetic data
size = 100
thresh = 50
np.random.randint(0, 1)
np.zeros((size, size))
#np.random.rand(size, size)
size = 40
for size in range(2, 100):
aids = np.arange(size)
encounter_lbls = np.random.randint(0, size, size)
grid1 = np.tile(encounter_lbls, (size, 1))
is_match = grid1.T == grid1
good_pos = np.where(is_match)
bad_pos = np.where(~is_match)
sub_cost_matrix = np.empty((size, size))
sub_cost_matrix[good_pos] = np.random.randn(len(good_pos[0])) + 20
sub_cost_matrix[bad_pos] = np.random.randn(len(bad_pos[0])) - 20
sub_cost_matrix[np.diag_indices_from(sub_cost_matrix)] = np.inf
labels = vt.unsupervised_multicut_labeling(sub_cost_matrix, 0)
diff = ut.compare_groupings(
list(ut.group_items(aids, encounter_lbls).values()),
list(ut.group_items(aids, labels).values()))
print('diff = %r' % (diff,))
@derived_attribute(
tablename='vsone', parents=['annotations', 'annotations'],
colnames=['score', 'match'], coltypes=[float, ChipMatch],
requestclass=VsOneRequest,
configclass=VsOneConfig,
chunksize=128,
#chunksize=16,
fname='vsone',
)
def compute_one_vs_one(depc, qaids, daids, config):
r"""
CommandLine:
python -m ibeis.core_annots --test-compute_one_vs_one --show
python -m ibeis.control.IBEISControl --test-show_depc_annot_graph --show
python -m ibeis.control.IBEISControl --test-show_depc_annot_table_input --show --tablename=vsone
Ignore:
>>> from ibeis.core_annots import * # NOQA
>>> import ibeis
>>> ibs, aid_list = ibeis.testdata_aids('PZ_Master1', 'default:')
>>> occurid2_aids = ibs.temp_group_annot_occurrences(aid_list)
>>> aids_list = [np.unique(aids) for aids in occurid2_aids.values()]
>>> aids_list = [aids for aids in aids_list if len(aids) > 1 and len(aids) < 100]
aids = ut.sortedby([a.tolist() for a in aids_list], ut.lmap(len, aids_list))[-1]
depc = ibs.depc_annot
progiter = ut.ProgIter(aids_list, freq=1)
for aids in progiter:
request = depc.new_request('vsone', aids, aids, {'dim_size': 450})
qaids, daids = request.parent_rowids_T
config = request.config
parent_rowids_T = request.parent_rowids_T
rawres_list2 = request.execute(postprocess=False)
#score_list2 = ut.take_column(rawres_list2, 0)
ut.list_T = ut.list_transpose
#test_cut(ibs, parent_rowids_T, score_list2)
# x = 44
#test_cut(ibs, ut.list_T(ut.list_T(parent_rowids_T)[0:x]), score_list2[0:x])
Example:
>>> # ENABLE_DOCTEST
>>> from ibeis.core_annots import * # NOQA
>>> #ibs, depc, aid_list = testdata_core(size=5)
>>> import ibeis
>>> #ibs, aid_list = ibeis.testdata_aids('wd_peter2', 'timectrl:pername=2,view=left,view_ext=0,exclude_reference=True')
>>> ibs, aid_list = ibeis.testdata_aids('testdb2', 'default:')
>>> _, aids = ut.items_sorted_by_value(ut.group_items(aid_list, ibs.get_annot_occurrence_text(aid_list)), key=len)[-1]
>>> aid_list = aids[0:4]
>>> depc = ibs.depc_annot
>>> request = depc.new_request('vsone', aid_list, aid_list, {'resize_dim': 'width', 'dim_size': 450})
>>> config = request.config
>>> parent_rowids_T = request.parent_rowids_T
>>> qaids, daids = request.parent_rowids_T
>>> # Compute using request
>>> print('...Test vsone cache')
>>> rawres_list2 = request.execute(postprocess=False)
>>> score_list2 = ut.take_column(rawres_list2, 0)
>>> res_list2 = request.execute()
>>> print(res_list2)
>>> # Compute using function
>>> #print('...Test vsone function')
>>> #rawres_list1 = list(compute_one_vs_one(depc, qaids, daids, config))
>>> #score_list1 = ut.take_column(rawres_list1, 0)
>>> #print(score_list1)
>>> #assert np.all(score_list1 == score_list2)
>>> ut.quit_if_noshow()
>>> ut.ensure_pylab_qt4()
>>> match = res_list2[0]
>>> match.print_inspect_str(request)
>>> #match.show_analysis(qreq_=request)
>>> #match.ishow_analysis(qreq_=request)
>>> #match.ishow_single_annotmatch(qreq_=request)
>>> match.show_single_annotmatch(qreq_=request, vert=False)
>>> ut.show_if_requested()
Example:
>>> # Example of a one-vs-one query
>>> import ibeis
>>> ibs = ibeis.opendb('testdb1')
>>> config = {'codename': 'vsone'}
>>> qreq_ = ibs.new_query_request([1], [2], cfgdict=config)
>>> cm_list = qreq_.execute()
>>> match = cm_list[0]
>>> match.print_inspect_str(qreq_)
>>> match.show_single_annotmatch(qreq_=qreq_, vert=False)
>>> import utool as ut
>>> ut.show_if_requested()
Example:
>>> # Example of a one-vs-many query
>>> import ibeis
>>> ibs = ibeis.opendb('testdb1')
>>> config = {'codename': 'vsmany'}
>>> qreq_ = ibs.new_query_request([1], ibs.get_valid_aids(), cfgdict=config)
>>> cm_list = qreq_.execute()
>>> match = cm_list[0]
>>> match.print_inspect_str(qreq_)
>>> match.show_single_annotmatch(qreq_=qreq_, vert=False)
>>> import utool as ut
>>> ut.show_if_requested()
"""
import ibeis
ibs = depc.controller
qconfig2_ = config
dconfig2_ = config
unique_qaids = np.unique(qaids)
unique_daids = np.unique(daids)
# TODO: Ensure entire pipeline can use new dependencies
# DEPC Precompute
ibs.depc.d.get_feat_rowids(unique_qaids, config=qconfig2_)
ibs.depc.d.get_feat_rowids(unique_daids, config=dconfig2_)
if True:
annot1_list = [ibs.get_annot_lazy_dict2(qaid, config=qconfig2_)
for qaid in unique_qaids]
annot2_list = [ibs.get_annot_lazy_dict2(daid, config=dconfig2_)
for daid in unique_daids]
else:
#config.chip_cfgstr = config.chip_cfg.get_cfgstr()
#config.chip_cfg_dict = config.chip_cfg.asdict()
annot1_list = [ibs.get_annot_lazy_dict(qaid, config2_=qconfig2_)
for qaid in unique_qaids]
annot2_list = [ibs.get_annot_lazy_dict(daid, config2_=dconfig2_)
for daid in unique_daids]
# precache flann structures
# TODO: Make depcache node
flann_params = {'algorithm': 'kdtree', 'trees': 8}
for annot1 in annot1_list:
if 'flann' not in annot1:
annot1['flann'] = lambda: vt.flann_cache(
annot1['vecs'], flann_params=flann_params, quiet=True,
verbose=False)
qaid_to_annot = dict(zip(unique_qaids, annot1_list))
daid_to_annot = dict(zip(unique_daids, annot2_list))
#all_aids = np.unique(ut.flatten([qaids, daids]))
verbose = False
#yeild_ = []
#print("START VSONE")
for qaid, daid in ut.ProgIter(zip(qaids, daids), nTotal=len(qaids),
lbl='compute vsone', backspace=True, freq=1):
annot1 = qaid_to_annot[qaid]
annot2 = daid_to_annot[daid]
metadata = {
'annot1': annot1,
'annot2': annot2,
}
vt_match = vt.vsone_matching2(metadata, cfgdict=config, verbose=verbose)
matchtup = vt_match.matches['TOP+SV']
H = vt_match.metadata['H_TOP']
score = matchtup.fs.sum()
fm = matchtup.fm
fs = matchtup.fs
match = ibeis.ChipMatch(
qaid=qaid,
daid_list=[daid],
fm_list=[fm],
fsv_list=[vt.atleast_nd(fs, 2)],
H_list=[H],
fsv_col_lbls=['L2_SIFT'])
match._update_daid_index()
match.evaluate_dnids(ibs)
match._update_daid_index()
match.set_cannonical_name_score([score], [score])
#import utool
#utool.embed()
if False:
ut.ensure_pylab_qt4()
ibs, depc, aid_list = testdata_core(size=3)
request = depc.new_request('vsone', aid_list, aid_list, {'dim_size': 450})
match.ishow_analysis(request)
#match = SingleMatch_IBEIS(qaid, daid, score, fm)
#yeild_.append((score, match))
yield (score, match)
class IndexerConfig(dtool.Config):
"""
Example:
>>> from ibeis.core_annots import * # NOQA
>>> cfg = VsOneConfig()
>>> result = str(cfg)
>>> print(result)
"""
_param_info_list = [
ut.ParamInfo('algorithm', 'kdtree', 'alg'),
ut.ParamInfo('random_seed', 42, 'seed'),
ut.ParamInfo('trees', 4, hideif=lambda cfg: cfg['algorithm'] != 'kdtree'),
ut.ParamInfo('version', 1),
]
_sub_config_list = [
#FeatConfig,
#ChipConfig, # TODO: infer chip config from feat config
#FeatWeightConfig,
]
def get_flann_params(cfg):
default_params = vt.get_flann_params(cfg['algorithm'])
flann_params = ut.update_existing(default_params, cfg.asdict())
return flann_params
testmode = ut.get_argflag('--testmode')
#if 1 or testmode:
@derived_attribute(
#tablename='neighbor_index', parents=['annotations*'],
#tablename='neighbor_index', parents=['annotations'],
#tablename='neighbor_index', parents=['feat*'],
tablename='neighbor_index', parents=['featweight*'],
# tablename='neighbor_index', parents=['feat*'],
#tablename='neighbor_index', parents=['feat'],
colnames=['indexer'], coltypes=[neighbor_index.NeighborIndex2],
configclass=IndexerConfig,
chunksize=1, fname='indexer',
)
def compute_neighbor_index(depc, fids_list, config):
r"""
Args:
depc (dtool.DependencyCache):
fids_list (list):
config (dtool.Config):
CommandLine:
python -m ibeis.core_annots --exec-compute_neighbor_index --show
python -m ibeis.control.IBEISControl --test-show_depc_annot_table_input --show --tablename=neighbor_index
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.core_annots import * # NOQA
>>> import ibeis
>>> ibs, aid_list = ibeis.testdata_aids('testdb1')
>>> depc = ibs.depc_annot
>>> fid_list = depc.get_rowids('feat', aid_list)
>>> aids_list = tuple([aid_list])
>>> fids_list = tuple([fid_list])
>>> # Compute directly from function
>>> config = ibs.depc_annot['neighbor_index'].configclass()
>>> result1 = list(compute_neighbor_index(depc, fids_list, config))
>>> nnindexer1 = result1[0][0]
>>> # Compute using depcache
>>> result2 = ibs.depc_annot.get('neighbor_index', [aids_list], 'indexer', config, recompute=False, _debug=True)
>>> #result3 = ibs.depc_annot.get('neighbor_index', [tuple(fids_list)], 'indexer', config, recompute=False)
>>> print(result2)
>>> print(result3)
>>> assert result2[0] is not result3[0]
>>> assert nnindexer1.knn(ibs.get_annot_vecs(1), 1) is not None
>>> assert result3[0].knn(ibs.get_annot_vecs(1), 1) is not None
"""
print('[IBEIS] COMPUTE_NEIGHBOR_INDEX:')
# TODO: allow augment
assert len(fids_list) == 1, 'only working with one indexer at a time'
fid_list = fids_list[0]
aid_list = depc.get_root_rowids('feat', fid_list)
flann_params = config.get_flann_params()
cfgstr = config.get_cfgstr()
verbose = True
nnindexer = neighbor_index.NeighborIndex2(flann_params, cfgstr)
# Initialize neighbor with unindexed data
support = nnindexer.get_support(depc, aid_list, config)
nnindexer.init_support(aid_list, *support, verbose=verbose)
nnindexer.config = config
nnindexer.reindex()
yield (nnindexer,)
#class FeatNeighborConfig(dtool.Config)
if testmode:
# NOT YET READY
@derived_attribute(
tablename='feat_neighbs', parents=['featweight', 'neighbor_index'],
colnames=['qfx2_idx', 'qfx2_dist'], coltypes=[np.ndarray, np.ndarray],
#configclass=IndexerConfig,
chunksize=1, fname='neighbors',
)
def compute_feature_neighbors(depc, fid_list, indexer_rowid_list, config):
"""
Args:
depc (dtool.DependencyCache):
aids_list (list):
config (dtool.Config):
CommandLine:
python -m ibeis.core_annots --exec-compute_feature_neighbors --show
python -m ibeis.control.IBEISControl --test-show_depc_annot_table_input --show --tablename=feat_neighbs
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.core_annots import * # NOQA
>>> #ibs, depc, aid_list = testdata_core(size=5)
>>> import ibeis
>>> ibs, qaid_list = ibeis.testdata_aids('seaturtles')
>>> daid_list = qaid_list
>>> depc = ibs.depc_annot
>>> index_config = ibs.depc_annot['neighbor_index'].configclass()
>>> fid_list = depc.get_rowids('feat', qaid_list)
>>> indexer_rowid_list = ibs.depc_annot.get_rowids('neighbor_index', [daid_list], index_config)
>>> config = ibs.depc_annot['feat_neighbs'].configclass()
>>> compute_feature_neighbors(depc, fid_list, indexer_rowid_list, config)
"""
print('[IBEIS] NEAREST NEIGHBORS')
#assert False
# do computation
#num_neighbors = (config['K'] + config['Knorm'])
ibs = depc.controller
num_neighbors = 1
#b = np.broadcast([1, 2, 3], [1])
#list(b)
#[(1, 1), (2, 1), (3, 1)]
# FIXME: not sure how depc should handle this case
# Maybe it groups by indexer_rowid_list and then goes from there.
indexer = depc.get_native('neighbor_index', indexer_rowid_list, 'indexer')[0]
qvecs_list = depc.get_native('feat', fid_list, 'vecs', eager=False, nInput=len(fid_list))
#qvecs_list = depc.get('feat', qaid_list, 'vecs', config, eager=False, nInput=len(qaid_list))
qaid_list = depc.get_ancestor_rowids('feat', fid_list)
ax2_encid = np.array(ibs.get_annot_encounter_text(indexer.ax2_aid))
for qaid, qfx2_vec in zip(qaid_list, qvecs_list):
qencid = ibs.get_annot_encounter_text([qaid])[0]
invalid_axs = np.where(ax2_encid == qencid)[0]
#indexer.ax2_aid[invalid_axs]
nnindxer = indexer
qfx2_idx, qfx2_dist, iter_count = nnindxer.conditional_knn(qfx2_vec,
num_neighbors,
invalid_axs)
yield qfx2_idx, qfx2_dist
# NOT YET READY
@derived_attribute(
tablename='sver', parents=['feat_neighbs'],
colnames=['chipmatch'], coltypes=[ChipMatch],
#configclass=IndexerConfig,
chunksize=1, fname='vsmany',
)
def compute_sver(depc, fid_list, config):
pass
@derived_attribute(
tablename='vsmany', parents=['sver'],
colnames=['chipmatch'], coltypes=[ChipMatch],
#configclass=IndexerConfig,
chunksize=1, fname='vsmany',
)
def compute_vsmany(depc, fid_list, config):
pass
class LabelerConfig(dtool.Config):
_param_info_list = [
ut.ParamInfo('labeler_sensitivity', 0.2),
]
_sub_config_list = [
ChipConfig
]
@derived_attribute(
tablename='labeler', parents=['annotations'],
colnames=['score', 'species', 'viewpoint', 'quality', 'orientation', 'probs'],
coltypes=[float, str, str, str, float, dict],
configclass=LabelerConfig,
fname='chipcache4',
chunksize=128,
)
def compute_labels_annotations(depc, aid_list, config=None):
r"""
Extracts the detections for a given input image
Args:
depc (ibeis.depends_cache.DependencyCache):
gid_list (list): list of image rowids
config (dict): (default = None)
Yields:
(float, str): tup
CommandLine:
ibeis compute_labels_annotations
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.core_images import * # NOQA
>>> import ibeis
>>> defaultdb = 'PZ_MTEST'
>>> ibs = ibeis.opendb(defaultdb=defaultdb)
>>> depc = ibs.depc_annot
>>> aid_list = ibs.get_valid_aids()[0:8]
>>> # depc.delete_property('labeler', aid_list)
>>> results = depc.get_property('labeler', aid_list, None)
>>> print(results)
"""
from ibeis.algo.detect.labeler.labeler import label_chip_list
print('[ibs] Process Annotation Labels')
print('config = %r' % (config,))
# Get controller
ibs = depc.controller
depc = ibs.depc_annot
config = {
'dim_size' : (128, 128),
'resize_dim' : 'wh',
}
chip_list = depc.get_property('chips', aid_list, 'img', config=config)
result_list = label_chip_list(chip_list)
# yield detections
for result in result_list:
yield result
if __name__ == '__main__':
r"""
CommandLine:
python -m ibeis.core_annots
python -m ibeis.core_annots --allexamples
utprof.py -m ibeis.core_annots --allexamples
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
|
smenon8/ibeis
|
ibeis/core_annots.py
|
core_annots.py
|
py
| 57,012 |
python
|
en
|
code
| null |
github-code
|
6
|
70952123708
|
def func(file):
with open(file) as d:
text = d.readlines()
for line in text:
            # each line is a str, so substitute directly; split() would return
            # a list, which has no replace() method
            print(line.replace('172', '192'), end='')
file1='running-config.cfg'
func(file1)
dict={}
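# Quick check of the substitution on a hypothetical config line (not taken
# from running-config.cfg):
#
#   "ip address 172.16.0.1 255.255.0.0" -> "ip address 192.16.0.1 255.255.0.0"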
|
inwk6312fall2017/programming-task-final-lavneeshj
|
task3.py
|
task3.py
|
py
| 201 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40197352017
|
from django.urls import path
from . import views
app_name = "Employees"
urlpatterns = [
path('profile', views.profile, name="profile"),
path('edit_profile', views.editprofile, name="edit_profile"),
path('check_employee', views.checkemployee, name="check_employee"),
path('employee_position', views.employeeposition, name="employee_position"),
path('modify_permissions', views.modifypermissions, name="modify_permissions"),
path('access_requests', views.access_request, name="access_request"),
path('access_requests_list', views.access_request_list, name="access_request_list"),
]
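# A minimal sketch of how these routes would be mounted from a project-level
# urls.py (the 'employees/' prefix is an assumption, not taken from this repo):
#
#   from django.urls import include, path
#
#   urlpatterns = [
#       path('employees/', include('Employees.urls')),
#   ]
#
# With app_name = "Employees", views can then be reversed as e.g.
# reverse('Employees:profile').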
|
jakubbm/employees-management
|
Employees/urls.py
|
urls.py
|
py
| 615 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2772837306
|
# Given a string, determine if it is a palindrome, considering only alphanumeric characters and ignoring cases.
# Note: For the purpose of this problem, we define empty string as valid palindrome.
# Example 1:
# Input: "A man, a plan, a canal: Panama"
# Output: true
# Example 2:
# Input: "race a car"
# Output: false
class Solution:
def isPalindrome(self, s: str) -> bool:
        # Check by traversing the string directly:
        # first strip out non-alphanumeric characters,
        # converting uppercase to lowercase beforehand
s = s.lower()
i = 0
while i<len(s):
if (ord(s[i])>=ord('a') and ord(s[i])<=ord('z'))or (ord(s[i])>=ord('0') and ord(s[i])<=ord('9')) :
i+=1
else:
s = s[:i]+s[i+1:]
# print(s)
n = len(s)
for i in range(n//2):
if s[i] != s[n-1-i]:
return False
return True
def isPalindrome1(self,s:str):
if len(s) == 0:
return True
lo, hi = 0, len(s) - 1
while lo<hi:
if not s[lo].isalnum():
lo += 1; continue
if not s[hi].isalnum():
hi -= 1; continue
if s[lo].lower() != s[hi].lower():
return False
lo += 1; hi -= 1
return True
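# Quick check against the two examples from the problem statement (this
# harness is mine, not part of the LeetCode submission):
if __name__ == "__main__":
    sol = Solution()
    assert sol.isPalindrome("A man, a plan, a canal: Panama") is True
    assert sol.isPalindrome("race a car") is False
    assert sol.isPalindrome1("") is True  # empty string counts as a palindrome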
|
queryor/algorithms
|
leetcode/125. Valid Palindrome.py
|
125. Valid Palindrome.py
|
py
| 1,300 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28521286035
|
"""Added instructions html to make instructions dynamic
Revision ID: 6a1ef6fabfaf
Revises: 1c8b21137307
Create Date: 2017-08-12 01:36:17.185403
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '6a1ef6fabfaf'
down_revision = '1c8b21137307'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('test', sa.Column('instruction_html', mysql.LONGTEXT(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('test', 'instruction_html')
# ### end Alembic commands ###
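# Applying or reverting this revision uses the standard Alembic CLI, run from
# the directory that holds alembic.ini (environment details are assumptions):
#
#   alembic upgrade 6a1ef6fabfaf    # or: alembic upgrade head
#   alembic downgrade 1c8b21137307  # undo just this migration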
|
harveyslash/backend-cleaned
|
beatest/migrations/versions/0005_6a1ef6fabfaf_added_instructions_html_to_make_.py
|
0005_6a1ef6fabfaf_added_instructions_html_to_make_.py
|
py
| 748 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73823047227
|
def climb(n):
a = 0
b = 1
c = 0
if n == 1:
return 1
for i in range(n):
c = a + b
a = b
b = c
return c
print(climb(5))
# time complexity: o(n)
# space complexity: o(1)
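# Sanity checks: the loop reproduces the Fibonacci-style recurrence
# climb(n) == climb(n - 1) + climb(n - 2); these asserts are extra, not part
# of the original snippet.
assert climb(2) == 2
assert climb(3) == 3
assert climb(5) == climb(4) + climb(3)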
|
jateen67/leetcode
|
1d dynamic programming/easy/70_climbing_stairs.py
|
70_climbing_stairs.py
|
py
| 229 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29639554051
|
# -*- coding: utf-8 -*-
from odoo import api, fields, models
class CrmLead(models.Model):
_name = 'crm.lead'
_inherit = ['crm.lead', 'l10n_cl.counties.mixin']
city_id = fields.Many2one(
"res.city", string="Commune",
help="Commune of the lead")
@api.onchange('city_id')
def _onchange_city_id(self):
"""Autocomplete the address based on a selected city."""
self.full_city_address()
@api.model
def create(self, values):
"""Autocomplete the address in the case that only the city
has been provided."""
res = super().create(values)
target_id = self.env.ref('base.cl')
if res.country_id.id == target_id.id and res.city_id\
and not res.state_id and not res.city:
res.full_city_address()
return res
|
OdooJC/Scientech-YVes
|
Custom/crm_counties/models/crm_lead.py
|
crm_lead.py
|
py
| 833 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73726701627
|
from django.shortcuts import render
from .models import Post, Categories
# Create your views here.
def blog(request):
post = Post.objects.all()
cat = [i.categories.all()[0] for i in post]
cat = list(set(cat))
return render(request, 'blog/blog.html',
{'posts': post, 'categories': cat})
def category(request, category_id):
cat = Categories.objects.get(id=category_id)
posts = Post.objects.filter(categories=cat)
print(cat)
return render(request,
'blog/categories.html',
{'categories': cat, 'posts': posts})
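# A minimal sketch of the URL patterns these views assume (route names and
# prefixes are guesses, not taken from this repo's urls.py):
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('', views.blog, name='blog'),
#       path('category/<int:category_id>/', views.category, name='category'),
#   ]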
|
rebecalvarezc/django_clases
|
firstWeb/blogApp/views.py
|
views.py
|
py
| 596 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28684800381
|
num=int(input("Enter the number of terms for the Fizz_buzz: "))
for i in range(1,num+1):
if i % 3 == 0 and i % 5 == 0:
print("Fizz_Buzz")
elif i % 3 == 0:
print("Fizz")
elif i % 5 == 0:
print("Buzz")
else:
print(i)
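# Deterministic variant of the same rules, handy for checking without input()
# (the helper name is mine, not part of the original script):
def fizz_buzz_terms(num):
    terms = []
    for i in range(1, num + 1):
        if i % 15 == 0:  # divisible by both 3 and 5
            terms.append("Fizz_Buzz")
        elif i % 3 == 0:
            terms.append("Fizz")
        elif i % 5 == 0:
            terms.append("Buzz")
        else:
            terms.append(str(i))
    return terms

# fizz_buzz_terms(15)[-1] == "Fizz_Buzz"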
|
Heinrich-Swart/FizzBuzz
|
Fizzbuzz.py
|
Fizzbuzz.py
|
py
| 266 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21763637442
|
# Approach 3: Hash Map
# Time: O(n*log(n))
# Space: O(n)
from typing import List


class Solution:
def findWinners(self, matches: List[List[int]]) -> List[List[int]]:
losses_count = {}
for winner, loser in matches:
losses_count[winner] = losses_count.get(winner, 0)
losses_count[loser] = losses_count.get(loser, 0) + 1
zero_lose, one_lose = [], []
for player, count in losses_count.items():
if count == 0:
zero_lose.append(player)
elif count == 1:
one_lose.append(player)
return [sorted(zero_lose), sorted(one_lose)]
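# Quick check on a small example (this harness is mine, not part of the
# LeetCode submission):
if __name__ == "__main__":
    matches = [[1, 3], [2, 3], [3, 6], [5, 6], [5, 7],
               [4, 5], [4, 8], [4, 9], [10, 4], [10, 9]]
    print(Solution().findWinners(matches))
    # expected: [[1, 2, 10], [4, 5, 7, 8]]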
|
jimit105/leetcode-submissions
|
problems/find_players_with_zero_or_one_losses/solution.py
|
solution.py
|
py
| 668 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34825925053
|
# TODO: add a hash sum to the judge solution file name on the web-app side, because it can break everything otherwise
import os
import shutil
import subprocess
from typing import List
import source.models
from .service import sequence_to_dict
from .static import *
from .config import BUILD_SOURCE_MAX_TIME, SQL_GET_TASK_ATTRIBUTE, SQL_GET_CODE_FILE
class TaskManager(object):
env_dir: str = 'environment'
input_file_name: str = 'input.in'
output_file_name: str = 'output.out'
def __init__(self, event: source.models.Event):
self.working_dir = os.getcwd()
self.solution = event.solution
self.code_file: source.models.CodeFile = event.code_file
self.tests = event.tests
self.judge_solution_source: source.models.CodeFile = source.models.CodeFile()
def prepare_environment(self):
env_path: str = os.path.join(self.working_dir, self.env_dir)
if os.path.exists(env_path):
shutil.rmtree(env_path)
os.mkdir(env_path)
code_file: str = os.path.join(env_path, self.code_file.file_name)
input_file: str = os.path.join(env_path, self.input_file_name)
output_file: str = os.path.join(env_path, self.output_file_name)
__code = open(code_file, write_mode)
__code.write(self.code_file.code)
__code.close()
__input = open(input_file, write_mode)
__input.close()
__output = open(output_file, write_mode)
__output.close()
db = source.models.DataBase()
db.execute(SQL_GET_CODE_FILE, self.__get_task_solution_file_id__())
code_file_attributes = {}
for result in db.result():
code_file_attributes = sequence_to_dict(result, class_CodeFile_attributes)
code_file_obj: source.models.CodeFile = source.models.CodeFile(**code_file_attributes)
__judge_code = open(os.path.join(self.working_dir, self.env_dir, code_file_obj.file_name), write_mode)
__judge_code.write(code_file_obj.code)
__judge_code.close()
self.judge_solution_source = code_file_obj
print(os.listdir(env_path))
# noinspection DuplicatedCode
def check_solution_event(self):
self.prepare_environment()
if self.__build__() != 0:
self.solution.__update__('verdict', source.models.Verdict.BUILD_FAILED)
self.solution.__update__('verdict_text', 'Ошибка компиляции')
return
_1 = BuildHandler.executable_file_name
_2 = BuildHandler.build_log_file_name
BuildHandler.executable_file_name = BuildHandler.executable_judge_file_name
BuildHandler.build_log_file_name = BuildHandler.judge_build_log_file_name
self.build_judge_solution(path=os.path.join(self.working_dir,
self.env_dir,
self.judge_solution_source.file_name),
lang=self.judge_solution_source.language)
BuildHandler.executable_file_name = _1
BuildHandler.build_log_file_name = _2
correct_tests_number: int = 0
points: int = 0
grading_system = None
for test_number, test in enumerate(self.tests):
input_file = open(os.path.join(self.env_dir, self.input_file_name), write_mode)
input_file.write(test.content)
input_file.close()
print(f'Running on test {test_number + 1}')
exec_code = self.__execute__(test_number=test_number + 1)
print('execute code', exec_code)
if exec_code > 0:
self.solution.__update__('verdict', source.models.Verdict.RUNTIME_ERROR)
self.solution.__update__('verdict_text', f'Ошибка исполнения на тесте {test_number + 1}')
return
elif exec_code < 0:
self.solution.__update__('verdict', source.models.Verdict.TIME_LIMIT_ERROR)
self.solution.__update__('verdict_text', f'Превышен лимит времени на тесте {test_number + 1}')
return
output_file = open(os.path.join(self.env_dir, self.output_file_name), read_mode)
user_output = output_file.readlines()
output_file.close()
answer_type = self.__get_task_answer_type__()
grading_system = self.__get_task_grading_system__()
if answer_type == source.models.TaskAnswerType.CONSTANT_ANSWER:
if self.is_constant_answer_valid(user_output=user_output, test_number=test_number):
correct_tests_number += 1
elif answer_type == source.models.TaskAnswerType.VARIABLE_ANSWER:
judge_verdict: int = self.is_variable_answer_valid(user_output=user_output, test_number=test_number)
if grading_system == source.models.TaskGradingSystem.BINARY:
correct_tests_number += 1 if judge_verdict > 0 else 0
elif grading_system == source.models.TaskGradingSystem.BINARY_FOR_EACH_TEST:
correct_tests_number += 1 if judge_verdict > 0 else 0
elif grading_system == source.models.TaskGradingSystem.N_POINTS_FOR_EACH_TEST:
correct_tests_number += 1 if judge_verdict > 0 else 0
points += judge_verdict
if grading_system == source.models.TaskGradingSystem.BINARY:
self.solution.__update__('points', 1 if correct_tests_number == len(self.tests) else 0)
if correct_tests_number == len(self.tests):
self.solution.__update__('verdict', source.models.Verdict.CORRECT_SOLUTION)
self.solution.__update__('verdict_text', f'Все тесты пройдены')
else:
self.solution.__update__('verdict', source.models.Verdict.WRONG_ANSWER)
else:
if correct_tests_number == len(self.tests):
self.solution.__update__('verdict', source.models.Verdict.CORRECT_SOLUTION)
self.solution.__update__('verdict_text', f'Все тесты пройдены')
elif 0 < correct_tests_number < len(self.tests):
self.solution.__update__('verdict', source.models.Verdict.PARTIAL_SOLUTION)
elif correct_tests_number == 0:
self.solution.__update__('verdict', source.models.Verdict.WRONG_ANSWER)
if grading_system == source.models.TaskGradingSystem.BINARY_FOR_EACH_TEST:
self.solution.__update__('points', correct_tests_number)
else:
self.solution.__update__('points', points)
# noinspection DuplicatedCode
def validate_task_event(self):
self.solution.__update__('status', source.models.Status.IN_PROGRESS)
self.prepare_environment()
if self.__build__() != 0:
self.solution.__update__('verdict', source.models.Verdict.BUILD_FAILED)
self.solution.__update__('status', source.models.Status.CHECK_FAILED)
return
for test_number, test in enumerate(self.tests):
input_file = open(os.path.join(self.env_dir, self.input_file_name), write_mode)
input_file.write(test.content)
input_file.close()
print(f'Running on test {test_number + 1}')
exec_code = self.__execute__(test_number=test_number)
if exec_code > 0:
self.solution.__update__('verdict', source.models.Verdict.RUNTIME_ERROR)
self.solution.__update__('status', source.models.Status.CHECK_FAILED)
return
elif exec_code < 0:
self.solution.__update__('verdict', source.models.Verdict.TIME_LIMIT_ERROR)
self.solution.__update__('status', source.models.Status.CHECK_FAILED)
return
output_file = open(os.path.join(self.env_dir, self.output_file_name), read_mode)
judge_output = output_file.read()
output_file.close()
test.__update__('right_answer', judge_output)
self.solution.__update__('verdict', source.models.Verdict.CORRECT_SOLUTION)
self.solution.__update__('status', source.models.Status.CHECK_SUCCESS)
def __get_task_grading_system__(self):
return source.models.Task.get_attribute('grading_system', self.solution.task_id)
def __get_task_time_limit__(self):
return source.models.Task.get_attribute('time_limit_seconds', self.solution.task_id)
def __get_task_answer_type__(self):
return source.models.Task.get_attribute('answer_type', self.solution.task_id)
def __get_task_solution_file_id__(self):
return source.models.Task.get_attribute('solution_file_id', self.solution.task_id)
def build_judge_solution(self, path, lang):
return self.__build__(path=path, lang=lang, )
def __build__(self, **kwargs) -> 'Return code':
""":param kwargs - can contain 'lang' and 'path' """
print('lang', 0 if 'lang' not in kwargs else kwargs['lang'])
if BuildHandler.get_execution_type(
self.code_file.language
if 'lang' not in kwargs else kwargs['lang']
) == source.models.CodeExecuteType.BUILD_AND_RUN:
build_handler = BuildHandler(source_file_path=os.path.join(self.working_dir,
self.env_dir,
self.code_file.file_name
if 'path' not in kwargs else kwargs['path']),
language=self.code_file.language
if 'lang' not in kwargs else kwargs['lang'])
return build_handler.build()
else:
return 0
def __execute__(self, test_number: int):
execute_handler = ExecuteHandler(executable_file_path=self.get_execute_path(self.code_file.language),
language=self.code_file.language,
time_limit=self.__get_task_time_limit__(),
test_number=test_number)
return execute_handler.execute()
def is_constant_answer_valid(self, user_output, test_number: int) -> bool:
print('right ans:\n', TaskManager.string_to_array(
self.tests[test_number].right_answer))
print('user ans: \n', user_output)
if TaskManager.handle_output_array(
TaskManager.string_to_array(
self.tests[test_number].right_answer)) == TaskManager.handle_output_array(user_output):
return True
return False
def is_variable_answer_valid(self, user_output, test_number: int) -> int or None:
judge_execution_handler: ExecuteHandler = ExecuteHandler(
executable_file_path=os.path.join(self.env_dir,
BuildHandler.executable_judge_file_name),
language=self.judge_solution_source.language,
time_limit=2,
test_number=test_number)
user_out: str = open(f'{os.path.join(self.working_dir, self.env_dir, self.output_file_name)}', read_mode).read()
test_content: str = self.tests[test_number].content
print('user output:', user_out)
judge_input_content: str = f'{test_content}\n{user_out}'
print('judge input:\n=====\n', judge_input_content, '\n=====')
__input = open(f'{os.path.join(self.working_dir, self.env_dir, self.input_file_name)}', write_mode)
__input.write(judge_input_content)
__input.close()
if judge_execution_handler.execute() != 0:
return 0
output = open(os.path.join(self.working_dir, self.env_dir, self.output_file_name), read_mode)
print('Judge output:', output.read())
output.seek(0)
return int(output.read())
@staticmethod
def string_to_array(string) -> List[str]:
result: list = []
for line in string.split('\n'):
result.append(line)
return result
@staticmethod
def string_drop_special_symbols(string: str) -> str:
return string.replace('\n', '').replace('\r', '').replace('\t', '')
@staticmethod
def handle_output_array(array: List[str] or List[List[str]]) -> List[List[str]]:
for i in range(len(array)):
array[i] = TaskManager.string_drop_special_symbols(array[i]).split()
return array
# noinspection DuplicatedCode
def get_execute_path(self, language: source.models.Language):
execute_path = {
source.models.Language.GNU_ASM: self.get_gnu_exe_path,
source.models.Language.GNU_C99: self.get_gnu_exe_path,
source.models.Language.GNU_C11: self.get_gnu_exe_path,
source.models.Language.GNU_CXX_11: self.get_gnu_exe_path,
source.models.Language.GNU_CXX_14: self.get_gnu_exe_path,
source.models.Language.GNU_CXX_17: self.get_gnu_exe_path,
source.models.Language.GNU_CXX_20: self.get_gnu_exe_path,
source.models.Language.PYTHON_2_7: self.get_absolute_path,
source.models.Language.PYTHON_3_9: self.get_absolute_path,
source.models.Language.JAVA_8: self.get_class_name,
}
try:
return execute_path[language]()
except KeyError:
return None
def get_absolute_path(self):
return os.path.join(self.working_dir, self.env_dir, self.code_file.file_name)
def get_gnu_exe_path(self):
return os.path.join(self.env_dir, BuildHandler.executable_file_name)
def get_class_name(self):
return self.code_file.file_name.split('.')[0]
class BuildHandler(object):
executable_file_name: str = 'solution_executable'
executable_judge_file_name: str = 'judge_solution_executable'
build_log_file_name: str = 'build_log.out'
judge_build_log_file_name: str = 'judge_build_log.out'
def __init__(self, source_file_path: str, language: source.models.Language):
print('build init')
self.working_dir = os.getcwd()
self.source_file = source_file_path
self.language = language
@staticmethod
def get_execution_type(language: source.models.Language):
print('get_execution_type')
lang_info = {
source.models.Language.GNU_ASM: source.models.CodeExecuteType.BUILD_AND_RUN,
source.models.Language.GNU_C99: source.models.CodeExecuteType.BUILD_AND_RUN,
source.models.Language.GNU_C11: source.models.CodeExecuteType.BUILD_AND_RUN,
source.models.Language.GNU_CXX_11: source.models.CodeExecuteType.BUILD_AND_RUN,
source.models.Language.GNU_CXX_14: source.models.CodeExecuteType.BUILD_AND_RUN,
source.models.Language.GNU_CXX_17: source.models.CodeExecuteType.BUILD_AND_RUN,
source.models.Language.GNU_CXX_20: source.models.CodeExecuteType.BUILD_AND_RUN,
source.models.Language.PYTHON_2_7: source.models.CodeExecuteType.JUST_RUN,
source.models.Language.PYTHON_3_9: source.models.CodeExecuteType.JUST_RUN,
source.models.Language.JAVA_8: source.models.CodeExecuteType.BUILD_AND_RUN,
}
try:
return lang_info[language]
except KeyError:
return None
def build(self) -> 'Return code':
print('build')
build_command: str = self.get_build_command(source_path=self.source_file,
exe_path=os.path.join(
self.working_dir,
TaskManager.env_dir,
self.executable_file_name
),
language=self.language)
print()
print(build_command)
build_process = subprocess.Popen(
build_command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
try:
build_process.wait(BUILD_SOURCE_MAX_TIME)
except subprocess.TimeoutExpired:
return 1
log = open(os.path.join(self.working_dir,
TaskManager.env_dir,
self.build_log_file_name), 'a')
log.write(build_process.communicate()[0].decode('utf-8'))
log.write(build_process.communicate()[1].decode('utf-8'))
log.close()
print('code is', build_process.poll())
return build_process.poll()
def get_build_command(self, source_path: str,
exe_path: str,
language: source.models.Language):
print('get_build_command')
build_command = {
source.models.Language.GNU_ASM: self.gbc_gnu_asm,
source.models.Language.GNU_C99: self.gbc_gnu_gcc_c99,
source.models.Language.GNU_C11: self.gbc_gnu_gcc_c11,
source.models.Language.GNU_CXX_11: self.gbc_gnu_gxx_cxx11,
source.models.Language.GNU_CXX_14: self.gbc_gnu_gxx_cxx14,
source.models.Language.GNU_CXX_17: self.gbc_gnu_gxx_cxx17,
source.models.Language.GNU_CXX_20: self.gbc_gnu_gxx_cxx20,
source.models.Language.JAVA_8: self.gbc_java8,
}
try:
return build_command[language](source_path=source_path,
exe_path=exe_path,
log=os.path.join(self.working_dir,
TaskManager.env_dir,
self.build_log_file_name))
except KeyError:
print('key error')
return None
@staticmethod
def gbc_gnu_asm(source_path: str, exe_path: str, log: str):
return f'gcc -s {source_path} -o {exe_path} -Wall -v -O3 -Ofast > {log}'
@staticmethod
def gbc_gnu_gcc_c99(source_path: str, exe_path: str, log: str):
return f'gcc -std=c99 {source_path} -o {exe_path} -Wall -v -O3 -Ofast > {log}'
@staticmethod
def gbc_gnu_gcc_c11(source_path: str, exe_path: str, log: str):
return f'gcc -std=c11 {source_path} -o {exe_path} -Wall -v -O3 -Ofast > {log}'
@staticmethod
def gbc_gnu_gxx_cxx11(source_path: str, exe_path: str, log: str):
return f'g++ -std=c++11 {source_path} -o {exe_path} -Wall -v -O3 -Ofast > {log}'
@staticmethod
def gbc_gnu_gxx_cxx14(source_path: str, exe_path: str, log: str):
return f'g++ -std=c++14 {source_path} -o {exe_path} -Wall -v -O3 -Ofast > {log}'
@staticmethod
def gbc_gnu_gxx_cxx17(source_path: str, exe_path: str, log: str):
return f'g++ -std=c++17 {source_path} -o {exe_path} -Wall -v -O3 -Ofast > {log}'
@staticmethod
def gbc_gnu_gxx_cxx20(source_path: str, exe_path: str, log: str):
return f'g++ -std=c++2a {source_path} -o {exe_path} -Wall -v -O3 -Ofast > {log}'
@staticmethod
def gbc_java8(source_path: str, exe_path: str, log: str):
return f'javac -cp "{os.path.join(os.getcwd(), TaskManager.env_dir)}" {source_path} > {log}'
class ExecuteHandler(object):
execute_log_file_name: str = 'execute_log.out'
def __init__(self, executable_file_path: str,
language: source.models.Language,
time_limit: int,
test_number: int):
self.executable_path = executable_file_path
self.language = language
self.time_limit = time_limit
self.test_number = test_number
try:
self.executable_class = executable_file_path.split('/')[-1].split('.')[0]
print(self.executable_class)
except IndexError:
pass
def get_execute_command(self):
execute_command: dict = {
source.models.Language.GNU_ASM: self.gec_gnu_asm,
source.models.Language.GNU_C99: self.gec_gnu_gcc_c99,
source.models.Language.GNU_C11: self.gec_gnu_gcc_c11,
source.models.Language.GNU_CXX_11: self.gec_gnu_gxx_cxx11,
source.models.Language.GNU_CXX_14: self.gec_gnu_gxx_cxx14,
source.models.Language.GNU_CXX_17: self.gec_gnu_gxx_cxx17,
source.models.Language.GNU_CXX_20: self.gec_gnu_gxx_cxx20,
source.models.Language.PYTHON_2_7: self.gec_python2,
source.models.Language.PYTHON_3_9: self.gec_python3,
source.models.Language.JAVA_8: self.gec_java8,
}
try:
return execute_command[self.language](self.executable_path)
except KeyError:
return None
@staticmethod
def get_iostream_route():
wd: str = os.getcwd()
env: str = TaskManager.env_dir
in_s: str = TaskManager.input_file_name
out_s: str = TaskManager.output_file_name
return f' < {os.path.join(wd, env, in_s)} > {os.path.join(wd, env, out_s)}'
@staticmethod
def gec_gnu_asm(executable_path: str):
return f'.{executable_path}' + ExecuteHandler.get_iostream_route()
@staticmethod
def gec_gnu_gcc_c99(executable_path: str):
return f'./{executable_path}' + ExecuteHandler.get_iostream_route()
@staticmethod
def gec_gnu_gcc_c11(executable_path: str):
return f'./{executable_path}' + ExecuteHandler.get_iostream_route()
@staticmethod
def gec_gnu_gxx_cxx11(executable_path: str):
return f'./{executable_path}' + ExecuteHandler.get_iostream_route()
@staticmethod
def gec_gnu_gxx_cxx14(executable_path: str):
return f'./{executable_path}' + ExecuteHandler.get_iostream_route()
@staticmethod
def gec_gnu_gxx_cxx17(executable_path: str):
return f'./{executable_path}' + ExecuteHandler.get_iostream_route()
@staticmethod
def gec_gnu_gxx_cxx20(executable_path: str):
return f'./{executable_path}' + ExecuteHandler.get_iostream_route()
@staticmethod
def gec_python2(executable_path: str):
return f'python2 {executable_path}' + ExecuteHandler.get_iostream_route()
@staticmethod
def gec_python3(executable_path: str):
return f'python3 {executable_path}' + ExecuteHandler.get_iostream_route()
@staticmethod
def gec_java8(executable_path: str, **kwargs):
env_dir_path: str = os.path.join(os.getcwd(), TaskManager.env_dir)
return f'java -cp "{env_dir_path}/:{env_dir_path}/*" {executable_path}' + ExecuteHandler.get_iostream_route()
def execute(self):
execute_command: str = self.get_execute_command()
print(execute_command)
print(os.listdir(os.path.join(os.getcwd(), TaskManager.env_dir)))
execute_process = subprocess.Popen(execute_command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
time_limit_expired: bool = False
try:
execute_process.wait(self.time_limit)
except subprocess.TimeoutExpired:
execute_process.terminate()
execute_process.kill()
print('Time limit exceeded!')
print(execute_process.pid)
return -1
status = execute_process.poll()
print('status is', status)
execute_process.kill()
print(time_limit_expired)
stdout, stderr = execute_process.communicate()
log = open(
os.path.join(os.getcwd(),
TaskManager.env_dir,
f'test_{self.test_number}_' + self.execute_log_file_name), 'a')
log.write(stdout.decode('utf-8'))
log.write(stderr.decode('utf-8'))
log.close()
return status
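# A minimal, self-contained sketch of the time-limit pattern that
# ExecuteHandler.execute relies on; 'sleep 10' is an illustrative command,
# not something the judge actually runs.
def _time_limit_sketch(command='sleep 10', limit=2):
    proc = subprocess.Popen(command, shell=True,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    try:
        proc.wait(limit)  # per-task time limit in seconds
    except subprocess.TimeoutExpired:
        proc.kill()
        return -1         # maps to TIME_LIMIT_ERROR in execute()
    return proc.poll()    # 0 on success; >0 maps to RUNTIME_ERROR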
|
TolimanStaR/AtomicJudge
|
source/task_manager.py
|
task_manager.py
|
py
| 24,126 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4641112247
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function, absolute_import
__author__ = 'Andres'
import re
# Matches tags
tagRE = re.compile(r'(.*?)<(/?\w+)[^>]*>(?:([^<]*)(<.*?>)?)?')
# Matches bold/italic
bold_italic = re.compile(r"'''''(.*?)'''''")
bold = re.compile(r"'''(.*?)'''")
italic_quote = re.compile(r"''\"([^\"]*?)\"''")
italic = re.compile(r"''(.*?)''")
bulletlist = re.compile(r'\*{1,}')
# Matches space
spaces = re.compile(r' {2,}')
# Matches dots, newlines
dots = re.compile(r'\.{4,}')
newlines = re.compile(r'\n{2,}')
#templates = re.compile(r'\.{4,}')
discardElements = [
'gallery', 'timeline', 'noinclude', 'pre',
'table', 'tr', 'td', 'th', 'caption', 'div',
'form', 'input', 'select', 'option', 'textarea',
'ul', 'li', 'ol', 'dl', 'dt', 'dd', 'menu', 'dir',
'ref', 'references', 'img', 'imagemap', 'source', 'small'
]
comment = re.compile(r'<!--.*?-->', re.DOTALL)
selfClosingTags = [ 'br', 'hr', 'nobr', 'references', 'nowiki' ]
# These tags are dropped, keeping their content.
# handle 'a' separately, depending on keepLinks
ignoredTags = [
'abbr', 'b', 'big', 'blockquote', 'center', 'cite', 'div', 'em',
'font', 'h1', 'h2', 'h3', 'h4', 'hiero', 'i', 'kbd', 'nowiki',
'p', 'plaintext', 's', 'span', 'strike', 'strong',
'sub', 'sup', 'tt', 'u', 'var',
]
placeholder_tags = {'math':'formula', 'code':'codice'}
quote_quote = re.compile(r'""([^"]*?)""')
# Match selfClosing HTML tags
selfClosing_tag_patterns = [
re.compile(r'<\s*%s\b[^>]*/\s*>' % tag, re.DOTALL | re.IGNORECASE) for tag in selfClosingTags
]
# Match HTML placeholder tags
placeholder_tag_patterns = [
(re.compile(r'<\s*%s(\s*| [^>]+?)>.*?<\s*/\s*%s\s*>' % (tag, tag), re.DOTALL | re.IGNORECASE),
repl) for tag, repl in placeholder_tags.items()
]
# Match ignored tags
ignored_tag_patterns = []
def ignoreTag(tag):
left = re.compile(r'<%s\b[^>/]*>' % tag, re.IGNORECASE) # both <ref> and <reference>
right = re.compile(r'</\s*%s>' % tag, re.IGNORECASE)
ignored_tag_patterns.append((left, right))
for tag in ignoredTags:
ignoreTag(tag)
def dropNested(text, openDelim, closeDelim):
"""
A matching function for nested expressions, e.g. namespaces and tables.
"""
openRE = re.compile(openDelim)
closeRE = re.compile(closeDelim)
# partition text in separate blocks { } { }
spans = [] # pairs (s, e) for each partition
nest = 0 # nesting level
start = openRE.search(text, 0)
if not start:
return text
end = closeRE.search(text, start.end())
next = start
while end:
next = openRE.search(text, next.end())
if not next: # termination
while nest: # close all pending
nest -=1
end0 = closeRE.search(text, end.end())
if end0:
end = end0
else:
break
spans.append((start.start(), end.end()))
break
while end.end() < next.start():
# { } {
if nest:
nest -= 1
# try closing more
last = end.end()
end = closeRE.search(text, end.end())
if not end: # unbalanced
if spans:
span = (spans[0][0], last)
else:
span = (start.start(), last)
spans = [span]
break
else:
spans.append((start.start(), end.end()))
# advance start, find next close
start = next
end = closeRE.search(text, next.end())
break # { }
if next != start:
# { { }
nest += 1
# collect text outside partitions
return dropSpans(spans, text)
def dropSpans(spans, text):
"""
Drop from text the blocks identified in :param spans:, possibly nested.
"""
spans.sort()
res = ''
offset = 0
for s, e in spans:
if offset <= s: # handle nesting
if offset < s:
res += text[offset:s]
offset = e
res += text[offset:]
return res
def clean(text):
"""
Transforms wiki markup.
@see https://www.mediawiki.org/wiki/Help:Formatting
"""
text = bold_italic.sub(r'\1', text)
text = bold.sub(r'\1', text)
text = italic_quote.sub(r'"\1"', text)
text = italic.sub(r'"\1"', text)
text = quote_quote.sub(r'"\1"', text)
# residuals of unbalanced quotes
text = text.replace("'''", '').replace("''", '"')
text = newlines.sub(r'\n', text)
text = bulletlist.sub(r'', text)
# Collect spans
spans = []
# Drop HTML comments
for m in comment.finditer(text):
spans.append((m.start(), m.end()))
# Drop self-closing tags
for pattern in selfClosing_tag_patterns:
for m in pattern.finditer(text):
spans.append((m.start(), m.end()))
# Drop ignored tags
for left, right in ignored_tag_patterns:
for m in left.finditer(text):
spans.append((m.start(), m.end()))
for m in right.finditer(text):
spans.append((m.start(), m.end()))
# Bulk remove all spans
text = dropSpans(spans, text)
# Drop discarded elements
for tag in discardElements:
text = dropNested(text, r'<\s*%s\b[^>/]*>' % tag, r'<\s*/\s*%s>' % tag)
# Expand placeholders
for pattern, placeholder in placeholder_tag_patterns:
index = 1
for match in pattern.finditer(text):
text = text.replace(match.group(), '%s_%d' % (placeholder, index))
index += 1
text = text.replace('<<', u'«').replace('>>', u'»')
#############################################
# Cleanup text
text = text.replace('\t', ' ')
text = spaces.sub(' ', text)
text = dots.sub('...', text)
    # drop stray spaces before closing punctuation and after opening punctuation
    text = re.sub(u' ([,:.\)\]»])', r'\1', text)
    text = re.sub(u'([\[\(«]) ', r'\1', text)
text = re.sub(r'\n\W+?\n', '\n', text, flags=re.U) # lines with only punctuations
text = text.replace(',,', ',').replace(',.', '.')
return text
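# A rough sketch of what clean() does to inline markup (the string is mine,
# not from armeenia.txt): bold/italic quoting is stripped, ignored HTML tags
# and comments are dropped, and spacing is normalised.
#
#   clean("'''Tallinn''' is the ''capital'' of <b>Estonia</b>.<!-- note -->")
#   -> roughly: Tallinn is the "capital" of Estonia.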
if __name__ == '__main__':
with open("armeenia.txt", encoding='utf-8') as f:
data = f.read()
print(clean(data))
|
keeleleek/estnltk
|
estnltk/wiki/cleaner.py
|
cleaner.py
|
py
| 6,425 |
python
|
en
|
code
| null |
github-code
|
6
|
36153558414
|
import unittest
import datetime
import json
from app.textsapi.models.submission import Submission
from app.textsapi.models.text import Text
from app.tests.base import BaseTestCase
def register_ok_submission(self, token):
return self.client.post(
'/submission/',
headers=dict(
Authorization="Token {}".format(token)
),
data=json.dumps(dict(
submitted_texts=['text1', 'text2']
)),
content_type='application/json'
)
def register_illegal_submission(self, token):
return self.client.post(
'/submission/',
headers=dict(
Authorization="Token {}".format(token)
),
data=json.dumps(dict(
submitted_texts=[1, 'text2']
)),
content_type='application/json'
)
def get_submissions(self, token):
return self.client.get(
'/submission/',
headers=dict(
Authorization="Token {}".format(token)
)
)
class TestSubmission(BaseTestCase):
def test_create_valid_submission(self):
""" Test for creating a valid submission """
with self.client:
# valid submission registration
sub_response = register_ok_submission(self, self.token)
response_data = json.loads(sub_response.data.decode())
self.assertTrue(response_data['status']=='success')
def test_create_invalid_submission(self):
""" Test for creating an invalid submission """
with self.client:
# invalid submission registration
sub_response = register_illegal_submission(self, self.token)
response_data = json.loads(sub_response.data.decode())
self.assertTrue(response_data['errors']!=None)
def test_update_submission(self):
""" Test for updating a submission """
sub_response_register = register_ok_submission(self, self.token)
response_data = json.loads(sub_response_register.data.decode())
self.assertTrue(response_data['status']=='success')
sub = [sub for sub in Submission.query(hash_key=self.new_user.username, range_key_condition=Submission.sort.startswith('SUBMISSION_'))][0]
sub_response_update = self.client.put(
'/submission/{}'.format(str(sub.public_id)),
headers=dict(
Authorization="Token {}".format(self.token)
),
data=json.dumps(dict(
submitted_texts=['updated_text1']
)),
content_type='application/json'
)
update_data = json.loads(sub_response_update.data.decode())
upd_sub = Submission.get(hash_key=sub.username, range_key=sub.sort)
self.assertTrue(update_data['status']=='success')
self.assertTrue(upd_sub.text_count == 1)
def test_get_submission(self):
""" Test getting the submissions from database """
# creating a submission
sub_register = register_ok_submission(self, self.token)
response_data = json.loads(sub_register.data.decode())
self.assertTrue(response_data['status']=='success')
# getting it from the service
get_response = get_submissions(self, self.token)
response_data = json.loads(get_response.data.decode())
self.assertTrue(response_data['data'][0]['text_count']==2)
self.assertTrue(isinstance(response_data['data'][0]['texts'], list))
if __name__ == '__main__':
unittest.main()
|
jkausti/flask-textsapi
|
app/tests/_test_submission_controller.py
|
_test_submission_controller.py
|
py
| 3,468 |
python
|
en
|
code
| 1 |
github-code
|
6
|
19876190982
|
#!/usr/bin/python3
import os
import argparse
from subprocess import call
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('basedir', type=str, help='The base directory to walk from')
args = parser.parse_args()
print('The base dir is: {}'.format(args.basedir))
for dirname, subdirs, filenames in os.walk(args.basedir):
for filename in filenames:
full_path = os.path.join(dirname, filename)
if filename.endswith('.bz2'):
print("Decompressing {}".format(full_path))
call(["bzip2", "-d", full_path])
else:
print("Ignoring {}".format(full_path))
|
ruizhang84/B565-Data-Mining
|
src/preprocess/scripts/decompress.py
|
decompress.py
|
py
| 688 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36525273442
|
"""Parser object that performs coarse-to-fine and postprocessing.
Additionally, a simple command line interface similar to bitpar."""
from __future__ import print_function
import io
import os
import re
import sys
import time
import gzip
import codecs
import logging
import tempfile
import traceback
import string # pylint: disable=W0402
import multiprocessing
if sys.version[0] > '2':
imap = map
else:
from itertools import imap
from math import exp, log
from heapq import nlargest
from getopt import gnu_getopt, GetoptError
from operator import itemgetter
from functools import wraps
import numpy as np
from discodop import plcfrs, pcfg
from discodop.grammar import defaultparse
from discodop.containers import Grammar
from discodop.coarsetofine import prunechart, whitelistfromposteriors
from discodop.disambiguation import getderivations, marginalize, doprerank
from discodop.tree import Tree
from discodop.lexicon import replaceraretestwords, UNKNOWNWORDFUNC, UNK
from discodop.treebank import WRITERS, writetree
from discodop.treebanktransforms import reversetransform, rrbacktransform, \
saveheads, NUMBERRE, readheadrules
from discodop.treetransforms import mergediscnodes, unbinarize, \
removefanoutmarkers
USAGE = '''
usage: %(cmd)s [options] <grammar/> [input [output]]
or: %(cmd)s --simple [options] <rules> <lexicon> [input [output]]
'grammar/' is a directory with a model produced by "discodop runexp".
When no filename is given, input is read from standard input and the results
are written to standard output. Input should contain one sentence per line
with space-delimited tokens. Output consists of bracketed trees in
selected format. Files must be encoded in UTF-8.
General options:
-x Input is one token per line, sentences separated by two
newlines (like bitpar).
-b k Return the k-best parses instead of just 1.
--prob Print probabilities as well as parse trees.
--tags Tokens are of the form "word/POS"; give both to parser.
--fmt=(export|bracket|discbracket|alpino|conll|mst|wordpos)
Format of output [default: discbracket].
--numproc=k Launch k processes, to exploit multiple cores.
--simple Parse with a single grammar and input file; similar interface
to bitpar. The files 'rules' and 'lexicon' define a binarized
grammar in bitpar or PLCFRS format.
--verbosity=x 0 <= x <= 4. Same effect as verbosity in parameter file.
Options for simple mode:
-s x Use "x" as start symbol instead of default "TOP".
--bt=file Apply backtransform table to recover TSG derivations.
--obj=(mpd|mpp|mrp|mcc|shortest|sl-dop)
Objective function to maximize [default: mpd].
-m x Use x derivations to approximate objective functions;
mpd and shortest require only 1.
--bitpar Use bitpar to parse with an unbinarized grammar.
''' % dict(cmd=sys.argv[0], fmt=','.join(WRITERS))
DEFAULTSTAGE = dict(
name='stage1', # identifier, used for filenames
mode='plcfrs', # use the agenda-based PLCFRS parser
prune=False, # whether to use previous chart to prune this stage
split=False, # split disc. nodes VP_2[101] as { VP*[100], VP*[001] }
splitprune=False, # treat VP_2[101] as {VP*[100], VP*[001]} for pruning
markorigin=False, # mark origin of split nodes: VP_2 => {VP*1, VP*2}
collapselabels=None, # options: None, 'head', 'all'. TODO: implement.
k=50, # no. of coarse pcfg derivations to prune with; k=0: filter only
dop=None, # DOP mode (DOP reduction / double DOP)
binarized=True, # for double dop, whether to binarize extracted grammar
# (False requires use of bitpar)
sample=False, kbest=True,
m=10, # number of derivations to sample/enumerate
estimator='rfe', # choices: rfe, ewe
objective='mpp', # choices: mpp, mpd, shortest, sl-dop[-simple]
# NB: w/shortest derivation, estimator only affects tie breaking.
sldop_n=7, # number of trees to consider when using sl-dop[-simple]
mcc_labda=1.0, # weight to assign to recall vs. mistake rate with mcc
mcc_labels=None, # optionally, set of labels to optimize for with mcc
packedgraph=False, # use packed graph encoding for DOP reduction
iterate=False, # for double dop, whether to add fragments of fragments
complement=False, # for double dop, whether to include fragments which
# form the complement of the maximal recurring fragments extracted
neverblockre=None, # do not prune nodes with label that match regex
estimates=None, # compute, store & use outside estimates
)
class DictObj(object):
"""Trivial class to wrap a dictionary for reasons of syntactic sugar."""
def __init__(self, *a, **kw):
self.__dict__.update(*a, **kw)
def update(self, *a, **kw):
"""Update/add more attributes."""
self.__dict__.update(*a, **kw)
def __getattr__(self, name):
"""Dummy function for suppressing pylint E1101 errors."""
raise AttributeError('%r instance has no attribute %r' % (
self.__class__.__name__, name))
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__,
',\n\t'.join('%s=%r' % a for a in self.__dict__.items()))
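# e.g. DictObj(name='stage1', k=50).name == 'stage1'; attribute access is just
# syntactic sugar over the instance __dict__.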
PARAMS = DictObj() # used for multiprocessing when using CLI of this module
def main():
"""Handle command line arguments."""
flags = 'prob tags bitpar simple'.split()
options = flags + 'obj= bt= numproc= fmt= verbosity='.split()
try:
opts, args = gnu_getopt(sys.argv[1:], 'b:s:m:x', options)
except GetoptError as err:
print(err, USAGE)
return
if not 1 <= len(args) <= 4:
print('ERROR: incorrect number of arguments')
print(USAGE)
return
for n, filename in enumerate(args):
if not os.path.exists(filename):
raise ValueError('file %d not found: %r' % (n + 1, filename))
opts = dict(opts)
numparses = int(opts.get('-b', 1))
top = opts.get('-s', 'TOP')
prob = '--prob' in opts
tags = '--tags' in opts
oneline = '-x' not in opts
if '--simple' in opts:
if not 2 <= len(args) <= 4:
print('ERROR: incorrect number of arguments')
print(USAGE)
return
rules = (gzip.open if args[0].endswith('.gz') else open)(args[0]).read()
lexicon = codecs.getreader('utf-8')((gzip.open if args[1].endswith('.gz')
else open)(args[1])).read()
bitpar = rules[0] in string.digits
if '--bitpar' in opts:
if not bitpar:
raise ValueError('bitpar requires bitpar grammar format.')
mode = 'pcfg-bitpar-nbest'
else:
mode = 'pcfg' if bitpar else 'plcfrs'
grammar = Grammar(rules, lexicon, start=top, bitpar=bitpar,
binarized='--bitpar' not in opts)
stages = []
stage = DEFAULTSTAGE.copy()
backtransform = None
if opts.get('--bt'):
backtransform = (gzip.open if opts.get('--bt').endswith('.gz')
else open)(opts.get('--bt')).read().splitlines()
stage.update(
name='grammar',
mode=mode,
grammar=grammar,
binarized='--bitpar' not in opts,
backtransform=backtransform if len(args) < 4 else None,
m=numparses,
objective='mpd')
if '--obj' in opts:
stage.update(
dop='reduction' if backtransform is None else 'doubledop',
objective=opts['--obj'],
m=int(opts.get('-m', 1)))
stages.append(DictObj(stage))
if backtransform:
_ = stages[-1].grammar.getmapping(None,
neverblockre=re.compile(b'.+}<'))
parser = Parser(stages, verbosity=int(opts.get('--verbosity', 2)))
morph = None
del args[:2]
else:
from discodop.runexp import readparam
directory = args[0]
if not os.path.isdir(directory):
raise ValueError('expected directory produced by "discodop runexp"')
params = readparam(os.path.join(directory, 'params.prm'))
params['resultdir'] = directory
stages = params['stages']
postagging = params['postagging']
readgrammars(directory, stages, postagging,
top=params.get('top', top))
parser = Parser(stages,
transformations=params.get('transformations'),
binarization=params['binarization'],
postagging=postagging if postagging and
postagging.method == 'unknownword' else None,
relationalrealizational=params.get('relationalrealizational'),
verbosity=int(opts.get('--verbosity',
params.get('verbosity', 2))))
morph = params['morphology']
del args[:1]
infile = (io.open(args[0], encoding='utf-8')
if len(args) >= 1 else sys.stdin)
out = (io.open(args[1], 'w', encoding='utf-8')
if len(args) == 2 else sys.stdout)
doparsing(parser, infile, out, prob, oneline, tags, numparses,
int(opts.get('--numproc', 1)), opts.get('--fmt', 'discbracket'),
morph)
def doparsing(parser, infile, out, printprob, oneline, usetags, numparses,
numproc, fmt, morphology):
"""Parse sentences from file and write results to file, log to stdout."""
times = []
unparsed = 0
if not oneline:
infile = readinputbitparstyle(infile)
infile = (line for line in infile if line.strip())
if numproc == 1:
initworker(parser, printprob, usetags, numparses, fmt, morphology)
mymap = imap
else:
pool = multiprocessing.Pool(processes=numproc, initializer=initworker,
initargs=(parser, printprob, usetags, numparses, fmt,
morphology))
mymap = pool.imap
for output, noparse, sec, msg in mymap(worker, enumerate(infile)):
if output:
print(msg, file=sys.stderr)
out.write(output)
if noparse:
unparsed += 1
times.append(sec)
sys.stderr.flush()
out.flush()
print('average time per sentence', sum(times) / len(times),
'\nunparsed sentences:', unparsed,
'\nfinished',
file=sys.stderr)
out.close()
def initworker(parser, printprob, usetags, numparses,
fmt, morphology):
"""Load parser for a worker process."""
headrules = None
if fmt in ('mst', 'conll'):
headrules = readheadrules(parser.binarization.headrules)
PARAMS.update(parser=parser, printprob=printprob,
usetags=usetags, numparses=numparses, fmt=fmt,
morphology=morphology, headrules=headrules)
def workerfunc(func):
"""Wrap a multiprocessing worker function to produce a full traceback."""
@wraps(func)
def wrapper(*args, **kwds):
"""Apply decorated function."""
try:
import faulthandler
faulthandler.enable() # Dump information on segfault.
except (ImportError, io.UnsupportedOperation):
pass
# NB: only concurrent.futures on Python 3.3+ will exit gracefully.
try:
return func(*args, **kwds)
except Exception: # pylint: disable=W0703
# Put traceback as string into an exception and raise that
raise Exception('in worker process\n%s' %
''.join(traceback.format_exception(*sys.exc_info())))
return wrapper
@workerfunc
def worker(args):
"""Parse a single sentence."""
n, line = args
line = line.strip()
if not line:
return '', True, 0, ''
begin = time.clock()
sent = line.split(' ')
tags = None
if PARAMS.usetags:
sent, tags = zip(*(a.rsplit('/', 1) for a in sent))
msg = 'parsing %d: %s' % (n, ' '.join(sent))
result = list(PARAMS.parser.parse(sent, tags=tags))[-1]
output = ''
if result.noparse:
msg += '\nNo parse for "%s"' % ' '.join(sent)
if PARAMS.printprob:
output += 'prob=%.16g\n' % result.prob
output += '%s\t%s\n' % (result.parsetree, ' '.join(sent))
else:
output += ''.join(
writetree(
PARAMS.parser.postprocess(tree)[0], sent,
n if PARAMS.numparses == 1 else ('%d-%d' % (n, k)),
PARAMS.fmt, headrules=PARAMS.headrules,
morphology=PARAMS.morphology,
comment=('prob=%.16g' % prob) if PARAMS.printprob else None)
for k, (tree, prob, _) in enumerate(nlargest(
PARAMS.numparses, result.parsetrees, key=itemgetter(1))))
sec = time.clock() - begin
msg += '\n%g s' % sec
return output, result.noparse, sec, msg
def readinputbitparstyle(infile):
"""Yields lists of tokens, where '\\n\\n' identifies a sentence break.
Lazy version of ``infile.read().split('\\n\\n')``."""
sent = []
for line in infile:
line = line.strip()
if not line:
yield ' '.join(sent)
sent = []
sent.append(line)
if sent:
yield ' '.join(sent)
class Parser(object):
"""A coarse-to-fine parser based on a given set of parameters.
:param stages: a list of coarse-to-fine stages containing grammars and
parameters.
:param transformations: treebank transformations to reverse on parses.
:param binarization: settings used for binarization; used for the
tailmarker attribute which identifies heads in parser output.
:param postagging: if given, an unknown word model is used to assign POS
tags during parsing. The model consists of a DictObj with (at least)
the following attributes:
- unknownwordfun: function to produces signatures for unknown words.
- lexicon: the set of known words in the grammar.
- sigs: the set of word signatures occurring in the grammar.
:param relationalrealizational: whether to reverse the RR-transform."""
def __init__(self, stages, transformations=None, postagging=None,
binarization=DictObj(tailmarker=None),
relationalrealizational=None, verbosity=2):
self.stages = stages
self.transformations = transformations
self.binarization = binarization
self.postagging = postagging
self.relationalrealizational = relationalrealizational
self.verbosity = verbosity
for stage in stages:
if stage.mode.startswith('pcfg-bitpar'):
exportbitpargrammar(stage)
model = u'default'
if stage.dop:
if (stage.estimator == 'ewe'
or stage.objective.startswith('sl-dop')):
model = u'ewe'
elif stage.estimator == 'bon':
model = u'bon'
if stage.objective == 'shortest':
model = u'shortest'
stage.grammar.switch(model, logprob=stage.mode != 'pcfg-posterior')
if verbosity >= 3:
logging.debug(stage.name)
logging.debug(stage.grammar)
def parse(self, sent, tags=None):
"""Parse a sentence and perform postprocessing.
Yields a dictionary from parse trees to probabilities for each stage.
:param sent: a sequence of tokens.
:param tags: if given, will be given to the parser instead of trying
all possible tags."""
if self.postagging:
if self.transformations and 'FOLD-NUMBERS' in self.transformations:
sent = ['000' if NUMBERRE.match(a) else a for a in sent]
sent = replaceraretestwords(sent,
self.postagging.unknownwordfun,
self.postagging.lexicon, self.postagging.sigs)
sent = list(sent)
if tags is not None:
tags = list(tags)
chart = start = inside = outside = lastsuccessfulparse = None
for n, stage in enumerate(self.stages):
begin = time.clock()
noparse = False
parsetrees = fragments = None
msg = '%s:\t' % stage.name.upper()
model = u'default'
if stage.dop:
if (stage.estimator == 'ewe'
or stage.objective.startswith('sl-dop')):
model = u'ewe'
elif stage.estimator == 'bon':
model = u'bon'
if stage.objective == 'shortest':
model = u'shortest'
x = stage.grammar.currentmodel
stage.grammar.switch(model, logprob=stage.mode != 'pcfg-posterior')
if stage.mode.startswith('pcfg-bitpar') and (
not hasattr(stage, 'rulesfile')
or x != stage.grammar.currentmodel):
exportbitpargrammar(stage)
if not stage.binarized and not stage.mode.startswith('pcfg-bitpar'):
raise ValueError('non-binarized grammar requires use of bitpar')
if not stage.prune or chart:
if n != 0 and stage.prune and stage.mode != 'dop-rerank':
beginprune = time.clock()
if self.stages[n - 1].mode == 'pcfg-posterior':
whitelist, msg1 = whitelistfromposteriors(
inside, outside, start,
self.stages[n - 1].grammar, stage.grammar,
stage.k, stage.splitprune,
self.stages[n - 1].markorigin,
stage.mode.startswith('pcfg'))
else:
whitelist, msg1 = prunechart(
chart, stage.grammar, stage.k,
stage.splitprune,
self.stages[n - 1].markorigin,
stage.mode.startswith('pcfg'),
self.stages[n - 1].mode == 'pcfg-bitpar-nbest')
msg += '%s; %gs\n\t' % (msg1, time.clock() - beginprune)
else:
whitelist = None
if stage.mode == 'pcfg':
chart, msg1 = pcfg.parse(
sent, stage.grammar, tags=tags,
whitelist=whitelist if stage.prune else None)
elif stage.mode == 'pcfg-posterior':
inside, outside, start, msg1 = pcfg.doinsideoutside(
sent, stage.grammar, tags=tags)
chart = start
elif stage.mode.startswith('pcfg-bitpar'):
if stage.mode == 'pcfg-bitpar-forest':
numderivs = 0
elif (n == len(self.stages) - 1
or not self.stages[n + 1].prune):
numderivs = stage.m
else: # request 1000 nbest parses for CTF pruning
numderivs = 1000
chart, cputime, msg1 = pcfg.parse_bitpar(stage.grammar,
stage.rulesfile.name, stage.lexiconfile.name,
sent, numderivs,
stage.grammar.start,
stage.grammar.toid[stage.grammar.start], tags=tags)
begin -= cputime
elif stage.mode == 'plcfrs':
chart, msg1 = plcfrs.parse(
sent, stage.grammar, tags=tags,
exhaustive=stage.dop or (
n + 1 != len(self.stages)
and self.stages[n + 1].prune),
whitelist=whitelist,
splitprune=stage.splitprune
and self.stages[n - 1].split,
markorigin=self.stages[n - 1].markorigin,
estimates=(stage.estimates, stage.outside)
if stage.estimates in ('SX', 'SXlrgaps')
else None)
elif stage.mode == 'dop-rerank':
if chart:
parsetrees = doprerank(chart, sent, stage.k,
self.stages[n - 1].grammar, stage.grammar)
msg1 = 're-ranked %d parse trees. ' % len(parsetrees)
else:
raise ValueError('unknown mode specified.')
msg += '%s\n\t' % msg1
if (n != 0 and not chart and not noparse
and stage.split == self.stages[n - 1].split):
logging.error('ERROR: expected successful parse. '
'sent: %s\nstage: %s.', ' '.join(sent), stage.name)
# raise ValueError('ERROR: expected successful parse. '
# 'sent %s, %s.' % (nsent, stage.name))
if chart and stage.mode not in ('pcfg-posterior', 'dop-rerank'
) and not (self.relationalrealizational and stage.split):
begindisamb = time.clock()
if stage.mode == 'pcfg-bitpar-nbest':
if not stage.kbest or stage.sample:
raise ValueError('sampling not possible with bitpar '
'in nbest mode.')
derivations = chart.rankededges[chart.root()]
entries = [None] * len(derivations)
else:
derivations, entries = getderivations(chart, stage.m,
kbest=stage.kbest, sample=stage.sample,
derivstrings=stage.dop != 'doubledop'
or self.verbosity >= 3
or stage.objective == 'mcc')
if self.verbosity >= 3:
print('sent: %s\nstage: %s' % (' '.join(sent), stage.name))
print('%d-best derivations:\n%s' % (
min(stage.m, 100),
'\n'.join('%d. %s %s' % (n + 1,
('subtrees=%d' % abs(int(prob / log(0.5))))
if stage.objective == 'shortest'
else ('p=%g' % exp(-prob)), deriv)
for n, (deriv, prob) in enumerate(derivations[:100]))))
					print('sum of probabilities: %g\n' %
sum(exp(-prob) for _, prob in derivations[:100]))
if stage.objective == 'shortest':
stage.grammar.switch(u'ewe' if stage.estimator == 'ewe'
else u'default', True)
parsetrees, msg1 = marginalize(
stage.objective if stage.dop else 'mpd',
derivations, entries, chart,
sent=sent, tags=tags,
backtransform=stage.backtransform,
k=stage.m, sldop_n=stage.sldop_n,
mcc_labda=stage.mcc_labda, mcc_labels=stage.mcc_labels,
bitpar=stage.mode == 'pcfg-bitpar-nbest')
msg += 'disambiguation: %s, %gs\n\t' % (
msg1, time.clock() - begindisamb)
if self.verbosity >= 3:
besttrees = nlargest(100, parsetrees, key=itemgetter(1))
print('100-best parse trees:\n%s' % '\n'.join(
'%d. %s %s' % (n + 1, probstr(prob), treestr)
for n, (treestr, prob, _) in enumerate(besttrees)))
					print('sum of probabilities: %g\n' %
sum((prob[1] if isinstance(prob, tuple) else prob)
for _, prob, _ in besttrees))
if self.verbosity >= 4:
print('Chart:\n%s' % chart)
if parsetrees:
try:
resultstr, prob, fragments = max(
parsetrees, key=itemgetter(1))
parsetree, noparse = self.postprocess(resultstr, n)
if not all(a for a in parsetree.subtrees()):
raise ValueError('empty nodes in tree: %s' % parsetree)
if not len(parsetree.leaves()) == len(sent):
raise ValueError('leaves missing. original tree: %s\n'
'postprocessed: %r' % (resultstr, parsetree))
except Exception: # pylint: disable=W0703
logging.error("something's amiss. %s", ''.join(
traceback.format_exception(*sys.exc_info())))
parsetree, prob, noparse = self.noparse(
stage, sent, tags, lastsuccessfulparse)
else:
lastsuccessfulparse = parsetree
msg += probstr(prob) + ' '
else:
fragments = None
parsetree, prob, noparse = self.noparse(
stage, sent, tags, lastsuccessfulparse)
elapsedtime = time.clock() - begin
msg += '%.2fs cpu time elapsed\n' % (elapsedtime)
yield DictObj(name=stage.name, parsetree=parsetree, prob=prob,
parsetrees=parsetrees, fragments=fragments,
noparse=noparse, elapsedtime=elapsedtime, msg=msg)
def postprocess(self, treestr, stage=-1):
"""Take parse tree and apply postprocessing."""
parsetree = Tree.parse(treestr, parse_leaf=int)
if self.stages[stage].split:
mergediscnodes(unbinarize(parsetree, childchar=':',
expandunary=False))
saveheads(parsetree, self.binarization.tailmarker)
unbinarize(parsetree, expandunary=False)
removefanoutmarkers(parsetree)
if self.relationalrealizational:
parsetree = rrbacktransform(parsetree,
self.relationalrealizational['adjunctionlabel'])
if self.transformations:
reversetransform(parsetree, self.transformations)
return parsetree, False
def noparse(self, stage, sent, tags, lastsuccessfulparse):
"""Return parse from previous stage or a dummy parse."""
# use successful parse from earlier stage if available
if lastsuccessfulparse is not None:
parsetree = lastsuccessfulparse.copy(True)
else: # Produce a dummy parse for evaluation purposes.
default = defaultparse([(n, t) for n, t
in enumerate(tags or (len(sent) * ['NONE']))])
parsetree = Tree.parse('(%s %s)' % (stage.grammar.start,
default), parse_leaf=int)
noparse = True
prob = 1.0
return parsetree, prob, noparse
def readgrammars(resultdir, stages, postagging=None, top='ROOT'):
"""Read the grammars from a previous experiment.
Expects a directory ``resultdir`` which contains the relevant grammars and
the parameter file ``params.prm``, as produced by ``runexp``."""
for n, stage in enumerate(stages):
logging.info('reading: %s', stage.name)
rules = gzip.open('%s/%s.rules.gz' % (resultdir, stage.name)).read()
lexicon = codecs.getreader('utf-8')(gzip.open('%s/%s.lex.gz' % (
resultdir, stage.name)))
grammar = Grammar(rules, lexicon.read(),
start=top, bitpar=stage.mode.startswith('pcfg')
or re.match(r'[-.e0-9]+\b', rules), binarized=stage.binarized)
backtransform = outside = None
if stage.dop:
if stage.estimates is not None:
raise ValueError('not supported')
if stage.dop == 'doubledop':
backtransform = gzip.open('%s/%s.backtransform.gz' % (
resultdir, stage.name)).read().splitlines()
if n and stage.prune:
_ = grammar.getmapping(stages[n - 1].grammar,
striplabelre=re.compile(b'@.+$'),
neverblockre=re.compile(b'^#[0-9]+|.+}<'),
splitprune=stage.splitprune and stages[n - 1].split,
markorigin=stages[n - 1].markorigin)
else:
# recoverfragments() relies on this mapping to identify
# binarization nodes
_ = grammar.getmapping(None,
neverblockre=re.compile(b'.+}<'))
elif n and stage.prune: # dop reduction
_ = grammar.getmapping(stages[n - 1].grammar,
striplabelre=re.compile(b'@[-0-9]+$'),
neverblockre=re.compile(stage.neverblockre)
if stage.neverblockre else None,
splitprune=stage.splitprune and stages[n - 1].split,
markorigin=stages[n - 1].markorigin)
if stage.mode == 'dop-rerank':
grammar.getrulemapping(
stages[n - 1].grammar, re.compile(br'@[-0-9]+\b'))
probsfile = '%s/%s.probs.npz' % (resultdir, stage.name)
if os.path.exists(probsfile):
probmodels = np.load(probsfile) # pylint: disable=no-member
for name in probmodels.files:
if name != 'default':
grammar.register(unicode(name), probmodels[name])
else: # not stage.dop
if n and stage.prune:
_ = grammar.getmapping(stages[n - 1].grammar,
neverblockre=re.compile(stage.neverblockre)
if stage.neverblockre else None,
splitprune=stage.splitprune and stages[n - 1].split,
markorigin=stages[n - 1].markorigin)
if stage.estimates in ('SX', 'SXlrgaps'):
if stage.estimates == 'SX' and grammar.maxfanout != 1:
raise ValueError('SX estimate requires PCFG.')
if stage.mode != 'plcfrs':
raise ValueError('estimates require parser w/agenda.')
outside = np.load( # pylint: disable=no-member
'%s/%s.outside.npz' % (resultdir, stage.name))['outside']
logging.info('loaded %s estimates', stage.estimates)
elif stage.estimates:
raise ValueError('unrecognized value; specify SX or SXlrgaps.')
if stage.mode.startswith('pcfg-bitpar'):
if grammar.maxfanout != 1:
raise ValueError('bitpar requires a PCFG.')
_sumsto1, msg = grammar.testgrammar()
logging.info('%s: %s', stage.name, msg)
stage.update(grammar=grammar, backtransform=backtransform,
outside=outside)
if postagging and postagging.method == 'unknownword':
postagging.unknownwordfun = UNKNOWNWORDFUNC[postagging.model]
postagging.lexicon = {w for w in stages[0].grammar.lexicalbyword
if not w.startswith(UNK)}
postagging.sigs = {w for w in stages[0].grammar.lexicalbyword
if w.startswith(UNK)}
def exportbitpargrammar(stage):
"""(re-)export bitpar grammar with current weights."""
if not hasattr(stage, 'rulesfile'):
stage.rulesfile = tempfile.NamedTemporaryFile()
stage.lexiconfile = tempfile.NamedTemporaryFile()
stage.rulesfile.seek(0)
stage.rulesfile.truncate()
if stage.grammar.currentmodel == 0:
stage.rulesfile.write(stage.grammar.origrules)
else:
stage.rulesfile.writelines(
'%g\t%s\n' % (weight, line.split(None, 1)[1])
for weight, line in
zip(stage.grammar.models[stage.grammar.currentmodel],
stage.grammar.origrules.splitlines()))
stage.rulesfile.flush()
stage.lexiconfile.seek(0)
stage.lexiconfile.truncate()
lexicon = stage.grammar.origlexicon.replace(
'(', '-LRB-').replace(')', '-RRB-')
lexiconfile = codecs.getwriter('utf-8')(stage.lexiconfile)
if stage.grammar.currentmodel == 0:
lexiconfile.write(lexicon)
else:
weights = iter(stage.grammar.models[stage.grammar.currentmodel,
stage.grammar.numrules:])
lexiconfile.writelines('%s\t%s\n' % (line.split(None, 1)[0],
'\t'.join('%s %g' % (tag, next(weights))
for tag in line.split()[1::2]))
for line in lexicon.splitlines())
stage.lexiconfile.flush()
def probstr(prob):
"""Render probability / number of subtrees as string."""
if isinstance(prob, tuple):
return 'subtrees=%d, p=%.4g ' % (abs(prob[0]), prob[1])
return 'p=%.4g' % prob
def which(program):
"""Return first match for program in search path."""
for path in os.environ.get('PATH', os.defpath).split(":"):
if path and os.path.exists(os.path.join(path, program)):
return os.path.join(path, program)
raise ValueError('%r not found in path; please install it.' % program)
__all__ = ['DictObj', 'Parser', 'doparsing', 'exportbitpargrammar',
'initworker', 'probstr', 'readgrammars', 'readinputbitparstyle',
'which', 'worker', 'workerfunc']
if __name__ == '__main__':
main()
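# Illustrative sketch (added; not part of the original module) of how the
# Parser class above is typically driven once `stages` has been built, e.g.
# by readgrammars(); only names defined in this file are used:
#     parser = Parser(stages, verbosity=2)
#     for result in parser.parse('the dog barks'.split()):
#         print(result.name, result.parsetree, result.prob)
# Each yielded DictObj describes one coarse-to-fine stage; worker() above keeps
# only the final stage via list(parser.parse(sent, tags=tags))[-1].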
|
pombredanne/disco-dop
|
discodop/parser.py
|
parser.py
|
py
| 27,717 |
python
|
en
|
code
| null |
github-code
|
6
|
25636796283
|
#!/usr/bin/env python
# coding: utf-8
# In[2]:
colorList = ["blue", "purple", "pink", "yellow", "green"]
for color in colorList:
print(color)
# In[2]:
n = 0
numbers = list(range(10))
for num in numbers:
print (num)
# In[1]:
n = 0
while n<10:
print(n)
n = n+1
# In[1]:
n = 0
while n <= 10:
print(n)
n = n + 1
else:
print("greater than 10")
# In[3]:
n = 30
sum = 0
i = 1
while i <= n:
sum = sum+i
i=i+1
print(sum)
# In[ ]:
# In[ ]:
|
madelinedq/HW1_deQuillacq_Madeline
|
HW2_deQuillacq_Madeline.py
|
HW2_deQuillacq_Madeline.py
|
py
| 503 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36397304694
|
"""
Table of Contents
1. drop_null_columns: Drop columns that exceed a threshold of null values.
"""
from pyspark.sql import functions as F, DataFrame
from ..parsing.melting import melt
def drop_null_columns(df: DataFrame, threshold: float = 1.0, subset: list = None) -> DataFrame:
"""
Drop columns that exceed a threshold of null values.
Inputs
df: DataFrame.
threshold: Threshold value. If a column has at least this fraction of nulls, it will be removed.
subset: List of columns to check. All others will be kept by default regardless of the null count.
Output
Updated DataFrame.
Example
    df = drop_null_columns(df, 0.5, ["col1", "col2"])
Will remove col1 or col2 if either is at least 50% null.
"""
# If looking only at a subset of columns, downselect, otherwise, evaluate all columns.
if subset is None:
temp_df = df
else:
temp_df = df.select(subset)
# List of columns being evaluated.
columns_evaluated = temp_df.columns
# Replace each column with a 1 if null, 0 otherwise.
for col in columns_evaluated:
temp_df = temp_df.withColumn(col, F.when(F.col(col).isNull(), 1).otherwise(0))
# Sum the number of nulls, represented with a 1, for each column.
null_count = temp_df.agg(*[F.sum(c).alias(c) for c in columns_evaluated])
# Total row count.
count = temp_df.count()
# Pivot this null_count DataFrame to do a few operations to find when there are too many nulls.
# The names of the columns will now be listed in a column called "categories".
null_count = melt(null_count, value_vars=columns_evaluated)
null_count = (
null_count
# Compute the fraction of rows that are null.
.withColumn("fraction", F.col("values") / F.lit(count))
# Keep those rows that meet or exceed the threshold.
.where(F.col("fraction") >= F.lit(threshold))
# Now, pivot again so that the column names are restored.
.groupBy().pivot("categories").sum("fraction")
)
# Get the list of the columns that need to be dropped; drop and return.
columns_to_drop = null_count.columns
df = df.drop(*columns_to_drop)
return df
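# Minimal usage sketch (added for illustration; not part of the original
# module). It assumes an active SparkSession and that the module is imported
# from its package; the relative `melt` import above prevents running this
# file directly, so the guard below is only reachable under those assumptions.
if __name__ == "__main__":
    from pyspark.sql import SparkSession
    spark = SparkSession.builder.getOrCreate()
    # Column "b" is 50% null, so a 0.5 threshold drops it and keeps "a".
    sample = spark.createDataFrame([(1, None), (2, "x")], ["a", "b"])
    print(drop_null_columns(sample, threshold=0.5).columns)  # ['a']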
|
phil-trinh/pyspark_utilities_sample
|
transformations/calculations/null_funcs.py
|
null_funcs.py
|
py
| 2,241 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9633424069
|
# W.A.P in Python to count the total no. of words in a string. #
str = input("Enter the string :- ")
total = 1
for i in range(len(str)) : # len() function returns the length of the string.
if(str[i]==" ") :
total+=1
print("The total number of words in the string is ",total)
# ALTERNATIVE METHOD #
# str = input("Enter the string :- ")
# total = 1
# total = total + str.count(" ") # count() function is used to count the total number of occurrences of any specified character.
# print("The total number of words in the string is ",total)
|
sunny-ghosh/Python-projects
|
count_string.py
|
count_string.py
|
py
| 615 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12110709697
|
import numpy as np
from tqdm import tqdm
flip_inst = {}
flip_inst['e'] = [1, 0]
flip_inst['w'] = [-1, 0]
flip_inst['se'] = [0, -1]
flip_inst['sw'] = [-1, -1]
flip_inst['ne'] = [1, 1]
flip_inst['nw'] = [0, 1]
def flip_tile(instr, tiles):
tile = np.array([0, 0])
while instr:
for fi, dir in flip_inst.items():
if instr.startswith(fi):
tile += dir
instr = instr[len(fi):]
continue
if list(tile) in tiles:
del tiles[tiles.index(list(tile))]
else:
tiles.append(list(tile))
return tiles
def count_neighbors(matrix, x, y):
num = 0
for _, dir in flip_inst.items():
if matrix[x + dir[0], y + dir[1]] == 1:
num += 1
return num
def evolve(matrix):
to_flip = np.zeros(matrix.shape)
for x in range(1, matrix.shape[0]-1):
for y in range(1, matrix.shape[1]-1):
neighbors = count_neighbors(matrix, x, y)
# Any black tile with zero or more than 2 black tiles immediately
# adjacent to it is flipped to white.
if matrix[x, y] == 1:
if neighbors == 0 or neighbors > 2:
to_flip[x, y] = 1
# Any white tile with exactly 2 black tiles immediately adjacent to
# it is flipped to black.
else:
if neighbors == 2:
to_flip[x, y] = 1
matrix[to_flip == 1] = (matrix[to_flip == 1] + 1) % 2
return matrix
if __name__ == "__main__":
file_name = "test_24.txt"
file_name = "input_24.txt"
tiles = []
for line in open(file_name):
tiles = flip_tile(line.strip(), tiles)
print(len(tiles)) # 521
tiles = np.array(tiles)
delx = max(tiles[:, 0]) - min(tiles[:, 0])
dely = max(tiles[:, 1]) - min(tiles[:, 1])
    matrix = np.zeros([200 + delx, 200 + dely], dtype=int)  # np.int is deprecated/removed in newer NumPy; use the builtin int
for t in tiles:
matrix[t[0]+100, t[1]+100] = 1
for _ in tqdm(range(100)):
matrix = evolve(matrix)
print(np.sum(matrix)) # 4242
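# Quick illustration (added; not part of the original solution): with the axial
# offsets in flip_inst above, the AoC example instruction "esenee" walks
# e + se + ne + e = [1, 0] + [0, -1] + [1, 1] + [1, 0] = [3, 0], i.e. the tile
# three steps east of the reference tile, which flip_tile() then toggles in the
# `tiles` list.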
|
scjohnson/aoc_2020
|
solution_24.py
|
solution_24.py
|
py
| 2,048 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39188602086
|
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from blog import apiviews
router = DefaultRouter()
router.register('posts', apiviews.PostViewSet)
router.register('comments', apiviews.CommentViewSet)
router.register('replies', apiviews.ReplyViewSet)
router.register('users', apiviews.UserViewSet )
urlpatterns = [
path('', include(router.urls)),
path('rest-auth/', include('rest_auth.urls')),
path('rest-auth/registration/', include('rest_auth.registration.urls')),
path('categories', apiviews.CategoryListView.as_view(), name='categories-list'),
path('category-filter/<int:pk>/', apiviews.PostsInCategoryView.as_view(), name='category-detail')
]
# urlpatterns += router.urls
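# Note (added): DefaultRouter generates the standard list/detail routes for
# each registered viewset, so the registrations above expose URLs such as
# /posts/ and /posts/<pk>/ (and likewise for comments, replies and users),
# plus a browsable API root at the prefix where this urlconf is included.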
|
MahfuzKhandaker/blogapi
|
blog/urls.py
|
urls.py
|
py
| 736 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74436640828
|
import glob
import numpy as np
import pandas as pd
import nibabel as nib
import torch
from torch.utils.data import Dataset
# dataset class for the GenericObjectDecoding dataset
class GODData(Dataset):
FEATURES_PATH = "data/ds001246/derivatives/preproc-spm/output"
TARGETS_PATH = "data/ds001246"
TRAIN_CATEGORIES_PATH = "data/ds001246/stimulus_ImageNetTraining.csv"
TEST_CATEGORIES_PATH = "data/ds001246/stimulus_ImageNetTest.csv"
def __init__(
self,
subject="01",
session_id="01",
task="perception",
train=True,
limit_size=None,
):
session = f"{task}{'Training' if train else 'Test'}{session_id}"
# load data
feature_runs = sorted(glob.glob(f"{self.FEATURES_PATH}/sub-{subject}/ses-{session}/func/*"))
target_runs = sorted(glob.glob(f"{self.TARGETS_PATH}/sub-{subject}/ses-{session}/func/*events*"))
categories = pd.read_csv(self.TRAIN_CATEGORIES_PATH if train else self.TEST_CATEGORIES_PATH, sep="\t", header=None)
# process features and targets
features = []
targets = []
for f_run, t_run in zip(feature_runs, target_runs):
features_run = nib.load(f_run).get_fdata()
targets_run = pd.read_csv(t_run, sep="\t")
# remove resting states
features_run_pp = features_run[:, :, :, 8:-2]
targets_run_pp = targets_run[targets_run["event_type"] != "rest"]
# reshape features into (N, C, D, W, H)
features_run_pp = features_run_pp.transpose(3, 2, 1, 0).reshape(-1, 3, 50, 64, 64)
# extract category labels
targets_run_pp = targets_run_pp.merge(categories, left_on="stim_id", right_on=1)[2]
targets_run_pp = targets_run_pp.to_numpy().reshape(-1, 1)
features.append(features_run_pp)
targets.append(targets_run_pp)
features = np.vstack(features)
targets = np.vstack(targets)
# convert and store as tensors
self.features = torch.from_numpy(features).float()
self.targets = torch.from_numpy(targets).long() - 1
# flatten targets
self.targets = self.targets.squeeze()
# limit dataset size
if limit_size is not None:
self.features = self.features[:limit_size]
self.targets = self.targets[:limit_size]
def __len__(self):
return len(self.features)
def __getitem__(self, index):
feature = self.features[index]
target = self.targets[index]
return feature, target
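# Illustrative usage sketch (added; not part of the original file). It assumes
# the GenericObjectDecoding data is laid out under data/ds001246 as described
# by the class-level path constants above.
if __name__ == "__main__":
    from torch.utils.data import DataLoader
    dataset = GODData(subject="01", session_id="01", train=True, limit_size=8)
    loader = DataLoader(dataset, batch_size=4, shuffle=True)
    features, targets = next(iter(loader))
    print(features.shape, targets.shape)  # expected: (4, 3, 50, 64, 64) and (4,)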
|
v15hv4/ViT-fMRI
|
dataOLD.py
|
dataOLD.py
|
py
| 2,614 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29543052882
|
"""
Difficulty: Easy
Given a signed 32-bit integer x, return x with its digits reversed.
If reversing x causes the value to go outside the signed 32-bit integer range [-231, 231 - 1], then return 0.
Assume the environment does not allow you to store 64-bit integers (signed or unsigned).
Example 1:
Input: x = 123
Output: 321
Example 2:
Input: x = -123
Output: -321
Example 3:
Input: x = 120
Output: 21
Constraints:
* -2^31 <= x <= 2^31 - 1
"""
class Solution:
# approach 1: divide by 10, get remainder, add to result
def reverse(self, x: int) -> int:
result = ""
y = abs(x)
while y > 0:
quot, rem = y // 10, y % 10
result += str(rem)
y = quot
if not result:
return 0
result = int(result)
if result < (-2**31) or result > (2**31 - 1):
return 0
if x < 0:
result = -1 * result
return result
# approach 2: convert to string, reverse, convert back to int
def reverse(self, x: int) -> int:
if x < 0:
return -1 * int(str(x)[1:][::-1])
else:
return int(str(x)[::-1])
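# Quick sanity checks (added; not part of the original solution) for the
# docstring examples. Note that the second `reverse` definition shadows the
# first and, unlike approach 1, does not clamp to the signed 32-bit range.
if __name__ == "__main__":
    s = Solution()
    assert s.reverse(123) == 321
    assert s.reverse(-123) == -321
    assert s.reverse(120) == 21
    print("all docstring examples pass")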
|
ali-izhar/daily-commit-challenge
|
int/reverse.py
|
reverse.py
|
py
| 1,186 |
python
|
en
|
code
| 2 |
github-code
|
6
|
14504710213
|
import os
import numpy as np
import pandas as pd
from PIL import Image
from collections import Counter
import torch
from torch.utils.data import Dataset
from torch.nn.utils.rnn import pad_sequence
import torchvision.transforms as T
import spacy
spacy_eng = spacy.load("en_core_web_sm")
# defining the transform to be applied
transforms = T.Compose([
T.Resize(226),
T.RandomCrop(224),
T.ToTensor(),
T.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])
transforms_advanced = T.Compose([
T.Resize(226),
T.RandomCrop(224),
T.ToTensor(),
T.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
T.RandomAffine(10),
T.RandomGrayscale(0.05),
T.RandomHorizontalFlip(0.05),
T.RandomVerticalFlip(0.05),
T.GaussianBlur(5),
T.RandomErasing(0.05)
])
class Vocabulary:
def __init__(self, freq_threshold):
        # pre-reserved special tokens, mapped from int index to string token
self.pad_idx = 0
self.sos_idx = 1
self.eos_idx = 2
self.unk_idx = 3
self.pad = "<PAD>"
self.sos = "<SOS>"
self.eos = "<EOS>"
self.unk = "<UNK>"
self.itos = {self.pad_idx: self.pad,
self.sos_idx: self.sos,
self.eos_idx: self.eos,
self.unk_idx: self.unk}
# string to int tokens
# its reverse dict self.itos
self.stoi = {v: k for k, v in self.itos.items()}
self.freq_threshold = freq_threshold
def __len__(self):
return len(self.itos)
def size(self):
return len(self.itos)
@staticmethod
def tokenize(text):
return [token.text.lower() for token in spacy_eng.tokenizer(text)]
def build_vocab(self, sentence_list):
frequencies = Counter()
idx = 4
for sentence in sentence_list:
for word in self.tokenize(sentence):
frequencies[word] += 1
                # add the word to the vocab if it reaches the minimum frequency threshold
if frequencies[word] == self.freq_threshold:
self.stoi[word] = idx
self.itos[idx] = word
idx += 1
def numericalize(self, text):
""" For each word in the text corresponding index token for that word form the vocab built as list """
tokenized_text = self.tokenize(text)
return [self.stoi[token] if token in self.stoi else self.stoi["<UNK>"] for token in tokenized_text]
class FlickrDataset(Dataset):
"""
FlickrDataset
"""
def __init__(self,
root_dir,
captions_file,
transform=None,
freq_threshold=5,
vocab=None,
data_limit=None,
do_augmentation=False,
augmentation_probability=0.2):
self.root_dir = root_dir
self.df = pd.read_csv(captions_file)
self.transform = transform
self.random = np.random.RandomState()
self.do_augmentation = do_augmentation
self.augmentation_probability = augmentation_probability
# Get image and caption colum from the dataframe
self.imgs = self.df["image"]
self.captions = self.df["caption"]
# If needed truncating the data for faster running
if data_limit is not None:
self.imgs = self.imgs[:data_limit]
self.captions = self.captions[:data_limit]
# Initialize vocabulary and build vocab
if vocab is None:
self.vocab = Vocabulary(freq_threshold)
self.vocab.build_vocab(self.captions.tolist())
else:
self.vocab = vocab
def __len__(self):
# return len(self.df)
return self.imgs.shape[0]
def __getitem__(self, idx):
caption = self.captions[idx]
img_name = self.imgs[idx]
img_location = os.path.join(self.root_dir, img_name)
img_pil = Image.open(img_location).convert("RGB")
# do some random augmentations
if not self.do_augmentation:
img = transforms(img_pil)
else:
img = transforms_advanced(img_pil)
# numericalize the caption text
caption_vec = []
caption_vec += [self.vocab.stoi["<SOS>"]]
caption_vec += self.vocab.numericalize(caption)
caption_vec += [self.vocab.stoi["<EOS>"]]
return img, torch.tensor(caption_vec)
class CapsCollate:
"""
    Collate function that pads each batch of captions for use with a DataLoader
"""
def __init__(self, vocab, batch_first=False, max_len=0):
self.pad_idx = vocab.pad_idx
self.eos_idx = vocab.eos_idx
self.batch_first = batch_first
self._max_len = max_len
def __call__(self, batch):
imgs = [item[0].unsqueeze(0) for item in batch]
imgs = torch.cat(imgs, dim=0)
targets = [item[1] for item in batch]
targets = pad_sequence(targets, batch_first=self.batch_first, padding_value=self.pad_idx)
if self._max_len > 0:
if targets.shape[1] >= self._max_len:
targets = targets[:, :self._max_len]
else:
pad_tensor = torch.ones(size=(targets.shape[0], self._max_len - targets.shape[1]),
dtype=torch.long) * self.pad_idx
targets = torch.cat([targets, pad_tensor], dim=1)
targets[:, -1] = targets[:, -1].where(targets[:, -1] == self.pad_idx, torch.tensor(self.eos_idx))
return imgs, targets
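# Illustrative usage sketch (added; not part of the original module). The
# dataset paths are hypothetical placeholders -- point them at a real Flickr
# image folder and captions CSV; the spacy model loaded at the top of this
# file must also be installed.
if __name__ == "__main__":
    from torch.utils.data import DataLoader
    dataset = FlickrDataset(root_dir="flickr8k/images",  # placeholder path
                            captions_file="flickr8k/captions.txt",  # placeholder path
                            freq_threshold=5,
                            data_limit=32)
    collate = CapsCollate(dataset.vocab, batch_first=True)
    loader = DataLoader(dataset, batch_size=4, shuffle=True, collate_fn=collate)
    imgs, captions = next(iter(loader))
    print(imgs.shape, captions.shape)  # (4, 3, 224, 224) and (4, longest caption in batch)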
|
danarip/ImageCaptionGenerator
|
source/data_preprocessing.py
|
data_preprocessing.py
|
py
| 5,555 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73154395388
|
import os
import pickle
import argparse
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from Model.INPLIM import Doctor
from data_utils import CodeLevelDataset
from utils import train_eval
def args():
parser = argparse.ArgumentParser()
parser.add_argument('--data_root', type=str, default='',
help='Set the root path of the dataset')
parser.add_argument('--devices', type=str, default='cpu',
help='Setting the IDs of GPU devices.')
parser.add_argument('--epochs', type=int, default=15,
help='Setting the number of epochs to run.')
parser.add_argument('--batch_size', type=int, default=512,
help='Setting the mini-batch size.')
parser.add_argument('--weight_decay', type=float, default=1e-3,
help='Setting weight decay')
parser.add_argument('--lr', type=float, default=1e-3,
help='Setting the learning rate.')
parser.add_argument('--dim', type=int, default=128,
help='Setting the inner dim of the model.')
parser.add_argument('--max_len', type=int, default=200,
help='Setting the maximum number of code to use for a patient.')
parser.add_argument('--drop_context', type=float, default=0.3,
help='Setting drop rate of the context-aware branch.')
parser.add_argument('--drop_time', type=float, default=0.3,
help='Setting drop rate of the time-aware branch.')
parser.add_argument('--save_model', action='store_true',
help='Whether to save the parameters of the trained model.',
default=True)
parser.add_argument('--save_dir', type=str, default='./saved_models',
help='Setting the dir of saving trained model.')
return parser
def main(opts):
if opts.devices != 'cpu':
os.environ['CUDA_VISIBLE_DEVICES'] = opts.devices
dataset = pickle.load(open(opts.data_root, 'rb'))
train_set = CodeLevelDataset(dataset=dataset, max_len=opts.max_len, phase='train')
valid_set = CodeLevelDataset(dataset=dataset, max_len=opts.max_len, phase='val')
test_set = CodeLevelDataset(dataset=dataset, max_len=opts.max_len, phase='test')
train_loader = DataLoader(train_set, batch_size=opts.batch_size, num_workers=2, shuffle=True)
val_loader = DataLoader(valid_set, batch_size=1, num_workers=1, shuffle=False)
test_loader = DataLoader(test_set, batch_size=1, num_workers=1, shuffle=False)
net = Doctor(features=dataset['features'], out_dim=2, emb_dim=opts.dim, dropout_context=opts.drop_context,
dropout_time=opts.drop_time)
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=opts.lr, weight_decay=opts.weight_decay, eps=0)
if opts.devices != 'cpu':
net = torch.nn.DataParallel(net).cuda()
criterion = criterion.cuda()
best_auc, best_epoch, best_test_nll, best_test_auc_roc = 0, 0, 0, 0
model_dict = {}
for epoch in range(opts.epochs):
train_eval(opts, net, criterion, optimizer, train_loader, epoch, phase='Train')
_, auc = train_eval(opts, net, criterion, optimizer, val_loader, epoch, phase='Valid')
if auc > best_auc:
best_auc, best_epoch = auc, epoch
best_test_nll, best_test_auc_roc = train_eval(opts, net, criterion, optimizer, test_loader, epoch,
phase='Test')
model_dict['opts'] = opts
model_dict['states'] = net.state_dict()
print('Best Test NLL:{:.4f}\t Best AUROC:{:.4f}'.format(best_test_nll, best_test_auc_roc))
if not os.path.exists(opts.save_dir):
os.makedirs(opts.save_dir)
if opts.save_model:
torch.save(model_dict, os.path.join(opts.save_dir, 'Model-AUC-{:.4f}.pth'.format(best_test_auc_roc)))
if __name__ == '__main__':
opts = args().parse_args()
main(opts)
|
xlbryantx/INPLIM
|
main.py
|
main.py
|
py
| 4,064 |
python
|
en
|
code
| 3 |
github-code
|
6
|
21489679841
|
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 14 14:19:49 2021
@author: 姜高晓
"""
import numpy as np
from scipy.fftpack import fft,ifft
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn import svm
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.model_selection import ParameterGrid, GridSearchCV
R=np.array([ [0,0,1300],
[5000,0,1700],
[0,5000,1700],
[5000,5000,1300]
])
#%% Gradient descent method
def L(r,A):
s=0
for i in range(4):
s+=(np.sqrt(((r-R[i])**2).sum())-A[i])**2
return s
def dL(r,A):
s=np.zeros(3)
    for i in range(4):  ## i indexes the 4 anchors
s[0]+=1*(np.sqrt(((r-R[i])**2).sum())-A[i])*(r[0]-R[i][0])/(np.sqrt(((r-R[i])**2).sum()))*1
s[1]+=1*(np.sqrt(((r-R[i])**2).sum())-A[i])*(r[1]-R[i][1])/(np.sqrt(((r-R[i])**2).sum()))*1
s[2]+=1*(np.sqrt(((r-R[i])**2).sum())-A[i])*(r[2]-R[i][2])/(np.sqrt(((r-R[i])**2).sum()))*1
return s
alpha=0.1
Nstep=800
pre=[]
for i in range(1,325):
d=np.loadtxt("./out正常/"+str(i//1)+".正常.txt")
#d=np.loadtxt("./out异常/"+str(i//1)+".异常.txt")
d1=np.zeros(4)
d1[0]=d[:,1].mean()
d1[1]=d[:,2].mean()
d1[2]=d[:,3].mean()
d1[3]=d[:,4].mean()
rn=[np.array([2000,2000,2000])]
for i in range(Nstep):
rn.append(rn[-1]-alpha*dL(rn[-1],d1))
rn=np.array(rn)
l=[rn[-50:,0].mean(),rn[-50:,1].mean(),rn[-50:,2].mean()]
lstd=np.array([rn[-500:,0].var(),rn[-500:,1].var(),rn[-500:,2].var()])
pre.append([L(l,d1),lstd.sum()])
pre=np.array(pre)
kam1=pre[:,0].min()
kam2=pre[:,1].max()
#mean=
pre[:,0]=pre[:,0]/kam1
pre[:,1]=pre[:,1]/kam2
pre1=np.array(pre)
a1=pre[:,0].mean()
a2=pre[:,1].mean()
#%% Naive values
pre=[]
for i in range(1,325):
#d=np.loadtxt("./out正常/"+str(i//1)+".正常.txt")
d=np.loadtxt("./out异常/"+str(i//1)+".异常.txt")
d1=np.zeros(4)
d1[0]=d[:,1].mean()
d1[1]=d[:,2].mean()
d1[2]=d[:,3].mean()
d1[3]=d[:,4].mean()
rn=[np.array([2000,2000,2000])]
for i in range(Nstep):
rn.append(rn[-1]-alpha*dL(rn[-1],d1))
rn=np.array(rn)
l=[rn[-50:,0].mean(),rn[-50:,1].mean(),rn[-50:,2].mean()]
lstd=np.array([rn[-500:,0].var(),rn[-500:,1].var(),rn[-500:,2].var()])
pre.append([L(l,d1),lstd.sum()])
pre=np.array(pre)
pre[:,0]=pre[:,0]/kam1
pre[:,1]=pre[:,1]/kam2
#plt.plot(pre)
#plt.show()
b1=pre[:,0].mean()
b2=pre[:,1].mean()
# 126 124 53
# 118 128 63
# 119 122 138 normal
#1.4 1.4 0.57
pre2=np.array(pre)
a=open("./4.txt",encoding='utf-8').read().splitlines()
for i in range(len(a)):
a[i]=a[i].split(':')
a=np.array(a)
b=np.zeros((len(a[:,0])//4,5))
for i in range(len(a[:,0])//4):
b[i,0]=a[4*i,1]
b[i,1]=a[4*i,5]
b[i,2]=a[4*i+1,5]
b[i,3]=a[4*i+2,5]
b[i,4]=a[4*i+3,5]
pre=[]
for i in range(10):
rn=[np.array([2000,2000,2000])]
d1=b[i,1:]
for j in range(Nstep):
rn.append(rn[-1]-alpha*dL(rn[-1],d1))
rn=np.array(rn)
l=[rn[-50:,0].mean(),rn[-50:,1].mean(),rn[-50:,2].mean()]
lstd=np.array([rn[-500:,0].var(),rn[-500:,1].var(),rn[-500:,2].var()])
pre.append([L(l,d1),lstd.sum()])
pre=np.array(pre)
pre[:,0]=pre[:,0]/kam1
pre[:,1]=pre[:,1]/kam2
out=pre
aaaaa=out[:,0]*out[:,1]
label=[]
for i in range(10):
A1=len(np.where(pre1[:,0]>out[i,0])[0])
A2=(len(np.where(pre2[:,0]>out[i,0])[0]))
label.append(A2/(A1+A2))
np.array(label)>0.5
|
GazerJ/Math_2021_HW
|
math/4.2分类器自定义分类器.py
|
4.2分类器自定义分类器.py
|
py
| 3,803 |
python
|
en
|
code
| 3 |
github-code
|
6
|
1584185561
|
# This is mostly lifted from django-storages' sftp backend: Their license:
#
# SFTP storage backend for Django.
# Author: Brent Tubbs <[email protected]>
# License: MIT
#
# Modeled on the FTP storage by Rafal Jonca <[email protected]>
from __future__ import print_function
try:
import ssh
except ImportError:
import paramiko as ssh
import os
import posixpath
import warnings
from django.conf import settings
from django.core.files.base import File
try:
from io import StringIO
except ImportError:
# Python 2 fallbacks
from cStringIO import StringIO
from localdevstorage.base import BaseStorage
class SftpStorage(BaseStorage):
def __init__(self, location=None, base_url=None, user=None, host=None, root_path=None):
warnings.warn(
'The SFTP backend is unsupported and untested. '
'Usage is not recommended!'
)
self._host = host or settings.LOCALDEVSTORAGE_SFTP_HOST
self._root_path = root_path or settings.LOCALDEVSTORAGE_SFTP_ROOT_PATH
# if present, settings.SFTP_STORAGE_PARAMS should be a dict with params
# matching the keyword arguments to paramiko.SSHClient().connect(). So
# you can put username/password there. Or you can omit all that if
# you're using keys.
self._params = getattr(settings, 'SFTP_STORAGE_PARAMS', {})
self._params['username'] = user or settings.LOCALDEVSTORAGE_SFTP_USER
# for now it's all posix paths. Maybe someday we'll support figuring
# out if the remote host is windows.
self._pathmod = posixpath
super(SftpStorage, self).__init__(location, base_url)
def _connect(self):
self._ssh = ssh.SSHClient()
# automatically add host keys from current user.
self._ssh.load_host_keys(os.path.expanduser(os.path.join("~", ".ssh", "known_hosts")))
# and automatically add new host keys for hosts we haven't seen before.
self._ssh.set_missing_host_key_policy(ssh.AutoAddPolicy())
try:
self._ssh.connect(self._host, **self._params)
except ssh.AuthenticationException as e:
raise
except Exception as e:
print(e)
if not hasattr(self, '_sftp'):
self._sftp = self._ssh.open_sftp()
@property
def sftp(self):
"""Lazy SFTP connection"""
if not hasattr(self, '_sftp'):
self._connect()
return self._sftp
def _get(self, name):
try:
return SFTPStorageFile(name, self, 'rb')
except IOError:
pass
def _exists_upstream(self, name):
try:
f = SFTPStorageFile(name, self, 'rb')
f.close()
return True
except Exception:
return False
def _read(self, name):
remote_path = self._remote_path(name)
return self.sftp.open(remote_path, 'rb')
def _remote_path(self, name):
return self._join(self._root_path, name)
def _join(self, *args):
# Use the path module for the remote host type to join a path together
return self._pathmod.join(*args)
class SFTPStorageFile(File):
def __init__(self, name, storage, mode):
self._name = name
self._storage = storage
self._mode = mode
self._is_dirty = False
self.file = StringIO()
self._is_read = False
@property
def size(self):
if not hasattr(self, '_size'):
self._size = self._storage.size(self._name)
return self._size
def read(self, num_bytes=None):
if not self._is_read:
self.file = self._storage._read(self._name)
self._is_read = True
return self.file.read(num_bytes)
def write(self, content):
raise NotImplementedError
def close(self):
if self._is_dirty:
self._storage._save(self._name, self.file.getvalue())
self.file.close()
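# Illustrative settings sketch (added; all values are placeholders). The
# backend reads its connection details from the Django settings referenced
# above:
#     LOCALDEVSTORAGE_SFTP_HOST = 'dev.example.com'
#     LOCALDEVSTORAGE_SFTP_USER = 'deploy'
#     LOCALDEVSTORAGE_SFTP_ROOT_PATH = '/var/www/media'
#     SFTP_STORAGE_PARAMS = {'password': '...'}  # optional; omit when using SSH keys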
|
beniwohli/django-localdevstorage
|
localdevstorage/sftp.py
|
sftp.py
|
py
| 3,964 |
python
|
en
|
code
| 50 |
github-code
|
6
|
38586292194
|
import cv2
import numpy as np
img = cv2.imread('images/saitama.jpg')
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) # convert it to hsv
width = img.shape[1]
height = img.shape[0]
channel = img.shape[2]
increase_v = 40
decrease_s = 10
step = 2
# transform the image
print("chon huong di cua animation: ")
print("1.Left -> Right")
print("2.Right -> Left")
print("3.Down")
print("4.Up")
flag = input()
if flag == '3':
    # downward direction
for y in range(1, height, step):
h, s, v = cv2.split(hsv[0: y, :])
v = np.where(v <= 255 - increase_v, v + increase_v, 255)
s = np.where(s >= 0 + decrease_s, s - decrease_s, 0)
hsv[0: y, :] = cv2.merge((h, s, v))
img = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
cv2.imshow('animation', img)
cv2.waitKey(1)
elif flag == '1':
    # rightward direction
for x in range(1, width, step):
h, s, v = cv2.split(hsv[:, 0: x])
v = np.where(v <= 255 - increase_v, v + increase_v, 255)
s = np.where(s >= 0 + decrease_s, s - decrease_s, 0)
hsv[:, 0:x] = cv2.merge((h, s, v))
img = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
cv2.imshow('animation', img)
cv2.waitKey(1)
elif flag == '2':
    # leftward direction
for x in range(width - 2, 0, -step):
h, s, v = cv2.split(hsv[:, x: -1])
v = np.where(v <= 255 - increase_v, v + increase_v, 255)
s = np.where(s >= 0 + decrease_s, s - decrease_s, 0)
hsv[:, x:-1] = cv2.merge((h, s, v))
img = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
cv2.imshow('animation', img)
cv2.waitKey(1)
elif flag == '4':
    # upward direction
for y in range(height - 2, 0, -step):
h, s, v = cv2.split(hsv[y: -1, :])
v = np.where(v <= 255 - increase_v, v + increase_v, 255)
s = np.where(s >= 0 + decrease_s, s - decrease_s, 0)
hsv[y: -1, :] = cv2.merge((h, s, v))
img = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
cv2.imshow('animation', img)
cv2.waitKey(1)
else:
    print('enter a value from 1 to 4')
cv2.destroyAllWindows()
|
19522515/CS231.L21-Computer-Vision-Project
|
Source code/pptanimation_swipe.py
|
pptanimation_swipe.py
|
py
| 2,089 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40467024636
|
# Hash
import sys
input = sys.stdin.readline
n,m = map(int, input().split())
graph = {}
for _ in range(n):
address, num = input().rstrip().split()
graph[address] = num
for _ in range(m):
temp = input().rstrip()
print(graph[temp])
|
Cho-El/coding-test-practice
|
백준 문제/해시/비밀번호 찾기.py
|
비밀번호 찾기.py
|
py
| 248 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72599581948
|
import unittest
import bioschemas
from bioschemas.extractors import ExtractorFromHtml
config = bioschemas.DEFAULT_CONFIG
class TestExtractors(unittest.TestCase):
def test_jsonld_extraction_from_html(self):
html = '''<script type="application/ld+json">
{
"@context": "http://bioschemas.org",
"@type": "PhysicalEntity",
"name": "Gene arcA E. coli str. K-12 substr. MG1655 b4401",
"additionalType": "http://www.ebi.ac.uk/ols/ontologies/so/terms?obo_id=SO:0000704",
"identifier": "b4401",
"url": "http://localhost:8080/synbiomine/report.do?id=2026346"
}
</script>
'''
e = ExtractorFromHtml(config)
jsonlds = e._extract_jsonld_from_html(html)
self.assertEqual(len(jsonlds), 1)
jsonld = jsonlds[0]
self.assertEqual(jsonld['name'], 'Gene arcA E. coli str. K-12 substr. MG1655 b4401')
self.assertEqual(jsonld['additionalType'], 'http://www.ebi.ac.uk/ols/ontologies/so/terms?obo_id=SO:0000704')
self.assertEqual(jsonld['identifier'], 'b4401')
self.assertEqual(jsonld['url'], 'http://localhost:8080/synbiomine/report.do?id=2026346')
|
buzzbangorg/bsbang-crawler
|
bioschemas/test_extractors.py
|
test_extractors.py
|
py
| 1,197 |
python
|
en
|
code
| 4 |
github-code
|
6
|
25907426422
|
whiskyPriceInBgn = float(input())
beerLiters = float(input())
wineLiters = float(input())
rakiaLiters = float(input())
whiskyLiters = float(input())
rakiaPrice = whiskyPriceInBgn / 2
winePrice = rakiaPrice - (0.4 * rakiaPrice)
beerPrice = rakiaPrice - (0.8 * rakiaPrice)
totalSum = (whiskyPriceInBgn * whiskyLiters) + (beerLiters * beerPrice) + (wineLiters * winePrice) + (rakiaPrice * rakiaLiters)
print(f"{totalSum:.2f}")
|
skipter/Programming-Basics-Python
|
Python Basics October 2018/Python-Simple-Operations-and-Calculations-Exercise/AlcoholMarket.py
|
AlcoholMarket.py
|
py
| 425 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74514085946
|
#!/usr/bin/env python3
from __future__ import print_function
from __future__ import division
# System level imports
import sys
import os
import argparse
import logging
import time
import math
import numpy as np
import matplotlib.pyplot as plt
from numpy.core.defchararray import index
import controller2d
#import controller2d_AR as controller2d
import configparser
import local_planner
import behavioural_planner
import cv2
from math import sin, cos, pi, tan, sqrt
from utils import compute_middle_point
from agents import Agent
from sidewalk import point_in_sidewalks
from converter import Converter
import os
# Script level imports
sys.path.append(os.path.abspath(sys.path[0] + '/..'))
import live_plotter as lv # Custom live plotting library
from carla import sensor
from carla.client import make_carla_client, VehicleControl
from carla.settings import CarlaSettings
from carla.tcp import TCPConnectionError
from carla.controller import utils
from carla.sensor import Camera
from carla.image_converter import labels_to_array, depth_to_array, to_bgra_array
from carla.planner.city_track import CityTrack
from AVD_BP.carla_object_detector.carla_yolo_model.OD import load_model,predict,postprocess
from AVD_BP.carla_object_detector.carla_yolo_model.config import VEHICLE_TAG,PERSON_TAG
SERVER_HOST = "193.205.163.183"
SERVER_PORT = 6018
LOCAL_HOST = "localhost"
LOCAL_PORT = 2000
SIMULATION_PERFECT = False
###############################################################################
# CONFIGURABLE PARAMETERS DURING EXAM
###############################################################################
PLAYER_START_INDEX = 15 #20 #89 #148 #91 # spawn index for player
DESTINATION_INDEX = 139 #40# 133 #61 #142 # Setting a Destination HERE
NUM_PEDESTRIANS = 500 # total number of pedestrians to spawn
NUM_VEHICLES = 500 # total number of vehicles to spawn
SEED_PEDESTRIANS = 0 # seed for pedestrian spawn randomizer
SEED_VEHICLES = 1 # seed for vehicle spawn randomizer
###############################################################################
ITER_FOR_SIM_TIMESTEP = 10 # no. iterations to compute approx sim timestep
WAIT_TIME_BEFORE_START = 1.00 # game seconds (time before controller start)
TOTAL_RUN_TIME = 5000.00 # game seconds (total runtime before sim end)
TOTAL_FRAME_BUFFER = 300 # number of frames to buffer after total runtime
CLIENT_WAIT_TIME = 3 # wait time for client before starting episode
# used to make sure the server loads
# consistently
DESIRED_SPEED = 5.0
WINDOWS_OS = os.name == 'nt'
WEATHER = "DEFAULT"
WEATHERID = {
"DEFAULT": 0,
"CLEARNOON": 1,
"CLOUDYNOON": 2,
"WETNOON": 3,
"WETCLOUDYNOON": 4,
"MIDRAINYNOON": 5,
"HARDRAINNOON": 6,
"SOFTRAINNOON": 7,
"CLEARSUNSET": 8,
"CLOUDYSUNSET": 9,
"WETSUNSET": 10,
"WETCLOUDYSUNSET": 11,
"MIDRAINSUNSET": 12,
"HARDRAINSUNSET": 13,
"SOFTRAINSUNSET": 14,
}
#SIMWEATHER = WEATHERID[WEATHER] # set simulation weather
FIGSIZE_X_INCHES = 8 # x figure size of feedback in inches
FIGSIZE_Y_INCHES = 8 # y figure size of feedback in inches
PLOT_LEFT = 0.1 # in fractions of figure width and height
PLOT_BOT = 0.1
PLOT_WIDTH = 0.8
PLOT_HEIGHT = 0.8
DIST_THRESHOLD_TO_LAST_WAYPOINT = 2.0 # some distance from last position before
# simulation ends
# Planning Constants
NUM_PATHS = 7
BP_LOOKAHEAD_BASE = 16.0 # m
BP_LOOKAHEAD_TIME = 1.0 # s
PATH_OFFSET = 1.5 # m
CIRCLE_OFFSETS = [-1.0, 1.0, 3.0] # m
CIRCLE_RADII = [1.5, 1.5, 1.5] # m
TIME_GAP = 1.0 # s
PATH_SELECT_WEIGHT = 10
A_MAX = 2.5 # m/s^2
SLOW_SPEED = 2.0 # m/s
STOP_LINE_BUFFER = 3.5 # m
LEAD_VEHICLE_LOOKAHEAD = 20.0 # m
LP_FREQUENCY_DIVISOR = 2 # Frequency divisor to make the
# local planner operate at a lower
# frequency than the controller
# (which operates at the simulation
# frequency). Must be a natural
# number.
# Path interpolation parameters
INTERP_MAX_POINTS_PLOT = 10 # number of points used for displaying
# selected path
INTERP_DISTANCE_RES = 0.01 # distance between interpolated points
# controller output directory
CONTROLLER_OUTPUT_FOLDER = os.path.dirname(os.path.realpath(__file__)) +\
'/controller_output/'
AGENTS_CHECK_RADIUS = 30
# Camera parameters
camera_parameters = {}
camera_parameters['x'] = 1.8
camera_parameters['y'] = 0.0
camera_parameters['z'] = 1.3
camera_parameters['pitch'] = 0.0
camera_parameters['roll'] = 0.0
camera_parameters['yaw'] = 0.0
camera_parameters['width'] = 224#200
camera_parameters['height'] = 224#200
camera_parameters['fov'] = 90
camera_parameters_bis = {}
camera_parameters_bis['x'] = 1.8
camera_parameters_bis['y'] = 0.0
camera_parameters_bis['z'] = 1.3
camera_parameters_bis['pitch'] = 0.0
camera_parameters_bis['roll'] = 0.0
camera_parameters_bis['yaw'] = 0.0
camera_parameters_bis['width'] = 224#200
camera_parameters_bis['height'] = 224#200
camera_parameters_bis['fov'] = 120
camera_parameters_view = {}
camera_parameters_view['x'] = -5.0
camera_parameters_view['y'] = 0.0
camera_parameters_view['z'] = 2.5
camera_parameters_view['pitch'] = -15.0
camera_parameters_view['roll'] = 0.0
camera_parameters_view['yaw'] = 0.0
camera_parameters_view['width'] = 500
camera_parameters_view['height'] = 500
camera_parameters_view['fov'] = 90
def rotate_x(angle):
R = np.mat([[ 1, 0, 0],
[ 0, cos(angle), -sin(angle) ],
[ 0, sin(angle), cos(angle) ]])
return R
def rotate_y(angle):
R = np.mat([[ cos(angle), 0, sin(angle) ],
[ 0, 1, 0 ],
[-sin(angle), 0, cos(angle) ]])
return R
def rotate_z(angle):
R = np.mat([[ cos(angle), -sin(angle), 0 ],
[ sin(angle), cos(angle), 0 ],
[ 0, 0, 1 ]])
return R
def find_pedestrians_and_vehicles_from_camera(net, camera_data, seg_data, depth_data, current_x, current_y, current_z, current_yaw, camera_parameters, bis=False):
"""
Args:
        - net: network trained to perform object detection on images
        - camera_data: RGB camera data
        - seg_data: semantic segmentation camera data
        - depth_data: depth camera data
    Returns:
        - world_frame_vehicles: list of (x, y) world-frame coordinates of the vehicles detected by the camera
        - world_frame_pedestrians: as above, but for pedestrians
        - sidewalk: dict mapping each pedestrian bounding-box index to True if its reference point lies on a sidewalk
"""
converter = Converter(camera_parameters)
###################################
# GET BBs
bb = predict(net,camera_data)
camera_data, bb_dict= postprocess(camera_data,bb)
if bis:
cv2.imshow("Detection box bis",camera_data)
else:
cv2.imshow("Detection box",camera_data)
cv2.waitKey(10)
#bbs vehicle and pedestrian
## bb_p and bb_v are lists like [[(x,y),width,height]]
# NOTE to access to a specific pixel from bb x,y -> camera_data[y,x]
#list of pedestrians bounding boxes
bb_p = bb_dict[PERSON_TAG]
# list of bounding boxis
bb_v = bb_dict[VEHICLE_TAG]
###################################
    # MARK PEDESTRIAN BBs ON SIDEWALKS
# only pedestrian bb
# found point in the middle of bb vertex like X, x1 refer to (x,y) from one bb in bb_p
# x1--------x2
# | |
# x3---X----x4
#
# if X is on sidewalk (or x3 or x4) mark this pedestrian as on sidewalk
# in this section for each pedestrian bb check if point X is on sidewalk
# USE FUNCTION : point_in_sidewalks(semSeg_data, point) NOTE: point must be provided as (y,x)
count=0
    sidewalk = {}  # maps each pedestrian bb index to True if its X point is on a sidewalk, otherwise False
for bb in bb_p:
middle_point = compute_middle_point(bb[0][0], bb[0][1], bb[1], bb[2])
on_sidewalk = point_in_sidewalks(seg_data, middle_point)
sidewalk[count] = on_sidewalk
count+=1
###################################
    # FOR EACH BB WE CAN CHOOSE THE X POINT DESCRIBED IN THE PREVIOUS SECTION TO GET VEHICLE POSITIONS
# IN 3D WORLD COORDINATE FRAME
# USING DEPTH CAMERA GET PEDESTRIAN BB AND VEHICLE BB IN WORLD COORDINATES FRAME
# USE this to convert a pixel in 3D pixel should be [x,y,1] pixel_depth = depth_data[y1][x1]
#converter.convert_to_3D(pixel,pixel_depth,current_x,current_y,current_yaw)
world_frame_vehicles = [] #list of tuples of converted pixel in the world
for vehicle in bb_v:
middle_point = compute_middle_point(vehicle[0][0], vehicle[0][1], vehicle[1], vehicle[2])
middle_point = (min(middle_point[0],camera_parameters['height']-1), min(middle_point[1], camera_parameters['width']-1))
pixel = [middle_point[0], middle_point[1]]
pixel_depth = depth_data[middle_point[1], middle_point[0]]*1000
world_frame_point= converter.convert_to_3D(pixel, pixel_depth, current_x, current_y,current_z,current_yaw)
world_frame_vehicles.append(world_frame_point)
world_frame_pedestrians = [] #list of tuples of converted pixel in the world
for pedestrian in bb_p:
middle_point = compute_middle_point(pedestrian[0][0], pedestrian[0][1], pedestrian[1], pedestrian[2])
middle_point = (min(middle_point[0],camera_parameters['height']-1), min(middle_point[1], camera_parameters['width']-1))
pixel = [middle_point[0], middle_point[1]]
pixel_depth = depth_data[middle_point[1], middle_point[0]]*1000
world_frame_point= converter.convert_to_3D(pixel, pixel_depth, current_x, current_y,current_z,current_yaw)
world_frame_pedestrians.append(world_frame_point)
return world_frame_vehicles, world_frame_pedestrians, sidewalk
# Transform the obstacle with its boundary point in the global frame
# bounding_box.transform.location, bounding_box.extent ,bounding_box.transform.rotation
def obstacle_to_world(location, dimensions, orientation):
box_pts = []
x = location.x
y = location.y
z = location.z
yaw = orientation.yaw * pi / 180
xrad = dimensions.x
yrad = dimensions.y
zrad = dimensions.z
# Border points in the obstacle frame
cpos = np.array([
[-xrad, -xrad, -xrad, 0, xrad, xrad, xrad, 0 ],
[-yrad, 0, yrad, yrad, yrad, 0, -yrad, -yrad]])
# Rotation of the obstacle
rotyaw = np.array([
[np.cos(yaw), np.sin(yaw)],
[-np.sin(yaw), np.cos(yaw)]])
# Location of the obstacle in the world frame
cpos_shift = np.array([
[x, x, x, x, x, x, x, x],
[y, y, y, y, y, y, y, y]])
cpos = np.add(np.matmul(rotyaw, cpos), cpos_shift)
for j in range(cpos.shape[1]):
box_pts.append([cpos[0,j], cpos[1,j]])
return box_pts
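# Worked example (added for clarity; not part of the original file): for an
# obstacle at location (10, 20) with yaw = 0 and extents xrad = 2, yrad = 1,
# rotyaw is the identity matrix, so the eight border points are simply shifted
# by the location, giving [8, 19], [8, 20], [8, 21], [10, 21], [12, 21],
# [12, 20], [12, 19], [10, 19].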
def make_carla_settings(args):
"""Make a CarlaSettings object with the settings we need.
"""
settings = CarlaSettings()
# There is no need for non-agent info requests if there are no pedestrians
# or vehicles.
get_non_player_agents_info = False
if (NUM_PEDESTRIANS > 0 or NUM_VEHICLES > 0):
get_non_player_agents_info = True
# Base level settings
settings.set(
SynchronousMode=True,
SendNonPlayerAgentsInfo=get_non_player_agents_info,
NumberOfVehicles=NUM_VEHICLES,
NumberOfPedestrians=NUM_PEDESTRIANS,
SeedVehicles=SEED_VEHICLES,
SeedPedestrians=SEED_PEDESTRIANS,
WeatherId=WEATHERID[args.weather],
QualityLevel=args.quality_level)
# Common cameras settings
cam_height = camera_parameters['z']
cam_x_pos = camera_parameters['x']
cam_y_pos = camera_parameters['y']
camera_pitch = camera_parameters['pitch']
camera_roll = camera_parameters['roll']
camera_yaw = camera_parameters['yaw']
camera_width = camera_parameters['width']
camera_height = camera_parameters['height']
camera_fov = camera_parameters['fov']
cam_height_bis = camera_parameters_bis['z']
cam_x_pos_bis = camera_parameters_bis['x']
cam_y_pos_bis = camera_parameters_bis['y']
camera_pitch_bis = camera_parameters_bis['pitch']
camera_roll_bis = camera_parameters_bis['roll']
camera_yaw_bis = camera_parameters_bis['yaw']
camera_width_bis = camera_parameters_bis['width']
camera_height_bis = camera_parameters_bis['height']
camera_fov_bis = camera_parameters_bis['fov']
# Declare here your sensors
camera0 = Camera("CameraRGB")
camera0.set_image_size(camera_width, camera_height)
camera0.set(FOV=camera_fov)
camera0.set_position(cam_x_pos, cam_y_pos, cam_height)
camera0.set_rotation(camera_pitch, camera_roll, camera_yaw)
camera0bis = Camera("CameraRGBbis")
camera0bis.set_image_size(camera_width_bis, camera_height_bis)
camera0bis.set(FOV=camera_fov_bis)
camera0bis.set_position(cam_x_pos_bis, cam_y_pos_bis, cam_height_bis)
camera0bis.set_rotation(camera_pitch_bis, camera_roll_bis, camera_yaw_bis)
camera1 = Camera("CameraSemSeg", PostProcessing="SemanticSegmentation")
camera1.set_image_size(camera_width, camera_height)
camera1.set(FOV=camera_fov)
camera1.set_position(cam_x_pos, cam_y_pos, cam_height)
camera1.set_rotation(camera_pitch, camera_roll, camera_yaw)
camera1bis = Camera("CameraSemSegbis", PostProcessing="SemanticSegmentation")
camera1bis.set_image_size(camera_width_bis, camera_height_bis)
camera1bis.set(FOV=camera_fov_bis)
camera1bis.set_position(cam_x_pos_bis, cam_y_pos_bis, cam_height_bis)
camera1bis.set_rotation(camera_pitch_bis, camera_roll_bis, camera_yaw_bis)
camera2 = Camera("CameraDepth", PostProcessing="Depth")
camera2.set_image_size(camera_width, camera_height)
camera2.set(FOV=camera_fov)
camera2.set_position(cam_x_pos, cam_y_pos, cam_height)
camera2.set_rotation(camera_pitch, camera_roll, camera_yaw)
camera2bis = Camera("CameraDepthbis", PostProcessing="Depth")
camera2bis.set_image_size(camera_width_bis, camera_height_bis)
camera2bis.set(FOV=camera_fov_bis)
camera2bis.set_position(cam_x_pos_bis, cam_y_pos_bis, cam_height_bis)
camera2bis.set_rotation(camera_pitch_bis, camera_roll_bis, camera_yaw_bis)
settings.add_sensor(camera0)
settings.add_sensor(camera0bis)
settings.add_sensor(camera1)
settings.add_sensor(camera1bis)
settings.add_sensor(camera2)
settings.add_sensor(camera2bis)
if not args.local:
# Common cameras settings
cam_height = camera_parameters_view['z']
cam_x_pos = camera_parameters_view['x']
cam_y_pos = camera_parameters_view['y']
camera_pitch = camera_parameters_view['pitch']
camera_roll = camera_parameters_view['roll']
camera_yaw = camera_parameters_view['yaw']
camera_width = camera_parameters_view['width']
camera_height = camera_parameters_view['height']
camera_fov = camera_parameters_view['fov']
# Declare here your sensors
camera3 = Camera("CameraRGBView")
camera3.set_image_size(camera_width, camera_height)
camera3.set(FOV=camera_fov)
camera3.set_position(cam_x_pos, cam_y_pos, cam_height)
camera3.set_rotation(camera_pitch, camera_roll, camera_yaw)
settings.add_sensor(camera3)
return settings
class Timer(object):
""" Timer Class
The steps are used to calculate FPS, while the lap or seconds since lap is
used to compute elapsed time.
"""
def __init__(self, period):
self.step = 0
self._lap_step = 0
self._lap_time = time.time()
self._period_for_lap = period
def tick(self):
self.step += 1
def has_exceeded_lap_period(self):
if self.elapsed_seconds_since_lap() >= self._period_for_lap:
return True
else:
return False
def lap(self):
self._lap_step = self.step
self._lap_time = time.time()
def ticks_per_second(self):
return float(self.step - self._lap_step) /\
self.elapsed_seconds_since_lap()
def elapsed_seconds_since_lap(self):
return time.time() - self._lap_time
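# Hedged usage sketch (not part of the original script): Timer is meant to be polled
# from a main loop, e.g.
#
#   plot_timer = Timer(period=1.0)            # lap period in seconds
#   while running:                            # 'running' is a hypothetical loop flag
#       plot_timer.tick()
#       if plot_timer.has_exceeded_lap_period():
#           fps = plot_timer.ticks_per_second()   # ticks since the last lap / elapsed time
#           plot_timer.lap()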
def get_current_pose(measurement):
"""Obtains current x,y,yaw pose from the client measurements
Obtains the current x,y, and yaw pose from the client measurements.
Args:
measurement: The CARLA client measurements (from read_data())
Returns: (x, y, z, pitch, roll, yaw)
x: X position in meters
y: Y position in meters
z: Z position in meters
pitch: Pitch rotation in radians
roll: Roll rotation in radians
yaw: Yaw rotation in radians
"""
x = measurement.player_measurements.transform.location.x
y = measurement.player_measurements.transform.location.y
z = measurement.player_measurements.transform.location.z
pitch = math.radians(measurement.player_measurements.transform.rotation.pitch)
roll = math.radians(measurement.player_measurements.transform.rotation.roll)
yaw = math.radians(measurement.player_measurements.transform.rotation.yaw)
return (x, y, z, pitch, roll, yaw)
def get_start_pos(scene):
"""Obtains player start x,y, yaw pose from the scene
Obtains the player x,y, and yaw pose from the scene.
Args:
scene: The CARLA scene object
Returns: (x, y, yaw)
x: X position in meters
y: Y position in meters
yaw: Yaw position in radians
"""
x = scene.player_start_spots[0].location.x
y = scene.player_start_spots[0].location.y
yaw = math.radians(scene.player_start_spots[0].rotation.yaw)
return (x, y, yaw)
def get_player_collided_flag(measurement,
prev_collision_vehicles,
prev_collision_pedestrians,
prev_collision_other):
"""Obtains collision flag from player. Check if any of the three collision
metrics (vehicles, pedestrians, others) from the player are true, if so the
player has collided to something.
Note: From the CARLA documentation:
"Collisions are not annotated if the vehicle is not moving (<1km/h) to avoid
annotating undesired collision due to mistakes in the AI of non-player
agents."
"""
player_meas = measurement.player_measurements
current_collision_vehicles = player_meas.collision_vehicles
current_collision_pedestrians = player_meas.collision_pedestrians
current_collision_other = player_meas.collision_other
collided_vehicles = current_collision_vehicles > prev_collision_vehicles
collided_pedestrians = current_collision_pedestrians > \
prev_collision_pedestrians
collided_other = current_collision_other > prev_collision_other
return (collided_vehicles or collided_pedestrians or collided_other,
current_collision_vehicles,
current_collision_pedestrians,
current_collision_other)
def send_control_command(client, throttle, steer, brake,
hand_brake=False, reverse=False):
"""Send control command to CARLA client.
Send control command to CARLA client.
Args:
client: The CARLA client object
throttle: Throttle command for the sim car [0, 1]
steer: Steer command for the sim car [-1, 1]
brake: Brake command for the sim car [0, 1]
hand_brake: Whether the hand brake is engaged
reverse: Whether the sim car is in the reverse gear
"""
control = VehicleControl()
# Clamp all values within their limits
steer = np.fmax(np.fmin(steer, 1.0), -1.0)
throttle = np.fmax(np.fmin(throttle, 1.0), 0)
brake = np.fmax(np.fmin(brake, 1.0), 0)
control.steer = steer
control.throttle = throttle
control.brake = brake
control.hand_brake = hand_brake
control.reverse = reverse
client.send_control(control)
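# Hedged usage note (not from the original source): the commands are clamped before
# being sent, so e.g. send_control_command(client, throttle=1.4, steer=-2.0, brake=0.0)
# effectively applies throttle=1.0, steer=-1.0, brake=0.0.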
def create_controller_output_dir(output_folder):
if not os.path.exists(output_folder):
os.makedirs(output_folder)
def store_trajectory_plot(graph, fname):
""" Store the resulting plot.
"""
create_controller_output_dir(CONTROLLER_OUTPUT_FOLDER)
file_name = os.path.join(CONTROLLER_OUTPUT_FOLDER, fname)
graph.savefig(file_name)
def write_trajectory_file(x_list, y_list, v_list, t_list, collided_list):
create_controller_output_dir(CONTROLLER_OUTPUT_FOLDER)
file_name = os.path.join(CONTROLLER_OUTPUT_FOLDER, 'trajectory.txt')
with open(file_name, 'w') as trajectory_file:
for i in range(len(x_list)):
trajectory_file.write('%3.3f, %3.3f, %2.3f, %6.3f %r\n' %\
(x_list[i], y_list[i], v_list[i], t_list[i],
collided_list[i]))
def write_collisioncount_file(collided_list):
create_controller_output_dir(CONTROLLER_OUTPUT_FOLDER)
file_name = os.path.join(CONTROLLER_OUTPUT_FOLDER, 'collision_count.txt')
with open(file_name, 'w') as collision_file:
collision_file.write(str(sum(collided_list)))
def make_correction(waypoint,previuos_waypoint,desired_speed):
dx = waypoint[0] - previuos_waypoint[0]
dy = waypoint[1] - previuos_waypoint[1]
if dx < 0:
moveY = -1.5
elif dx > 0:
moveY = 1.5
else:
moveY = 0
if dy < 0:
moveX = 1.5
elif dy > 0:
moveX = -1.5
else:
moveX = 0
waypoint_on_lane = waypoint
waypoint_on_lane[0] += moveX
waypoint_on_lane[1] += moveY
waypoint_on_lane[2] = desired_speed
return waypoint_on_lane
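# Illustrative note (interpretation, not from the original source): make_correction
# applies a fixed 1.5 m lateral offset whose direction depends on the sign of the step
# (waypoint - previuos_waypoint) and overwrites the waypoint's third field with the
# desired speed. For example, moving along +x (dx > 0, dy = 0) gives moveY = 1.5, so a
# waypoint [10.0, 5.0, 0.0] reached from [8.0, 5.0, 0.0] with desired_speed = 5.0
# becomes [10.0, 6.5, 5.0]. Note that the input waypoint list is modified in place and
# also returned.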
def found_nearest_object(position, objects_position, objects_just_associated):
"""
Given the list of object positions, find the index of the object position
nearest to the given position.
Indices that have already been associated are provided in the objects_just_associated list.
"""
THRESHOLD_DISTANCE = 3
min_index = None
min_dist = math.inf
for i, object_position in enumerate(objects_position): #from camera0
x_point, y_point = object_position[0][0], object_position[1][0] # take the x,y data from the world-frame position
dist = np.subtract(position,[x_point, y_point])
norm = np.linalg.norm(dist)
# an association is found
if norm < min_dist and norm < THRESHOLD_DISTANCE and i not in objects_just_associated:
min_dist = norm
min_index = i
return min_index
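# Hedged usage sketch (assumed detection shapes, not from the original source): each
# entry of objects_position is expected to expose x as entry[0][0] and y as entry[1][0]
# (column-like arrays such as those returned by the camera-to-world conversion), e.g.
#
#   detections = [np.array([[12.0], [4.0], [0.0]]), np.array([[30.0], [9.0], [0.0]])]
#   idx = found_nearest_object([11.0, 5.0], detections, [])
#   # idx == 0: the first detection is ~1.4 m away, within THRESHOLD_DISTANCE (3 m)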
def association_vehicle_pedestrian(perfect_data, real_data, real_data_bis, sidewalk=None, sidewalk_bis = None, pedestrian=False):
"""
Associates real (detected) data positions to the available perfect-data agents. The real data are provided by two different cameras.
Args:
perfect_data (list): list of ground-truth agents
real_data (np.ndarray): a 3x1 array with [x;y;z] provided by camera 0.
real_data_bis (np.ndarray): a 3x1 array with [x;y;z] provided by camera bis.
sidewalk (dict): dictionary with items like (index: boolean). The keys are the
indices of pedestrians in the real_data array and the values indicate
whether the pedestrian is on the sidewalk.
sidewalk_bis (dict): dictionary with items like (index: boolean). The keys are the
indices of pedestrians in the real_data_bis array and the values indicate
whether the pedestrian is on the sidewalk.
pedestrian (boolean): if True the analyzed data refers to pedestrians, otherwise to vehicles.
"""
# THRESHOLD_DISTANCE = 2.5
THRESHOLD_SPEED = 0.15
indices_associated = []
data_to_consider = []
indices_associated_bis = []
vehicle_dict = {}
# for each perfect (ground-truth) agent, try to associate it with the detected data
for d in perfect_data:
x, y = d.get_position()
min_index= found_nearest_object([x,y],real_data,indices_associated)
min_index_bis = found_nearest_object([x,y],real_data_bis,indices_associated_bis)
# real object index.
association_index = None
# sidewalk for pedestrian association
sidewalk_to_consider = None
pose = None
#if a perfect object is associated to both real_data and real_data_bis we
# decide to associate it to real_data object
if min_index is None and min_index_bis != None:
association_index = min_index_bis
pose = real_data_bis[association_index]
sidewalk_to_consider = sidewalk_bis
indices_associated_bis.append(min_index_bis)
elif min_index != None:
association_index = min_index
pose = real_data[association_index]
sidewalk_to_consider = sidewalk
indices_associated.append(min_index)
# if an association is found
if association_index is not None:
# pose = real_data[association_index]
position = (pose[0][0],pose[1][0])
#position = d.get_position()
yaw = d.get_orientation()
bb = d.get_bounding_box()
speed = d.get_speed()
id = d.get_id()
if not pedestrian:
vehicle = Agent(id,position,bb,yaw,speed,"Vehicle")
data_to_consider.append(vehicle)
if not SIMULATION_PERFECT:
vehicle_dict[id] = vehicle
else:
# if the detected pedestrian is on the sidewalk and its speed is less than THRESHOLD_SPEED
# no association must be made
if sidewalk_to_consider is not None and not(sidewalk_to_consider[association_index] and speed<THRESHOLD_SPEED):
data_to_consider.append(Agent(id,position,bb,yaw,speed,"Pedestrian"))
return data_to_consider, vehicle_dict
def agent_entering_management(current_agents,last_agents, entering,vehicles_dict = None):
"""
Agents entering management
Args:
current_agents (list): list of all pedestrians (or vehicles) detected in the current frame
last_agents (list): list of all pedestrians (or vehicles) that were considered in the last frame
entering (dict): dictionary where the keys are the ids of entering agents and each value is a list of two fields:
the first indicates the number of frames for which it has been considered an entering agent, while the second contains
the information regarding the specific agent.
"""
agents_to_consider = []
MIN_ENTERING_FRAME = 2
# entering pedestrian
# STEP 1: update entering objects
for current_agent in current_agents: # from each pedestrian in current frame
id = current_agent.get_id()
# this boolean var checks if this pedestrian was already detected in the scene;
# if it is not updated to True in the loop below, this object is
# an entering object
check_existing = False
for last_agent in last_agents:
if id == last_agent.get_id(): # check if it was already detected in the last frame
check_existing = True
# print(f"[ENTERING FUNC] {id} is already seen")
agents_to_consider.append(current_agent)
if vehicles_dict is not None:
vehicles_dict[id] = current_agent
break
# if no match between the current and last frame was found
# then it is an entering object
if not check_existing:
if id in entering:
entering[id][0]+=1
entering[id][1] = current_agent # update location and speed
else:
# print(f"\n[INFUNC] insert {current_agent._type} {id}\n")
entering[id] = [1,current_agent]
# STEP 2: for each entering object check if enough frames have passed since the entering condition
entering_ids = list(entering.keys())
for id in entering_ids:
counter = entering[id][0]
if counter == MIN_ENTERING_FRAME:
agents_to_consider.append( entering[id][1])
if vehicles_dict is not None:
vehicles_dict[id] = entering[id][1]
# there is no need to flag this object as entering object because now it is a real object
del entering[id]
# STEP 3: delete all entering objects that are not detected in the current frame;
# that means they were FALSE POSITIVE objects
for id in entering_ids:
# flag to detect which objects maintain the entering condition
check_entering_condition = False
for current_agent in current_agents:
if id == current_agent.get_id():
check_entering_condition = True
break
if not check_entering_condition:
del entering[id]
return agents_to_consider
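# Hedged example (not from the original source) of how the 'entering' dictionary
# evolves with MIN_ENTERING_FRAME = 2:
#   frame N:   agent 42 detected for the first time -> entering = {42: [1, agent]}
#   frame N+1: agent 42 detected again              -> counter reaches 2, the agent is
#              returned in agents_to_consider and its entry is removed from 'entering'
#   frame N+1 (alternative): agent 42 not detected  -> treated as a false positive and
#              deleted in STEP 3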
def agents_outgoing_managements(current_agents,last_agents, outgoing, vehicle_dict=None):
"""
Agents outgoing management (ghost situation)
Args:
current_agents (list): list of all pedestrians (or vehicles) detected in the current frame
last_agents (list): list of all pedestrians (or vehicles) that were considered in the last frame
outgoing (dict): dictionary where the keys are the ids of ghost agents and each value is a list of two fields:
the first indicates the number of frames for which it has been a ghost, while the second contains
the information regarding the specific agent.
"""
agents_to_consider = []
MAX_GHOST_FRAME = 5
# STEP 1: update ghost object
for last_agent in last_agents:
id = last_agent.get_id()
check_ghost = True
for current_agent in current_agents:
if id == current_agent.get_id():
check_ghost = False
break
# update the number of frames for which this object has been a ghost
if check_ghost:
if id in outgoing:
outgoing[id][0]+=1
else:
outgoing[id] = [1, last_agent]
# remove agents that are no longer ghosts
else:
if id in outgoing:
del outgoing[id]
# STEP 2: check which objects should be removed from the ghost condition
ids_ghost = list(outgoing.keys())
for id in ids_ghost:
if outgoing[id][0] < MAX_GHOST_FRAME:
agent = outgoing[id][1]
agents_to_consider.append(agent)
if vehicle_dict is not None:
vehicle_dict[id]=agent
else:
del outgoing[id] # if MAX_GHOST_FRAME are passed
return agents_to_consider
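# Hedged example (not from the original source) of the 'outgoing' (ghost) handling with
# MAX_GHOST_FRAME = 5: an agent seen in the last frame but missing from the current one
# is kept alive as a ghost while its counter is below 5 (it is still returned in
# agents_to_consider); once the counter reaches 5 its entry is dropped. If the agent is
# detected again before that, its entry is removed from 'outgoing' in STEP 1 and it is
# handled normally.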
def exec_waypoint_nav_demo(args, host, port):
""" Executes waypoint navigation demo.
"""
with make_carla_client(host, port) as client:
print('Carla client connected.')
settings = make_carla_settings(args)
# Now we load these settings into the server. The server replies
# with a scene description containing the available start spots for
# the player. Here we can provide a CarlaSettings object or a
# CarlaSettings.ini file as string.
scene = client.load_settings(settings)
# Refer to the player start folder in the WorldOutliner to see the
# player start information
player_start = args.start
# Notify the server that we want to start the episode at the
# player_start index. This function blocks until the server is ready
# to start the episode.
print('Starting new episode at %r...' % scene.map_name)
client.start_episode(player_start)
#############################################
# Load Configurations
#############################################
# Load configuration file (options.cfg) and then parses for the various
# options. Here we have two main options:
# live_plotting and live_plotting_period, which controls whether
# live plotting is enabled or how often the live plotter updates
# during the simulation run.
config = configparser.ConfigParser()
config.read(os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'options.cfg'))
demo_opt = config['Demo Parameters']
# Get options
enable_live_plot = demo_opt.get('live_plotting', 'true').capitalize()
enable_live_plot = enable_live_plot == 'True'
live_plot_period = float(demo_opt.get('live_plotting_period', 0))
# Set options
live_plot_timer = Timer(live_plot_period)
# Settings Mission Planner
mission_planner = CityTrack("Town01")
#############################################
# Determine simulation average timestep (and total frames)
#############################################
# Ensure at least one frame is used to compute average timestep
num_iterations = ITER_FOR_SIM_TIMESTEP
if (ITER_FOR_SIM_TIMESTEP < 1):
num_iterations = 1
# Gather current data from the CARLA server. This is used to get the
# simulator starting game time. Note that we also need to
# send a command back to the CARLA server because synchronous mode
# is enabled.
measurement_data, sensor_data = client.read_data()
car_extent_x = measurement_data.player_measurements.bounding_box.extent.x
car_extent_y = measurement_data.player_measurements.bounding_box.extent.y
# get traffic light information
traffic_lights = [] #[id, [x,y],yaw]
for agent in measurement_data.non_player_agents:
if agent.HasField("traffic_light"):
traffic_lights.append([agent.id,
agent.traffic_light.transform.location.x,agent.traffic_light. transform.location.y,
agent.traffic_light.transform.rotation.yaw,agent.traffic_light.state])
sim_start_stamp = measurement_data.game_timestamp / 1000.0
# Send a control command to proceed to next iteration.
# This mainly applies for simulations that are in synchronous mode.
send_control_command(client, throttle=0.0, steer=0, brake=1.0)
# Computes the average timestep based on several initial iterations
sim_duration = 0
for i in range(num_iterations):
# Gather current data
measurement_data, sensor_data = client.read_data()
# Send a control command to proceed to next iteration
send_control_command(client, throttle=0.0, steer=0, brake=1.0)
# Last stamp
if i == num_iterations - 1:
sim_duration = measurement_data.game_timestamp / 1000.0 -\
sim_start_stamp
# Outputs average simulation timestep and computes how many frames
# will elapse before the simulation should end based on various
# parameters that we set in the beginning.
SIMULATION_TIME_STEP = sim_duration / float(num_iterations)
print("SERVER SIMULATION STEP APPROXIMATION: " + \
str(SIMULATION_TIME_STEP))
TOTAL_EPISODE_FRAMES = int((TOTAL_RUN_TIME + WAIT_TIME_BEFORE_START) /\
SIMULATION_TIME_STEP) + TOTAL_FRAME_BUFFER
#############################################
# Frame-by-Frame Iteration and Initialization
#############################################
# Store pose history starting from the start position
measurement_data, sensor_data = client.read_data()
start_timestamp = measurement_data.game_timestamp / 1000.0
start_x, start_y, start_z, start_pitch, start_roll, start_yaw = get_current_pose(measurement_data)
send_control_command(client, throttle=0.0, steer=0, brake=1.0)
x_history = [start_x]
y_history = [start_y]
yaw_history = [start_yaw]
time_history = [0]
speed_history = [0]
collided_flag_history = [False] # assume player starts off non-collided
#############################################
# Settings Waypoints
#############################################
starting = scene.player_start_spots[args.start]
destination = scene.player_start_spots[args.dest]
# Starting position is the current position
# (x, y, z, pitch, roll, yaw)
source_pos = [starting.location.x, starting.location.y, starting.location.z]
source_ori = [starting.orientation.x, starting.orientation.y]
source = mission_planner.project_node(source_pos)
# Destination position
destination_pos = [destination.location.x, destination.location.y, destination.location.z]
destination_ori = [destination.orientation.x, destination.orientation.y]
destination = mission_planner.project_node(destination_pos)
waypoints = []
waypoints_route = mission_planner.compute_route(source, source_ori, destination, destination_ori)
desired_speed = DESIRED_SPEED
turn_speed = 2.5
intersection_nodes = mission_planner.get_intersection_nodes()
intersection_pair = []
turn_cooldown = 0
prev_x = False
prev_y = False
# Put waypoints in the lane
previuos_waypoint = mission_planner._map.convert_to_world(waypoints_route[0])
for i in range(1,len(waypoints_route)):
point = waypoints_route[i]
waypoint = mission_planner._map.convert_to_world(point)
current_waypoint = make_correction(waypoint,previuos_waypoint,desired_speed)
dx = current_waypoint[0] - previuos_waypoint[0]
dy = current_waypoint[1] - previuos_waypoint[1]
is_turn = ((prev_x and abs(dy) > 0.1) or (prev_y and abs(dx) > 0.1)) and not(abs(dx) > 0.1 and abs(dy) > 0.1)
prev_x = abs(dx) > 0.1
prev_y = abs(dy) > 0.1
if point in intersection_nodes:
prev_start_intersection = mission_planner._map.convert_to_world(waypoints_route[i-2])
center_intersection = mission_planner._map.convert_to_world(waypoints_route[i])
start_intersection = mission_planner._map.convert_to_world(waypoints_route[i-1])
end_intersection = mission_planner._map.convert_to_world(waypoints_route[i+1])
start_intersection = make_correction(start_intersection,prev_start_intersection,turn_speed)
end_intersection = make_correction(end_intersection,center_intersection,turn_speed)
dx = start_intersection[0] - end_intersection[0]
dy = start_intersection[1] - end_intersection[1]
if abs(dx) > 0 and abs(dy) > 0:
intersection_pair.append((center_intersection,len(waypoints)))
waypoints[-1][2] = turn_speed
middle_point = [(start_intersection[0] + end_intersection[0]) /2, (start_intersection[1] + end_intersection[1]) /2]
centering = 0.75
middle_intersection = [(centering*middle_point[0] + (1-centering)*center_intersection[0]), (centering*middle_point[1] + (1-centering)*center_intersection[1])]
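# The block below fits a circle through three points (the corrected start and end of the
# intersection plus the blended middle point) by writing the general circle equation
# x^2 + y^2 + D*x + E*y + F = 0 as the linear system A*[D, E, F]^T = b; the centre is then
# (-D/2, -E/2), the radius sqrt(D^2/4 + E^2/4 - F), and waypoints are sampled along the
# arc between theta_start and theta_end.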
# Point at intersection:
A = [[start_intersection[0], start_intersection[1], 1],
[end_intersection[0], end_intersection[1], 1],
[middle_intersection[0], middle_intersection[1], 1]]
b = [-start_intersection[0]**2 - start_intersection[1]**2,
-end_intersection[0]**2 - end_intersection[1]**2,
-middle_intersection[0]**2 - middle_intersection[1]**2]
coeffs = np.matmul(np.linalg.inv(A), b)
x = start_intersection[0]
center_x = -coeffs[0]/2
center_y = -coeffs[1]/2
r = sqrt(center_x**2 + center_y**2 - coeffs[2])
theta_start = math.atan2((start_intersection[1] - center_y),(start_intersection[0] - center_x))
theta_end = math.atan2((end_intersection[1] - center_y),(end_intersection[0] - center_x))
theta = theta_start
start_to_end = 1 if theta_start < theta_end else -1
while (start_to_end==1 and theta < theta_end) or (start_to_end==-1 and theta > theta_end):
waypoint_on_lane = [0,0,0]
waypoint_on_lane[0] = center_x + r * cos(theta)
waypoint_on_lane[1] = center_y + r * sin(theta)
waypoint_on_lane[2] = turn_speed
waypoints.append(waypoint_on_lane)
theta += (abs(theta_end - theta_start) * start_to_end) / 10
turn_cooldown = 4
else:
waypoint = mission_planner._map.convert_to_world(point)
if turn_cooldown > 0:
target_speed = turn_speed
turn_cooldown -= 1
else:
target_speed = desired_speed
waypoint_on_lane = make_correction(waypoint,previuos_waypoint,target_speed)
waypoints.append(waypoint_on_lane)
previuos_waypoint = waypoint
waypoints = np.array(waypoints)
print("[MAIN] n waypoints -> ", len(waypoints))
with open("waypoints.txt","w") as f:
for x,y,v in waypoints:
f.writelines(f"{x}, {y}, {v}\n")
#############################################
# Controller 2D Class Declaration
#############################################
# This is where we take the controller2d.py class
# and apply it to the simulator
controller = controller2d.Controller2D(waypoints)
#############################################
# Vehicle Trajectory Live Plotting Setup
#############################################
# Uses the live plotter to generate live feedback during the simulation
# The two feedback includes the trajectory feedback and
# the controller feedback (which includes the speed tracking).
lp_traj = lv.LivePlotter(tk_title="Trajectory Trace")
lp_1d = lv.LivePlotter(tk_title="Controls Feedback")
###
# Add 2D position / trajectory plot
###
trajectory_fig = lp_traj.plot_new_dynamic_2d_figure(
title='Vehicle Trajectory',
figsize=(FIGSIZE_X_INCHES, FIGSIZE_Y_INCHES),
edgecolor="black",
rect=[PLOT_LEFT, PLOT_BOT, PLOT_WIDTH, PLOT_HEIGHT])
trajectory_fig.set_invert_x_axis() # Because UE4 uses left-handed
# coordinate system the X
# axis in the graph is flipped
trajectory_fig.set_axis_equal() # X-Y spacing should be equal in size
# Add waypoint markers
trajectory_fig.add_graph("waypoints", window_size=len(waypoints),
x0=waypoints[:,0], y0=waypoints[:,1],
linestyle="-", marker="", color='g')
# Add trajectory markers
trajectory_fig.add_graph("trajectory", window_size=TOTAL_EPISODE_FRAMES,
x0=[start_x]*TOTAL_EPISODE_FRAMES,
y0=[start_y]*TOTAL_EPISODE_FRAMES,
color=[1, 0.5, 0])
# Add starting position marker
trajectory_fig.add_graph("start_pos", window_size=1,
x0=[start_x], y0=[start_y],
marker=11, color=[1, 0.5, 0],
markertext="Start", marker_text_offset=1)
trajectory_fig.add_graph("obstacles_points",
window_size=8 * (NUM_PEDESTRIANS + NUM_VEHICLES) ,
x0=[0]* (8 * (NUM_PEDESTRIANS + NUM_VEHICLES)),
y0=[0]* (8 * (NUM_PEDESTRIANS + NUM_VEHICLES)),
linestyle="", marker="+", color='b')
nearest_tl = []
tl_dict = {}
# we compute the traffic light filter here because traffic lights are stationary objects.
for i,tl in enumerate(traffic_lights):
# compute distances vector between waypoints and current traffic light
temp = waypoints[:,:2] - tl[1:3]
# compute the norm of each distance vector
dist = np.linalg.norm(temp,axis=1)
# verify if there is at least one traffic light
# along the waypoint trajectory and plot it.
# For each i-th waypoint we consider a circle of
# radius TRAFFIC_LIGHT_DISTANCE centered on the i-th waypoint. If the traffic
# light point falls inside at least one of these circles we consider it.
TRAFFIC_LIGHT_DISTANCE = 10 # experimentally determined
if len(np.where(dist<TRAFFIC_LIGHT_DISTANCE)[0]) > 0:
nearest_tl.append(tl[:-1]) # not interested to store status information here
#get id and status
tl_dict[tl[0]]=tl[-1]
if enable_live_plot:
trajectory_fig.add_graph(f"{tl[0]}",
window_size=1,
x0=[tl[1]], y0=[tl[2]],
marker=11, color=[1, 0.5, 0],
markertext=f"{i}", marker_text_offset=1)
nearest_tl = np.array(nearest_tl)
print("SHAPE:")
print(nearest_tl.shape)
# Add end position marker
trajectory_fig.add_graph("end_pos", window_size=1,
x0=[waypoints[-1, 0]],
y0=[waypoints[-1, 1]],
marker="D", color='r',
markertext="End", marker_text_offset=1)
# Add car marker
trajectory_fig.add_graph("car", window_size=1,
marker="s", color='b', markertext="Car",
marker_text_offset=1)
# Add lead car information
trajectory_fig.add_graph("leadcar", window_size=1,
marker="s", color='g', markertext="Lead Car",
marker_text_offset=1)
# Add lookahead path
trajectory_fig.add_graph("selected_path",
window_size=INTERP_MAX_POINTS_PLOT,
x0=[start_x]*INTERP_MAX_POINTS_PLOT,
y0=[start_y]*INTERP_MAX_POINTS_PLOT,
color=[1, 0.5, 0.0],
linewidth=3)
# Add local path proposals
for i in range(NUM_PATHS):
trajectory_fig.add_graph("local_path " + str(i), window_size=200,
x0=None, y0=None, color=[0.0, 0.0, 1.0])
###
# Add 1D speed profile updater
###
forward_speed_fig =\
lp_1d.plot_new_dynamic_figure(title="Forward Speed (m/s)")
forward_speed_fig.add_graph("forward_speed",
label="forward_speed",
window_size=TOTAL_EPISODE_FRAMES)
forward_speed_fig.add_graph("reference_signal",
label="reference_Signal",
window_size=TOTAL_EPISODE_FRAMES)
# Add throttle signals graph
throttle_fig = lp_1d.plot_new_dynamic_figure(title="Throttle")
throttle_fig.add_graph("throttle",
label="throttle",
window_size=TOTAL_EPISODE_FRAMES)
# Add brake signals graph
brake_fig = lp_1d.plot_new_dynamic_figure(title="Brake")
brake_fig.add_graph("brake",
label="brake",
window_size=TOTAL_EPISODE_FRAMES)
# Add steering signals graph
steer_fig = lp_1d.plot_new_dynamic_figure(title="Steer")
steer_fig.add_graph("steer",
label="steer",
window_size=TOTAL_EPISODE_FRAMES)
# live plotter is disabled, hide windows
if not enable_live_plot:
lp_traj._root.withdraw()
lp_1d._root.withdraw()
#############################################
# Local Planner Variables
#############################################
wp_goal_index = 0
local_waypoints = None
path_validity = np.zeros((NUM_PATHS, 1), dtype=bool)
lp = local_planner.LocalPlanner(NUM_PATHS,
PATH_OFFSET,
CIRCLE_OFFSETS,
CIRCLE_RADII,
PATH_SELECT_WEIGHT,
TIME_GAP,
A_MAX,
SLOW_SPEED,
STOP_LINE_BUFFER)
bp = behavioural_planner.BehaviouralPlanner(BP_LOOKAHEAD_BASE,
LEAD_VEHICLE_LOOKAHEAD,
nearest_tl,
tl_dict)
#############################################
# Scenario Execution Loop
#############################################
# Iterate the frames until the end of the waypoints is reached or
# the TOTAL_EPISODE_FRAMES is reached. The controller simulation then
# outputs the results to the controller output directory.
reached_the_end = False
skip_first_frame = True
# Initialize the current timestamp.
current_timestamp = start_timestamp
# Initialize collision history
prev_collision_vehicles = 0
prev_collision_pedestrians = 0
prev_collision_other = 0
# vehicles_dict = {}
####################################
vehicles_entering = {}
pedestrians_entering = {}
pedestrians_outgoing= {}
vehicles_outgoing = {}
# the aboves data structure are structured in this way:
# entering = {
# id1: [counter, agent_object],
# id2: [counter, agent_object],
# ....
# }
#
# list of last frame ids
pedestrians_last_frame = []
vehicles_last_frame = []
###################################
# DETECTOR
net = load_model()
for frame in range(TOTAL_EPISODE_FRAMES):
# Gather current data from the CARLA server
measurement_data, sensor_data = client.read_data()
# UPDATE HERE the obstacles list
obstacles = []
_vehicles_dict = {}
# Update pose and timestamp
prev_timestamp = current_timestamp
current_x, current_y, current_z, current_pitch, current_roll, current_yaw = \
get_current_pose(measurement_data)
current_speed = measurement_data.player_measurements.forward_speed
current_timestamp = float(measurement_data.game_timestamp) / 1000.0
# Wait for some initial time before starting the demo
if current_timestamp <= WAIT_TIME_BEFORE_START:
send_control_command(client, throttle=0.0, steer=0, brake=1.0)
continue
else:
current_timestamp = current_timestamp - WAIT_TIME_BEFORE_START
# Store history
x_history.append(current_x)
y_history.append(current_y)
yaw_history.append(current_yaw)
speed_history.append(current_speed)
time_history.append(current_timestamp)
# Store collision history
collided_flag,\
prev_collision_vehicles,\
prev_collision_pedestrians,\
prev_collision_other = get_player_collided_flag(measurement_data,
prev_collision_vehicles,
prev_collision_pedestrians,
prev_collision_other)
collided_flag_history.append(collided_flag)
if frame % (LP_FREQUENCY_DIVISOR) == 0:
# update traffic_lights status
###################################
# GET BGR
camera_data = sensor_data.get('CameraRGB', None)
if camera_data is not None:
# to_bgra_array returns an image with 4 channels with last channel all zeros
camera_data = to_bgra_array(camera_data)[:,:,:3]
camera_data = np.copy(camera_data)
camera_data_bis = sensor_data.get("CameraRGBbis", None)
if camera_data_bis is not None:
camera_data_bis = to_bgra_array(camera_data_bis)[:,:,:3]
camera_data_bis = np.copy(camera_data_bis)
#output segmentation
seg_data = sensor_data.get('CameraSemSeg', None)
if seg_data is not None:
seg_data = seg_data.data
seg_data_bis = sensor_data.get('CameraSemSegbis', None)
if seg_data_bis is not None:
seg_data_bis = seg_data_bis.data
#depth camera
depth_data = sensor_data.get('CameraDepth', None)
if depth_data is not None:
depth_data = depth_data.data
depth_data_bis = sensor_data.get('CameraDepthbis', None)
if depth_data_bis is not None:
depth_data_bis = depth_data_bis.data
# print("-"*50)
world_frame_vehicles, world_frame_pedestrians,sidewalk = find_pedestrians_and_vehicles_from_camera(net, camera_data, seg_data, depth_data, current_x, current_y, current_z, current_yaw, camera_parameters)
wfv_bis, wfp_bis, sidewalk_bis = find_pedestrians_and_vehicles_from_camera(net, camera_data_bis, seg_data_bis, depth_data_bis, current_x, current_y, current_z,current_yaw, camera_parameters_bis, True)
# world_frame_vehicles, world_frame_pedestrians,sidewalk = find_pedestrians_and_vehicles_from_camera(net, camera_data, seg_data, depth_data, current_x, current_y, current_yaw, camera_parameters)
# wfv_bis, wfp_bis, sidewalk_bis = find_pedestrians_and_vehicles_from_camera(net, camera_data_bis, seg_data_bis, depth_data_bis, current_x, current_y, current_yaw, camera_parameters_bis, True)
# world_frame_vehicles += wfv_bis
# world_frame_pedestrians += wfp_bis
# for p in world_frame_vehicles:
# print("CAMERA 0 vehicles ", p)
# print()
# for p in wfv_bis:
# print("CAMERA BIS vehicles ", p)
# print()
###############################################
# BELOW CARLA PERFECT DATA
pedestrians = []
vehicles = []
for agent in measurement_data.non_player_agents:
if agent.HasField("traffic_light"):
if agent.id in tl_dict:
tl_dict[agent.id] = agent.traffic_light.state
if agent.HasField("pedestrian"):
location = agent.pedestrian.transform.location
dimensions = agent.pedestrian.bounding_box.extent
orientation = agent.pedestrian.transform.rotation
dist = np.subtract([current_x,current_y], [location.x,location.y])
norm = np.linalg.norm(dist)
# filter only pedestrians that are within a radius of AGENTS_CHECK_RADIUS metres
if norm < AGENTS_CHECK_RADIUS:
bb = obstacle_to_world(location, dimensions, orientation)
# take only the vertices of the pedestrian bb
bb = bb[0:-1:2]
orientation = orientation.yaw*math.pi/180
speed = agent.pedestrian.forward_speed
# print("REAL PED: ", location.x,location.y)
pedestrian = Agent(agent.id,[location.x,location.y],bb,orientation,speed,"Pedestrian")
pedestrians.append(pedestrian)
if agent.id in pedestrians_outgoing:
# print(f"[MAIN] Update position of ghost {agent.id}, {pedestrian}")
# update its data because in the current frame this object can still be occluded
pedestrians_outgoing[agent.id][1] = pedestrian
if agent.HasField("vehicle"):
location = agent.vehicle.transform.location
dimensions = agent.vehicle.bounding_box.extent
orientation = agent.vehicle.transform.rotation
dist = np.subtract([current_x,current_y], [location.x,location.y])
norm = np.linalg.norm(dist)
# filter only vehicles that are within a radius of AGENTS_CHECK_RADIUS metres
if norm < AGENTS_CHECK_RADIUS:
id = agent.id
speed = agent.vehicle.forward_speed
bb = obstacle_to_world(location, dimensions, orientation)
# take only the vertices of the vehicle bb
bb = bb[0:-1:2]
# print("REAL VEHICLE: ", location.x,location.y)
vehicle = Agent(id,[location.x,location.y],bb,orientation.yaw,speed,"Vehicle")
vehicles.append(vehicle)
if id in vehicles_outgoing:
# update its data because in the current frame this object can still be occluded
# print(f"[MAIN] Update position of ghost {id}, {vehicle}")
vehicles_outgoing[id][1] = vehicle
if SIMULATION_PERFECT:
_vehicles_dict[id] = vehicle
#########################################
# here make data association (remember to evaluate it only on x and y)
# input-> world_frame_vehicles, world_frame_pedestrians, sidewalk
# output-> np array of pedestrians
pedestrian_associated,_ = association_vehicle_pedestrian(pedestrians,
world_frame_pedestrians,wfp_bis,sidewalk,sidewalk_bis,True)
vehicles_associated, vehicles_dict = association_vehicle_pedestrian(vehicles,
world_frame_vehicles,wfv_bis)
# pedestrians_to_consider = pedestrian_associated
# vehicles_to_consider = vehicles_associated
pedestrians_to_consider = []
vehicles_to_consider = []
######## entering management
output_p = agent_entering_management(pedestrian_associated,pedestrians_last_frame,pedestrians_entering)
output_v = agent_entering_management(vehicles_associated,vehicles_last_frame,vehicles_entering,vehicles_dict)
pedestrians_to_consider += output_p
vehicles_to_consider += output_v
output_p = agents_outgoing_managements(pedestrians_to_consider,pedestrians_last_frame,pedestrians_outgoing)
output_v = agents_outgoing_managements(vehicles_to_consider,vehicles_last_frame,vehicles_outgoing,vehicles_dict)
pedestrians_to_consider += output_p
vehicles_to_consider += output_v
pedestrians_last_frame = pedestrians_to_consider
vehicles_last_frame = vehicles_to_consider
# last_frame_agents = vehicles_associated + pedestrian_associated
#######
if SIMULATION_PERFECT:
vehicles_dict = _vehicles_dict
if not SIMULATION_PERFECT:
pedestrians = np.array(pedestrians_to_consider)
vehicles = np.array(vehicles_to_consider)
else:
pedestrians = np.array(pedestrians,dtype=object)
vehicles = np.array(vehicles)
# set current info about traffic light (status), pedestrian and vehicle
bp.set_tl_dict(tl_dict)
bp.set_pedestrians(pedestrians)
bp.set_vehicles(vehicles)
bp.set_vehicles_dict(vehicles_dict)
camera_data = sensor_data.get('CameraRGBView', None)
if camera_data is not None:
camera_data = to_bgra_array(camera_data)[:,:,:3]
cv2.imshow("CameraRGB", camera_data)
cv2.waitKey(10)
# Execute the behaviour and local planning in the current instance
# Note that updating the local path during every controller update
# produces issues with the tracking performance (imagine that every time
# the controller tried to follow the path, a new path appeared). For
# this reason, the local planner (LP) will update every X frames,
# stored in the variable LP_FREQUENCY_DIVISOR, which is analogous
# to operating at a frequency that is a fraction of the
# simulation frequency.
if frame % LP_FREQUENCY_DIVISOR == 0:
# Compute open loop speed estimate.
open_loop_speed = lp._velocity_planner.get_open_loop_speed(current_timestamp - prev_timestamp)
# Calculate the goal state set in the local frame for the local planner.
# Current speed should be open loop for the velocity profile generation.
ego_state = [current_x, current_y, current_yaw, open_loop_speed]
# Set lookahead based on current speed.
bp.set_lookahead(BP_LOOKAHEAD_BASE + BP_LOOKAHEAD_TIME * open_loop_speed)
if True:
if WINDOWS_OS:
os.system("cls")
else:
os.system("clear")
print(f"[LOGINFO]: from {args.start} to {args.dest}\t[DESIRED_SPEED]: {DESIRED_SPEED} m/s")
print(f"[WEATHER]: {args.weather}")
print(f"[PEDESTRIANS]: {NUM_PEDESTRIANS}, {SEED_PEDESTRIANS}\t[VEHICLES]: {NUM_VEHICLES}, {SEED_VEHICLES}\n")
# Perform a state transition in the behavioural planner.
bp.transition_state(waypoints, ego_state, current_speed)
states = ["FOLLOW_LANE", "DECELERATE_TO_STOP", "STAY_STOPPED"]
print(f"[CURRENT_STATE]: {states[bp._state]}", end="\t")
print(f"[COLLISION]: {'Yes' if collided_flag else 'No'}")
print(f"[EGO_POS]: ({round(current_x, 2)}, {round(current_y, 2)})", end='\t')
print(f"[EGO_YAW]: {round(current_yaw*180/math.pi, 2)} deg", end='\t')
print(f"[EGO_SPEED]: {round(current_speed,2)} m/s")
print(f"[PEDESTRIAN_COLLISION_PREDICTED]: {'Yes' if bp._pedestrian_detected else 'No'}")
print(f"[VEHICLE_COLLISION_PREDICTED]: {'Yes' if bp._car_collision_predicted else 'No'}")
# print(f"[PED_POS]: (XXX.XX, XXX.XX)", end='\t')
# print(f"[PED_YAW]: X.XX deg", end='\t')
# print(f"[PED_SPEED]: X.XX m/s")
leader = bp._lead_vehicle
if leader is None:
print(f"[LEAD_POS]: (XXX.XX, XXX.XX)", end='\t')
print(f"[LEAD_YAW]: X.XX deg", end='\t')
print(f"[LEAD_SPEED]: X.XX m/s")
else:
leader_pos = leader.get_position()
print(f"[LEAD_POS]: ({round(leader_pos[0], 2)}, {round(leader_pos[1], 2)})", end='\t')
print(f"[LEAD_YAW]: {round(leader.get_orientation(), 2)} deg", end='\t')
print(f"[LEAD_SPEED]: {round(leader.get_speed(), 2)} m/s")
tl = bp._current_traffic_light
if len(tl) != 0:
print(f"[T_LIG_POS]: ({round(tl[1],2)}, {round(tl[2],2)})", end='\t')
print(f"[T_LIG_YAW]: {round(tl[3],2)} deg", end='\t')
statuses = ["GREEN", "YELLOW", "RED"]
print(f"[T_LIG_STATUS]: {statuses[bp._tl_dict[tl[0]]]}")
else:
print(f"[T_LIG_POS]: (XXX.XX, XXX.XX)", end='\t')
print(f"[T_LIG_YAW]: X.XX deg", end='\t')
print(f"[T_LIG_STATUS]: X.XX m/s")
else:
bp.transition_state(waypoints, ego_state, current_speed)
# Compute the goal state set from the behavioural planner's computed goal state.
goal_state_set = lp.get_goal_state_set(bp._goal_index, bp._goal_state, waypoints, ego_state)
# Calculate planned paths in the local frame.
paths, path_validity = lp.plan_paths(goal_state_set)
# Transform those paths back to the global frame.
paths = local_planner.transform_paths(paths, ego_state)
# Perform collision checking.
collision_check_array = lp._collision_checker.collision_check(paths, [])
# Compute the best local path.
best_index = lp._collision_checker.select_best_path_index(paths, collision_check_array, bp._goal_state)
# If no path was feasible, continue to follow the previous best path.
if best_index == None:
best_path = lp._prev_best_path
else:
best_path = paths[best_index]
lp._prev_best_path = best_path
if best_path is not None:
# Compute the velocity profile for the path, and compute the waypoints.
desired_speed = bp._goal_state[2]
decelerate_to_stop = bp._state == behavioural_planner.DECELERATE_TO_STOP
lead_car_state = None
if bp._lead_vehicle is not None:
lead_car_pos = bp._lead_vehicle.get_position()
lead_car_speed = bp._lead_vehicle.get_speed()
lead_car_state = [lead_car_pos[0],lead_car_pos[1],lead_car_speed]
local_waypoints = lp._velocity_planner.compute_velocity_profile(best_path, desired_speed, ego_state, current_speed, decelerate_to_stop, lead_car_state, bp._follow_lead_vehicle)
if local_waypoints != None:
# Update the controller waypoint path with the best local path.
# This controller is similar to that developed in Course 1 of this
# specialization. Linear interpolation computation on the waypoints
# is also used to ensure a fine resolution between points.
wp_distance = [] # distance array
local_waypoints_np = np.array(local_waypoints)
for i in range(1, local_waypoints_np.shape[0]):
wp_distance.append(
np.sqrt((local_waypoints_np[i, 0] - local_waypoints_np[i-1, 0])**2 +
(local_waypoints_np[i, 1] - local_waypoints_np[i-1, 1])**2))
wp_distance.append(0) # last distance is 0 because it is the distance
# from the last waypoint to the last waypoint
# Linearly interpolate between waypoints and store in a list
wp_interp = [] # interpolated values
# (rows = waypoints, columns = [x, y, v])
for i in range(local_waypoints_np.shape[0] - 1):
# Add original waypoint to interpolated waypoints list (and append
# it to the hash table)
wp_interp.append(list(local_waypoints_np[i]))
# Interpolate to the next waypoint. First compute the number of
# points to interpolate based on the desired resolution and
# incrementally add interpolated points until the next waypoint
# is about to be reached.
num_pts_to_interp = int(np.floor(wp_distance[i] /\
float(INTERP_DISTANCE_RES)) - 1)
wp_vector = local_waypoints_np[i+1] - local_waypoints_np[i]
wp_uvector = wp_vector / np.linalg.norm(wp_vector[0:2])
for j in range(num_pts_to_interp):
next_wp_vector = INTERP_DISTANCE_RES * float(j+1) * wp_uvector
wp_interp.append(list(local_waypoints_np[i] + next_wp_vector))
# add last waypoint at the end
wp_interp.append(list(local_waypoints_np[-1]))
# Update the other controller values and controls
controller.update_waypoints(wp_interp)
###
# Controller Update
###
if local_waypoints != None and local_waypoints != []:
controller.update_values(current_x, current_y, current_yaw,
current_speed,
current_timestamp, frame)
controller.update_controls()
cmd_throttle, cmd_steer, cmd_brake = controller.get_commands()
else:
cmd_throttle = 0.0
cmd_steer = 0.0
cmd_brake = 0.0
# Skip the first frame or if there exists no local paths
if skip_first_frame and frame == 0:
pass
elif local_waypoints == None:
pass
else:
# Update live plotter with new feedback
trajectory_fig.roll("trajectory", current_x, current_y)
trajectory_fig.roll("car", current_x, current_y)
if lead_car_state is not None:
current_lead_car_x = lead_car_state[0]
current_lead_car_y = lead_car_state[1]
else:
current_lead_car_x = 0
current_lead_car_y = 0
trajectory_fig.roll("leadcar", current_lead_car_x, current_lead_car_y)
# Load parked car points
obstacles = np.array(obstacles)
if len(obstacles) > 0:
x = obstacles[:,:,0]
y = obstacles[:,:,1]
trajectory_fig.roll("obstacles_points", x, y)
forward_speed_fig.roll("forward_speed",
current_timestamp,
current_speed)
forward_speed_fig.roll("reference_signal",
current_timestamp,
controller._desired_speed)
throttle_fig.roll("throttle", current_timestamp, cmd_throttle)
brake_fig.roll("brake", current_timestamp, cmd_brake)
steer_fig.roll("steer", current_timestamp, cmd_steer)
# Local path plotter update
if frame % LP_FREQUENCY_DIVISOR == 0:
path_counter = 0
for i in range(NUM_PATHS):
# If a path was invalid in the set, there is no path to plot.
if path_validity[i]:
# Colour paths according to collision checking.
if not collision_check_array[path_counter]:
colour = 'r'
elif i == best_index:
colour = 'k'
else:
colour = 'b'
trajectory_fig.update("local_path " + str(i), paths[path_counter][0], paths[path_counter][1], colour)
path_counter += 1
else:
trajectory_fig.update("local_path " + str(i), [ego_state[0]], [ego_state[1]], 'r')
# When plotting lookahead path, only plot a number of points
# (INTERP_MAX_POINTS_PLOT amount of points). This is meant
# to decrease load when live plotting
wp_interp_np = np.array(wp_interp)
path_indices = np.floor(np.linspace(0,
wp_interp_np.shape[0]-1,
INTERP_MAX_POINTS_PLOT))
trajectory_fig.update("selected_path",
wp_interp_np[path_indices.astype(int), 0],
wp_interp_np[path_indices.astype(int), 1],
new_colour=[1, 0.5, 0.0])
# Refresh the live plot based on the refresh rate
# set by the options
if enable_live_plot and \
live_plot_timer.has_exceeded_lap_period():
lp_traj.refresh()
lp_1d.refresh()
live_plot_timer.lap()
# Output controller command to CARLA server
send_control_command(client,
throttle=cmd_throttle,
steer=cmd_steer,
brake=cmd_brake)
# Check whether the end of the waypoints has been reached. If the car is within
# DIST_THRESHOLD_TO_LAST_WAYPOINT to the last waypoint,
# the simulation will end.
dist_to_last_waypoint = np.linalg.norm(np.array([
waypoints[-1][0] - current_x,
waypoints[-1][1] - current_y]))
if dist_to_last_waypoint < DIST_THRESHOLD_TO_LAST_WAYPOINT:
reached_the_end = True
if reached_the_end:
break
# End of demo - Stop vehicle and Store outputs to the controller output
# directory.
if reached_the_end:
print("Reached the end of path. Writing to controller_output...")
else:
print("Exceeded assessment time. Writing to controller_output...")
# Stop the car
send_control_command(client, throttle=0.0, steer=0.0, brake=1.0)
# Store the various outputs
store_trajectory_plot(trajectory_fig.fig, 'trajectory.png')
store_trajectory_plot(forward_speed_fig.fig, 'forward_speed.png')
store_trajectory_plot(throttle_fig.fig, 'throttle_output.png')
store_trajectory_plot(brake_fig.fig, 'brake_output.png')
store_trajectory_plot(steer_fig.fig, 'steer_output.png')
write_trajectory_file(x_history, y_history, speed_history, time_history,
collided_flag_history)
write_collisioncount_file(collided_flag_history)
def main():
"""Main function.
Args:
-v, --verbose: print debug information
-l, --local: use local server
-w, --weather: weather simulation
-s, --start: player start index
-d, --dest: player destination index
-a, --autopilot: enable autopilot
-q, --quality-level: graphics quality level [Low or Epic]
-i, --images-to-disk: save images to disk
-c, --carla-settings: Path to CarlaSettings.ini file
"""
argparser = argparse.ArgumentParser(description=__doc__)
argparser.add_argument(
'-v', '--verbose',
action='store_true',
dest='debug',
help='print debug information')
argparser.add_argument(
'--local', '-l',
action='store_true',
dest = 'local'
)
argparser.add_argument(
'--weather', '-w',
metavar='weather',
type=str,
default=WEATHER,
help='Weather simulation'
)
argparser.add_argument(
'-s', '--start',
metavar='S',
default = PLAYER_START_INDEX,
type=int,
help='Player start index')
argparser.add_argument(
'-d', '--dest',
metavar='D',
default = DESTINATION_INDEX,
type=int,
help='Player destination index')
argparser.add_argument(
'-a', '--autopilot',
action='store_true',
help='enable autopilot')
argparser.add_argument(
'-q', '--quality-level',
choices=['Low', 'Epic'],
type=lambda s: s.title(),
default='Low',
help='graphics quality level.')
argparser.add_argument(
'-c', '--carla-settings',
metavar='PATH',
dest='settings_filepath',
default=None,
help='Path to a "CarlaSettings.ini" file')
args = argparser.parse_args()
# Logging startup info
log_level = logging.DEBUG if args.debug else logging.INFO
logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)
if not args.local:
host = SERVER_HOST; port = SERVER_PORT
else:
host = LOCAL_HOST; port = LOCAL_PORT
#host = "192.168.1.128"; port = 2000
logging.info('listening to server %s:%s', host, port)
args.out_filename_format = '_out/episode_{:0>4d}/{:s}/{:0>6d}'
# Execute when server connection is established
while True:
try:
exec_waypoint_nav_demo(args, host, port)
print('Done.')
return
except TCPConnectionError as error:
logging.error(error)
time.sleep(1)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print('\nCancelled by user. Bye!')
|
AlfonsoCom/AVD_BP
|
main.py
|
main.py
|
py
| 80,582 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32796024261
|
from gevent import monkey
monkey.patch_all()
import gevent
import socket
import re
import dns
import log
LOG = log.get_logger('dns-proxy')
class DNSServer(object):
def __init__(self, host='0.0.0.0', port=53, nameserver='114.114.114.114'):
self.sock = None
self.host = host
self.port = port
self.nameserver = nameserver
self.engine = MatchEngine('./resolv.txt', const={'current': '192.168.199.180'})
def on_query(self, sip, sport, req):
def lookup_remote_nameserver(que):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if s.sendto(dns.pack(que), (self.nameserver, 53)) == 0:
LOG.error('failed to query')
raise Exception('query failed')
_resp = s.recv(2048)
LOG.debug("raw response: {}".format(repr(_resp)))
resp = dns.unpack(_resp)
return resp
# end lookup_remote_nameserver
LOG.debug("raw query: {}".format(repr(req)))
que = dns.unpack(req)
LOG.debug("query: {}".format(que))
host = self.engine.lookup(que.questions[0].qname)
if not host:
# resolve via the remote nameserver.
resp = lookup_remote_nameserver(que)
else:
qh = que.header
qq = que.questions[0]
resp = dns.DNSResponse(
header=dns.DNSHeader(
id=qh.id, qr=1, opcode=qh.opcode,
aa=qh.aa, tc=qh.tc, rd=qh.rd, ra=qh.ra,
rcode=qh.rcode, qdcount=1, ancount=1, nscount=0, arcount=0),
questions=que.questions,
answers=[dns.DNSAnswer(
name=qq.qname, type=1, class_=1, ttl=255,
rdlength=4, rdata=host)])
_resp = dns.pack(resp)
LOG.debug("raw response: {}".format(repr(_resp)))
LOG.debug("response: {}".format(resp))
self.sock.sendto(_resp, (sip, sport))
def serve_forever(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.bind((self.host, self.port))
try:
while True:
msg, (ip, port) = self.sock.recvfrom(2048)
gevent.spawn(self.on_query, ip, port, msg)
except KeyboardInterrupt:
LOG.info("exit.")
finally:
self.sock.close()
class MatchEngine(object):
def _read_rules_from_file(self, f):
_rules = {}
with open(f) as fr:
rules = fr.read().split('\n')[:-1]
for rule in rules:
domain, host = rule.split()
if host[0] == '<' and host[-1] == '>':
host = self._const[host[1:-1]]
_rules[re.compile(domain)] = host
return _rules
def __init__(self, resolv_file, const=None):
self.resolv_file = resolv_file
self._const = const if isinstance(const, dict) else {}
self._rules = self._read_rules_from_file(self.resolv_file)
def lookup(self, domain):
for domain_rule, host in self._rules.items():
if domain_rule.match(domain):
return host
return None
def reload(self):
self._rules = self._read_rules_from_file(self.resolv_file)
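# Hedged example (assumed file contents, not taken from the original repo) of the rule
# format consumed by _read_rules_from_file: one "<regex> <host>" pair per line, where a
# host written as <name> is substituted from the 'const' dict, e.g.
#
#   ^git\.example\.com$ 10.0.0.5
#   .*\.dev\.example\.com <current>
#
# With const={'current': '192.168.199.180'}, the second rule resolves any
# *.dev.example.com query to 192.168.199.180 (the exact qname formatting, e.g. a
# trailing dot, depends on the dns module used here).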
if __name__ == '__main__':
ds = DNSServer()
ds.serve_forever()
|
PeerXu/death-star
|
death_star/dns_proxy.py
|
dns_proxy.py
|
py
| 3,334 |
python
|
en
|
code
| 8 |
github-code
|
6
|
13876656972
|
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.encoders import jsonable_encoder
from pydantic import BaseModel
import boto3
import json
app = FastAPI()
origins = [
"https://ai.galaxychain.zone",
"https://galaxychain.zone",
"http://localhost:3000",
"https://galaxychain.zone",
"https://ai-api.galaxychain.zone",
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# Amazon SageMaker Runtime Client
session = boto3.Session()
sagemaker_runtime = session.client('sagemaker-runtime')
# Sagemaker endpoint name.
endpoint_name = 'sm-endpoint-gpt-j-6b'
class DataIn(BaseModel):
text: str
datas: dict
@app.get("/elb")
async def server_status():
response_body = {
'status': 200,
'data': "Healthy"
}
return jsonable_encoder(response_body)
@app.post('/gen-text')
async def text_generate(ai_params: DataIn):
response_body = {
'status': 100,
'data': ""
}
payload = {
'inputs': "",
'parameters': {
'max_length': 100,
'do_sample': True,
'no_repeat_ngram_size': 2,
'temperature': 0.75,
'top_k': 10,
'top_p': 0.95,
'early_stopping': True,
}
}
user_text = ai_params.dict()['text']
if user_text == "":
response_body['status'] = 400
response_body['data'] = "Please enter text."
return jsonable_encoder(response_body)
tempr_param = float(ai_params.dict()['datas']['randomness'])
if tempr_param < 0.01:
tempr_param = 0.01
top_k_param = int(ai_params.dict()['datas']['fluency'])
if top_k_param < 1:
top_k_param = 1
payload['inputs'] = user_text
payload['parameters']['temperature'] = tempr_param
payload['parameters']['top_k'] = top_k_param
try:
response = sagemaker_runtime.invoke_endpoint(
EndpointName=endpoint_name,
ContentType='application/json',
Body=json.dumps(payload)
)
result = json.loads(response['Body'].read().decode())
raw_text = result[0]['generated_text']
res_text = str(raw_text).replace(user_text, "").replace("\n", " ").replace('"', "")
response_body['status'] = 200
response_body['data'] = res_text
return jsonable_encoder(response_body)
except:
response_body['status'] = 503
response_body['data'] = "AI server is overloaded"
return jsonable_encoder(response_body)
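# Hedged usage sketch (request shape inferred from the DataIn model above, not from the
# original repo's docs): POST /gen-text expects a JSON body such as
#
#   {"text": "Once upon a time", "datas": {"randomness": 0.7, "fluency": 12}}
#
# where 'randomness' is mapped to the temperature parameter (floored at 0.01) and
# 'fluency' to top_k (floored at 1) before the payload is forwarded to the SageMaker
# endpoint.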
|
galaxynetwork/story-ai-supporter
|
app.py
|
app.py
|
py
| 2,629 |
python
|
en
|
code
| 1 |
github-code
|
6
|
21101364933
|
import os
import tempfile
from shutil import rmtree
import pytest
import responses
from faker import Faker
from flask import Response, Flask
from flask.testing import FlaskClient
from statuspage2slack.statuspage_constants import ComponentStatus, \
IncidentStatus, IncidentImpact
fake = Faker()
STATUSPAGE_DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
test_file_path = os.path.realpath(__file__)
test_file_folder = os.path.dirname(test_file_path)
@pytest.fixture
def component_update_request(old_component_status, new_component_status):
creation_datetime = fake.past_datetime()
update_datetime = fake.past_datetime(start_date=creation_datetime)
component_id = fake.bothify(text='????????????')
update_id = fake.bothify(text='????????????')
return {
"component_update": {
"created_at": update_datetime.strftime(STATUSPAGE_DATETIME_FORMAT),
"new_status": new_component_status.value,
"old_status": old_component_status.value,
"id": update_id,
"component_id": component_id
},
"component": {
"created_at": creation_datetime.strftime(
STATUSPAGE_DATETIME_FORMAT),
"id": component_id,
"name": "Some Component",
"status": new_component_status.value
}
}
@pytest.fixture
def incident_update_request(incident_update, incident_impact, incident_status):
creation_datetime = fake.past_datetime()
monitoring_datetime = fake.past_datetime(start_date=creation_datetime)
resolved_datetime = fake.past_datetime(start_date=creation_datetime)
update_datetime = fake.past_datetime(start_date=creation_datetime)
name = fake.sentence(nb_words=6, variable_nb_words=True,
ext_word_list=None)
return {
"incident": {
"backfilled": False,
"created_at":
creation_datetime.strftime(STATUSPAGE_DATETIME_FORMAT),
"impact": incident_impact.value,
"impact_override": None,
"monitoring_at":
monitoring_datetime.strftime(STATUSPAGE_DATETIME_FORMAT),
"resolved_at":
resolved_datetime.strftime(STATUSPAGE_DATETIME_FORMAT),
"shortlink": fake.url(),
"status": incident_status.value,
"updated_at": update_datetime.strftime(STATUSPAGE_DATETIME_FORMAT),
"name": name,
"incident_updates": [incident_update]
}
}
@pytest.fixture()
def incident_update(incident_status):
body = fake.paragraph()
creation_datetime = fake.past_datetime()
display_datetime = fake.past_datetime(start_date=creation_datetime)
update_datetime = fake.past_datetime(start_date=creation_datetime)
return {
"body": body,
"created_at": creation_datetime.strftime(STATUSPAGE_DATETIME_FORMAT),
"display_at": display_datetime.strftime(STATUSPAGE_DATETIME_FORMAT),
"status": incident_status.value,
"updated_at": update_datetime.strftime(STATUSPAGE_DATETIME_FORMAT),
}
@pytest.mark.parametrize("old_component_status", ComponentStatus)
@pytest.mark.parametrize("new_component_status", ComponentStatus)
def test_component_update(flask_client: FlaskClient,
component_update_request, used_templates,
request_mocker: responses.RequestsMock):
response: Response = flask_client.post('/', json=component_update_request)
assert 200 <= response.status_code < 300
assert len(used_templates) == 1
(template, context) = used_templates.pop()
assert template.name == 'component_update.json'
component_update = component_update_request['component_update']
component = component_update_request['component']
assert context['component_update'] == component_update
assert context['component'] == component
@pytest.mark.parametrize("incident_status", IncidentStatus)
@pytest.mark.parametrize("incident_impact", IncidentImpact)
def test_incident_update(flask_client: FlaskClient, incident_update_request,
used_templates):
#request_mocker: responses.RequestsMock):
response: Response = flask_client.post('/', json=incident_update_request)
assert 200 <= response.status_code < 300
assert len(used_templates) == 1
(template, context) = used_templates.pop()
assert template.name == 'incident_update.json'
assert context['incident'] == incident_update_request['incident']
def test_invalid_request(flask_client: FlaskClient):
response: Response = flask_client.post('/', data='dummy')
assert 400 <= response.status_code < 500
@pytest.mark.parametrize("old_component_status",
[ComponentStatus.DEGRADED_PERFORMANCE])
@pytest.mark.parametrize("new_component_status", [ComponentStatus.OPERATIONAL])
@pytest.mark.parametrize("incident_status", [IncidentStatus.MONITORING])
@pytest.mark.parametrize("incident_impact", [IncidentImpact.CRITICAL])
@pytest.mark.parametrize("flag", ['COMPONENT_MESSAGES_ENABLED',
'INCIDENT_MESSAGES_ENABLED'])
def test_false_enabled_flags(flask_app: Flask, flask_client: FlaskClient,
component_update_request, incident_update_request,
used_templates, flag):
flask_app.config.update({
flag: False
})
if flag == 'INCIDENT_MESSAGES_ENABLED':
response: Response = flask_client.post('/',
json=incident_update_request)
elif flag == 'COMPONENT_MESSAGES_ENABLED':
response: Response = flask_client.post('/',
json=component_update_request)
else:
assert False, "Unexpected flag value"
assert 200 <= response.status_code < 300
assert len(used_templates) == 0
@pytest.mark.parametrize("incident_status", [IncidentStatus.MONITORING])
@pytest.mark.parametrize("incident_impact", [IncidentImpact.CRITICAL])
@pytest.mark.parametrize("env_dict", [
{'TEMPLATE_FOLDER': test_file_folder + '/templates'}
])
def test_change_template_folder(change_env, flask_client: FlaskClient,
incident_update_request, used_templates,
request_mocker: responses.RequestsMock,
env_dict):
template_name = 'incident_update.json'
response: Response = flask_client.post('/', json=incident_update_request)
assert 200 <= response.status_code < 300
assert len(used_templates) == 1
(template, context) = used_templates.pop()
assert template.name == template_name
assert os.path.realpath(template.filename) == os.path.realpath(
env_dict['TEMPLATE_FOLDER'] + '/' + template_name)
def test_copy_templates(flask_app: Flask):
runner = flask_app.test_cli_runner()
folder = tempfile.gettempdir() + '/templates/'
rmtree(folder, ignore_errors=True)
result = runner.invoke(args=['webhook', 'copy-templates', folder])
|
Cobliteam/statuspage2slack
|
tests/test_webhook.py
|
test_webhook.py
|
py
| 7,087 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26433388736
|
import pandas as pd
import numpy as np
import json
import os
from pydub import AudioSegment
from scipy.io.wavfile import read
import stft
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import normalize
from sklearn.decomposition import PCA
import random
import signal
import cPickle as pickle
from functools import partial
import multiprocessing as mp
import time
from multiprocessing.pool import Pool
audio_path = '/media/jon/external_data/audio/'
# audio_path = "data/target_species/"
species_list = ["Poecile_atricapillus",
"Poecile_rufescens",
"Regulus_calendula",
"Regulus_satrapa"]
def make_dirs(audio_path):
'''
Read species names from json and create wav file directory for each species
'''
f = []
for (_, _, filename) in os.walk(audio_path + "json_files"):
f.extend(filename)
for name in f:
os.makedirs(audio_path + name + '_wav')
def dir_list(audio_path):
'''
INPUT:
path to data file directories
OUTPUT:
For each directory containing mp3 files, generate a new directory
to recieve wav files. Return a list of tuples containing mp3 and wav
directory paths.
'''
directory_list = []
for (_, _, filename) in os.walk(audio_path + "json_files"):
for name in filename:
input_directory = audio_path + name
output_directory = input_directory + "_wav"
directory_list.append((input_directory, output_directory))
return directory_list
def make_file_list(directory_list):
'''
INPUT:
list of tuples containing input, output directories
OUTPUT:
list of tuples containing input, output file names
'''
file_list = []
for directory in directory_list:
for (_, _, filenames) in os.walk(directory[0]):
for file_id in filenames:
mp3_file = (directory[0] + "/" + file_id)
wav_file = (directory[1] + "/" + file_id[:-3] + "wav")
file_list.append((mp3_file, wav_file))
return file_list
def make_wav_list(directory_list):
'''
INPUT:
list of tuples containing wav file directories
OUTPUT:
list of tuples containing path and file_id for all wav files
'''
wav_list = []
for directory in directory_list:
for (_, _, filenames) in os.walk(directory[1]):
for file_id in filenames:
wav_file = (directory[1] + "/" + file_id)
wav_list.append((wav_file, file_id))
return wav_list
def file_list(path, species):
'''
Create a list of files for further processing
'''
file_list = []
for sp in species:
for (_, _, filenames) in os.walk(path + sp + "_wav/"):
for f in filenames:
file_list.append(path + sp + "_wav/" + f)
return file_list
def make_mono(file_list):
'''
overwrite wav files as mono - other functions will have errors with stereo files
'''
for f in file_list:
sound = AudioSegment.from_wav(f)
sound = sound.set_channels(1)
sound.export(f, format="wav")
def make_spec(file_list):
'''
INPUT:
list of wav file - files will be converted to mono in function
OUTPUT:
dictionary with filename as key, spectrogram as value
'''
spectrograms = {}
for f in file_list:
sound = AudioSegment.from_wav(f)
sound = sound.set_channels(1)
sound.export("temp", format="wav")
a = read("temp")
# arr = np.array(a[1], dtype=float) already np array - don't need to convert
spec = stft.spectrogram(a[1])
spectrograms[f] = spec
return spectrograms
def norm_spec(spectrograms):
'''
INPUT:
dict of file name: spectrogram
OUTPUT:
dict of file name: l2 normalized spectrogram
'''
norm = {}
for k in spectrograms.keys():
norm[k] = normalize(spectrograms[k], norm="l2")
return norm
def whiten(normalized):
'''
INPUT:
dict of file name: spectrogram
OUTPUT:
dict of file name: pca whitened spectrogram
'''
whitened = {}
pca = PCA(n_components=40, copy=False, whiten=True)
for k in normalized.keys():
whitened[k] = pca.fit_transform(normalized[k])
def random_sample(species_files, n=10):
'''
INPUT:
a dict of species, file list pairs
OUTPUT:
a randomly selected list of n files from each species
'''
subset = []
for k, v in species_files:
subset.extend([v[i] for i in random.sample(xrange(len(v)))])
return subset
|
jonathanwoodard/Melospiza
|
source/transform_audio.py
|
transform_audio.py
|
py
| 4,665 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5114460856
|
import numpy as np
from scipy import ndimage
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm as LogNorm
def compare_fields(delta_F, delta_DM, R_sm, pc_meta):
# from Metin 2019
'''
A function for comparing the fields of delta_F and delta_DM with hist2d.
'''
fig = plt.figure(figsize = (10, 7))
bins_f = np.arange(-0.6,0.5, 0.01)
bins_dm = np.arange(0., 8., 0.04)
hist2d, edges_dm, edges_f = np.histogram2d(np.ndarray.flatten(delta_DM.field_data), np.ndarray.flatten(delta_F.field_data),
bins=[bins_dm, bins_f],density=True)
X, Y = np.meshgrid(edges_dm, edges_f, indexing='ij')
plt.pcolormesh(X,Y, hist2d, cmap='Greys',
norm=LogNorm(vmin=2e-3, vmax=100.))
cbar = plt.colorbar()
cbar.set_label('normalized density')
XCon, YCon = np.meshgrid(edges_dm[0:-1]+(edges_dm[1]-edges_dm[0])/2 ,
edges_f[0:-1]+(edges_f[1]-edges_f[1])/2 ,
indexing='ij')
# plt.contour(XCon,YCon, hist2d, levels = 3)
plt.xlabel('$\\delta_{DM}$')
plt.ylabel('$\\delta_{F}$')
plt.title('$\\delta_{DM} - \\delta_{F}$ of '
+ '{}, \nRA: {}, DE: {} '.format(pc_meta['Name'], pc_meta['RA'], pc_meta['DE'])
+ '$R_{sm}$ = ' + str(R_sm))
return fig
def compare_fields_general(field_1, field_2, extent, ncell_1, ncell_2, vmin = 2e-3, vmax = 100, countour = True):
# from Metin 2019
'''
extent = [x_1, y_1, x_2, y_2]
'''
fig = plt.figure(figsize = (10, 10))
x_1, y_1, x_2, y_2 = extent
bins_1 = np.linspace(x_1, x_2, ncell_1)
bins_2 = np.linspace(y_1, y_2, ncell_2)
hist2d, edges_1, edges_2 = np.histogram2d(np.ndarray.flatten(field_1.field_data), np.ndarray.flatten(field_2.field_data),
bins=[bins_1, bins_2],density=True)
X, Y = np.meshgrid(edges_1, edges_2, indexing='ij')
plt.pcolormesh(X,Y, hist2d, cmap='Greys',
norm=LogNorm(vmin=vmin, vmax=vmax))
cbar = plt.colorbar()
cbar.set_label('normalized density')
XCon, YCon = np.meshgrid(edges_1[0:-1]+(edges_1[1]-edges_1[0])/2 ,
edges_2[0:-1]+(edges_2[1]-edges_2[1])/2 ,
indexing='ij')
if countour:
plt.contour(XCon,YCon, hist2d, levels = 5)
return fig
def sphere_kernel(radius, normalize = True):
size = int(radius)*2+1
grid = np.array(np.meshgrid(np.arange(size), np.arange(size), np.arange(size)))
kernel = ((grid - int(radius))**2).sum(axis=0) < int(radius)**2
if normalize:
kernel = kernel / kernel.sum()
return kernel
|
pointeee/preheat2022_public
|
misc_func.py
|
misc_func.py
|
py
| 2,753 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33195061509
|
import numpy as np
import cv2
import copy
from time import sleep
import datetime
# from progress.bar import Bar
def Rodar(cam):
capture = cv2.VideoCapture(cam)
background_subtractor = cv2.bgsegm.createBackgroundSubtractorMOG()
#length = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
# bar = Bar('Processing Frames', max=length)
first_iteration_indicator = 1
while True:
sleep(1/60)
ret, frame = capture.read()
# If first frame
if first_iteration_indicator == 1:
first_frame = copy.deepcopy(frame)
height, width = frame.shape[:2]
accum_image = np.zeros((height, width), np.uint8)
first_iteration_indicator = 0
else:
filter = background_subtractor.apply(frame) # remove the background
threshold = 2
maxValue = 2
ret, th1 = cv2.threshold(filter, threshold, maxValue, cv2.THRESH_BINARY)
# add to the accumulated image
accum_image = cv2.add(accum_image, th1)
color_image_video = cv2.applyColorMap(accum_image, cv2.COLORMAP_SUMMER)
video_frame = cv2.addWeighted(frame, 0.7, color_image_video, 0.7, 0)
color_image = cv2.applyColorMap(accum_image, cv2.COLORMAP_HOT)
result_overlay = cv2.addWeighted(frame, 0.7, color_image, 0.7, 0)
relatorio = datetime.datetime.now();
data = "{:02d}-{:02d}-{:02d}".format(relatorio.day, relatorio.month, relatorio.replace(year=20).year)
if relatorio.hour == 22 and relatorio.minute == 29 and relatorio.second == 1:
cv2.imwrite("static/reports/report_" + data + ".jpg", result_overlay)
#cv2.imshow("Video Original" , result_overlay)
ret, jpeg = cv2.imencode('.jpg', result_overlay)
send_frame = jpeg.tobytes()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + send_frame + b'\r\n\r\n')
# bar.next()
# bar.finish()
# save the final heatmap
# cv2.imwrite('diff-overlay.jpg', result_overlay)
# cleanup
#capture.release()
#cv2.destroyAllWindows()
|
rnanc/MOBYDATA
|
services/motion_heatmap.py
|
motion_heatmap.py
|
py
| 2,177 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19686502833
|
import sys
import time
from datetime import datetime
from textwrap import dedent
import requests
import telegram
from environs import Env
from loguru import logger
from telegram import ParseMode
def send_telegram_message(chat_id: int, bot: telegram.Bot, telegram_message: str) -> None:
bot.send_message(chat_id=chat_id, text=telegram_message, parse_mode=ParseMode.HTML)
logger.debug(f'Сообщение {telegram_message} отправлено в чат Телеграмма')
def generate_telegram_message(response_json: dict) -> str:
emojy = {
'ufo': '👾',
'true': '✅',
'false': '❌',
}
new_attempts = response_json['new_attempts'][0]
is_negative = f'{emojy["false"]}Работа не выполнена{emojy["false"]}' \
if new_attempts['is_negative'] is True \
else f'{emojy["true"]}Работа сдана{emojy["true"]}'
telegram_message = dedent(f"""
{emojy["ufo"]}<b>{new_attempts['lesson_title']}</b>{emojy["ufo"]}
{is_negative}
{new_attempts['lesson_url']}
""")
return telegram_message
def get_new_checks(devman_api_token: str, bot: telegram.Bot, chat_id: int, timeout: int = 300) -> None:
timestamp = datetime.now().timestamp()
headers = {'Authorization': f'Token {devman_api_token}'}
params = {'timestamp': timestamp}
reconnect_time = 0.1
while True:
url = f'https://dvmn.org/api/long_polling/'
try:
response = requests.get(url, headers=headers, params=params, timeout=timeout)
response.raise_for_status()
checked_tasks = response.json()
logger.debug(checked_tasks)
if checked_tasks.get('status') == 'found':
telegram_message = generate_telegram_message(checked_tasks)
send_telegram_message(chat_id, bot, telegram_message)
timestamp = checked_tasks.get('timestamp_to_request') or checked_tasks.get('last_attempt_timestamp')
params = {'timestamp': timestamp}
reconnect_time = 0.1
except requests.exceptions.ReadTimeout as error:
logger.warning(f'Таймаут запроса отработал раньше чем сервер ответил: {error}. Делаем повторный запрос.')
params = {'timestamp': timestamp}
continue
except requests.exceptions.ConnectionError:
time.sleep(reconnect_time)
reconnect_time *= 2
logger.warning(f'Потеря соединения. Повторный запрос через {reconnect_time} секунд')
continue
except requests.exceptions.HTTPError as http_error:
time.sleep(reconnect_time)
reconnect_time *= 2
logger.warning(f'Запрос вернул ответ {http_error.response}. Повторное подключение через {reconnect_time}')
continue
def main():
env = Env()
env.read_env()
devman_api_token = env.str('DEVMAN_TOKEN_API')
telegram_api_key = env.str('TELEGRAM_API_KEY')
telegram_chat_id = env.int('TELEGRAM_CHAT_ID')
bot = telegram.Bot(token=telegram_api_key)
logger_level = 'DEBUG' if env.bool('DEBUG_MODE', False) else 'INFO'
logger.level(logger_level)
logger.add(sys.stdout, format='{time} {level} {message}')
send_telegram_message(telegram_chat_id, bot, telegram_message='Бот запущен')
while True:
try:
get_new_checks(devman_api_token, bot, telegram_chat_id)
except Exception as exception:
telegram_message = dedent(
f"""
Бот упал с ошибкой:
{exception}""",
)
send_telegram_message(telegram_chat_id, bot, telegram_message)
if __name__ == '__main__':
main()
|
wepeqoor1/check_success_request
|
check_request.py
|
check_request.py
|
py
| 3,916 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19154810876
|
from numpy import *
from time import sleep
import json
import urllib2
# 数据导入函数
def loadDataSet(fileName): # 打开一个含有分隔符的文本文件
numFeat = len(open(fileName).readline().split('\t')) - 1 # 获得特征数,减1是因为最后一列是因变量
dataMat = []
labelMat = []
fr = open(fileName)
for line in fr.readlines():
lineArr = []
curLine = line.strip().split('\t')
for i in range(numFeat):
lineArr.append(float(curLine[i])) # 将每个数字读入lineArr
dataMat.append(lineArr) # 将每个样本读入dataMat
labelMat.append(float(curLine[-1])) # curLine最后一个元素读入labelMat
return dataMat, labelMat
# 标准回归函数:正规方程(Normal Equation)计算最佳拟合直线
def standRegres(xArr, yArr):
xMat = mat(xArr)
yMat = mat(yArr).T # 转置为列向量
xTx = xMat.T * xMat
if linalg.det(xTx) == 0.0: # 计算xTx的行列式
print("This matrix is singular, cannot do inverse") # 这是奇异阵,不可逆
return # xTx是奇异阵,无法计算
ws = xTx.I * (xMat.T * yMat) # .I是求逆;计算得到回归系数
return ws
# 局部加权线性回归函数:此处使用高斯核,k是高斯核中的参数;与testPoint越近,权重会越大
# 与kNN一样,该加权模型认为样本点距离越近,越可能符合同一个线性模型
# 注意区分此处的权重weights和回归系数ws,回归系数的计算中加入了权重
def lwlr(testPoint, xArr, yArr, k=1.0):
xMat = mat(xArr)
yMat = mat(yArr).T # 转置为列向量
m = shape(xMat)[0] # 样本个数
weights = mat(eye((m))) # m阶对角权重矩阵
for j in range(m): # 下面两行创建权重矩阵
diffMat = testPoint - xMat[j, :]
weights[j, j] = exp(diffMat * diffMat.T / (-2.0 * k ** 2))
xTx = xMat.T * (weights * xMat)
if linalg.det(xTx) == 0.0: # 如果xTx的行列式为0
print("This matrix is singular, cannot do inverse") # 这是奇异阵,不可逆
return # xTx是奇异阵,无法计算
ws = xTx.I * (xMat.T * (weights * yMat))
return testPoint * ws
def lwlrTest(testArr, xArr, yArr, k=1.0): # 遍历数据点,尝试对每个点都适用lwlr,这有助于求解k的大小
m = shape(testArr)[0] # 样本数
yHat = zeros(m) # 预测值
for i in range(m):
yHat[i] = lwlr(testArr[i], xArr, yArr, k)
return yHat
def lwlrTestPlot(xArr, yArr, k=1.0): # 与lwlrTest唯一的不同是先对X排序
yHat = zeros(shape(yArr)) # 对画图更容易
xCopy = mat(xArr)
xCopy.sort(0)
for i in range(shape(xArr)[0]):
yHat[i] = lwlr(xCopy[i], xArr, yArr, k)
return yHat, xCopy
def rssError(yArr, yHatArr): # 需要yArr和yHatArr都是数组
return ((yArr - yHatArr) ** 2).sum() # 最小二乘法计算代价函数
# 岭回归
def ridgeRegres(xMat, yMat, lam=0.2): # lam是单位矩阵前的系数;lambda是Python关键字,此处使用lam代替
xTx = xMat.T * xMat
denom = xTx + eye(shape(xMat)[1]) * lam
if linalg.det(denom) == 0.0:
print("This matrix is singular, cannot do inverse") # 如果lam是0,denom仍是奇异阵,无法计算
return
ws = denom.I * (xMat.T * yMat)
return ws
def ridgeTest(xArr, yArr): # 用一组lambda测试结果
xMat = mat(xArr)
yMat = mat(yArr).T # 转置为列向量
yMean = mean(yMat, 0) # 每列求平均值
yMat = yMat - yMean
# 对特征做标准化处理
xMeans = mean(xMat, 0) # 每列求平均值
xVar = var(xMat, 0) # 每列求方差
xMat = (xMat - xMeans) / xVar # 标准化计算
numTestPts = 30 # 在30个不同的lambda下调用ridgeRegres
wMat = zeros((numTestPts, shape(xMat)[1]))
for i in range(numTestPts):
ws = ridgeRegres(xMat, yMat, exp(i - 10))
wMat[i, :] = ws.T
return wMat
def regularize(xMat): # 标准化处理
inMat = xMat.copy() # 必须使用copy,否则得到索引
inMeans = mean(inMat, 0) # 计算平均值
inVar = var(inMat, 0) # 计算方差
inMat = (inMat - inMeans) / inVar # 标准化
return inMat
# 前向逐步线性回归:与lasso做法相近但计算简单
def stageWise(xArr, yArr, eps=0.01, numIt=100): # eps是每次迭代需要调整的步长;numIt表示迭代次数
xMat = mat(xArr)
yMat = mat(yArr).T # 转置为列向量
yMean = mean(yMat, 0)
yMat = yMat - yMean # 也可以使ys标准化,但会减小相关系数
xMat = regularize(xMat)
m, n = shape(xMat)
returnMat = zeros((numIt, n)) # 每次迭代都打印w向量,用于分析算法执行的过程和效果
ws = zeros((n, 1))
wsTest = ws.copy() # 必须使用.copy(),否则得到的是ws的索引
wsMax = ws.copy()
for i in range(numIt): # 贪心算法,每一步尽可能减小误差
lowestError = inf # 无穷大infinity
for j in range(n): # 对于每个特征
for sign in [-1, 1]:
wsTest = ws.copy()
wsTest[j] += eps * sign
yTest = xMat * wsTest
rssE = rssError(yMat.A, yTest.A) # 计算平方误差
if rssE < lowestError: # 比较,取最小误差
lowestError = rssE
wsMax = wsTest # 最小误差时的ws
ws = wsMax.copy()
returnMat[i, :] = ws.T
return returnMat
# 购物信息的获取函数
def searchForSet(retX, retY, setNum, yr, numPce, origPrc):
sleep(10)
myAPIstr = 'AIzaSyD2cR2KFyx12hXu6PFU-wrWot3NXvko8vY'
searchURL = 'https://www.googleapis.com/shopping/search/v1/public/products?key=%s&country=US&q=lego+%d&alt=json' % (
myAPIstr, setNum)
pg = urllib2.urlopen(searchURL)
retDict = json.loads(pg.read())
for i in range(len(retDict['items'])):
try:
currItem = retDict['items'][i]
if currItem['product']['condition'] == 'new':
newFlag = 1
else:
newFlag = 0
listOfInv = currItem['product']['inventories']
for item in listOfInv:
sellingPrice = item['price']
if sellingPrice > origPrc * 0.5:
print("%d\t%d\t%d\t%f\t%f" % (yr, numPce, newFlag, origPrc, sellingPrice))
retX.append([yr, numPce, newFlag, origPrc])
retY.append(sellingPrice)
except:
print('problem with item %d' % i)
def setDataCollect():
scrapePage('setHtml/lego8288.html', 'out.txt', 2006, 800, 49.99)
scrapePage('setHtml/lego10030.html', 'out.txt', 2002, 3096, 269.99)
scrapePage('setHtml/lego10179.html', 'out.txt', 2007, 5195, 499.99)
scrapePage('setHtml/lego10181.html', 'out.txt', 2007, 3428, 199.99)
scrapePage('setHtml/lego10189.html', 'out.txt', 2008, 5922, 299.99)
scrapePage('setHtml/lego10196.html', 'out.txt', 2009, 3263, 249.99)
# 交叉验证测试岭回归
def crossValidation(xArr, yArr, numVal=10): # numVal是交叉验证的次数
m = len(yArr) # 样本个数
indexList = range(m) # [1,2,...,m]
errorMat = zeros((numVal, 30)) # 误差矩阵,numVal行30列
for i in range(numVal):
trainX = [] # 训练集容器
trainY = []
testX = [] # 测试集容器
testY = []
random.shuffle(indexList) # 对indexList进行混洗
for j in range(m): # 以indexList前90%的值建立训练集
if j < m * 0.9:
trainX.append(xArr[indexList[j]])
trainY.append(yArr[indexList[j]])
else: # 剩下10%作为测试集
testX.append(xArr[indexList[j]])
testY.append(yArr[indexList[j]])
wMat = ridgeTest(trainX, trainY) # 从ridgeRegression得到30个回归系数
for k in range(30): # ridgeTest()使用30个不同的lambda创建了30组不同的回归系数
matTestX = mat(testX)
matTrainX = mat(trainX)
meanTrain = mean(matTrainX, 0)
varTrain = var(matTrainX, 0)
matTestX = (matTestX - meanTrain) / varTrain # 训练集标准化
yEst = matTestX * mat(wMat[k, :]).T + mean(trainY)
errorMat[i, k] = rssError(yEst.T.A, array(testY))
meanErrors = mean(errorMat, 0) # 按列计算30组回归系数的平均误差
minMean = float(min(meanErrors))
bestWeights = wMat[nonzero(meanErrors == minMean)] # nonzero获得索引,找到最优回归系数
# 建立模型可不标准化
# 当标准化 Xreg = (x-meanX)/var(x)
# 或不标准化: x*w/var(x) - meanX/var(x) +meanY
xMat = mat(xArr)
yMat = mat(yArr).T
meanX = mean(xMat, 0)
varX = var(xMat, 0)
# 岭回归使用了数据标准化,而standRegres()没有,为了将上述比较可视化还需将数据还原
unReg = bestWeights / varX
print("the best model from Ridge Regression is:\n", unReg)
print("with constant term: ", -1 * sum(multiply(meanX, unReg)) + mean(yMat))
|
yhshu/Machine-Learning-in-Action
|
Ch08-LinearRegression/regression.py
|
regression.py
|
py
| 9,114 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
32726100359
|
from collections import deque
from pathlib import Path
import random
from PIL import Image, ImageTk
from tkinter import Tk, Label
from typing import Callable, Optional, Sequence
from abc import ABC, abstractmethod
import numpy as np
import torch
import gym
from collections import namedtuple
from ..game.play import (Problem)
EpisodeData = namedtuple('EpisodeData', "obs reward done info".split()) # type: ignore
class GymProblem(Problem):
def __init__(self, gym: gym.Env, seed: int = 0) -> None:
self._gym = gym
self.action_space = gym.action_space
self.observation_space = gym.observation_space
self.reward_range = gym.reward_range
self._episode_n = 0
self._episode_data = None
self._gym.seed(seed)
self.reset()
def reward(self):
return self._episode_data.reward
def observation(self):
return self._episode_data.obs
def done(self):
return self._episode_data.done
def reset(self):
obs = self._gym.reset()
self._episode_data = EpisodeData(obs, 0, False, dict())
return obs
def step(self, a):
x = self._gym.step(a)
self._episode_data = EpisodeData(*x)
return x
def render(self, *a, **kw):
self._gym.render(*a, **kw)
def episode_reset(self, episode_n):
self._episode_n = episode_n
return self.reset()
def __getattr__(self, a):
return getattr(self._gym, a)
# copy of the un-exported method from collections.abc._check_methods
def _check_methods(C, *methods):
mro = C.__mro__
for method in methods:
for B in mro:
if method in B.__dict__:
if B.__dict__[method] is None:
return NotImplemented
break
else:
return NotImplemented
return True
class RenderIO(ABC):
@abstractmethod
def write(self, pil: Image) -> None:
pass
@abstractmethod
def close(self) -> None:
pass
@classmethod
def __subclasshook__(cls, subclass):
return _check_methods(subclass, "out", "close")
class RenderShow(RenderIO):
def __init__(self, tk = Tk):
self.tk = tk()
def write(self, pil: Image):
img = ImageTk.PhotoImage(pil)
panel = Label(self.tk, image = img)
panel.pack(side = "bottom", fill = "both", expand = "yes")
self.tk.update_idletasks()
self.tk.update()
def close(self):
self.tk.destroy()
class RenderSave(RenderIO):
def __init__(self, img_save_dir: Path = Path("rewards")) -> None:
self.img_save_dir = img_save_dir
self.count = 0
def _img_path(self):
return self.img_save_dir / "render_{:%04d}.png".format(self.count)
def write(self, pil: Image, count: Optional[int] = None):
count = count or self.count
pil.save(str(self._img_path))
self.count += 1
def close(self):
self.count = 0
class GymImgEnv(Problem):
def __init__(self, args, renderio: Callable[[], RenderIO] = RenderSave) -> None:
self.device = args.device
# self.ale = atari_py.ALEInterface()
# self.ale.setInt('random_seed', args.seed)
# self.ale.setInt('max_num_frames', args.max_episode_length)
# self.ale.setFloat('repeat_action_probability', 0) # Disable sticky actions
# self.ale.setInt('frame_skip', 0)
# self.ale.setBool('color_averaging', False)
# self.ale.loadROM(atari_py.get_game_path(args.game)) # ROM loading must be done after setting options
self.ale = gym.make(args.game + "-v0")
actions = self.ale.getMinimalActionSet()
self.actions = dict((i, e) for i, e in zip(range(len(actions)), actions))
self.lives = 0 # Life counter (used in DeepMind training)
self.life_termination = False # Used to check if resetting only from loss of life
self.window = args.history_length # Number of frames to concatenate
self.state_buffer = deque(
[], maxlen=args.history_length
) # type: Sequence
self.training = True # Consistent with model training mode
self.renderio = renderio()
def _get_state(self):
state = Image.fromarray(
self.ale.getScreenGrayscale().squeeze()
).resize((84, 84), resample=Image.BILINEAR)
return torch.tensor(np.asarray(state),
dtype=torch.float32, device=self.device).div_(255)
def _reset_buffer(self):
for _ in range(self.window):
self.state_buffer.append(torch.zeros(84, 84, device=self.device))
def reset(self):
if self.life_termination:
self.life_termination = False # Reset flag
self.ale.act(0) # Use a no-op after loss of life
else:
# Reset internals
self._reset_buffer()
self.ale.reset_game()
# Perform up to 30 random no-ops before starting
for _ in range(random.randrange(30)):
self.ale.act(0) # Assumes raw action 0 is always no-op
if self.ale.game_over():
self.ale.reset_game()
# Process and return "initial" state
observation = self._get_state()
self.state_buffer.append(observation)
self.lives = self.ale.lives()
return torch.stack(list(self.state_buffer), 0)
def step(self, action):
# Repeat action 4 times, max pool over last 2 frames
frame_buffer = torch.zeros(2, 84, 84, device=self.device)
reward, done = 0, False
for t in range(4):
reward += self.ale.act(self.actions.get(action))
if t == 2:
frame_buffer[0] = self._get_state()
elif t == 3:
frame_buffer[1] = self._get_state()
done = self.ale.game_over()
if done:
break
observation = frame_buffer.max(0)[0]
self.state_buffer.append(observation)
# Detect loss of life as terminal in training mode
if self.training:
lives = self.ale.lives()
# Lives > 0 for Q*bert
if lives < self.lives and lives > 0:
# Only set flag when not truly done
self.life_termination = not done
done = True
self.lives = lives
# Return state, reward, done
return torch.stack(list(self.state_buffer), 0), reward, done
# Uses loss of life as terminal signal
def train(self):
self.training = True
# Uses standard terminal signal
def eval(self):
self.training = False
def action_space(self):
return len(self.actions)
def render(self):
pil = Image.fromarray(self.ale.getScreenRGB()[:, :, ::-1])
self.renderio.write(pil)
def close(self):
self.renderio.close()
|
wecacuee/floyd-warshal-rl
|
fwrl/prob/gym.py
|
gym.py
|
py
| 6,947 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37460236670
|
import tkinter as tk
import pandas as pd
import os
from PathManager import locationManager as lm
def error_popup(msg):
"""Super simple pop-up to indicate an error has occured."""
popup = tk.Tk()
popup.wm_title("!")
label = tk.Label(popup, text=msg)
label.pack(side="top", fill="x", pady=10)
B1 = tk.Button(popup, text="Okay", command = popup.destroy)
B1.pack()
popup.mainloop()
class textLog:
def __init__(self):
self.log = pd.DataFrame(columns=[''])
self.lm = lm()
self.current_loc = self.lm.get_current_loc()
def append(self, description, location):
"""Adds row to log. Not commited until WRITE is run"""
self.log = self.log.append(pd.DataFrame({'Description':description, 'Location':location}))
def WRITE(self):
self.log.to_csv(os.path.join(self.current_location, 'REFERENCE_ERROR.csv'), index=False)
|
Hamza-crypto/QuickBooks-importer-script-python
|
ErrorLogging.py
|
ErrorLogging.py
|
py
| 902 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19626071979
|
# coding: utf-8
from sqlalchemy import Column, DateTime, Integer, String, text
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from base import db_url
Base = declarative_base()
metadata = Base.metadata
db = create_engine(db_url)
session_maker = sessionmaker(bind=db)
class Case(Base):
__tablename__ = 'case'
ID = Column(Integer, primary_key=True)
NAME = Column(String(20))
SCHEMA = Column(String(100))
KEYWORDS = Column(String(100))
RESPONSE = Column(String(500))
TOTAL_COUNT = Column(Integer)
COMMENTS = Column(String(50))
# 把字典变成对象
def __init__(self, **kwargs):
self.NAME = kwargs.get('name')
self.SCHEMA = kwargs.get('schema')
self.KEYWORDS = kwargs.get('keywords')
self.RESPONSE = kwargs.get('response')
self.TOTAL_COUNT = kwargs.get('total_count')
self.COMMENTS = kwargs.get('comments')
# 把对象变成字典
def __str__(self):
return {'id': self.ID,
'name': self.NAME,
'schema': self.SCHEMA,
'keywords': self.KEYWORDS,
'total_count': self.TOTAL_COUNT,
'comments': self.COMMENTS}
class Task(Base):
__tablename__ = 'task'
ID = Column(Integer, primary_key=True)
NAME = Column(String(20))
TEAM = Column(String(10))
PLATFORM = Column(String(20))
CASES = Column(String(100), nullable=False)
COMMENTS = Column(String(50))
def __init__(self, obj):
self.NAME = obj.get('name')
self.TEAM = obj.get('team')
self.PLATFORM = obj.get('platform')
self.CASES = obj.get('cases')
self.COMMENTS = obj.get('comments')
def __str__(self):
return {'id': self.ID,
'name': self.NAME,
'team': self.TEAM,
'platform': self.PLATFORM,
'cases': self.CASES,
'comments': self.COMMENTS}
class TroubledLog(Base):
__tablename__ = 'troubled_log'
ID = Column(Integer, primary_key=True)
TASK_ID = Column(Integer)
TASK_NAME = Column(String(20))
STATE = Column(String(10))
CREATE_TIME = Column(DateTime, server_default=text("CURRENT_TIMESTAMP"))
LOG_SIZE = Column(Integer)
OFFSET = Column(Integer)
def __init__(self, **kwargs):
self.TASK_ID = kwargs.get('task_id')
self.TASK_NAME = kwargs.get('task_name')
self.STATE = kwargs.get('state')
self.LOG_SIZE = kwargs.get('log_size')
self.OFFSET = kwargs.get('offset')
def __str__(self):
return {'id': self.ID,
'taskId': self.TASK_ID,
'taskName': self.TASK_NAME,
'state': self.STATE,
'createTime': self.CREATE_TIME,
'logSize': self.LOG_SIZE,
'offset': self.OFFSET}
class TroubledLogDetail(Base):
__tablename__ = 'troubled_log_detail'
ID = Column(Integer, primary_key=True)
LOG_ID = Column(Integer)
CASE_ID = Column(Integer)
CASE_NAME = Column(String(20))
TROUBLED_STRATEGY = Column(String(20))
TROUBLED_RESPONSE = Column(String)
STATE = Column(String(20))
IS_CRASH = Column(String(5))
CRASH_LOG = Column(String(500))
SCREEN_SHOT = Column(String)
CREATE_TIME = Column(DateTime, server_default=text("CURRENT_TIMESTAMP"))
def __init__(self, **kwargs):
self.LOG_ID = kwargs.get('log_id')
self.CASE_ID = kwargs.get('case_id')
self.CASE_NAME = kwargs.get('case_name')
self.TROUBLED_STRATEGY = kwargs.get('troubled_strategy')
self.TROUBLED_RESPONSE = kwargs.get('troubled_response')
self.STATE = kwargs.get('state')
self.IS_CRASH = kwargs.get('is_crash')
self.CRASH_LOG = kwargs.get('crash_log')
self.SCREEN_SHOT = kwargs.get('screen_shot')
def __str__(self):
return {'id': self.ID,
'logId': self.LOG_ID,
'caseId': self.CASE_ID,
'caseName': self.CASE_NAME,
'troubledStrategy': self.TROUBLED_STRATEGY,
'troubledResponse': self.TROUBLED_RESPONSE,
'isCrash': self.IS_CRASH,
'crashLog': self.CRASH_LOG,
'screenShot': self.SCREEN_SHOT,
'createTime': self.CREATE_TIME}
|
natsuforyou/troubledmonkey
|
models.py
|
models.py
|
py
| 4,401 |
python
|
en
|
code
| 3 |
github-code
|
6
|
43535916514
|
import argparse
import sys
import logging
from mutagene.profiles.profile import calc_profile
logger = logging.getLogger(__name__)
genome_error_message = """requires genome name argument -g hg19, hg38, mm10, see http://hgdownload.cse.ucsc.edu/downloads.html for more
Use mutagene fetch to download genome assemblies"""
class ProfileMenu(object):
def __init__(self, parser):
parser.add_argument("--infile", "-i", nargs='*', help="Input file format", type=argparse.FileType('r'))
parser.add_argument('--outfile', "-o", nargs='?', type=argparse.FileType('w'), default=sys.stdout,
help="Name of output file, will be generated in TSV format")
parser.add_argument('--genome', "-g", help="Location of genome assembly file", type=str)
parser.add_argument('--input-format', "-f", help="Input format: auto, MAF, VCF", type=str, default='auto')
# for backwards compatibility with 0.8.X add a hidden action that would just take anything as a valid input
parser.add_argument('action', nargs="?", metavar="")
def callback(self, args):
# print('ProfileMenu', args.action)
self.calculate(args)
def calculate(self, args):
# print("Calculating...")
if not args.infile:
logger.warning("Provide input file in VCF or MAF format (-i) and a corresponding genome assembly (-g)")
return
if not args.genome:
logger.warning(genome_error_message)
return
calc_profile(args.infile, args.outfile, args.genome, args.input_format)
|
neksa/mutagene
|
mutagene/cli/profile_menu.py
|
profile_menu.py
|
py
| 1,614 |
python
|
en
|
code
| 3 |
github-code
|
6
|
2533690932
|
from typing import List, Optional
import filters as f
from iota import Address
from iota.commands import FilterCommand, RequestFilter
from iota.commands.core.find_transactions import FindTransactionsCommand
from iota.commands.core.were_addresses_spent_from import \
WereAddressesSpentFromCommand
from iota.crypto.addresses import AddressGenerator
from iota.crypto.types import Seed
from iota.filters import SecurityLevel, Trytes
import asyncio
__all__ = [
'GetNewAddressesCommand',
]
class GetNewAddressesCommand(FilterCommand):
"""
Executes ``getNewAddresses`` extended API command.
See :py:meth:`iota.api.Iota.get_new_addresses` for more info.
"""
command = 'getNewAddresses'
def get_request_filter(self):
return GetNewAddressesRequestFilter()
def get_response_filter(self):
pass
async def _execute(self, request: dict) -> dict:
checksum: bool = request['checksum']
count: Optional[int] = request['count']
index: int = request['index']
security_level: int = request['securityLevel']
seed: Seed = request['seed']
return {
'addresses':
await self._find_addresses(
seed,
index,
count,
security_level,
checksum,
),
}
async def _find_addresses(
self,
seed: Seed,
index: int,
count: Optional[int],
security_level: int,
checksum: bool
) -> List[Address]:
"""
Find addresses matching the command parameters.
"""
generator = AddressGenerator(seed, security_level, checksum)
if count is None:
# Connect to Tangle and find the first unused address.
for addy in generator.create_iterator(start=index):
# We use addy.address here because the commands do
# not work on an address with a checksum
# Execute two checks concurrently
responses = await asyncio.gather(
WereAddressesSpentFromCommand(self.adapter)(
addresses=[addy.address],
),
FindTransactionsCommand(self.adapter)(
addresses=[addy.address],
),
)
# responses[0] -> was it spent from?
# responses[1] -> any transaction found?
if responses[0]['states'][0] or responses[1].get('hashes'):
continue
return [addy]
return generator.get_addresses(start=index, count=count)
class GetNewAddressesRequestFilter(RequestFilter):
def __init__(self) -> None:
super(GetNewAddressesRequestFilter, self).__init__(
{
# Everything except ``seed`` is optional.
'checksum': f.Type(bool) | f.Optional(default=False),
'count': f.Type(int) | f.Min(1),
'index': f.Type(int) | f.Min(0) | f.Optional(default=0),
'securityLevel': SecurityLevel,
'seed': f.Required | Trytes(Seed),
},
allow_missing_keys={
'checksum',
'count',
'index',
'securityLevel',
},
)
|
iotaledger/iota.py
|
iota/commands/extended/get_new_addresses.py
|
get_new_addresses.py
|
py
| 3,422 |
python
|
en
|
code
| 344 |
github-code
|
6
|
70270071869
|
from sqlalchemy import Column
from sqlalchemy import Integer, String
from sqlalchemy.orm import relationship
from app.models.base import Base
class User(Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
username = Column(String, nullable=False)
email = Column(String, nullable=False, unique=True, index=True)
lists = relationship('List', back_populates='owner', cascade='all, delete-orphan')
def get_api_repr(self, include_email=False):
api_repr = {
"id": self.id,
"username": self.username,
}
if include_email:
api_repr['email'] = self.email
return api_repr
|
cyber-chuvash/todolist-API
|
app/models/user.py
|
user.py
|
py
| 678 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32506800793
|
#!/usr/bin/env python3
"""
Read API data directly via internet and output to pipe
"""
import json
import logging
import time
import requests
from .. import defs
from . import net
from .order import ApiOrder, API_CHANNEL_SSE_NAME
from .pkt import BlocksatPkt, BlocksatPktHandler
logger = logging.getLogger(__name__)
MAX_SEQ_NUM = 2**31 # Maximum transmission sequence number
DEFAULT_REGIONS = list(range(0, len(defs.satellites)))
class DemoRx():
"""Demo receiver
"""
def __init__(self,
server,
socks,
kbps,
tx_event,
channel,
regions=None,
tls_cert=None,
tls_key=None,
poll=False,
sock_by_region=False):
""" DemoRx Constructor
Args:
server : API server address where the order lives.
socks : Instances of UdpSock over which to send the packets
kbps : Target bit rate in kbps.
tx_event : SSE event to use as trigger for transmissions.
channel : API channel number.
regions : Regions to process and potentially confirm Tx.
tls_key : API client key (for Tx confirmation).
tls_cer : API client certificate (for Tx confirmation).
poll : Poll messages directly from the Satellite API queue
instead of listening to server-sent events.
sock_by_region : Map each UdpSock to a region so that each socket
serves messages on a single region only. Requires the socks
parameter to have the same length as the regions parameter.
"""
# Validate args
assert (isinstance(socks, list))
assert (all([isinstance(x, net.UdpSock) for x in socks]))
# Configs
self.server = server
self.socks = socks
self.kbps = kbps
self.tx_event = tx_event
self.channel = channel
self.regions_list = DEFAULT_REGIONS if not regions else regions
self.regions_set = set(self.regions_list)
self.tls_cert = tls_cert
self.tls_key = tls_key
self.poll = poll
self.admin = tls_cert is not None and tls_key is not None
if sock_by_region and len(self.regions_list) != len(socks):
raise ValueError(
"Number of sockets must be equal to the number of regions")
self.sock_by_region = sock_by_region
def _send_pkts(self, pkts, socks):
"""Transmit Blocksat packets of the API message over all sockets
Transmit and sleep (i.e., block) to guarantee the target bit rate.
Args:
pkts : List of BlocksatPkt objects to be send over sockets
socks : List of sockets over which to send packets.
"""
assert (isinstance(pkts, list))
assert (all([isinstance(x, BlocksatPkt) for x in pkts]))
byte_rate = self.kbps * 1e3 / 8 # bytes / sec
next_tx = time.time()
for i, pkt in enumerate(pkts):
# Send the same packet on all sockets
for sock in socks:
sock.send(pkt.pack())
logger.debug("Send packet %d - %d bytes" % (i, len(pkt)))
# Throttle
if (byte_rate > 0):
tx_delay = len(pkt) / byte_rate
next_tx += tx_delay
sleep = next_tx - time.time()
if (sleep > 0):
time.sleep(sleep)
def _handle_event(self, event_data):
"""Handle event broadcast by the SSE server
Args:
event_data (dict): Event data.
"""
order = json.loads(event_data)
logger.debug("Order: " + json.dumps(order, indent=4, sort_keys=True))
# Proceed when the event matches the target Tx trigger event
if (order["status"] != self.tx_event):
return
self._handle_order(order)
def _handle_order(self, order_info):
"""Fetch the order data and send it over UDP
Args:
order_info (dict): Dictionary with the order's Tx sequence number
and message size.
"""
# The 'regions' field of the order info has different contents in
# polling and SSE mode. In SSE mode, it contains the missing regions
# for transmission, whereas, in polling mode (reading from
# /order/:uuid), it contains all the original regions, regardless of
# whether or not the transmission is pending. Nevertheless, when
# operating in polling mode as admin (fetching from
# /admin/order/:uuid), the order info includes the "tx_confirmations"
# field, which can be used to adjust the regions field such that it
# contains the missing regions only.
order_regions = set(order_info['regions'])
if 'tx_confirmations' in order_info:
confirmed_tx_regions = set(order_info['tx_confirmations'])
order_regions = order_regions - confirmed_tx_regions
# Ensure the order includes a region covered by this instance
served_regions = order_regions & self.regions_set
if (served_regions == set()):
logger.debug("Demo-Rx region(s) not covered by this order")
return
seq_num = order_info["tx_seq_num"]
logger.info("Message %-5d\tSize: %d bytes\t" %
(seq_num, order_info["message_size"]))
# Get the API message data
order = ApiOrder(self.server,
seq_num=seq_num,
tls_cert=self.tls_cert,
tls_key=self.tls_key)
data = order.get_data()
if (data is None):
logger.debug("Empty message. Skipping...")
return
# Define the sockets over which the order should be transmitted
tx_socks = []
if self.sock_by_region:
for region, sock in zip(self.regions_list, self.socks):
if region in order_regions:
tx_socks.append(sock)
else:
tx_socks = self.socks
# Split API message data into Blocksat packet(s)
tx_handler = BlocksatPktHandler()
tx_handler.split(data, seq_num, self.channel)
pkts = tx_handler.get_frags(seq_num)
if (self.kbps > 0):
logger.debug("Transmission is going to take: "
"{:g} sec".format(len(data) * 8 / (self.kbps * 1e3)))
# Send the packet(s)
self._send_pkts(pkts, tx_socks)
# Send transmission confirmation to the server
order.confirm_tx(list(served_regions))
def run_sse_client(self):
"""Server-sent Events (SSE) Client"""
logger.info("Connecting with Satellite API server...")
sleep = False
while (True):
try:
if sleep:
time.sleep(2)
sleep = False
sse_channel = API_CHANNEL_SSE_NAME[self.channel]
endpoint = '/admin/subscribe/' if self.admin else '/subscribe/'
r = requests.get(self.server + f"{endpoint}{sse_channel}",
stream=True,
cert=(self.tls_cert, self.tls_key))
r.raise_for_status()
logger.info("Connected. Waiting for events...\n")
# Continuously wait for events
event_line = 'event:' + sse_channel
event_next = False
for line in r.iter_lines():
if not line:
continue
dec_line = line.decode()
if dec_line.startswith(':'): # comment to be ignored
continue
logger.debug(line)
if dec_line.startswith(event_line):
event_next = True
continue
if event_next and dec_line.startswith('data:'):
self._handle_event(dec_line.replace('data:', ''))
event_next = False
except requests.exceptions.HTTPError as e:
logger.error(e)
break
except requests.exceptions.ChunkedEncodingError as e:
logger.debug(e)
pass
except requests.exceptions.ConnectionError as e:
logger.debug(e)
sleep = True
pass
except requests.exceptions.RequestException as e:
logger.debug(e)
sleep = True
pass
except KeyboardInterrupt:
exit()
logger.info("Reconnecting...")
def run_poll_client(self):
"""Polling-based client"""
order_mgr = ApiOrder(self.server,
tls_cert=self.tls_cert,
tls_key=self.tls_key)
tx_set = set()
while (True):
try:
tx_orders = order_mgr.get_orders(['transmitting'],
self.channel,
queue='transmitting')
# There can only be one order in transmitting state at a time
if len(tx_orders) > 1:
logger.warning("More than one order in transmitting "
"state on channel {}".format(self.channel))
# Filter out any repeated orders (already transmitted), except
# for those the server is explicitly retransmitting.
new_orders = list()
for order_info in tx_orders:
is_retransmission = 'retransmission' in order_info and \
order_info['retransmission'] is not None and \
'retry_count' in order_info['retransmission']
tx_attempt = 0 if not is_retransmission else \
order_info['retransmission']['retry_count']
order_id = "{}-{}".format(order_info['tx_seq_num'],
tx_attempt)
if order_id not in tx_set:
tx_set.add(order_id)
new_orders.append(order_info)
if new_orders:
for order_info in new_orders:
logger.debug(
"Order: " +
json.dumps(order_info, indent=4, sort_keys=True))
self._handle_order(order_info)
else:
time.sleep(1)
except requests.exceptions.ConnectionError as e:
logger.debug(e)
time.sleep(1)
pass
except KeyboardInterrupt:
exit()
def run(self):
"""Run the demo-rx transmission loop"""
if self.poll:
self.run_poll_client()
else:
self.run_sse_client()
|
Blockstream/satellite
|
blocksatcli/api/demorx.py
|
demorx.py
|
py
| 11,171 |
python
|
en
|
code
| 949 |
github-code
|
6
|
72298298427
|
import pickle
from flask import Flask, request, jsonify
import numpy as np
# Load model and dv
with open("dv.pkl", "rb") as f_in:
dv = pickle.load(f_in)
with open("rf_model.pkl", "rb") as f_in:
model = pickle.load(f_in)
# instantiate
app = Flask('atx_housing_prediction')
# set path: /predict
@app.route('/predict', methods=['POST']) # HTTP Request: Post
def predict():
# Get data
data = request.get_json()
# Extract features
X = dv.transform([data])
# Make prediction
y_pred = np.expm1(model.predict(X)[0]) # predict and convert back from log1p
return jsonify({'price': float(y_pred)}) # cast
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0', port=9696)
|
michaelfronda/ATXHousePrice
|
predict.py
|
predict.py
|
py
| 733 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21091581358
|
import sys
import time
import yaml
from watchdog.observers import Observer
from watchdog.events import *
import ftplib
config_file = "client.yml"
def get_config(index):
with open(config_file) as f:
return yaml.load(f, Loader=yaml.FullLoader)[index]
class MyHandler(FileSystemEventHandler):
def on_modified(self, event):
files = get_config("files")
if event.src_path.replace("./", "") in files:
print("log file %s changed!" % event.src_path)
session = ftplib.FTP(get_config("server_ip"), get_config("user_name"), get_config("password"))
file = open(event.src_path, "rb")
session.storbinary(f"STOR {event.src_path}", file)
if __name__ == "__main__":
print("Monitored Files: " + str(get_config("files")))
event_handler = MyHandler()
observer = Observer()
observer.schedule(event_handler, path='.', recursive=False)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
|
yifan-ivan/FileSynchronizer
|
client.py
|
client.py
|
py
| 1,061 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10793618381
|
"""Code to interface with the SMA inverters and return the results."""
# Robust initialization and shutdown code courtesy of
# https://github.com/wbenny/python-graceful-shutdown.git
import logging
import sys
import os
import asyncio
import aiohttp
from delayedints import DelayedKeyboardInterrupt
from pvsite import Site
import version
import logfiles
from readconfig import read_config
from exceptions import FailedInitialization
_LOGGER = logging.getLogger("sbhistory")
class SBHistory:
class NormalCompletion(Exception):
pass
class FailedInitialization(Exception):
pass
def __init__(self, config):
self._config = config
self._loop = asyncio.new_event_loop()
self._session = None
self._site = None
def run(self):
try:
try:
with DelayedKeyboardInterrupt():
self._start()
except KeyboardInterrupt:
_LOGGER.critical("Received KeyboardInterrupt during startup")
raise
self._wait()
raise SBHistory.NormalCompletion
except (KeyboardInterrupt, SBHistory.NormalCompletion, SBHistory.FailedInitialization):
# The _stop() is also shielded from termination.
try:
with DelayedKeyboardInterrupt():
self._stop()
except KeyboardInterrupt:
_LOGGER.critical("Received KeyboardInterrupt during shutdown")
async def _astart(self):
self._session = aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl=False))
self._site = Site(self._session, self._config)
result = await self._site.start()
if not result:
raise SBHistory.FailedInitialization
async def _astop(self):
_LOGGER.info("Closing sbhistory application")
if self._site:
await self._site.stop()
if self._session:
await self._session.close()
async def _await(self):
await self._site.run()
def _start(self):
self._loop.run_until_complete(self._astart())
def _wait(self):
self._loop.run_until_complete(self._await())
def _stop(self):
self._loop.run_until_complete(self._astop())
def main():
"""Set up and start sbhistory."""
try:
config = read_config(checking=False)
except FailedInitialization as e:
print(f"{e}")
return
logfiles.start(config)
_LOGGER.info(f"sbhistory inverter utility {version.get_version()}, PID is {os.getpid()}")
try:
sbhistory = SBHistory(read_config(checking=True))
sbhistory.run()
except FailedInitialization as e:
_LOGGER.error(f"{e}")
except Exception as e:
_LOGGER.error(f"Unexpected exception: {e}")
if __name__ == '__main__':
if sys.version_info[0] >= 3 and sys.version_info[1] >= 8:
main()
else:
print("python 3.8 or better required")
|
sillygoose/sbhistory
|
sbhistory/sbhistory.py
|
sbhistory.py
|
py
| 2,975 |
python
|
en
|
code
| 2 |
github-code
|
6
|
28462595619
|
import os
import sys
from distutils.core import setup
from distutils.core import Extension
# detect python version
version = []
if hasattr(sys.version_info, 'major'):
version.append(sys.version_info.major)
version.append(sys.version_info.minor)
else:
version = sys.version_info[0:2]
# detect boost_python library name
pattern = 'ld -o /dev/null --allow-shlib-undefined -lXXX > /dev/null 2>&1'
boost_python = 'boost_python%d%d' % (version[0], version[1])
if os.system(pattern.replace('XXX', boost_python)) != 0:
boost_python = 'boost_python-py%d%d' % (version[0], version[1])
if os.system(pattern.replace('XXX', boost_python)) != 0:
boost_python = 'boost_python-%d.%d' % (version[0], version[1])
if os.system(pattern.replace('XXX', boost_python)) != 0:
boost_python = 'boost_python-%d.%d' % (version[0], version[1])
if os.system(pattern.replace('XXX', boost_python)) != 0:
print('can\'t find boost_python library')
sys.exit(1)
print('checking boost_python library name: ' + boost_python)
# initialize setup
setup(name='mcache',
version='1.0.5',
description='Python wrapper around libmcache - memcache client library',
author='Michal Bukovsky',
author_email='[email protected]',
url='http://cml.kancelar.seznam.cz/email',
ext_modules=[Extension('mcache',
['mcache.cc'],
libraries=[boost_python,
'boost_system',
'boost_thread',
'mcache',
'z'],
extra_compile_args=['-W',
'-Wall',
'-Wextra',
'-Wconversion',
'-std=c++14'])])
|
seznam/mcache-client
|
python/setup.py
|
setup.py
|
py
| 2,009 |
python
|
en
|
code
| 4 |
github-code
|
6
|
10865376818
|
import logging
import json
import datetime
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.gaussian_process import GaussianProcessRegressor
from aggregating.utils import flatten_X, generate_train_set, memory_efficient_predict
from stochastic_models import MaxCallStochasticModel
### general MPI helpers
def generate_logger_MPI(logfile, level,rank):
"""
generate logger for MPI
:param logfile: relative path to file
:type logfile: str
:param level: logging level (info,debug,..)
:type level: logging.level
:param rank: the rank of the process for which to create a logger
:return: logger
:rtype: logging.logger
"""
logging.basicConfig(filename=logfile, level=level,
format='%(asctime)s.%(msecs)03d %(levelname)s %(name)s - %(funcName)s : %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',)
logger = logging.getLogger("rank%i" % rank )
return logger
def write_results(basename,results,Config):
res_dict = {'N_train': Config.N_train, 'N_test': Config.N_test,'mgrid': Config.M_grid, 'alpha_grid': Config.alpha_grid, 'errors': results}
with open("logs/" + basename + f'{str(datetime.datetime.now().strftime("%Y-%m-%d.%H-%M-%S"))}.json', 'w') as fp:
json.dump(res_dict, fp)
def write_boosting_results(basename, results, Config):
res_dict = {'N_train': Config.N_train, 'N_test': Config.N_test,'Ratios': Config.Ratios, 'test_errors': results}
with open("logs/" + basename + f'{str(datetime.datetime.now().strftime("%Y-%m-%d.%H-%M-%S"))}.json', 'w') as fp:
json.dump(res_dict, fp)
#### Bagging MPI helpers
def generate_bagging_train_indices(N_train,alpha,M):
"""
creates indices that represent M trainingsets, used for bagging (replacement within and in between the sets)
:param N: trainset len
:type X: int
:param alpha: fractional size of each trainset
:type alpha: float
:param M: number of trainsets to create
:type M: int
:return: list of indices that represent the M trainsets
:rtype: List[np.ndarray(M*alpha)]
"""
n = round(N_train*alpha)
indices_list = []
for i in range(M):
indices = np.random.choice(N_train,size=n,replace=True)
indices_list.append(indices)
return indices_list
def generate_test_sets(trials, N_test,Delta, d,generator=MaxCallStochasticModel):
"""
generate #trials test sets of given dimensions using the util func in aggregating
:return: X_test_lists, y_test_list of specified dimensions; stacked into a single numpy array (trials, N,Delta*d / 1)
"""
X_test_list = []
y_test_list = []
for _ in range(trials):
X_test,y_test = generate_train_set(N_test,Delta,d,generator)
X_test_list.append(X_test)
y_test_list.append(y_test)
return np.stack(X_test_list,axis=0), np.stack(y_test_list,axis=0)
def train_and_evaluate(model, X_train, y_train, X_test_list):
"""
trains a gpr on the trainset and then performs inference on the test sets
:param model: the model to train
:type model: sklearn GPR
:param X_train: Train datapoints
:type X_train: [type]
:param y_train: labels
:type y_train: [type]
:param Y_test_list: test sets
:type Y_test_list: list of numpy arrays
:return: predictions, sigma for each of the X_test sets
:rtype: List of tuples of numpy arrays
"""
assert isinstance(model, GaussianProcessRegressor)
## train
model.fit(X_train,y_train)
## evaluate
result_list = []
for x_test in X_test_list:
mu, sigma = memory_efficient_predict(model,x_test,max_size=20000)
result_list.append((mu,sigma))
return result_list
def soft_prediction(predictor_lists,epsilon = 1e-10):
"""
creates the soft prediction of the bagging ensemble using the individual predictions
:param predictor_lists: the individual predictions & sigmas
:type predictor_lists: [[mu_i, sigma_i]]
:return: single list with predictions
:rtype: List of np.ndarray for each of the predicted sets
"""
predictions = np.zeros(predictor_lists[0][0].shape[0])
sigmas = np.zeros(predictor_lists[0][0].shape[0])
for predictor_list in predictor_lists:
mu,sigma = predictor_list
mu = mu.flatten()
predictions = predictions + ( mu / (sigma + epsilon))
sigmas = sigmas + (1/ (sigma + epsilon))
predictions = predictions / sigmas
return predictions
def hard_prediction(predictor_lists):
"""
creates the hard prediction of the bagging ensemble using the individual predictions
:param predictor_lists: the individual predictions & sigmas
:type predictor_lists: [[mu_i, sigma_i]]
:return: single list with predictions
:rtype: List of np.ndarray for each of the predicted sets
"""
predictions = np.zeros(predictor_lists[0][0].shape[0])
npredictors = len(predictor_lists)
for predictor_list in predictor_lists:
mu,sigma = predictor_list
mu = mu.flatten()
predictions = predictions + mu
predictions = predictions / npredictors
return predictions
def trials_soft_prediction(predictors_results,trials):
"""
gets the predictions for a list of the evaluations for each predictor in an ensemble, for a number of trials
:param predictors_results: [[(mu_predictor_i,trial_j;sigma_predictor_i,trial_j) for j in range(trials)] for i in range(M)]
:type predictors_results: [type]
:param trials: # trials
:type trials: [type]
"""
prediction_list = []
for trial in range(trials):
predictions = [predictor[trial] for predictor in predictors_results]
prediction = soft_prediction(predictions)
prediction_list.append(prediction)
return prediction_list
def trials_hard_prediction(predictors_results,trials):
"""
gets the HARD predictions for a list of the evaluations for each predictor in an ensemble, for a number of trials
:param predictors_results: [[(mu_predictor_i,trial_j;sigma_predictor_i,trial_j) for j in range(trials)] for i in range(M)]
:type predictors_results: [type]
:param trials: # trials
:type trials: [type]
"""
prediction_list = []
for trial in range(trials):
predictions = [predictor[trial] for predictor in predictors_results]
prediction = hard_prediction(predictions)
prediction_list.append(prediction)
return prediction_list
|
tlpss/ML-Project2
|
mpi/utils.py
|
utils.py
|
py
| 6,471 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37824702150
|
#do slice
# %%
L = list(range(100))
n = L[0:10]
print(n)
# %%
n = L[:10:2]
print(n)
# %%
n = L[::5]
print(n)
# %%
def trim(s):
if s == '':
return s
while s[0] == ' ':
s = s[1:]
if s == '':
return s
while s[-1] == ' ':
s = s[:-1]
return s
# 测试:
if trim('hello ') != 'hello':
print('测试失败!')
elif trim(' hello') != 'hello':
print('测试失败!')
elif trim(' hello ') != 'hello':
print('测试失败!')
elif trim(' hello world ') != 'hello world':
print('测试失败!')
elif trim('') != '':
print('测试失败!')
elif trim(' ') != '':
print('测试失败!')
else:
print('测试成功!')
# %%
#Iteration
def findMinAndMax(L):
if L == []:
return (None, None)
min = max = L[0]
for i in L:
if i < min:
min = i
if i > max:
max = i
return (min, max)
# 测试
if findMinAndMax([]) != (None, None):
print('测试失败!')
elif findMinAndMax([7]) != (7, 7):
print('测试失败!')
elif findMinAndMax([7, 1]) != (1, 7):
print('测试失败!')
elif findMinAndMax([7, 1, 3, 9, 5]) != (1, 9):
print('测试失败!')
else:
print('测试成功!')
# %%
print(list(range(20)))
# %%
[x * x for x in range(1, 11)]
# %%
[x * x for x in range(1, 11) if x % 2 == 0]
# %%
[m + n for m in 'ABC' for n in 'XYZ']
# %%
L1 = ['Hello', 'World', 18, 'Apple', None]
L2 = [s.lower() for s in L1 if isinstance(s, str)]
# Tests:
print(L2)
if L2 == ['hello', 'world', 'apple']:
    print('Test passed!')
else:
    print('Test failed!')
# %%
g = (x * x for x in range(10))
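# Illustrative follow-up (added for clarity): a generator expression is lazy,
# so values only appear when it is iterated, e.g. with next() or a for loop.
print(next(g))  # 0
print(next(g))  # 1
for n in g:     # consumes the remaining squares 4, 9, ..., 81
    print(n)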
# %%
def triangles(max):
results = [1]
yield results
while True:
results = [v+w for v, w in zip([0]+results, results+[0])]
yield results
results = []
for i, row in enumerate(triangles(10)):
    if i > 9:
        break
    print(i, row)
    results.append(row)
# [1]
# [1, 1]
# [1, 2, 1]
# [1, 3, 3, 1]
# [1, 4, 6, 4, 1]
# [1, 5, 10, 10, 5, 1]
# [1, 6, 15, 20, 15, 6, 1]
# [1, 7, 21, 35, 35, 21, 7, 1]
# [1, 8, 28, 56, 70, 56, 28, 8, 1]
# [1, 9, 36, 84, 126, 126, 84, 36, 9, 1]
if results == [
[1],
[1, 1],
[1, 2, 1],
[1, 3, 3, 1],
[1, 4, 6, 4, 1],
[1, 5, 10, 10, 5, 1],
[1, 6, 15, 20, 15, 6, 1],
[1, 7, 21, 35, 35, 21, 7, 1],
[1, 8, 28, 56, 70, 56, 28, 8, 1],
[1, 9, 36, 84, 126, 126, 84, 36, 9, 1]
]:
    print('Test passed!')
else:
    print('Test failed!')
# %%
def f2(a, b, c=0, **kw):
x = f3(a)
print('a =', a, 'x =', x)
return x
# %%
def f3(test):
best = test + 2
return best
f2(3, 1)
# %%
|
AlaiaS/Python-Learning
|
Features.py
|
Features.py
|
py
| 2,610 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72532105789
|
import json
from http import HTTPStatus
from typing import Any, Literal
import httpx
from pydantic import BaseModel, Field
from simcore_service_api_server.utils.http_calls_capture_processing import (
PathDescription,
enhance_from_openapi_spec,
)
class HttpApiCallCaptureModel(BaseModel):
"""
Captures relevant information of a call to the http api
"""
name: str
description: str
method: Literal["GET", "PUT", "POST", "PATCH", "DELETE"]
host: str
path: PathDescription | str
query: str | None = None
request_payload: dict[str, Any] | None = None
response_body: dict[str, Any] | list | None = None
status_code: HTTPStatus = Field(default=HTTPStatus.OK)
@classmethod
def create_from_response(
cls,
response: httpx.Response,
name: str,
description: str = "",
enhance_from_openapi_specs: bool = True,
) -> "HttpApiCallCaptureModel":
request = response.request
path: PathDescription | str
if enhance_from_openapi_specs:
path = enhance_from_openapi_spec(response)
else:
path = response.request.url.path
return cls(
name=name,
description=description or f"{request}",
method=request.method,
host=request.url.host,
path=path,
query=request.url.query.decode() or None,
request_payload=json.loads(request.content.decode())
if request.content
else None,
response_body=response.json() if response.content else None,
status_code=HTTPStatus(response.status_code),
)
def __str__(self) -> str:
return f"{self.description: self.request_desc}"
@property
def request_desc(self) -> str:
return f"{self.method} {self.path}"
def as_response(self) -> httpx.Response:
return httpx.Response(status_code=self.status_code, json=self.response_body)
def get_captured_as_json(name: str, response: httpx.Response) -> str:
capture_json: str = HttpApiCallCaptureModel.create_from_response(
response, name=name
).json(indent=1)
return f"{capture_json}"
|
ITISFoundation/osparc-simcore
|
services/api-server/src/simcore_service_api_server/utils/http_calls_capture.py
|
http_calls_capture.py
|
py
| 2,202 |
python
|
en
|
code
| 35 |
github-code
|
6
|
232662350
|
import os
import sys
import glob
import subprocess
from pefile import PE
name = "ReBarDxe"
version = "1.0"
GUID = "a8ee1777-a4f5-4345-9da4-13742084d31e"
shell = sys.platform == "win32"
buildtype = "RELEASE"
def filesub(filep, f, r):
# Read in the file
with open(filep, 'r') as file :
filedata = file.read()
# Replace the target string
filedata = filedata.replace(f, r)
# Write the file out again
with open(filep, 'w') as file:
file.write(filedata)
def set_bit(data, bit):
"""Sets a specific bit."""
return data | (1 << bit)
def set_nx_compat_flag(pe):
"""Sets the nx_compat flag to 1 in the PE/COFF file."""
dllchar = pe.OPTIONAL_HEADER.DllCharacteristics
dllchar = set_bit(dllchar, 8) # 8th bit is the nx_compat_flag
pe.OPTIONAL_HEADER.DllCharacteristics = dllchar
pe.merge_modified_section_data()
return pe
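# Quick illustration (added note, not part of the original build logic): bit 8
# of DllCharacteristics is the NX_COMPAT flag (mask 1 << 8 == 0x0100), so
# set_bit just ORs that mask into the field, e.g.
#   set_bit(0x8040, 8) == 0x8140
#   set_bit(0x8140, 8) == 0x8140   # flag already set, value unchanged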
if len(sys.argv) > 1:
buildtype = sys.argv[1].upper()
# 3 arguments = Github Actions
if len(sys.argv) == 3:
print("TARGET: ", os.environ['TARGET'])
print("TARGET_ARCH: ", os.environ['TARGET_ARCH'])
print("TOOL_CHAIN_TAG: ", os.environ['TOOL_CHAIN_TAG'])
# setup Conf/target.txt
filesub("./Conf/target.txt", "DEBUG", os.environ['TARGET'])
filesub("./Conf/target.txt", "IA32", os.environ['TARGET_ARCH'])
filesub("./Conf/target.txt", "VS2015x86", os.environ['TOOL_CHAIN_TAG'])
else:
os.chdir("../..")
subprocess.run(["build", "--platform=ReBarUEFI/ReBarDxe/ReBar.dsc"], shell=shell, env=os.environ, stderr=sys.stderr, stdout=sys.stdout)
ReBarDXE = glob.glob(f"./Build/ReBarUEFI/{buildtype}_*/X64/ReBarDxe.efi")
if len(ReBarDXE) != 1:
print("Build failed")
sys.exit(1)
# set NX_COMPAT
pe = PE(ReBarDXE[0])
set_nx_compat_flag(pe)
os.remove(ReBarDXE[0])
pe.write(ReBarDXE[0])
print(ReBarDXE[0])
print("Building FFS")
os.chdir(os.path.dirname(ReBarDXE[0]))
try:
os.remove("pe32.sec")
os.remove("name.sec")
os.remove("ReBarDxe.ffs")
except FileNotFoundError:
pass
subprocess.run(["GenSec", "-o", "pe32.sec", "ReBarDxe.efi", "-S", "EFI_SECTION_PE32"], shell=shell, env=os.environ, stderr=sys.stderr, stdout=sys.stdout)
subprocess.run(["GenSec", "-o", "name.sec", "-S", "EFI_SECTION_USER_INTERFACE", "-n", name], shell=shell, env=os.environ, stderr=sys.stderr, stdout=sys.stdout)
subprocess.run(["GenFfs", "-g", GUID, "-o", "ReBarDxe.ffs", "-i", "pe32.sec", "-i" ,"name.sec", "-t", "EFI_FV_FILETYPE_DRIVER", "--checksum"], shell=shell, env=os.environ, stderr=sys.stderr, stdout=sys.stdout)
try:
os.remove("pe32.sec")
os.remove("name.sec")
except FileNotFoundError:
pass
print("Finished")
|
xCuri0/ReBarUEFI
|
ReBarDxe/buildffs.py
|
buildffs.py
|
py
| 2,663 |
python
|
en
|
code
| 562 |
github-code
|
6
|
36935275213
|
# this is focused on speed
# it may not run everything
import pathlib
import numpy as np
from tinygrad.ops import MovementOps, ProcessingOps
from tinygrad.llops.ops_gpu import require_init_gpu, clbuild, get_cl_queue, get_cl_ctx
from tinygrad.llops.ops_gpu import contiguous
from tinygrad.llops.ops_gpu import unary_op as unary_op_gpu, binary_op as binary_op_gpu, reduce_op as reduce_op_gpu
from tinygrad.helpers import prod
from tinygrad.shapetracker import ShapeTracker
import pyopencl as cl
from copy import deepcopy
def roundup(x, n=4): return (x+(n-1))//n * n
def flip(x): return (x[1], x[0])
class OpenCLBuffer:
def __init__(self, shape, hostbuf=None, _buf=None, _image=None):
require_init_gpu()
self.shapetracker = deepcopy(shape) if isinstance(shape, ShapeTracker) else ShapeTracker(*shape)
self._buf = _buf
self._image = _image
self.dtype = np.float32
if hostbuf is not None:
# TODO: lazy?
self._buf = cl.Buffer(get_cl_ctx(), cl.mem_flags.READ_WRITE, 4*roundup(prod(shape)))
cl.enqueue_copy(get_cl_queue(), self._buf, hostbuf.astype(np.float32).ravel())
def clone(self):
return OpenCLBuffer(self.shapetracker, _buf=self._buf, _image=self._image)
@property
def shape(self): return self.shapetracker.shape
@staticmethod
def fromCPU(x):
return OpenCLBuffer(x.shape, x)
def toCPU(self):
data = np.empty(self.shape, dtype=np.float32)
if self.shapetracker.contiguous == False:
tmp = OpenCLBuffer(self.shapetracker.shape)
contiguous(None, self, self.shapetracker, tmp)
else:
tmp = self
cl.enqueue_copy(get_cl_queue(), data, tmp.cl, is_blocking=True)
return data
@property
def cl(self):
if self._buf is None:
self._buf = cl.Buffer(get_cl_ctx(), cl.mem_flags.READ_WRITE, 4*roundup(prod(self.shape)))
if self._image is not None:
assert prod(self.shape) == prod(self._image.shape)*4
print(f"converting {self.shape} back to buffer, image shape is {self._image.shape}")
clbuild("from_image", """
__kernel void from_image(
read_only image2d_t in,
__global float4 *out) {
const sampler_t smp = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST;
int2 l;
l.y = get_global_id(1);
l.x = get_global_id(0);
int W = get_image_width(in);
out[l.y*W + l.x] = read_imagef(in, smp, l);
}
""")(self._image.shape, None, self._image, self._buf)
self._image = None
return self._buf
@property
def image(self):
if self._image is None:
assert self.shape[2] == 4 and len(self.shape) == 3
fmt = cl.ImageFormat(cl.channel_order.RGBA, cl.channel_type.FLOAT)
self._image = cl.Image(get_cl_ctx(), cl.mem_flags.READ_WRITE, fmt, shape=flip(self.shape))
if self._buf is not None:
assert prod(self.shape) == prod(self._image.shape)*4
print(f"converting {self.shape} to image with shape {self._image.shape}")
clbuild("to_image", """
__kernel void to_image(
__global const float4 *in,
write_only image2d_t out) {
int2 l;
l.y = get_global_id(1);
l.x = get_global_id(0);
int W = get_image_width(out);
write_imagef(out, l, in[l.y*W + l.x]);
}
""")(self._image.shape, None, self._buf, self._image)
self._buf = None
return self._image
def unary_op(ctx, op, x):
# TODO: this doesn't actually have to be contiguous
x = contiguous(ctx, x, x.shapetracker) if not x.shapetracker.contiguous else x
return unary_op_gpu(ctx, op, x)
def binary_op(ctx, op, x, y):
x = contiguous(ctx, x, x.shapetracker) if not x.shapetracker.contiguous else x
y = contiguous(ctx, y, y.shapetracker) if not y.shapetracker.contiguous else y
return binary_op_gpu(ctx, op, x, y)
def reduce_op(ctx, op, x, new_shape):
x = contiguous(ctx, x, x.shapetracker) if not x.shapetracker.contiguous else x
return reduce_op_gpu(ctx, op, x, new_shape)
def movement_op(ctx, op, x, arg=None):
xc = x.clone()
# convert from image if the buffer can change shape
if op in [MovementOps.EXPAND, MovementOps.SLICE]: xc.cl
xc.shapetracker.movement_op(op, arg)
if not xc.shapetracker.contiguous: return contiguous(ctx, xc, xc.shapetracker)
else: return xc
def load(x):
with open(x) as f:
ret = f.read()
return ret
def conv(x,w,ret,C):
print(x.shapetracker.expr(), w.shapetracker.expr())
print(x.shape, w.shape, ret.shape)
options = []
if C.cin == 1: options.append("-DDEPTHWISE")
if C.bs > 1:
options.append("-DBATCH")
assert C.py == 0, "batched conv doesn't work with y-padding"
conv_prg = clbuild("conv", load(pathlib.Path(__file__).parent.parent.parent / 'accel/opencl/conv.cl'), tuple(options))
assert C.cout%4 == 0
kernel_args = [C.cout//4, (C.ox+3)//4, C.bs*C.oy]
conv_args = [max(1, C.cin//4), C.groups*C.cin//4, max(1, C.rcout//4), C.cout//4, C.ox, C.oy, C.iy, C.W, C.H, C.px, C.py, C.xs, C.ys, C.dx, C.dy]
print(conv_args, kernel_args)
conv_prg(kernel_args, None, x.image, w.image, ret.image, *[np.int16(x) for x in conv_args])
def processing_op(ctx,op,x,w,out_shape,C):
assert op == ProcessingOps.CONV, f"{op} isn't supported"
ret = ctx.buffer((C.bs*C.oy, C.ox*C.cout//4, 4))
conv(x, w, ret, C)
return ret
def test_image():
hostbuf = np.random.randn(5,8,4).astype(np.float32)
x = OpenCLBuffer((5,8,4), hostbuf)
assert np.allclose(x.toCPU(), hostbuf)
print(x.image)
assert np.allclose(x.toCPU(), hostbuf)
if __name__ == "__main__":
test_image()
|
henrylao/tinygrad
|
accel/opencl/ops_opencl.py
|
ops_opencl.py
|
py
| 5,655 |
python
|
en
|
code
| null |
github-code
|
6
|
26530831301
|
import json
from oneview_redfish_toolkit.api.composition_service import CompositionService
from oneview_redfish_toolkit.tests.base_test import BaseTest
class TestCompositionService(BaseTest):
"""Tests for CompositionService class"""
def setUp(self):
"""Tests preparation"""
# Loading CompositionService mockup result
with open(
'oneview_redfish_toolkit/mockups/redfish/CompositionService.json'
) as f:
self.composition_service_mockup = json.load(f)
def test_class_instantiation(self):
# Tests if class is correctly instantiated
try:
compostion_service = CompositionService()
except Exception as e:
self.fail("Failed to instantiate CompositionService class."
" Error: {}".format(e))
self.assertIsInstance(compostion_service, CompositionService)
def test_serialize(self):
# Tests the serialize function result against known result
try:
compostion_service = CompositionService()
except Exception as e:
self.fail("Failed to instantiate CompositionService class."
" Error: {}".format(e))
try:
expected_result = json.loads(compostion_service.serialize())
except Exception as e:
self.fail("Failed to serialize. Error: ".format(e))
self.assertEqualMockup(self.composition_service_mockup,
expected_result)
|
HewlettPackard/oneview-redfish-toolkit
|
oneview_redfish_toolkit/tests/api/test_composition_service.py
|
test_composition_service.py
|
py
| 1,507 |
python
|
en
|
code
| 16 |
github-code
|
6
|
17972375760
|
import dash
from dash import Dash, html, Output, Input, dcc, callback
import dash_bootstrap_components as dbc
import pandas as pd
import plotly.express as px
import dash_ag_grid as dag
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.LUX], suppress_callback_exceptions=True,
meta_tags=[{'name': 'viewport','content': 'width=device-width, initial-scale=1.0, maximum-scale=1.2, minimum-scale=0.5,'}])
server = app.server
df = pd.read_csv('data/nobel.csv')
color_mapping = {
'medicine': 'limegreen',
'physics': 'darkkhaki',
'chemistry': 'goldenrod',
'peace': 'darkslategray',
'literature': 'darkviolet',
'economics': 'darkcyan',
'male':'royalblue',
    'female': 'fuchsia',
'org': 'silver',
}
columnDefs = [
{'field': 'nobel year'},
{'field': 'firstname'},
{'field': 'lastname'},
{'field': 'category'},
{'field': 'motivation'},
{'field': 'gender'},
{'field': 'age'},
{'field': 'birth_country'},
]
def figMap():
# world map of Country Category
dfMap = df.groupby(['alpha-3','birth_country']).size().reset_index(name='count')
figMap = px.choropleth(dfMap, locations='alpha-3', color='count', hover_name='birth_country')
figMap.update_layout(paper_bgcolor='rgb(248,248,255)')
return figMap
def figGender():
# histogram for Gender Category
dfGroup = df.groupby(['nobel year', 'category', 'gender']).size().reset_index(name='count')
figGender = px.histogram(dfGroup, x="nobel year", y='count', color="gender",
marginal="rug", # or violin, rug
hover_data=dfGroup.columns, labels={'count': 'Count of Gender'}).update_layout(yaxis_title='Count of Gender', paper_bgcolor='#F8F8FF')
return figGender
def figCat():
# Sun burst chart
figSun = px.sunburst(df, path=['category', 'gender']).update_layout(margin=dict(l=0, r=0, t=0, b=0),paper_bgcolor='#F8F8FF')
figSun.update_traces(marker_colors=[color_mapping[cat] for cat in figSun.data[-1].labels])
# Bar chart
dfGroup = df.groupby(['nobel year', 'category']).size().reset_index(name='count')
figBar = px.histogram(dfGroup, x='nobel year', y='count', color='category', barmode='group',
labels={'count': 'Number of Nobel Prize Received'},
color_discrete_map=color_mapping)
figBar.update_layout(yaxis_title='Number of Nobel Prize Received',paper_bgcolor='#F8F8FF')
return figSun, figBar
gender_layout = dbc.Row([
dbc.Col([
dbc.Card([
dbc.CardBody([
dbc.Row([
dbc.Col([
html.H3(['Gender'])
]),
]),
html.Hr(),
dbc.Row([
dbc.Col([
dcc.Graph(figure=figGender())
])
])
])
],
class_name='bg-card mb-5'
)
])
])
category_layout = dbc.Row([
dbc.Col([
dbc.Card([
dbc.CardBody([
dbc.Row([
dbc.Col([
html.H3(['Category'])
]),
]),
html.Hr(),
dbc.Row([
dbc.Col([
html.P(['Year']),
dcc.Dropdown(options=[x for x in df['nobel year'].unique()], id='dropdown_year'),
], width=2),
dbc.Col([
dcc.Loading(children=[dcc.Graph(figure={}, id='cat-sun')])
], width=3),
dbc.Col([
dcc.Loading(children=[dcc.Graph(figure={}, id='cat-bar')])
], width=7)
])
])
],
class_name='bg-card mb-5'
)
])
])
country_layout = dbc.Row([
dbc.Col([
dbc.Card([
dbc.CardBody([
dbc.Row([
dbc.Col([
html.H3(['Country']),
html.Label(['Based on birth country'])
])
]),
html.Hr(),
dbc.Row([
dbc.Col([
html.P(['Country']),
dcc.Dropdown(options=sorted([x for x in df['birth_country'].unique()], key=lambda x: (str(type(x)), x))[1:],
id='dropdown-country')
], width=4)
]),
html.Br(),
dbc.Row([
dbc.Col([
dcc.Graph(figure=figMap()),
html.Br(),
dag.AgGrid(
id='grid-table',
rowData=df.to_dict('records'),
columnDefs=columnDefs,
defaultColDef={"resizable": True, "sortable": True, "filter": True, "minWidth":115},
dashGridOptions={"pagination": True, "paginationPageSize":8, "domLayout": "autoHeight"},
)
])
])
])
],
class_name='bg-card mb-5'
)
])
])
app.layout = dbc.Container([
dbc.Row([
dbc.Col([
html.H1(['The Nobel Prize'])
])
],
class_name='mt-3 mb-2'
),
dcc.Tabs(id='input-tabs', value='gender-tab', children=[
dcc.Tab(label='Gender', value='gender-tab'),
dcc.Tab(label='Category', value='category-tab'),
dcc.Tab(label='Country', value='country-tab')
]),
html.Div(id='output-tabs', children={})
])
# callback for updating interactive Category
@callback(
Output('cat-sun', 'figure'),
Output('cat-bar', 'figure'),
Input('dropdown_year', 'value'),
)
def update_cat(select_year):
dff = df.copy()
if select_year:
# update sunburst chart
figSun = px.sunburst(dff[dff['nobel year'] == select_year], path=['category', 'gender']).update_layout(margin=dict(l=0, r=0, t=0, b=0),paper_bgcolor='#F8F8FF')
figSun.update_traces(marker_colors=[color_mapping[cat] for cat in figSun.data[-1].labels])
# update barchart
dffGroup = dff.groupby(['nobel year', 'category']).size().reset_index(name='count')
mark = (dffGroup['nobel year'] == select_year)
figBar = px.histogram(dffGroup[mark], x='nobel year', y='count', color='category', barmode='group',
labels={'count': 'Number of Nobel Prize Received'},
color_discrete_map=color_mapping)
figBar.update_layout(yaxis_title='Number of Nobel Prize Received',paper_bgcolor='#F8F8FF')
figBar.update_xaxes(visible=False)
return figSun, figBar
else:
return figCat()
# callback for updating interactive Country
@callback(
Output('grid-table', 'rowData'),
Input('dropdown-country', 'value')
)
def update_country(select_country):
dff = df.copy()
if select_country:
mask = (dff['birth_country'] == select_country)
dff = dff[mask]
return dff.to_dict('records')
else:
return dff.to_dict('records')
@callback(
Output('output-tabs', 'children'),
Input('input-tabs', 'value')
)
def content(tab):
if tab == 'gender-tab':
return gender_layout
elif tab == 'category-tab':
return category_layout
elif tab == 'country-tab':
return country_layout
if __name__ == '__main__':
app.run_server(debug=True)
|
Natcha-Phonkamhaeng/nobel-viz
|
src/app.py
|
app.py
|
py
| 6,264 |
python
|
en
|
code
| 1 |
github-code
|
6
|
73118020027
|
import os
import torch
import matplotlib.pyplot as plt
from config.config import cfg
def get_supports(m):
"""
Returns the number of samples and the percentage of support for each activity in the ground truth data of a given dataset.
Args:
- m (str): the name of the dataset
Returns:
- supports (dict): a dictionary containing the number of samples for each activity in the ground truth data
- supports_perc (dict): a dictionary containing the percentage of support for each activity in the ground truth data
"""
c = cfg()
cfg_ = c.get_config()
dataset_path = cfg_.dataset_path
num_activities = 24
supports = { i : 0 for i in range(num_activities) }
path_gt = os.path.join(dataset_path, m, f"gt_{m}.pt")
data_gt = torch.load(path_gt)
for label in data_gt:
supports[torch.argmax(label).item()] += 1
supports_perc = { i : round(supports[i]/len(data_gt),4) for i in range(num_activities) }
print(f"{len(data_gt)} samples.\n")
return supports, supports_perc
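# Minimal sketch (added for illustration) of the counting idea used above: each
# ground-truth label is a one-hot tensor, argmax recovers the activity id, and
# the supports are just per-id counts. The tensor below is made-up data.
#
#   fake_gt = torch.eye(3)[[0, 0, 2]]          # three one-hot labels: 0, 0, 2
#   counts = {i: 0 for i in range(3)}
#   for label in fake_gt:
#       counts[torch.argmax(label).item()] += 1
#   # counts == {0: 2, 1: 0, 2: 1}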
def ig_stats(m):
"""
Computes statistics about the instance graphs in the specified dataset split.
Args:
        m (str): The dataset split to compute statistics for. Must be one of "train", "val", or "test".
Returns:
None
"""
file = m + ".g"
if m == "train":
file = "training.g"
with open('_data/' + file, 'r') as f:
contents = f.read()
graphs = contents.split('\n\n')
print(f"Number of instance graphs: {len(graphs)}")
num_nodes_per_graph = [0 for _ in range(len(graphs))]
for n, graph in enumerate(graphs):
graph = graph[3:]
        graph_elems = graph.split('\n')  # graph_elems = list of strings; each string is one element of a graph
for elem in graph_elems:
if elem[0] == 'v':
num_nodes_per_graph[n] += 1
print(f"Mean number of nodes per ig: {sum(num_nodes_per_graph)/len(num_nodes_per_graph):.2f}")
print(f"Standard deviation of number of nodes per ig: {torch.std(torch.tensor(num_nodes_per_graph, dtype = torch.float64)):.2f}")
print(f"Min number of nodes per ig: {min(num_nodes_per_graph)}")
print(f"Max number of nodes per ig: {max(num_nodes_per_graph)}")
if __name__ == '__main__':
modes = ["train","val", "test"]
plot_supports = True
for m in modes:
print(f"{m.capitalize()} dataset")
ig_stats(m)
print("After preprocessing:", end = " ")
supports, supports_perc = get_supports(m)
if plot_supports:
"""
plot barchart of supports percentages per class with 24 classes.
Each bar is labeled with value of support per class and the class number
Set the title with the mode name
"""
plt.figure()
plt.bar(range(24), supports_perc.values())
plt.xlabel("Class")
plt.ylabel("Support %")
plt.xticks(range(24))
for i, v in enumerate(supports_perc.values()):
plt.text(i-0.25, v + 0.005, f"{(v*100):.2f}", color='black', fontweight='bold', size = 'small')
plt.title(f"Support percentage per class in {m} dataset")
plt.show()
plt.figure()
plt.bar(range(24), supports.values())
plt.xlabel("Class")
plt.ylabel("Support")
plt.xticks(range(24))
for i, v in enumerate(supports.values()):
plt.text(i-0.25, v + 10, f"{v}", color='black', fontweight='bold', size = 'small')
plt.title(f"Support per class in {m} dataset")
plt.show()
|
Vito-Scaraggi/mpgnnap
|
data_info.py
|
data_info.py
|
py
| 3,704 |
python
|
en
|
code
| 1 |
github-code
|
6
|
2519404412
|
''' Library for data importation and feature selection'''
###############################################################################
# Author: Zane Markel
# Created: 6 MAR 2014
#
# Name: mldata
# Description : Library for importing/exporting data and feature selection
#
###############################################################################
# sklearn models can import svmlight files for data
# In general, sklearn uses the following data format:
# attributes X = n_samples vector of n_features feature vector
# labels Y = n_samples vector of class values
# scikit learn will provide most of the baseline funtionality
import numpy as np
import numpy.lib.recfunctions as rfunc
import random
import sys
# TODO: make it easy to import and export data
# check out numpy.recarray.tofile and .fromfile
def load_data(csv):
''' Import a csv file. Returns a structured array of the following format:
[[name, feature0, ..., featuren, label], [name, ..., label], ...]
Note: "--" will be treated as a comment delimiter. I do not expect to use
comments, but numpy.genfromtxt needs something.'''
dformat = extract_headers(open(csv))
# Load file data as a matrix
data = np.genfromtxt(csv, delimiter=", ", dtype=dformat, skip_header=1, \
comments='--')
return data
def save_data(data, fname):
''' Takes a record array (like that returned by mldata.load_data) and
saves it as a .csv file that could be imported by mldata.load_data. '''
# Open file for writing:
out = open(fname, 'w')
# Get header names
hnames = data.dtype.names
# Print header names
hline = ', '.join(hnames) + '\n'
out.write(hline)
# For every line in the array...
for record in data:
# Print that line
# (Use list comprehension to get a string version of the record)
recline = ', '.join((str(x) for x in record)) + '\n'
out.write(recline)
# For clarity
return
def select_sample(seed, data, howmany, fractionMalware=-1):
''' Grabs a sample of data to use for learning.
seed = seed to use (so that sample can be reproduced)
data = the large dataset to use.
howmany = how many records to select.
fractionMalware = percent of records (0 to 1) that will be malicious
default (-1) indicates no preference.'''
# don't try to choose more records than there are in the data
if(howmany >= len(data)):
sys.stderr.write("SAMPLE IS ENTIRE DATASET!")
return data
# decide which record indices to pick
random.seed(seed)
if(fractionMalware == -1): # No preference for infected:clean ratio
indices = random.sample(range(len(data)), howmany)
else:
# get indices of malicious and benign records
benind = np.where(data['isMalware'] == 0)[0]
malind = np.where(data['isMalware'] == 1)[0]
# get number of malicious and benign records that are requested
nummal = int(round(fractionMalware * float(howmany)))
numben = howmany - nummal
# get samples of those indices
        # random.sample raises a ValueError if more records are requested than are available
malind = random.sample(malind, nummal)
benind = random.sample(benind, numben)
# concatenate the sample indices together
indices = malind + benind
# Shuffle indices so that the malicious records do not come before the benign records
random.shuffle(indices)
# return only those record indices of data
return data[indices]
def data_components(data):
''' Converts a structured array of data into simple arrays containing the
features (2d array), labels, record names, and the feature names.
This is intended to be used after preprocessing as the final step before
doing the actual learning.
Returns (features, labels, recordfilenames, featurenames)'''
# Get filenames
recnames = data['Name']
# Get labels
labels = data['isMalware']
# Get features
features = rm_feat_name(data, 'Name')
features = rm_feat_name(features, 'isMalware')
featnames = features.dtype.names
simplefeatures = features.view(np.int64).reshape(features.shape + (-1,))
return (simplefeatures, labels, recnames, featnames)
def rm_feat_num(features, num):
''' Return features, with a feature removed based on column (num)ber '''
names = list(features.dtype.names)
new_names = names[:num] + names[num+1:]
return features[new_names]
def rm_feat_name(features, name):
''' Return features, with a feature "name" removed'''
names = list(features.dtype.names)
if name in names:
names.remove(name)
return features[names]
def append_feat(data, name, fieldarray):
''' Appends fieldarray to data with the name 'name'. This allows new
features to be added easily.
Because all new features will be built differently, it is up to you to
construct the fieldarray properly.
This is basically just a recast of numpy.lib.recfunctions.rec_append_fields
, so that I do not have to look up the function again.'''
return rfunc.rec_append_fields(data, name, fieldarray)
def extract_headers(openfile):
''' Extract the header line names and return a numpy.dtype for the
dtype field of numpy.loadtxt'''
# for now just return the line as a string
# Read the line
headerline = openfile.readline()
# Get the names
nmes = headerline.strip().replace('"','').replace(' ','').split(',')
# Generate types
formats = ['i8']*len(nmes) # most entries will be 64-bit integers
formats[nmes.index('Name')] = 'a255' # Name field will be a string
# Generate dictionary
dtdict = {'names':tuple(nmes), 'formats':tuple(formats) }
# Return numpy.dtype object
return np.dtype(dtdict)
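# Illustrative end-to-end sketch (added; not part of the original library). The
# csv name is hypothetical: any file with a 'Name' column, an 'isMalware' label
# column and integer feature columns will do.
if __name__ == '__main__':
    data = load_data('malware_features.csv')
    sample = select_sample(42, data, 100, fractionMalware=0.5)
    features, labels, recnames, featnames = data_components(sample)
    print(features.shape, labels.shape, len(featnames))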
|
zanemarkel/trident
|
learn/spr14mldata.py
|
spr14mldata.py
|
py
| 5,881 |
python
|
en
|
code
| 2 |
github-code
|
6
|
27773611300
|
import requests
import os
from telepyrobot.setclient import TelePyroBot
from pyrogram import filters
from pyrogram.types import Message
from telepyrobot import COMMAND_HAND_LER
__PLUGIN__ = os.path.basename(__file__.replace(".py", ""))
__help__ = f"""
Url Shortner Plugin for https://da.gd
**Usage:**
`{COMMAND_HAND_LER}short <long-link>`: Will return shortlink of the long-link.
`{COMMAND_HAND_LER}unshort <shortlink>`: Will return long url of the shortlink.
"""
@TelePyroBot.on_message(filters.command("short", COMMAND_HAND_LER) & filters.me)
async def short_link(c: TelePyroBot, m: Message):
input_str = m.text.split(None, 1)[1]
sample_url = f"https://da.gd/s?url={input_str}"
response_api = requests.get(sample_url).text
if response_api:
await m.edit_text(f"**Generated Link:**\n {response_api} for {input_str}.")
else:
await m.edit_text("something is wrong. please try again later.")
@TelePyroBot.on_message(filters.command("unshort", COMMAND_HAND_LER) & filters.me)
async def unshort_link(c: TelePyroBot, m: Message):
input_str = m.text.split(None, 1)[1]
if not input_str.startswith("http"):
input_str = "http://" + input_str
if not input_str.startswith("http://da.gd"):
await m.edit_text("`I can only unshort da.gd links`")
r = requests.get(input_str, allow_redirects=False)
if str(r.status_code).startswith("3"):
await m.edit_text(
f"Input URL: {input_str}\nReDirected URL: {r.headers['Location']}"
)
else:
await m.edit_text(f"Input URL {input_str} returned status_code {r.status_code}")
|
Divkix/TelePyroBot
|
telepyrobot/plugins/url_shortner.py
|
url_shortner.py
|
py
| 1,615 |
python
|
en
|
code
| 40 |
github-code
|
6
|
41745621937
|
import os
import sqlite3
from bs4 import BeautifulSoup
def scan_folder(parentfile, diff):
for file_name in os.listdir(parentfile):
if "_" in file_name:
diff = eachfile(file_name, parentfile, diff)
else:
current_path = "".join((parentfile, "/", file_name))
if os.path.isdir(current_path):
scan_folder(current_path, diff)
return diff
def eachfile(file_name,parentfile, diff):
conn = sqlite3.connect('healthReviewsChangeDB.db')
cur = conn.cursor()
dbcol = ''
dbTable = ''
story_ID = file_name[:5]
filename =file_name[6:]
if "About" in filename:
dbcol = 'About'
dbTable = 'Review'
elif "Activity" in filename:
dbcol = 'Activity'
dbTable = 'Review'
elif filename == 'Date':
dbcol = 'StoryTime'
dbTable = 'Review'
elif "Feel_Tag" in filename:
dbcol = 'feelTag'
dbTable = 'Review'
elif "Good_Tag" in filename:
dbcol = 'goodTag'
dbTable = 'Review'
elif "Improved_Tag" in filename:
dbcol = 'improvedTag'
dbTable = 'Review'
elif "Progress" in filename:
dbcol = 'Progress'
dbTable = 'Review'
elif "Similar" in filename:
dbcol = 'similarTag'
dbTable = 'Review'
elif "Story" in filename:
dbcol = 'Story'
dbTable = 'Review'
elif "Title" in filename:
dbcol = 'Title'
dbTable = 'Review'
elif "Username" in filename:
dbcol = 'Username'
dbTable = 'Review'
elif filename.endswith("Response"):
dbcol = 'Response'
story_ID = filename[:5]
dbTable = 'Response'
elif "Response_Header" in filename:
dbcol = 'ResponseInfo'
story_ID = filename[:5]
dbTable = 'Response'
elif "Response_Time" in filename:
dbcol = 'ResponseTime'
story_ID = filename[:5]
dbTable = 'Response'
elif filename.endswith("Update"):
dbcol = 'UpdateText'
story_ID = filename[:5]
dbTable = 'userUpdates'
elif "Update_date" in filename:
dbcol = 'updateTime'
story_ID = filename[:5]
dbTable = 'userUpdates'
exeStat = "SELECT "+ dbcol+ " FROM "+dbTable+" WHERE StoryID IS "+ story_ID +";"
AllDBcontent = cur.execute(exeStat)
for eachcontent in AllDBcontent:
with open(parentfile+"/"+file_name, 'r') as reader:
content = reader.read()
if eachcontent[0] != content:
diff.append(file_name)
cur.close()
return diff
def main():
parentfile = "/mnt/c/Users/Juju/DB/project/webscrape/realScrape"
diff = []
scan_folder(parentfile, diff)
if diff:
print("The file name that the content is different from the Database:", diff)
print("Number of files: ", str(len(diff)))
else:
print("Test OK")
if __name__ == "__main__":
main()
|
22650684/Webscraping-Project
|
testing/dbMatchFile.py
|
dbMatchFile.py
|
py
| 3,089 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19314688451
|
# -*- coding: utf-8 -*-
'''
A module for startup settings
'''
from __future__ import absolute_import
import logging
import os.path
import sys
from requests.structures import CaseInsensitiveDict # pylint: disable=import-error,3rd-party-local-module-not-gated
# Import local libs
# This file may be loaded out of __pycache__, so the
# directory of its .py may not be in the search path.
IMPORT_PATH = os.path.dirname(__file__)
if IMPORT_PATH.endswith('__pycache__'):
IMPORT_PATH = os.path.dirname(IMPORT_PATH)
sys.path.append(IMPORT_PATH)
try:
import _nisysmgmt_grains # pylint: disable=import-error,3rd-party-local-module-not-gated
finally:
# Remove the extra search path that we added to sys.path
sys.path.remove(IMPORT_PATH)
log = logging.getLogger(__name__)
try:
import salt.modules.cmdmod as cmd
import salt.serializers.json as json
import salt.ext.six.moves.configparser as configparser
except ImportError:
log.critical("Salt is not available")
# Define the module's virtual name
__virtualname__ = 'startup'
NIRTINI_PATH = '/etc/natinst/share/ni-rt.ini'
NIRTCFG_PATH = '/usr/local/natinst/bin/nirtcfg'
FWSETENV_PATH = '/sbin/fw_setenv'
def __virtual__():
'''
Only load this module if the nirtcfg command exist and is older NILinuxRT
:return: True if environment is set up and False otherwise
'''
if 'NILinuxRT' in __grains__['os_family'] and 'nilrt' == __grains__['lsb_distrib_id']:
return True
return False, 'The startup_settings module cannot be loaded.'
def get_all(json_format=False):
'''
.. note::
Get all of these settings:
- NoFPGAApp
- NoApp
- ConsoleOut
- EmbeddedUI
- LabVIEWAccess
:param json_format: If true, returns the result in JSON format
:return: Returns settings
CLI Example:
.. code-block:: bash
salt '*' startup.get_all
salt '*' startup.get_all True
'''
settings = {'NoFPGAApp': get('nofpgaapp'),
'NoApp': get('noapp'),
'ConsoleOut': get('consoleout'),
'LabVIEWAccess': get('labviewaccess')}
cpuarch = __grains__.get('cpuarch')
if cpuarch == 'x86_64':
settings['EmbeddedUI'] = get('embeddedui')
if not json_format:
return settings
return json.serialize(settings)
def get(setting):
'''
.. note::
Get one of these settings:
- NoFPGAApp
- NoApp
- ConsoleOut
- EmbeddedUI
- LabVIEWAccess
:param setting: Name of setting.
:return: Returns value of that setting or -1 if error.
CLI Example:
.. code-block:: bash
salt '*' startup.get noapp
'''
setting = setting.strip().lower()
system_settings = {'nofpgaapp': 'NoFPGAApp.enabled',
'noapp': 'NoApp.enabled',
'consoleout': 'ConsoleOut.enabled',
'embeddedui': 'ui.enabled'}
lvrt_settings = {'labviewaccess': 'RTTarget.RTProtocolAllowed'}
config = configparser.RawConfigParser(dict_type=CaseInsensitiveDict)
config.read(NIRTINI_PATH)
if setting in system_settings:
return config.get('systemsettings', system_settings[setting]).strip('\"')
elif setting in lvrt_settings:
return config.get('lvrt', lvrt_settings[setting]).strip('\"')
return -1
def enable_console_out(enable=True):
'''
.. note::
Enable or disable ConsoleOut
:param enable: If true enable ConsoleOut else disable ConsoleOut. Default is True.
CLI Example:
.. code-block:: bash
salt '*' startup.enable_console_out
'''
cmd.run('{0} --set section=systemsettings,token={1},value={2}'.format(NIRTCFG_PATH, 'ConsoleOut.enabled', enable))
cmd.run('{0} consoleoutenable={1}'.format(FWSETENV_PATH, enable))
system_settings = _nisysmgmt_grains.get_last_known_startup_settings(__grains__)
system_settings['ConsoleOut'] = str(enable)
__salt__['event.fire']({'force_refresh': True}, 'grains_refresh')
return True
def enable_no_fpga_app(enable=True):
'''
.. note::
Enable or disable NoFPGAApp
:param enable: If true enable NoFPGAApp else disable NoFPGAApp. Default is True.
CLI Example:
.. code-block:: bash
salt '*' startup.enable_no_fpga_app
'''
cmd.run('{0} --set section=systemsettings,token={1},value={2}'.format(NIRTCFG_PATH, 'NoFPGAApp.enabled', enable))
system_settings = _nisysmgmt_grains.get_last_known_startup_settings(__grains__)
system_settings['NoFPGAApp'] = str(enable)
__salt__['event.fire']({'force_refresh': True}, 'grains_refresh')
return True
def enable_no_app(enable=True):
'''
.. note::
Enable or disable NoApp
:param enable: If true enable NoApp else disable NoApp. Default is True.
CLI Example:
.. code-block:: bash
salt '*' startup.enable_no_app
'''
cmd.run('{0} --set section=systemsettings,token={1},value={2}'.format(NIRTCFG_PATH, 'NoApp.enabled', enable))
system_settings = _nisysmgmt_grains.get_last_known_startup_settings(__grains__)
system_settings['NoApp'] = str(enable)
__salt__['event.fire']({'force_refresh': True}, 'grains_refresh')
return True
def enable_embedded_ui(enable=True):
'''
.. note::
Enable or disable Embedded UI
:param enable: If true enable Embedded UI else disable Embedded UI. Default is True.
CLI Example:
.. code-block:: bash
salt '*' startup.enable_embedded_ui
'''
cmd.run('{0} --set section=systemsettings,token={1},value={2}'.format(NIRTCFG_PATH, 'ui.enabled', enable))
system_settings = _nisysmgmt_grains.get_last_known_startup_settings(__grains__)
system_settings['EmbeddedUI'] = str(enable)
__salt__['event.fire']({'force_refresh': True}, 'grains_refresh')
return True
def enable_labview_access(enable=True):
'''
.. note::
Enable or disable LabVIEW Project Access
:param enable: If true enable LabVIEW Project Access else disable LabVIEW Project Access. Default is True.
CLI Example:
.. code-block:: bash
salt '*' startup.enable_labview_access
'''
cmd.run('{0} --set section=lvrt,token={1},value={2}'.format(NIRTCFG_PATH, 'RTTarget.RTProtocolAllowed', enable))
system_settings = _nisysmgmt_grains.get_last_known_startup_settings(__grains__)
system_settings['LabVIEWAccess'] = str(enable)
__salt__['event.fire']({'force_refresh': True}, 'grains_refresh')
return True
|
BKnight760/ubuntu-systemlink-salt-minion
|
var/lib/salt/minion/extmods/modules/startup_settings.py
|
startup_settings.py
|
py
| 6,757 |
python
|
en
|
code
| 1 |
github-code
|
6
|
5975605110
|
from . import sql, chunk, rarity_info, BASE_PATH, Page
from datetime import datetime as dt, timedelta as td
from io import BytesIO
from pandas import DataFrame
from PIL import Image
from random import choice, choices, random
import json
import logging
import requests as r
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler()
stream_format = logging.Formatter('[%(asctime)s - %(name)s - %(levelname)s] %(message)s')
stream_handler.setFormatter(stream_format)
log.addHandler(stream_handler)
#############
# Constants #
#############
UPDATE_ROULETTE = '''update roulette
set amount = (select tr.amount from tmp_roulette tr where tr.user_id = roulette.user_id and tr.poke_id = roulette.poke_id and tr.shiny = roulette.shiny)
where roulette.user_id in (select tr.user_id from tmp_roulette tr where tr.user_id = roulette.user_id and tr.poke_id = roulette.poke_id and tr.shiny = roulette.shiny)
and roulette.poke_id in (select tr.poke_id from tmp_roulette tr where tr.user_id = roulette.user_id and tr.poke_id = roulette.poke_id and tr.shiny = roulette.shiny)
and roulette.shiny in (select tr.shiny from tmp_roulette tr where tr.user_id = roulette.user_id and tr.poke_id = roulette.poke_id and tr.shiny = roulette.shiny);'''
shiny_chance = 1/8192
default_rewards = {
'levels': 0,
'cash': 0,
'rolls': 0,
'rewards': 0
}
#############
# Functions #
#############
# Pokemon #
def get_all_pokemon():
df = sql('select * from pokemon')
return [Pokemon(**d) for d in df.to_dict('records')]
def get_pokemon(args):
shiny = 0
name = ' '.join(args) if isinstance(args, tuple) else args
if isinstance(name, str):
if name[-1] == 'S':
name = name[:-1]
shiny = 1
try:
id = float(name)
pkmn = get_pokemon_by_id(id)
except ValueError:
pkmn = get_pokemon_by_name(name)
if not pkmn:
return None
pkmn.shiny = shiny
return pkmn
def get_pokemon_by_id(id):
df = sql('select * from pokemon where id = ?', (id,))
if df.empty:
return None
return Pokemon(**df.to_dict('records')[0])
def get_pokemon_by_name(name):
df = sql('select * from pokemon where lower(name) = ?', (name.lower(),))
if df.empty:
return None
return Pokemon(**df.to_dict('records')[0])
def get_all_pokemon_by_rarity(rarity):
df = sql('select * from pokemon where rarity = ?', (rarity,))
return [Pokemon(**d) for d in df.to_dict('records')]
def get_random_pokemon_by_rarity(rarity):
return choice(get_all_pokemon_by_rarity(rarity))
# Pokedex #
def get_user_pokedex(user):
df = sql('select r.user_id, r.amount, r.shiny, p.* from roulette r left join pokemon p on p.id = r.poke_id where r.user_id = ?', (user.id,))
if df.empty:
return []
return [PokedexEntry(**d) for d in df.to_dict('records')]
def get_user_pokedex_entry(user, pkmn):
df = sql('select r.user_id, r.amount, r.shiny, p.* from roulette r left join pokemon p on p.id = r.poke_id where r.user_id = ? and r.poke_id = ? and r.shiny = ?', (user.id, pkmn.id, pkmn.shiny))
if df.empty:
return None
return PokedexEntry(**df.to_dict('records')[0])
def get_duplicate_pokedex_extries(user, rarity):
return [pkdx for pkdx in get_user_pokedex(user) if pkdx.amount > 1 and pkdx.rarity <= rarity]
def add_or_update_user_pokedex_entry_from_pokemon(user, pkmn_list, pkmn_counter):
user_pkdx = get_user_pokedex(user)
unique = list(set(pkmn_list))
new = [p for p in unique if p not in user_pkdx]
updating = [p for p in unique if p not in new]
if new:
new_chunks = chunk(new, 249)
for nc in new_chunks:
vals = []
sql_str = 'insert into roulette values '
for p in nc:
sql_str += ' (?,?,?,?),'
vals.extend((user.id, p.id, pkmn_counter[p], p.shiny))
sql(sql_str[:-1], vals)
if updating:
sql('drop table if exists tmp_roulette')
sql('create table tmp_roulette (user_id INTEGER, poke_id INTEGER, amount INTEGER, shiny INTEGER)')
pkdx_map = {pkdx.id: pkdx for pkdx in user_pkdx}
updating_chunks = chunk(updating, 249)
for uc in updating_chunks:
vals = []
sql_str = 'insert into tmp_roulette values '
for p in uc:
sql_str += ' (?,?,?,?),'
amt = pkdx_map.get(p.id).amount + pkmn_counter[p]
vals.extend((user.id, p.id, amt, p.shiny))
sql(sql_str[:-1], vals)
sql(UPDATE_ROULETTE)
def add_or_update_user_pokedex_entry_from_pokedex_entries(user, pokedex_entries):
new = [pkdx for pkdx in pokedex_entries if pkdx.amount == -1]
updating = [pkdx for pkdx in pokedex_entries if pkdx.amount >= 0]
deleting = []
if new:
new_chunks = chunk(new, 249)
for nc in new_chunks:
vals = []
sql_str = 'insert into roulette values '
for p in nc:
sql_str += ' (?,?,?,?),'
vals.extend([p.user_id, p.id, 1, p.shiny])
sql(sql_str[:-1], vals)
if updating:
sql('drop table if exists tmp_roulette')
sql('create table tmp_roulette (user_id INTEGER, poke_id INTEGER, amount INTEGER, shiny INTEGER)')
updating_chunks = chunk(updating, 249)
for uc in updating_chunks:
vals = []
sql_str = 'insert into tmp_roulette values '
for p in uc:
sql_str += ' (?,?,?,?),'
vals.extend(p.to_row)
if p.amount == 0:
deleting.append(p)
sql(sql_str[:-1], vals)
sql(UPDATE_ROULETTE)
if deleting:
sql('delete from roulette where amount = 0')
deleting_chunks = chunk(deleting, 249)
for dc in deleting_chunks:
vals = [user.id]
sql_str = 'delete from battle where user_id = ? and poke_id in ('
for p in dc:
sql_str += '?,'
vals.append(p.id)
sql_str = f'{sql_str[:-1]})'
sql(sql_str, vals)
# PokeBattle #
def get_pokemon_info(id):
df = sql('select * from poke_info where poke_id = ?', (id,))
return df.to_dict('records')[0]
def get_all_user_poke_battle(user, level=1):
df = sql('select b.user_id, b.level, b.exp, pi.hp, pi.attack, pi.defense, p.* from battle b left join poke_info pi on b.poke_id = pi.poke_id left join pokemon p on b.poke_id = p.id where b.user_id = ? and level >= ?', (user.id, level))
if df.empty:
return []
return [PokeBattle(**d) for d in df.to_dict('records')]
def get_user_poke_battle(user, poke_id):
df = sql('select b.user_id, b.level, b.exp, pi.hp, pi.attack, pi.defense, p.* from battle b left join poke_info pi on b.poke_id = pi.poke_id left join pokemon p on b.poke_id = p.id where b.user_id = ? and b.poke_id = ?', (user.id, poke_id))
if df.empty:
return None
return PokeBattle(**df.to_dict('records')[0])
def get_user_total_level(user):
pkbts = get_all_user_poke_battle(user)
if pkbts:
return sum([pkbt.level for pkbt in pkbts])
return 0
def create_user_poke_battle(user, pkmn):
pkin = get_pokemon_info(pkmn.id)
pkbt = PokeBattle(user.id, 1, 0, pkin['hp'], pkin['attack'], pkin['defense'], id=pkmn.id, name=pkmn.name, rarity=pkmn.rarity, shiny=pkmn.shiny)
sql('insert into battle values (?,?,?,?)', pkbt.pokebattle_creation_row)
return pkbt
# Daycare #
def get_user_daycare(user):
df = sql('select d.user_id, d.enter_time, d.rewards, p.* from daycare d left join pokemon p on d.poke_id = p.id where user_id = ?', (user.id,))
if df.empty:
return None
return Daycare(**df.to_dict('records')[0])
def create_user_daycare(user, pkmn):
pkdc = Daycare(user.id, dt.now(), default_rewards, **pkmn.to_dict())
sql('insert into daycare values (?,?,?,?)', pkdc.daycare_creation_row)
return pkdc
def delete_user_daycare(user):
sql('delete from daycare where user_id = ?', (user.id,))
# Badge #
def get_badges(user):
df = sql('select b.user_id, b.level, b.amount, p.* from badges b left join pokemon p on b.poke_id = p.id where user_id = ?', (user.id,))
if df.empty:
return []
return [Badge(**d) for d in df.to_dict('records')]
def get_badge(user, poke_id):
df = sql('select b.user_id, b.level, b.amount, p.* from badges b left join pokemon p on b.poke_id = p.id where user_id = ? and poke_id = ?', (user.id, poke_id))
if df.empty:
return ()
return Badge(**df.to_dict('records')[0])
def add_badge(user, poke_id):
sql('insert into badges values (?,?,?,?)', (user.id, poke_id, 1, 1))
# General #
def roll_pokemon(user):
ret = []
hunting = get_pokemon_by_id(user._hunting['poke_id']) if user._hunting['poke_id'] else None
for i in range(6):
chance = min(rarity_info.get(i+1).get('chance') * (1.1 ** user.get_upgrade('baserarity')), .99)
if random() <= chance:
if hunting and hunting.rarity == i+1:
all_pkmn = get_all_pokemon_by_rarity(i+1)
pkmn_weights = [(1 if hunting != p else min(3, 1 * ((1.02 + .02 * user.get_upgrade('huntchance')) ** max(user._hunting['caught'], 1)))) for p in all_pkmn]
pkmn = choices(all_pkmn, weights=pkmn_weights)[0]
else:
pkmn = get_random_pokemon_by_rarity(i+1)
shiny = shiny_chance * (1.1 ** user.get_upgrade('shinyrarity'))
if random() <= shiny:
pkmn.shiny = 1
ret.append(pkmn)
else:
ret.append(None)
return ret
def roll_all_pokemon(user):
tmp = []
for _ in range(int(user.stored_rolls)):
tmp.extend([1,2,3,4,5,6])
df = DataFrame(tmp, columns=['rarity'])
all_pkmn = {1: [], 2: [], 3: [], 4: [], 5: [], 6: [], 7: [], 8: [], 9: []}
for pkmn in get_all_pokemon():
all_pkmn[pkmn.rarity].append(pkmn)
caught = []
hunting = get_pokemon_by_id(user._hunting['poke_id']) if user._hunting['poke_id'] else None
user_chance = 1.1 ** user.get_upgrade('baserarity')
user_shiny = shiny_chance * 1.1 ** user.get_upgrade('shinyrarity')
for row in df.values.tolist():
chance = min(rarity_info.get(row[0]).get('chance') * user_chance, .99)
if random() <= chance:
if hunting and hunting.rarity == row[0]:
pkmn_weights = [(1 if hunting != p else min(3, 1 * ((1.02 + .02 * user.get_upgrade('huntchance')) ** max(user._hunting['caught'], 1)))) for p in all_pkmn[row[0]]]
pkmn = choices(all_pkmn[row[0]], weights=pkmn_weights)[0]
else:
pkmn = choice(all_pkmn[row[0]])
if random() <= user_shiny:
pkmn.shiny = 1
caught.append(pkmn)
else:
caught.append(None)
df['caught'] = caught
return df
def gen_result_pic(pkmn_rolls):
ids = [(p.id if p else 'x') for p in pkmn_rolls]
imgs = [Image.open(f'{BASE_PATH}/rsc/{i}.png') for i in ids]
w = sum([i.size[0] for i in imgs])
h = max([i.size[1] for i in imgs])
bg = Image.new('RGBA', (w, h), color=(255,255,255,0))
x = 0
for img in imgs:
img = img.convert('RGBA')
bg.paste(img, (x, h-img.size[1]), img)
x += img.size[0]
bg.save(f'{BASE_PATH}/rsc/tmp.png')
def get_pkmn_colour(url):
resp = r.get(url)
im = Image.open(BytesIO(resp.content))
im.thumbnail((1, 1))
return im.getpixel((0, 0))
###########
# Classes #
###########
class Pokemon:
def __init__(self, id, name, rarity, shiny=0):
self.id = id
self.name = name
self.rarity = rarity
self.shiny = shiny
@property
def url(self):
url_name = self.name
if url_name == 'NidoranF':
url_name = 'nidoran-f'
if url_name == 'NidoranM':
url_name = 'nidoran-m'
if url_name == 'Meowstic':
url_name = 'meowstic-m'
if 'Mega' in url_name:
if url_name[-1] == 'X':
suffix = 'megax'
elif url_name[-1] == 'Y':
suffix = 'megay'
else:
suffix = 'mega'
url_name = f'{self.name.split(" ")[1]}-{suffix}'
url_name = url_name.lower().replace(':','').replace('.','').replace("'",'').replace(' ','-')
if self.shiny:
return f'https://projectpokemon.org/images/shiny-sprite/{url_name}.gif'
return f'https://projectpokemon.org/images/normal-sprite/{url_name}.gif'
@property
def icon(self):
url_name = self.name
if url_name == 'NidoranF':
url_name = 'nidoran-f'
if url_name == 'NidoranM':
url_name = 'nidoran-m'
if url_name == 'Meowstic':
url_name = 'meowstic-m'
if 'Mega' in url_name:
if url_name[-1] == 'X':
suffix = 'mega-x'
elif url_name[-1] == 'Y':
suffix = 'mega-y'
else:
suffix = 'mega'
url_name = f'{self.name.split(" ")[1]}-{suffix}'
url_name = url_name.lower().replace(':','').replace('.','').replace("'",'').replace(' ','-')
if self.shiny:
return f'https://img.pokemondb.net/sprites/home/shiny/{url_name}.png'
return f'https://img.pokemondb.net/sprites/home/normal/{url_name}.png'
def embed(self, user):
pkin = get_pokemon_info(self.id)
owned = get_user_pokedex_entry(user, self)
desc = ':star:' * self.rarity + '\n'
desc += f'**PokeCash value** - {rarity_info[self.rarity]["cash"]}\n**Roll refund value** - {rarity_info[self.rarity]["rolls"]}\n'
desc += f'**HP:** {pkin["hp"]} | **ATK:** {pkin["attack"]} | **DEF:** {pkin["defense"]}\n'
if owned:
desc += f'**Owned:** Yes - **{owned.amount}**\n'
if owned in user._party:
desc += '**In Party:** Yes\n'
else:
desc += '**In Party:** No\n'
else:
desc += f'**Owned:** No\n'
user_badge = get_badge(user, self.id)
if user_badge:
desc += f'**Badge:** {user_badge.display.replace(self.name, "")}\n'
else:
desc += '**Badge:** N\n'
r, g, b, _ = get_pkmn_colour(self.icon)
return Page(f'{int(self.id)} - {self.name}', desc, colour=(r, g, b), icon=self.icon, image=self.url)
def to_dict(self):
return {
'id': self.id,
'name': self.name,
'rarity': self.rarity,
'shiny': self.shiny
}
def __eq__(self, p):
if p == None:
return False
try:
return self.user_id == p.user_id and self.id == p.id
except AttributeError:
return self.id == p.id
def __bool__(self):
return self.id > 0
def __repr__(self):
return f'Pokemon({self.id}, {self.name})'
def __str__(self):
return f'{self.id} - {self.name}'
def __hash__(self):
if '.' in str(self.id):
if str(self.id).split('.')[-1] == '0':
return int(self.id)
else:
return hash(str(self.id))
# return self.id
class PokedexEntry(Pokemon):
def __init__(self, user_id, amount, **kwargs):
super().__init__(**kwargs)
self.user_id = user_id
self.amount = amount
@property
def to_row(self):
return (self.user_id, self.id, self.amount, self.shiny)
def to_dict(self):
return {
'user_id': self.user_id,
'poke_id': self.id,
'amount': self.amount,
'shiny': self.shiny
}
class PokeBattle(Pokemon):
def __init__(self, user_id, level, exp, hp, attack, defense, **kwargs):
super().__init__(**kwargs)
self.user_id = user_id
self.level = level
self.exp = exp
self.next_lvl_exp = (self.level + 1) ** 3
self.hp = int((((2 * hp * self.level) // 100) + self.level + 10) // 1)
self.current_hp = self.hp
self.attack = int((((2 * attack * self.level) // 100) + 5) // 1)
self.defense = int((((2 * defense * self.level) // 100) + 5) // 1)
self.loaded = self.to_dict().copy()
@property
def pokebattle_creation_row(self):
return (
self.user_id,
self.id,
self.level,
self.exp
)
@classmethod
def from_id(cls, id, level=1):
pkmn = get_pokemon_by_id(id)
pkin = get_pokemon_info(pkmn.id)
return cls(1, level, 0, pkin['hp'], pkin['attack'], pkin['defense'], id=pkmn.id, name=pkmn.name, rarity=pkmn.rarity)
def embed(self, user):
pkin = get_pokemon_info(self.id)
owned = get_user_pokedex_entry(user, self)
desc = ':star:' * self.rarity + '\n'
desc += f'**PokeCash value** - {rarity_info[self.rarity]["cash"]}\n**Roll refund value** - {rarity_info[self.rarity]["rolls"]}\n'
desc += f'At lvl **{self.level}** | **HP:** {self.hp} | **ATK:** {self.attack} | **DEF:** {self.defense}\n'
if owned:
desc += f'**Owned:** Yes - **{owned.amount}**\n'
if owned in user._party:
desc += '**In Party:** Yes\n'
else:
desc += '**In Party:** No\n'
else:
desc += f'**Owned:** No\n'
user_badge = get_badge(user, self.id)
if user_badge:
desc += f'**Badge:** {user_badge.display.replace(self.name, "")}\n'
else:
desc += '**Badge:** N\n'
r, g, b, _ = get_pkmn_colour(self.icon)
return Page(f'{self.id} - {self.name}', desc, colour=(r, g, b), icon=self.icon, image=self.url)
def add_exp(self, exp):
if self.level >= 100:
self.level = 100
return False
starting_level = self.level
while exp > 0 and self.level < 100:
exp_to_lvl = max(self.next_lvl_exp - self.exp, 0)
if exp >= exp_to_lvl:
self.exp += exp_to_lvl
self.level_up()
exp -= exp_to_lvl
else:
self.exp += exp
exp = 0
return self.level > starting_level
def level_up(self):
if self.level >= 100:
return
self.level += 1
self.next_lvl_exp = (self.level + 1) ** 3
def full_health(self):
self.current_hp = self.hp
def to_dict(self):
return {
'user_id': self.user_id,
'poke_id': self.id,
'level': self.level,
'exp': self.exp
}
def update(self):
current = self.to_dict()
sql_str = 'update battle set '
col_val = []
for k in ['level', 'exp']:
if current[k] != self.loaded[k]:
col_val.append((k, current[k]))
sql_str += ', '.join([f'{col} = ?' for col, _ in col_val])
sql_str += ' where user_id = ? and poke_id = ?'
vals = [v for _, v in col_val]
vals.extend([self.user_id, self.id])
if not col_val:
return
return sql(sql_str, vals)
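# Worked example (added for clarity) of the levelling maths in add_exp(): the
# cumulative exp needed to leave level L is (L + 1) ** 3, so 8 exp reaches
# level 2 and 27 exp in total reaches level 3. The stats below are made up.
#
#   pkbt = PokeBattle(user_id=1, level=1, exp=0, hp=45, attack=49, defense=49,
#                     id=1.0, name='Bulbasaur', rarity=3)
#   pkbt.add_exp(27)
#   assert pkbt.level == 3 and pkbt.exp == 27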
class Daycare(Pokemon):
def __init__(self, user_id, enter_time, rewards, **kwargs):
super().__init__(**kwargs)
self.user_id = user_id
self._enter_time = dt.strptime(enter_time, '%Y-%m-%d %H:%M:%S') if isinstance(enter_time, str) else enter_time
self._rewards = json.loads(rewards) if isinstance(rewards, str) else rewards
self.loaded = self.to_dict().copy()
@property
def enter_time(self):
return dt.strftime(self._enter_time, '%Y-%m-%d %H:%M:%S')
@property
def rewards(self):
return json.dumps(self._rewards)
@property
def daycare_creation_row(self):
return (
self.user_id,
self.id,
self.enter_time,
self.rewards
)
def generate_rewards(self, upgrade_level):
total_seconds = (dt.now() - self._enter_time).total_seconds()
rewards = total_seconds // (43200 - 3600 * upgrade_level)
while rewards > self._rewards['rewards']:
rw = choice([1, 2, 3, 4, 5, 6])
if rw == 1:
self._rewards['levels'] += 1
elif rw == 2:
self._rewards['cash'] += rarity_info[self.rarity]['cash']
elif rw == 3:
self._rewards['rolls'] += rarity_info[self.rarity]['rolls']
self._rewards['rewards'] += 1
self.update()
def embed(self, user):
desc = 'Welcome to the Daycare!\nTo claim your pokemon use **.daycare claim**\n\n'
desc += f'**{self.name}** {":star:" * self.rarity}\n\n'
for reward, value in self._rewards.items():
if reward == 'rewards':
pass
else:
desc += f'**{reward.capitalize()}:** {value}\n'
r, g, b, _ = get_pkmn_colour(self.icon)
return Page(f'{self.id} - {self.name}', desc, colour=(r, g, b), icon=self.icon, image=self.url, footer=f'Rewards rolled: {self._rewards["rewards"]}')
def to_dict(self):
return {
'user_id': self.user_id,
'poke_id': self.id,
'enter_time': self.enter_time,
'rewards': self.rewards
}
def update(self):
current = self.to_dict()
sql_str = 'update daycare set '
col_val = []
if current['rewards'] != self.loaded['rewards']:
col_val.append(('rewards', current['rewards']))
sql_str += ', '.join([f'{col} = ?' for col, _ in col_val])
sql_str += ' where user_id = ? and poke_id = ?'
vals = [v for _, v in col_val]
vals.extend([self.user_id, self.id])
if not col_val:
return
return sql(sql_str, vals)
class Badge(Pokemon):
def __init__(self, user_id, level, amount, **kwargs):
super().__init__(**kwargs)
self.user_id = user_id
self.level = level
self.amount = amount
self.loaded = self.to_dict().copy()
@property
def display(self):
if self.level == 1:
return f':third_place: {self.name}'
if self.level == 2:
return f':second_place: {self.name}'
if self.level == 3:
return f':first_place: {self.name}'
return f':military_medal: {self.name} x{self.amount}'
def to_dict(self):
return {
'user_id': self.user_id,
'poke_id': self.id,
'level': self.level,
'amount': self.amount
}
def update(self):
current = self.to_dict()
sql_str = 'update badges set '
col_val = []
for k in ['level', 'amount']:
if current[k] != self.loaded[k]:
col_val.append((k, current[k]))
sql_str += ', '.join([f'{col} = ?' for col, _ in col_val])
sql_str += ' where user_id = ? and poke_id = ?'
vals = [v for _, v in col_val]
vals.extend([self.user_id, self.id])
if not col_val:
return
return sql(sql_str, vals)
|
austinmh12/DiscordBots
|
TestBot/test_cogs/pokerouletteFunctions/pokemon.py
|
pokemon.py
|
py
| 20,054 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23740308683
|
people = ["Domey K", "Oscarrr", "Jakee", "Crumbs", "Davers", "Jebewok", "Conrr"]
searchName = input("Searched name ")
foundName = False
currentRecordNum = 0
while foundName == False and currentRecordNum < len(people):
if searchName == people[currentRecordNum]:
foundName = True
print(f"Name: {searchName} was found in position {currentRecordNum + 1}")
else:
currentRecordNum += 1
if foundName == False:
print(f"Name: {searchName} was never found")
|
Zoobdude/Computer-science-tasks
|
searching/linearSearchWithWhile.py
|
linearSearchWithWhile.py
|
py
| 486 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73471512828
|
from gtts import gTTS  # audio handling
import os
import time
def text_in_audio(text):
'''
    Converts text to audio, saves it as audio.mp3 and plays it.
'''
try:
        # Convert the contents of text to audio.
        tts = gTTS(text=text, lang='pt-br')
        # Save the audio content to audio.mp3.
        tts.save('audio.mp3')
        # Play audio.mp3 in the background.
os.system('mpg123 audio.mp3 2> /dev/null &')
return 1
except:
return 0
def rm_audio():
'''
Remove o arquivo audio.mp3.
'''
try:
os.system('rm audio.mp3')
return 1
except:
return 0
def typing_effect(texto):
'''
Produz o efeito de digitação.
'''
try:
for letra in texto:
print(letra, end='', flush=True)
time.sleep(0.09)
return 1
except:
return 0
|
arturj9/chatgpt-python
|
utils.py
|
utils.py
|
py
| 976 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
25693208335
|
# -*- coding:utf-8 -*-
# 10.1 文件和异常
# 10.1 练习
# part_1 Python 学习笔记
# 读取整个文件 使用方法read()
filename = 'Python学习.txt'
with open(filename) as file_object:
contents = file_object.read()
print(contents.rstrip())
print("\n")
# 打印时遍历文件
with open(filename) as file_object_1:
contents_1 = file_object_1.readlines()
for line in contents_1:
print(line.rstrip())
print("\n")
# 将各行储存在一个列表中
with open(filename) as file_object_2:
contents_2 = file_object_2.readlines()
learn_py = ''
for line in contents_2:
learn_py += line.strip()
print(learn_py)
print("\n")
# 注意此处 一定要在创建变量 变量为空字符串 接for循环 在+= 这里加方法strip()
# part_2 使用方法replace()替换目标文件中的某一字符串
with open(filename) as file_replace:
lines = file_replace.read()
c = lines.replace('Python', 'C')
print(c)
# 这里只能使用方法read() 下面尝试方法readlines()
with open(filename) as file_1:
lines = file_1.readlines()
py = ''
for line in lines:
py += line.rstrip()
c = py.replace('Python', 'C')
print(c)
|
Troysps/learn_python
|
80/10.1从文件中读取数据.py
|
10.1从文件中读取数据.py
|
py
| 1,196 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37256463861
|
import torch
import torch.nn as nn
from torch.utils import data
from torch.optim import Adam, SGD
from tensorboardX import SummaryWriter
from tqdm import trange
from datetime import datetime
import argparse
from dataset import LoadADHD200
from model import SpatialActivation
def train(lr=0.001, device='cuda', epochs=10,
img_path = "./data/adhd/data/",save_path="./model/",
load_model=True, batch_size=4, load_epochs=1, encoder="se",
optim='sgd', momentum=0.9, step_size=2, gamma=0.95, parallel=False):
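    # Train the spatial-activation autoencoder on ADHD200 volumes with an MSE reconstruction loss;
    # optionally resumes from a checkpoint and supports SGD/Adam, step LR decay and DataParallel.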
AutoEncoder = SpatialActivation()
AutoEncoder.to(device)
if load_model:
AutoEncoder.load_state_dict(torch.load("{}{}_{}.pth".format(save_path, encoder, load_epochs)))
if parallel:
AutoEncoder = nn.DataParallel(AutoEncoder, device_ids=[0, 1])
if optim == 'sgd':
optimizer = SGD(AutoEncoder.parameters(), lr=lr, momentum=momentum)
elif optim == 'adam':
optimizer = Adam(AutoEncoder.parameters(), lr=lr)
mse_loss = nn.MSELoss()
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)
print("loading data......")
data_loader = data.DataLoader(LoadADHD200(img_path=img_path),
batch_size=batch_size,
shuffle=True)
print("data load complete.")
TIMESTAMP = "{0:%Y-%m-%dT%H-%M-%S/}".format(datetime.now())
writer = SummaryWriter("./logdir/" + TIMESTAMP)
for epoch in trange(1, epochs + 1):
total_loss = 0
for img, target_img in data_loader:
img = img.to(device)
decode, _, _ = AutoEncoder(img)
loss = mse_loss(decode, img)
optimizer.zero_grad()
loss.backward()
optimizer.step()
total_loss += loss.item()
total_loss = total_loss / len(data_loader)
writer.add_scalar("loss", total_loss, global_step=epoch)
writer.add_scalar("learning rate", optimizer.state_dict()['param_groups'][0]['lr'], global_step=epoch)
scheduler.step()
AutoEncoder_path = save_path + "{}_{}.pth".format(encoder, load_epochs + epoch)
if parallel:
torch.save(AutoEncoder.module.state_dict(), AutoEncoder_path)
else:
torch.save(AutoEncoder.state_dict(), AutoEncoder_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--lr', default=0.001, type=float)
parser.add_argument('--device', default='cuda', type=str)
parser.add_argument('--epochs', default=20, type=int)
parser.add_argument('--img_path', default="./data/adhd/data/", type=str)
parser.add_argument('--save_path', default="./model/", type=str)
parser.add_argument('--load_model', default=False, type=bool)
parser.add_argument('--load_epochs', default=0, type=int)
parser.add_argument('--encoder', default='se', type=str)
parser.add_argument('--batch_size', default=4, type=int)
parser.add_argument('--optim', default='sgd', type=str)
parser.add_argument('--momentum', default=0.9, type=float)
parser.add_argument('--step_size', default=2, type=int)
parser.add_argument('--gamma', default=0.95, type=float)
parser.add_argument('--parallel', default=False, type=bool)
args = parser.parse_args()
train(lr=args.lr, device=args.device, epochs=args.epochs,
img_path=args.img_path, save_path=args.save_path,
load_model=args.load_model, batch_size=args.batch_size,
load_epochs=args.load_epochs, encoder=args.encoder,
optim=args.optim, momentum=args.momentum,
step_size=args.step_size, gamma=args.gamma, parallel=args.parallel)
|
WhatAboutMyStar/SCAAE
|
train.py
|
train.py
|
py
| 3,360 |
python
|
en
|
code
| 4 |
github-code
|
6
|
41554384385
|
import sys
import nimfa
import numpy as np
import scipy.sparse as sp
import pandas as pd
import gc
import os
import math
import mysql.connector
import random
import collections
from scipy.sparse.linalg import svds
from sklearn.model_selection import KFold
from multiprocessing import Pool
# import module
import machine_learning as ml
import evaluation as ev
import parsonal_value as pv
#np.set_printoptions(threshold=np.inf)
# Default values
CPU = 1
dataset = "movie.review"
eta0 = 0.45
repeate = 10
sepalate = 1
attribute = 5
def learning(method, train_matrix, train_index, data, user_list, item_list):
if method == "SVD":
u, s, vt = svds(train_matrix, k=attribute)
np.savetxt("u.csv", u, delimiter=",")
np.savetxt("s.csv", s, delimiter=",")
np.savetxt("vt.csv", vt, delimiter=",")
s_diag_matrix = np.diag(s)
return u
elif method == "ML3_liner":
u, v = pv.rmrate(train_index, data, user_list, item_list, attribute)
R = ml.pv_ml3(train_matrix, eta0, u, v, attribute)
return u
def learning2(method, train_matrix, train_index, data, user_list, item_list, u2):
if method == "SVD":
u, s, vt = svds(train_matrix, k=attribute)
s_diag_matrix = np.diag(s)
return np.dot(np.dot(u2, s_diag_matrix), vt)
elif method == "ML3_liner":
u, v = pv.rmrate(train_index, data, user_list, item_list, attribute)
R = ml.pv_ml3(train_matrix, eta0, u, v, attribute)
return np.dot(np.dot(u2, R), v.T)
#=======================================================================================================================
# Name : makeMatrix
# Argument : ALL ... All data from numpy
# Purpose ... purpose index
# Role : make user-item matrix from evaluation format
#=======================================================================================================================
def makeMatrix(data, index, user_list, item_list):
# lil matrix is a sparse matrix format (eliminated zero values)
matrix = sp.lil_matrix((len(user_list), len(item_list)))
# translate numpy into Dataframe
data = pd.DataFrame(assign_index(data, index))
for line in data.itertuples():
# line[1] ... userID
# line[2] ... itemID
# line[3] ... rating value
matrix[line[1], line[2]] = line[3]
return matrix.tocsr()
#=======================================================================================================================
# Name : assign_index
# Argument : ALL ... All data from numpy
# Purpose ... purpose index
# Role : assign separated index data into numpy format
#=======================================================================================================================
def assign_index(ALL, Purpose):
# attribute + 3 equals dataset format; user_ID, item_ID, time, attributes
# Assigned ... all data in numpy format
Assigned = np.zeros((len(Purpose), attribute + 3)).astype(np.int64)
for i, j in enumerate(Purpose):
Assigned[i] = ALL[j]
return Assigned
#=======================================================================================================================
# Name : users_in_testdata
# Argument : n ... top-N recommendation count
# test_matrix ... matrix whose elements are only from the test data
# user_list ... user's ID
# Role : make users list (the number of evaluations in test data is more than n)
#=======================================================================================================================
def users_in_testdata(n, test_matrix, user_list):
test_user_list = np.zeros(len(user_list)).astype(np.int64)
test_matrix = test_matrix.todense()
for i,t in enumerate(test_matrix):
if(t[t.nonzero()].size >= n):
test_user_list[i] = 1
return test_user_list
def nDCG(n, pred, test, user_list, item_list, count_dict):
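    # Per-user nDCG@n: DCG of the top-n predicted items divided by the ideal DCG of the true ratings;
    # also bins users by nDCG value and by how many test items they have.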
# initialization
nDCG = np.zeros(len(user_list))
D02 = 0
D04 = 0
D06 = 0
D08 = 0
D10 = 0
l = 0
count = np.array([0.,0.,0.,0.,0.,0.,0.])
count_c = np.array([0,0,0,0,0,0,0])
for i,p in enumerate(pred):
# user : i
# predicted score : p
# initialize DCG and iDCG
DCG = 0.
iDCG = 0.
# extract test data for user i
t = test[i]
# ground_truth : non zero list for test data
ground_truth = t.nonzero()
# predicted score corresponding to test data
p_test = p[ground_truth]
# ranking of predicted score
ranking_p_arg = np.argsort(p_test)[::-1]
# item ID of test data
test_item = item_list[ground_truth]
# test data rating
truth = t[ground_truth]
# ranking of test data's ratings
ranking_t = np.sort(truth)[::-1]
# the number of evaluation in test data more than n
if len(ranking_p_arg) >= n:
# j : recommendation result of top-N
# k : item ID in test data
for j in range(n):
for k in range(len(test_item)):
# calculate DCG
if k == ranking_p_arg[j]:
if j == 0:
DCG = truth[k]
else:
DCG = DCG + (truth[k] / math.log(j + 1, 2))
# calculate iDCG
if j == 0:
iDCG = ranking_t[j]
else:
iDCG = iDCG + (ranking_t[j] / math.log(j + 1, 2))
            # calculate nDCG
nDCG[i] = DCG / iDCG
if nDCG[i] <= 0.2:
D02 = D02 + 1
elif nDCG[i] <= 0.4:
D04 = D04 + 1
elif nDCG[i] <= 0.6:
D06 = D06 + 1
elif nDCG[i] <= 0.8:
D08 = D08 + 1
else:
D10 = D10 + 1
if len(ranking_p_arg) <= 3:
count[0] = count[0] + nDCG[i]
count_c[0] = count_c[0] + 1
elif len(ranking_p_arg) <= 6:
count[1] = count[1] + nDCG[i]
count_c[1] = count_c[1] + 1
elif len(ranking_p_arg) <= 10:
count[2] = count[2] + nDCG[i]
count_c[2] = count_c[2] + 1
elif len(ranking_p_arg) <= 20:
count[3] = count[3] + nDCG[i]
count_c[3] = count_c[3] + 1
elif len(ranking_p_arg) <= 30:
count[4] = count[4] + nDCG[i]
count_c[4] = count_c[4] + 1
elif len(ranking_p_arg) <= 40:
count[5] = count[5] + nDCG[i]
count_c[5] = count_c[5] + 1
else:
count[6] = count[6] + nDCG[i]
count_c[6] = count_c[6] + 1
count = count / count_c
return nDCG, np.mean(nDCG), np.std(nDCG), np.max(nDCG), np.min(nDCG), D02, D04, D06, D08, D10, count, count_c
def precision(n, pred, test, user_list, item_list, count_dict):
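    # Per-user precision@n and recall@n, where an item counts as relevant if its true rating is >= 4;
    # also tracks score distributions and how often items with few test ratings get recommended.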
# initialization
precision = np.zeros(len(user_list))
recall = np.zeros(len(user_list))
p00 = 0
p033 = 0
p066 = 0
p100 = 0
r02 = 0
r04 = 0
r06 = 0
r08 = 0
r10 = 0
count_pre = np.array([0.,0.,0.,0.,0.,0.,0.])
count_c_pre = np.array([0,0,0,0,0,0,0])
count_rec = np.array([0.,0.,0.,0.,0.,0.,0.])
count_c_rec = np.array([0,0,0,0,0,0,0])
count_recom = np.array([0.,0.,0.,0.,0.,0.,0.])
count_recom2 = np.array([0.,0.,0.,0.,0.,0.,0.])
x = np.array( [] )
for i, p in enumerate(pred):
#initialization
tp = 0
fp = 0
truth_all = 0
t = test[i]
ground_truth = t.nonzero()
ground_truth2 = p.nonzero()
ranking_p_arg = np.argsort(p[ground_truth])[::-1]
ranking_p_arg2 = np.argsort(p[ground_truth2])[::-1]
test_item = item_list[ground_truth]
if len(ranking_p_arg2) >= 3:
print(i,item_list[ranking_p_arg2[0:3]])
if i == 0:
x = np.append( x, ranking_p_arg2 )
else:
for v in range(n):
if count_dict[item_list[ranking_p_arg2[v]]] <= 3:
count_recom[0] += 1
if ranking_p_arg2[v] not in x:
x = np.append( x, ranking_p_arg2[v] )
count_recom2[0] += 1
elif count_dict[item_list[ranking_p_arg2[v]]] <= 6:
count_recom[1] += 1
if ranking_p_arg2[v] not in x:
x = np.append( x, ranking_p_arg2[v] )
count_recom2[1] += 1
elif count_dict[item_list[ranking_p_arg2[v]]] <= 10:
count_recom[2] += 1
if ranking_p_arg2[v] not in x:
x = np.append( x, ranking_p_arg2[v] )
count_recom2[2] += 1
elif count_dict[item_list[ranking_p_arg2[v]]] <= 20:
count_recom[3] += 1
if ranking_p_arg2[v] not in x:
x = np.append( x, ranking_p_arg2[v] )
count_recom2[3] += 1
elif count_dict[item_list[ranking_p_arg2[v]]] <= 30:
count_recom[4] += 1
if ranking_p_arg2[v] not in x:
x = np.append( x, ranking_p_arg2[v] )
count_recom2[4] += 1
elif count_dict[item_list[ranking_p_arg2[v]]] <= 40:
count_recom[5] += 1
if ranking_p_arg2[v] not in x:
x = np.append( x, ranking_p_arg2[v] )
count_recom2[5] += 1
else:
count_recom[6] += 1
if ranking_p_arg2[v] not in x:
x = np.append( x, ranking_p_arg2[v] )
count_recom2[6] += 1
if len(ranking_p_arg) >= 3:
# true ratings
truth = t[ground_truth]
for j in range(n):
for k in range(len(test_item)):
if k == ranking_p_arg[j]:
# good impression for items
if truth[k] >= 4.:
tp = tp + 1.0
# bad impression
else:
fp = fp + 1.0
# all items having good impression
for j in range(len(truth)):
if truth[j] >= 4.0:
truth_all += 1
# calculate precision
precision[i] = tp / (tp + fp)
# calculate recall
if truth_all > 0:
recall[i] = tp / truth_all
if precision[i] == 0:
p00 = p00 + 1
elif precision[i] < 0.4:
p033 = p033 + 1
elif precision[i] < 0.7:
p066 = p066 + 1
else:
p100 = p100 + 1
if recall[i] <= 0.2:
r02 = r02 + 1
elif recall[i] <= 0.4:
r04 = r04 + 1
elif recall[i] <= 0.6:
r06 = r06 + 1
elif recall[i] <= 0.8:
r08 = r08 + 1
else:
r10 = r10 + 1
if len(ranking_p_arg) <= 3:
count_pre[0] = count_pre[0] + precision[i]
count_c_pre[0] = count_c_pre[0] + 1
elif len(ranking_p_arg) <= 6:
count_pre[1] = count_pre[1] + precision[i]
count_c_pre[1] = count_c_pre[1] + 1
elif len(ranking_p_arg) <= 10:
count_pre[2] = count_pre[2] + precision[i]
count_c_pre[2] = count_c_pre[2] + 1
elif len(ranking_p_arg) <= 20:
count_pre[3] = count_pre[3] + precision[i]
count_c_pre[3] = count_c_pre[3] + 1
elif len(ranking_p_arg) <= 30:
count_pre[4] = count_pre[4] + precision[i]
count_c_pre[4] = count_c_pre[4] + 1
elif len(ranking_p_arg) <= 40:
count_pre[5] = count_pre[5] + precision[i]
count_c_pre[5] = count_c_pre[5] + 1
else:
count_pre[6] = count_pre[6] + precision[i]
count_c_pre[6] = count_c_pre[6] + 1
if len(ranking_p_arg) <= 3:
count_rec[0] = count_rec[0] + recall[i]
count_c_rec[0] = count_c_rec[0] + 1
elif len(ranking_p_arg) <= 6:
count_rec[1] = count_rec[1] + recall[i]
count_c_rec[1] = count_c_rec[1] + 1
elif len(ranking_p_arg) <= 10:
count_rec[2] = count_rec[2] + recall[i]
count_c_rec[2] = count_c_rec[2] + 1
elif len(ranking_p_arg) <= 20:
count_rec[3] = count_rec[3] + recall[i]
count_c_rec[3] = count_c_rec[3] + 1
elif len(ranking_p_arg) <= 30:
count_rec[4] = count_rec[4] + recall[i]
count_c_rec[4] = count_c_rec[4] + 1
elif len(ranking_p_arg) <= 40:
count_rec[5] = count_rec[5] + recall[i]
count_c_rec[5] = count_c_rec[5] + 1
else:
count_rec[6] = count_rec[6] + recall[i]
count_c_rec[6] = count_c_rec[6] + 1
count_pre = count_pre / count_c_pre
count_rec = count_rec / count_c_rec
return precision,recall,precision.mean(), precision.std(), precision.max(), precision.min(), recall.mean(), recall.std(), recall.max(), recall.min(), p00, p033, p066, p100, r02, r04, r06, r08, r10, count_pre, count_c_pre, count_rec, count_c_rec, count_recom , count_recom2
def search_lt_n(n, test_data):
lt_n = 0
for t in test_data:
if t[t.nonzero()].shape[0] < n:
lt_n = lt_n + 1
return lt_n
def calculate(method):
a=0.0
b=0.0
c=0.0
eta0 = 0.45
set_data = sys.argv[1]
setting = sys.argv[2]
Pat3_ave = np.zeros((10, 3))
Pat3_std = np.zeros((10, 3))
Pat3_max = np.zeros((10, 3))
Pat3_min = np.zeros((10, 3))
Rat3_ave = np.zeros((10,3))
Rat3_std = np.zeros((10,3))
Rat3_max = np.zeros((10,3))
Rat3_min = np.zeros((10,3))
nDCGat3_ave = np.zeros((10,3))
nDCGat3_std = np.zeros((10,3))
nDCGat3_max = np.zeros((10,3))
nDCGat3_min = np.zeros((10,3))
P0 = np.zeros((10,3)).astype(np.int64)
P03 = np.zeros((10,3)).astype(np.int64)
P06 = np.zeros((10,3)).astype(np.int64)
P10 = np.zeros((10,3)).astype(np.int64)
D02 = np.zeros((10,3)).astype(np.int64)
D04 = np.zeros((10,3)).astype(np.int64)
D06 = np.zeros((10,3)).astype(np.int64)
D08 = np.zeros((10,3)).astype(np.int64)
D10 = np.zeros((10,3)).astype(np.int64)
R02 = np.zeros((10,3)).astype(np.int64)
R04 = np.zeros((10,3)).astype(np.int64)
R06 = np.zeros((10,3)).astype(np.int64)
R08 = np.zeros((10,3)).astype(np.int64)
R10 = np.zeros((10,3)).astype(np.int64)
lt_3 = np.zeros((10,3)).astype(np.int64)
c_pre = np.array([0.,0.,0.,0.,0.,0.,0.])
c_rec = np.array([0.,0.,0.,0.,0.,0.,0.])
c_dcg = np.array([0.,0.,0.,0.,0.,0.,0.])
pre_count = np.array([0,0,0,0,0,0,0])
rec_count = np.array([0,0,0,0,0,0,0])
dcg_count = np.array([0,0,0,0,0,0,0])
if setting == '4':
#setting 4
user_Mu = np.loadtxt("./genre"+ set_data +"/data/d11/user.csv",delimiter=",").astype(np.int64)
user_Mv = np.loadtxt("./genre"+ set_data +"/data/d22/user.csv",delimiter=",").astype(np.int64)
test_user = np.loadtxt("./genre"+ set_data +"/data/d12/user.csv",delimiter=",").astype(np.int64)
item_Mu = np.loadtxt("./genre"+ set_data +"/data/d11/item.csv",delimiter=",").astype(np.int64)
item_Mv = np.loadtxt("./genre"+ set_data +"/data/d22/item.csv",delimiter=",").astype(np.int64)
test_item = np.loadtxt("./genre"+ set_data +"/data/d12/item.csv",delimiter=",").astype(np.int64)
data_Mu = np.loadtxt("./genre"+ set_data +"/data/d11/data.csv",delimiter=",").astype(np.int64)
data_Mv = np.loadtxt("./genre"+ set_data +"/data/d22/data.csv",delimiter=",").astype(np.int64)
test_data = np.loadtxt("./genre"+ set_data +"/data/d12/data.csv",delimiter=",").astype(np.int64)
train_index = np.loadtxt("./genre"+ set_data +"/data/d11/index.csv",delimiter=",").astype(np.int64)
train_index2 = np.loadtxt("./genre"+ set_data +"/data/d22/index.csv",delimiter=",").astype(np.int64)
train_index3 = np.loadtxt("./genre"+ set_data +"/data/d12/index.csv",delimiter=",").astype(np.int64)
elif setting == '5':
#setting 5
user_Mu = np.loadtxt("./genre"+ set_data +"/data/d22/user.csv",delimiter=",").astype(np.int64)
user_Mv = np.loadtxt("./genre"+ set_data +"/data/d11/user.csv",delimiter=",").astype(np.int64)
test_user = np.loadtxt("./genre"+ set_data +"/data/d21/user.csv",delimiter=",").astype(np.int64)
item_Mu = np.loadtxt("./genre"+ set_data +"/data/d22/item.csv",delimiter=",").astype(np.int64)
item_Mv = np.loadtxt("./genre"+ set_data +"/data/d11/item.csv",delimiter=",").astype(np.int64)
test_item = np.loadtxt("./genre"+ set_data +"/data/d21/item.csv",delimiter=",").astype(np.int64)
data_Mu = np.loadtxt("./genre"+ set_data +"/data/d22/data.csv",delimiter=",").astype(np.int64)
data_Mv = np.loadtxt("./genre"+ set_data +"/data/d11/data.csv",delimiter=",").astype(np.int64)
test_data = np.loadtxt("./genre"+ set_data +"/data/d21/data.csv",delimiter=",").astype(np.int64)
train_index = np.loadtxt("./genre"+ set_data +"/data/d22/index.csv",delimiter=",").astype(np.int64)
train_index2 = np.loadtxt("./genre"+ set_data +"/data/d11/index.csv",delimiter=",").astype(np.int64)
train_index3 = np.loadtxt("./genre"+ set_data +"/data/d21/index.csv",delimiter=",").astype(np.int64)
else:
print("Select setting.")
sys.exit()
for i in range(repeate):
j = 0
Mu_matrix = makeMatrix(data_Mu, train_index, user_Mu, item_Mu)
u = learning(method, Mu_matrix, train_index, data_Mu, user_Mu, item_Mu)
Mv_matrix = makeMatrix(data_Mv, train_index2, user_Mv, item_Mv)
pred = learning2(method, Mv_matrix, train_index2, data_Mv, user_Mv, item_Mv, u)
np.savetxt("./result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/pred_temp.csv", pred, delimiter=",")
test_matrix = makeMatrix(test_data, train_index3, test_user, test_item)
test_users = users_in_testdata(3, test_matrix, test_user)
# calculating precision, recall, and nDCG using "pred"
lt_3[i,j] = search_lt_n(3, test_matrix)
count_dict = collections.Counter(test_data[:,1])
pre,rec,Pat3_ave[i,j], Pat3_std[i,j], Pat3_max[i,j], Pat3_min[i,j], Rat3_ave[i,j], Rat3_std[i,j], Rat3_max[i,j], Rat3_min[i,j], P0[i,j], P03[i,j], P06[i,j], P10[i,j], R02[i,j], R04[i,j], R06[i,j], R08[i,j], R10[i,j], new_c_pre, new_pre_count, new_c_rec, new_rec_count, recom, recom2 = precision(3, pred, np.array(test_matrix.todense()), test_user, test_item,count_dict)
dcg, nDCGat3_ave[i,j], nDCGat3_std[i,j], nDCGat3_max[i,j], nDCGat3_min[i,j], D02[i,j], D04[i,j], D06[i,j], D08[i,j], D10[i,j], new_c_dcg, new_dcg_count = nDCG(3, pred, np.array(test_matrix.todense()), test_user, test_item,count_dict)
c_pre = c_pre + new_c_pre
c_rec = c_rec + new_c_rec
c_dcg = c_dcg + new_c_dcg
pre_count = pre_count + new_pre_count
rec_count = rec_count + new_rec_count
dcg_count = dcg_count + new_dcg_count
print("count:" + str(i) + ", precision=" + str(np.mean(pre[test_users.nonzero()])) + ", recall=" + str(np.mean(rec[test_users.nonzero()])) +", nDCG=" + str(np.mean(dcg[test_users.nonzero()])))
a += np.mean(pre[test_users.nonzero()])
b += np.mean(rec[test_users.nonzero()])
c += np.mean(dcg[test_users.nonzero()])
#del pred
#del train_matrix
#del test_matrix
gc.collect()
j = j + 1
c_pre = c_pre / 10
c_rec = c_rec / 10
c_dcg = c_dcg / 10
pre_count = pre_count / 10
rec_count = rec_count / 10
dcg_count = dcg_count / 10
print(c_pre)
print("Precision AVE : " + str(a / 10))
print("Recall AVE : " + str(b / 10))
print("nDCG AVE : " + str(c / 10))
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/Pat3_ave.npy", Pat3_ave)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/Pat3_std.npy", Pat3_std)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/Pat3_max.npy", Pat3_max)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/Pat3_min.npy", Pat3_min)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/Rat3_ave.npy", Rat3_ave)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/Rat3_std.npy", Rat3_std)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/Rat3_max.npy", Rat3_max)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/Rat3_min.npy", Rat3_min)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/nDCGat3_ave.npy", nDCGat3_ave)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/nDCGat3_std.npy", nDCGat3_std)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/nDCGat3_max.npy", nDCGat3_max)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/nDCGat3_min.npy", nDCGat3_min)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/P00.npy", P0)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/P03.npy", P03)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/P06.npy", P06)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/P10.npy", P10)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/D02.npy", D02)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/D04.npy", D04)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/D06.npy", D06)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/D08.npy", D08)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/D10.npy", D10)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/R02.npy", R02)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/R04.npy", R04)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/R06.npy", R06)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/R08.npy", R08)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/R10.npy", R10)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/lt_3.npy", lt_3)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/c_pre.npy", c_pre)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/c_rec.npy", c_rec)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/c_dcg.npy", c_dcg)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/pre_count.npy", pre_count)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/rec_count.npy", rec_count)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/dcg_count.npy", dcg_count)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/recom.npy", recom)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/recom2.npy", recom2)
if __name__ == "__main__":
# Pool : the number of CPU.
p = Pool(CPU)
'''
methods = ["SVD", "NMF", "RMrate_liner", "D1_liner", "D2_liner", "D3_liner", "D4_liner", "D5_liner", "RMrate_square",
"D1_square", "D2_square", "D3_square", "D4_square", "D5_square", "ML1_liner", "ML2_liner", "ML3_liner",
"ML1_square", "ML2_square", "ML3_square"]
'''
methods = ["SVD","ML3_liner"]
p.map(calculate,methods)
print("Program completed...")
|
Saito2982/CrossDomain
|
plot_domain.py
|
plot_domain.py
|
py
| 22,614 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34344103076
|
from random import randint as rand
import math
def MenuSelection(array):
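    # Print the options and loop until a valid numeric index is entered;
    # returns the chosen key for a dict or the chosen element for a list.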
while True:
listPrint(array)
print("\n>>")
choice = input()
try:
choice = int(choice)
if(type(array) == dict):
choice = list(array.keys())[choice]
else:
choice = array[choice]
except:
print("Bad stuff")
print()
continue
break
return choice
def listPrint(array, indent = False):
if(type(array) == dict):
for index, key in enumerate(array):
print(str(index) + " = " + str(key))
if (type(array[key]) == dict):
listPrint(zip(list(array[key].keys()), list(array[key].values())), True)
else:
listPrint(array[key], True)
print("\n")
else:
for index, item in enumerate(array):
if type(item) != str and type(item) != int:
temp = ""
for x in item[:-1]:
temp += str(x) + " -- "
temp += str(item[len(item) - 1])
item = temp
print((str(index) + " = " + str(item)) if not indent else ("|>" + " " + str(item)) )
class Entity():
def __init__(self):
self.maxHp = 0
self.hp = 0
self.Maxmana = 0
self.mana = 0
self.critChance = 0
self.critDamage = 0
def __str__(self):
return f"{self.hp} / {self.maxHp}"
class Player(Entity):
def __init__(self, jem, startingClass: dict = {} ):
super().__init__()
#vals = list(startingClass.values())
self.hpStat = jem
self.memoryStat = jem
self.strengthStat = jem
self.visionStat = jem
self.dexterityStat = jem
self.arcaneStat = jem
self.ReCalculateStats()
def ReCalculateStats(self):
initialise = False
if (self.maxHp > 0):
initialise = True
hpRatio = (self.hp / self.maxHp)
manaRatio = (self.mana / self.maxMana)
self.maxHp = round(((10 + (math.log(self.hpStat + 3) - .5) * 6) + (self.hpStat/2)) * 10) # 0 = 136, 7 = 243, 20 = 358
self.maxMana = round(((10 + (math.log(self.memoryStat + 3) - .5) * 6) + (self.hpStat/2)) * 5)
self.critChance = round(((self.visionStat/5) + 2 ) * 10)
self.critDamage = (round(math.log(self.dexterityStat, 10) * 1.2 + (self.dexterityStat/30), 2) if self.dexterityStat > 0 else 0) # Log returning strange result
if self.critDamage < 1:
self.critDamage = 1
if (initialise):
self.hp = self.maxHp * hpRatio
self.mana = self.maxMana * manaRatio
def MainMenu():
print("Welome to RPG\nWhat is your Name?\n\n>>")
playerName = input()
print("What would you like your starting class to be?")
########
# Stats
# Hp --> Increases max hp
# Memory --> Increases max mana
# Strength --> Increases damage of melee attacks
# Vision --> Increases critical hit chance
# Dexterity --> Increases critical hit damage
# Arcane --> Increases magic effectiveness - Damage, status effects etc
########
# Each class should start off with 25 stat points allocated
    ##### PUT THIS IN A JSON AT SOME POINT
classes = {
"Warrior":{
"Hp": 7,
"Memory": 2,
"Strength": 7,
"Vision": 3,
"Dexterity": 4,
"Arcane": 2
},
"Mage":{
"Hp": 4,
"Memory": 8,
"Strength": 2,
"Vision": 1,
"Dexterity": 2,
"Arcane": 8
},
"Thief":{
"Hp": 5,
"Memory": 1,
"Strength": 6,
"Vision": 6,
"Dexterity": 6,
"Arcane": 1
}
}
# Warrior has raw stats
# Mages can use magic
# Thieves have a chance to avoid damage as well as enhanced crits
SelectedClass = MenuSelection(classes)
print("\n\n ~~Stats~~\n")
for stat, val in zip(list(classes[SelectedClass].keys()), list(classes[SelectedClass].values())):
print(str(stat) + " --> " + str(val))
for x in range(51):
print(str(x) + " = " + str(Player(x).maxHp), end= " --- ")
print(str(x) + " = " + str(Player(x).maxMana), end= " --- ")
print(str(x) + " = " + str(Player(x).critChance), end= " --- ")
print(str(x) + " = " + str(Player(x).critDamage), end= " --- ")
print()
MainMenu()
|
LordMagusar/Python-RPG
|
main.py
|
main.py
|
py
| 3,831 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1433290010
|
import pytest
import stk
from .case_data import CaseData
@pytest.fixture(
scope="session",
params=(
lambda name: CaseData(
molecule=stk.BuildingBlock("C1=CC=CC=C1"),
sub_group_data={
"c6_planarity": [2.7518147481201438e-06],
"c5n1_planarity": [],
"x5_planarity": [],
"c#c_angle": [],
},
name=name,
),
lambda name: CaseData(
molecule=stk.BuildingBlock("C1N=CC(CCC2CCOC2)N=1"),
sub_group_data={
"c6_planarity": [],
"c5n1_planarity": [],
"x5_planarity": [1.3688005804646254e-06, 0.932064037529801],
"c#c_angle": [],
},
name=name,
),
lambda name: CaseData(
molecule=stk.BuildingBlock("C1=CC=C(C=C1)C#CC2=CN=CC=C2"),
sub_group_data={
"c6_planarity": [8.41286151020968e-08],
"c5n1_planarity": [5.678704369238556e-08],
"x5_planarity": [],
"c#c_angle": [179.00063441359868],
},
name=name,
),
),
)
def case_data(request) -> CaseData:
return request.param(
f"{request.fixturename}{request.param_index}",
)
|
JelfsMaterialsGroup/stko
|
tests/molecular/subgroup/conftest.py
|
conftest.py
|
py
| 1,311 |
python
|
en
|
code
| 18 |
github-code
|
6
|
43344955993
|
'''
This application service return tracks data to visualisation.
'''
import time
import math
from collections import defaultdict
from twisted.application.service import Service
from twisted.internet import defer
from twisted.python import log
import simplejson as json
__author__ = 'Boris Tsema'
# Select track data.
SELECT_DATA = """
SELECT
t.timestamp,
string_agg(
concat_ws(',', tg.track_label, t.lat::text, t.lon::text, t.alt::text, t.v_speed::text, t.g_speed::text, t.distance::text),
';')
FROM
track_data t,
tracks_group tg
WHERE
t.id = tg.track_id AND
tg.group_id = %s AND
t.timestamp BETWEEN %s AND %s
GROUP BY
t.timestamp
ORDER BY
t.timestamp;
"""
# Select track state.
SELECT_DATA_SNAPSHOTS = """
SELECT
s.timestamp,
s.snapshot,
tg.track_label
FROM
track_snapshot s,
tracks_group tg
WHERE
s.id = tg.track_id AND
tg.group_id = %s AND
s.timestamp BETWEEN %s AND %s;
"""
# Select track data just for some tracks.
SELECT_DATA_BY_LABEL = """
SELECT
t.timestamp,
string_agg(
concat_ws(',', tg.track_label, t.lat::text, t.lon::text, t.alt::text, t.v_speed::text, t.g_speed::text, t.distance::text),
';')
FROM
track_data t,
tracks_group tg
WHERE
t.id = tg.track_id AND
tg.group_id = %s AND
t.timestamp BETWEEN %s AND %s AND
tg.track_label in %s
GROUP BY
t.timestamp
ORDER BY
t.timestamp;
"""
# Select track state changes for some tracks.
SELECT_DATA_SNAPSHOTS_BY_LABEL = """
SELECT
s.timestamp,
s.snapshot,
tg.track_label
FROM
track_snapshot s,
tracks_group tg
WHERE
s.id = tg.track_id AND
tg.group_id = %s AND
s.timestamp BETWEEN %s AND %s AND
tg.track_label in %s;
"""
# Select last track point in the past for every track.
GET_HEADERS_DATA = """
WITH tdata AS (
SELECT
timestamp,
concat_ws(',', lat::text, lon::text, alt::text, v_speed::text, g_speed::text, distance::text) as data,
td.id,
row_number() OVER(PARTITION BY td.id ORDER BY td.timestamp DESC) AS rk
FROM track_data td,
tracks_group tg
WHERE
td.id = tg.track_id
AND tg.group_id = %s
AND td."timestamp" BETWEEN %s AND %s)
SELECT
tg.track_label, t.data, t.timestamp
FROM
tdata t,
tracks_group tg
WHERE
t.rk = 1 AND
tg.track_id = t.id;
"""
# Select last state in the past for every track.
GET_HEADERS_SNAPSHOTS = """
WITH
snaps AS (
SELECT
snapshot,
timestamp,
ts.id AS id,
tg.track_label as track_label,
row_number() OVER(PARTITION BY ts.id ORDER BY ts.timestamp DESC) AS rk
FROM
track_snapshot ts,
tracks_group tg
WHERE
ts.id = tg.track_id AND
tg.group_id = %s
AND ts.timestamp <= %s)
SELECT
s.track_label, s.snapshot, s.timestamp
FROM
snaps s,
tracks_group tg
WHERE
s.rk < 4
AND s.id = tg.track_id;
"""
class TrackVisualizationService(Service):
# Don't show pilots earlier then time - track_gap. In seconds
track_gap = 15000
def __init__(self, pool):
self.pool = pool
def startService(self):
Service.startService(self)
log.msg("Starting DB pool")
return self.pool.start()
def stopService(self):
Service.stopService(self)
return self.pool.close()
@defer.inlineCallbacks
def get_track_data(self, params):
'''
Return dict with track data according to specified protocol.
        @param params: request parameters, consisting of group_id (domain id of
        tracks group), from_time (unixtime), to_time (unixtime),
        start_positions (whether or not to show the track's last position in the past),
track_labels (return result only for tracks with specified labels).
@type params: dict
@return:
@rtype: dict
'''
# TODO: pass keyword arguments into function instead of dictionary.
result = dict()
group_id = params['group_id']
from_time = int(params['from_time'])
to_time = int(params['to_time'])
start_positions = params.get('start_positions')
track_labels = params.get('track_labels', '')
t1 = time.time()
if track_labels:
track_labels = track_labels.split(',')
tracks = yield self.pool.runQuery(SELECT_DATA_BY_LABEL,
(group_id, from_time,
to_time, tuple(track_labels)))
snaps = yield self.pool.runQuery(SELECT_DATA_SNAPSHOTS_BY_LABEL,
(group_id, from_time, to_time, tuple(track_labels)))
else:
tracks = yield self.pool.runQuery(SELECT_DATA,
(group_id, from_time,
to_time))
snaps = yield self.pool.runQuery(SELECT_DATA_SNAPSHOTS,
(group_id, from_time, to_time))
t2 = time.time()
result['timeline'] = self.prepare_result(tracks, snaps)
log.msg("data requested in %0.3f" % (t2 - t1))
if start_positions:
ts1 = time.time()
hdata = yield self.pool.runQuery(GET_HEADERS_DATA, (group_id,
from_time - self.track_gap, from_time))
hsnaps = yield self.pool.runQuery(GET_HEADERS_SNAPSHOTS,
(group_id, from_time))
ts2 = time.time()
start_data = self.prepare_start_data(hdata, hsnaps)
result['start'] = start_data
log.msg("start positions requested in %0.3f" % (ts2 - ts1))
defer.returnValue(result)
def prepare_start_data(self, hdata, hsnaps):
'''
Prepare last state of tracks from their coordinates and snapshots.
@param hdata: (contest_number, data, timestamp)
@type hdata: list of tuples
@param hsnaps: (contest_number, snapshot, timestamp)
@type hsnaps: list of tuples
@return: {'contest_number':{'data':[alt, lon, ...],
'state':'finished', 'statechanged_at': 2134} - that's not true
@rtype:
'''
# TODO: make this method static.
result = defaultdict(dict)
# Add last coords and speeds to result.
for row in hdata:
cont_number, data, timestamp = row
result[cont_number] = parse_result(data.split(','))
# Add last state to result.
for row in hsnaps:
cont_number, state, state_ts = row
try:
state = json.loads(state)
if 'in_air_true' in state:
result[cont_number]['in_air'] = True
del state[state.index('in_air_true')]
if 'in_air_false' in state:
result[cont_number]['in_air'] = False
del state[state.index('in_air_false')]
if 'es_taken' in state:
result[cont_number]['finish_time'] = int(state_ts)
if len(state) > 0:
# XXX: workaround
if result[cont_number].get('state') == 'finished':
continue
if result[cont_number].get('state') == 'es_taken' and \
not state[0] == 'finished':
continue
result[cont_number]['state'] = state[0]
except:
continue
for contest_number in result:
if not result[contest_number].has_key('state'):
result[contest_number]['state'] = 'not started'
if not result[contest_number].has_key('in_air'):
# TODO: everybody in air by default, is it ok?
result[contest_number]['in_air'] = True
return result
def prepare_result(self, tracks, snaps):
'''
@param tracks: [(timestamp, contest_number,lat,lon,
...;contest_number,lat,lon..),...]
@param snaps: [(timestamp, snapshot, contest_number), ...]
@type tracks: list of tuple
@return:{timestamp:{'contnumber':[lat,lon...], },}
@rtype:
'''
        # TODO: does this method need to be part of the interface,
        # or can it be static?
result = defaultdict(dict)
for row in tracks:
for data in row[1].split(';'):
result[int(row[0])][str(data.split(',')[0])
] = parse_result(data.split(',')[1:])
for row in snaps:
timestamp, snapshot, contest_number = row
if result[timestamp].has_key(contest_number):
concrete_pilot = result[timestamp][contest_number]
try:
snapshot = json.loads(snapshot)
if 'in_air_true' in snapshot:
concrete_pilot['in_air'] = True
del snapshot[snapshot.index('in_air_true')]
elif 'in_air_false' in snapshot:
concrete_pilot['in_air'] = False
del snapshot[snapshot.index('in_air_false')]
except:
continue
if len(snapshot) > 0:
concrete_pilot['state'] = snapshot[0]
return result
def parse_result(data):
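    # Split one comma-separated track sample into typed fields and regroup them
    # as distance, [ground speed, vertical speed] and [lat, lon, alt].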
res = dict()
res['lat'], res['lon'], res['alt'], res['vspd'], res['gspd'], \
res['dist'] = data
def _float(num):
result = round(float(num), 6)
if math.isnan(result):
log.msg("Nan found in float.")
result = 0
if math.isinf(result):
log.msg("Infinity found in float.")
result = 1
return result
formats = dict(lat=_float, lon=_float, alt=int, gspd=_float, vspd=_float,
dist=int)
for key in res:
res[key] = formats[key](res[key])
return dict(dist=res['dist'],
spds=[res['gspd'], res['vspd']],
crds=[res['lat'], res['lon'], res['alt']])
|
DmitryLoki/gorynych
|
gorynych/processor/services/visualization.py
|
visualization.py
|
py
| 10,219 |
python
|
en
|
code
| 3 |
github-code
|
6
|
7897642460
|
import pygsheets
def init(secret_path, sheet_name): # Получение нужной таблицы
gc = pygsheets.authorize(client_secret=secret_path)
sh = gc.open(sheet_name)
wks = sh.sheet1
return wks
def get_all_table_data(wks): # Получение всех данных с таблицы
data_list = []
for row in wks.get_all_values():
if row[0] and row[0] != 'register':
data_list.append(row[:10])
return data_list
#def get_rows_ids(data): # Получение id всех записей
#ids = []
#for row in data:
#ids.append(row[2])
#return ids
def get_row_data_by_id(data, id): # Получение нужной записи по id
for row in data:
if row[2] == str(id):
return row
return 'Запись не найдена'
def update_row_data_by_id(wks, id, value_update): # Обновление нужной записи по id
data = wks.get_all_values()
for i in range(len(data)):
if str(id) in data[i]:
wks.update_value(f'J{i+1}', value_update)
wks = init('client_secret.json', 'Test')
table_data = get_all_table_data(wks)
print(table_data)
#print(get_row_data_by_id(table_data, 2028))
update_row_data_by_id(wks, 202, 'test')
|
FMaslina/gsheets
|
gsheets_integration.py
|
gsheets_integration.py
|
py
| 1,302 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
7794747310
|
from flask import Flask, render_template, request
import mushroom_data as md
import random
app = Flask(__name__)
@app.route('/', methods=['POST', 'GET'])
def main():
if request.method != 'POST':
return render_template('index.html',
cap_shape=md.cap_shape,
cap_surface=md.cap_surface,
cap_color=md.cap_color,
bruises=md.bruises,
odor=md.odor,
gill_attachment=md.gill_attachment,
gill_spacing=md.gill_spacing,
gill_size=md.gill_size,
gill_color=md.gill_color,
stalk_shape=md.stalk_shape,
stalk_root=md.stalk_root,
stalk_surface_above_ring=md.stalk_surface_above_ring,
stalk_surface_below_ring=md.stalk_surface_below_ring,
stalk_color_above_ring=md.stalk_color_above_ring,
stalk_color_below_ring=md.stalk_color_below_ring,
veil_color=md.veil_color,
ring_number=md.ring_number,
ring_type=md.ring_type,
spore_print_color=md.spore_print_color,
population=md.population,
habitat=md.habitat,
prediction=None
)
else:
collected_values = [collect_form_values()]
prediction = classify_mushroom(collected_values)
if prediction == 1:
prediction_value = 'Poisonous'
else:
prediction_value = 'Edible'
return render_template('index.html',
cap_shape=md.cap_shape,
cap_surface=md.cap_surface,
cap_color=md.cap_color,
bruises=md.bruises,
odor=md.odor,
gill_attachment=md.gill_attachment,
gill_spacing=md.gill_spacing,
gill_size=md.gill_size,
gill_color=md.gill_color,
stalk_shape=md.stalk_shape,
stalk_root=md.stalk_root,
stalk_surface_above_ring=md.stalk_surface_above_ring,
stalk_surface_below_ring=md.stalk_surface_below_ring,
stalk_color_above_ring=md.stalk_color_above_ring,
stalk_color_below_ring=md.stalk_color_below_ring,
veil_color=md.veil_color,
ring_number=md.ring_number,
ring_type=md.ring_type,
spore_print_color=md.spore_print_color,
population=md.population,
habitat=md.habitat,
prediction=prediction_value
)
def collect_form_values():
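    # Collect the 21 form fields in the order the trained model expects.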
mushroom_values = [request.form.get('cap-shape'), request.form.get('cap-surface'),
request.form.get('cap-color'), request.form.get('bruises'),
request.form.get('odor'), request.form.get('gill-attachment'),
request.form.get('gill-spacing'), request.form.get('gill-size'),
request.form.get('gill-color'), request.form.get('stalk-shape'),
request.form.get('stalk-root'), request.form.get('stalk-surface-above-ring'),
request.form.get('stalk-surface-below-ring'),
request.form.get('stalk-color-above-ring'),
                       request.form.get('stalk-color-below-ring'), request.form.get('veil-color'),
request.form.get('ring-number'), request.form.get('ring-type'),
request.form.get('spore-print-color'), request.form.get('population'),
request.form.get('habitat')]
return mushroom_values
def classify_mushroom(values):
prediction = md.model.predict(values)
return prediction
if __name__ == '__main__':
app.run()
|
sharmas1ddharth/Mushroom_Classification
|
app.py
|
app.py
|
py
| 4,515 |
python
|
en
|
code
| 2 |
github-code
|
6
|
2052216012
|
from fastai import vision, metrics
from fastai.callback import hooks
from fastai.utils import mem
import numpy as np
from os import path
import torch
vision.defaults.device = vision.defaults.device if torch.cuda.is_available() else torch.device('cpu')
# Download data and get path
fastai_path = vision.untar_data(vision.URLs.CAMVID)
PATH = str(fastai_path)
print('CAMVID paths:')
print(fastai_path.ls())
BATCH_SIZE = 64
WD = 1e-2
LR = 1e-4
PCT_START_FINETUNE = 0.9 # given the default of 0.3, it means that your LR is going up for 30% of your iterations and then decreasing over the last 70%
PCT_START = 0.8
EPOCHS_FINETUNE = 12
EPOCHS = 12
# Define images and label path
LABEL_PATH = path.sep.join([PATH, 'labels'])
IMAGE_PATH = path.sep.join([PATH, 'images'])
# Define paths of image and label
image_paths = vision.get_image_files(IMAGE_PATH)
label_paths = vision.get_image_files(LABEL_PATH)
# Load some samples to see what's inside
rand_indx = np.random.randint(0, len(image_paths))
sample_image_path = image_paths[rand_indx]
sample_image = vision.open_image(sample_image_path)
sample_image.show(figsize=(6, 6))
# Function to match between image and its label path. E.g. image path: /root/.fastai/data/camvid/images/0006R0_f02910.png; label path: /root/.fastai/data/camvid/labels/0006R0_f02910_P.png
segment_name_fn = lambda image_path: path.sep.join([LABEL_PATH, f'{image_path.stem}_P{image_path.suffix}'])
# Load image segmentation by defaults (segment image given in dataset) and vision.open_mask()
sample_label_path = segment_name_fn(sample_image_path)
sample_label = vision.open_image(sample_label_path)
sample_label.show(figsize=(6, 6))
# Note: after preprocessing with vision.open_mask the sample segment has 1 channel instead of the 3 channels of the original segment
sample_label_preprocessed = vision.open_mask(sample_label_path)
sample_label_preprocessed.show(figsize=(6, 6))
print(sample_label_preprocessed.data) # sample_label_preprocessed is also fastai tensor
# get image dimension (height and width)
image_size = np.array(sample_label_preprocessed.shape[1:])
data_size = image_size//2
objects_in_image = np.loadtxt(path.sep.join([PATH, 'codes.txt']), dtype=str)
# Determine batch size by GPU free memory to avoid CUDA out of memory
if torch.cuda.is_available():
free = mem.gpu_mem_get_free_no_cache()
if free > 8200:
BATCH_SIZE = 8
else:
BATCH_SIZE = 4
print(f'Using batch size of {BATCH_SIZE}, have {free}MB of GPU RAM free')
origin_data = vision.SegmentationItemList.from_folder(IMAGE_PATH).split_by_fname_file(path.sep.join([PATH, 'valid.txt'])).label_from_func(segment_name_fn, classes=objects_in_image)
data = origin_data.transform(vision.get_transforms(), size=data_size, tfm_y=True).databunch(bs=BATCH_SIZE).normalize(vision.imagenet_stats) # tfm_y=True means apply transform to label
print(data.show_batch(2, figsize=(10, 7)))
print(data.show_batch(2, figsize=(10, 7), ds_type=vision.DatasetType.Valid))
# Define accuracy
object2id = {value: key for key, value in enumerate(objects_in_image)}
void_index = object2id['Void']
def camvid_accuracy(inputs, target):
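    # Pixel accuracy over labelled pixels only: positions tagged 'Void' are masked out
    # before comparing the argmax prediction with the target.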
target = target.squeeze(1)
mask = target != void_index
return (inputs.argmax(dim=1)[mask] == target[mask]).float().mean()
# Define model
learner = vision.unet_learner(data, vision.models.resnet34, metrics=camvid_accuracy, wd=WD)
# Find good LR
learner.lr_find()
learner.recorder.plot()
learner.fit_one_cycle(EPOCHS_FINETUNE, max_lr=slice(LR), pct_start=PCT_START_FINETUNE)
learner.save('stage-1-34-unet')
# Show results
learner.show_results(rows=3, figsize=(8, 9))
# After warming up, start to train all network
learner.unfreeze()
learner.fit_one_cycle(EPOCHS, max_lr=slice(LR/400, LR/4), pct_start=PCT_START)
learner.save('stage-2-34-unet')
|
lykhahaha/Mine
|
Fastai_Tutorial/lesson3-camvid.py
|
lesson3-camvid.py
|
py
| 3,776 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32576643958
|
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login
from django.views.generic import View
from django.http import HttpResponse
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth import logout as log_out
from .models import Task
from .forms import TaskCreateForm
from .forms import TaskEditForm
from .forms import TaskDeleteForm
from .forms import TaskSearchForm
#Display the home page once user has logged in
def home(request):
if request.user.is_active == 0:
return redirect('signin')
tasks = Task.objects.filter(userid_id=request.user.id).order_by('-created_at')
args = {'tasks': tasks}
return render(request, 'home.html', args)
#Display the creation page and handle creation requests to insert into the database
def create(request):
if request.user.is_active == 0:
return redirect('signin')
if request.method == 'POST':
form = TaskCreateForm(data=request.POST)
if form.is_valid():
task = form.save(commit=False)
task.userid_id = request.user.id
task.title = form.cleaned_data.get('title')
            task.body = form.cleaned_data.get('body')
task.completed_at = form.cleaned_data.get('date')
task.save()
return redirect('home')
else:
error = form.errors
return render(request, 'create.html', {'error' : error})
else:
return render(request, 'create.html', context = None)
#Search task bodies with given string
def search(request):
if request.user.is_active == 0:
return redirect('signin')
if request.method == 'POST':
form = TaskSearchForm(data=request.POST)
if form.is_valid():
search = form.cleaned_data.get('search')
tasks = Task.objects.filter(body__contains=search, userid_id = request.user.id)
args = {'tasks': tasks, 'search' : search}
return render(request, 'search.html', args)
else:
return render(request, 'home.html', context=None)
#Handle edits to task title, body, completion status and completion date and save to the database if exists
#Display the edit page when for a GET request
def edit(request, task_id):
if request.user.is_active == 0:
return redirect('signin')
if request.method == 'POST':
form = TaskEditForm(data=request.POST)
if form.is_valid():
count = Task.objects.filter(id = form.cleaned_data.get('id'), userid_id = request.user.id).count()
if count == 1:
task = Task.objects.get(id = form.cleaned_data.get('id'), userid_id = request.user.id)
task.title = form.cleaned_data.get('title')
task.body = form.cleaned_data.get('body')
task.completed_at = form.cleaned_data.get('date')
if bool(form.cleaned_data.get('completed')):
task.complete = 1
else:
task.complete = 0
task.save()
return redirect(home)
else:
return redirect('home')
else:
return render(request, 'home.html', context=None)
elif request.method == "GET":
count = Task.objects.filter(id = task_id, userid_id = request.user.id).count()
if count == 1:
task = Task.objects.get(id = task_id, userid_id = request.user.id)
task.completed_at = str(task.completed_at)
if "+" in task.completed_at:
splitdate = task.completed_at.split("+")
task.completed_at = splitdate[0]
args = {'task': task}
return render(request, 'edit.html', args)
else:
return render(request, 'home.html', context = None)
else:
return render(request, 'home.html', context = None)
#Delete tasks belonging to the user if exists
def delete(request):
if request.user.is_active == 0:
return redirect('signin')
if request.method == 'POST':
form = TaskDeleteForm(data=request.POST)
if form.is_valid():
count = Task.objects.filter(id = form.cleaned_data.get('taskid'), userid_id = request.user.id).count()
if count == 1:
task = Task.objects.filter(id = form.cleaned_data.get('taskid')).delete()
return redirect('home')
else:
return render(request, 'home.html', context=None)
else:
return render(request, 'home.html', context=None)
#Register, uses built-in methods
def register(request):
if request.method == 'POST':
form = UserCreationForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get('username')
raw_password = form.cleaned_data.get('password1')
user = authenticate(username=username, password=raw_password)
login(request, user)
return redirect('home')
else:
form = UserCreationForm()
return render(request, 'register.html', {'form': form})
#Signin uses built-in methods
def signin(request):
if request.user.is_active == 1:
return redirect('home')
if request.method == 'POST':
form = AuthenticationForm(data=request.POST)
if form.is_valid():
username = form.cleaned_data.get('username')
password = form.cleaned_data.get('password')
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active == 1:
request.session.set_expiry(86400) #sets the exp. value of the session
login(request, user)
return redirect('home')
else:
form = AuthenticationForm()
return render(request, 'signin.html', {'form': form})
#Log out uses built-in methods
def logout(request):
log_out(request)
form = AuthenticationForm()
return redirect('signin')
|
S4ADO/ADW_Django_A1
|
TaskManager/tasks/views.py
|
views.py
|
py
| 5,269 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38254642090
|
from django.test import TestCase
from hknweb.candidate.tests.models.utils import ModelFactory
class DuePaymentRequirementModelTests(TestCase):
def setUp(self):
semester = ModelFactory.create_semester(
semester="Spring",
year=0,
)
duepayment = ModelFactory.create_duepayment_requirement(
candidateSemesterActive=semester,
)
self.semester = semester
self.duepayment = duepayment
def test_str(self):
expected = "{} - {}".format(self.duepayment.name, self.semester)
actual = str(self.duepayment)
self.assertEqual(expected, actual)
|
Gabe-Mitnick/hknweb
|
hknweb/candidate/tests/models/requirements/payment/test_due_payment.py
|
test_due_payment.py
|
py
| 649 |
python
|
en
|
code
| null |
github-code
|
6
|
73814975546
|
from abc import ABCMeta, abstractmethod
from asyncio.queues import Queue as AioQueue
from queue import Queue
from bonobo.constants import BEGIN, END
from bonobo.errors import AbstractError, InactiveReadableError, InactiveWritableError
from bonobo.nodes import noop
BUFFER_SIZE = 8192
class Readable(metaclass=ABCMeta):
"""Interface for things you can read from."""
@abstractmethod
def get(self, block=True, timeout=None):
"""Read. Block/timeout are there for Queue compat."""
raise AbstractError(self.get)
class Writable(metaclass=ABCMeta):
"""Interface for things you can write to."""
@abstractmethod
def put(self, data, block=True, timeout=None):
"""Write. Block/timeout are there for Queue compat."""
raise AbstractError(self.put)
class Input(Queue, Readable, Writable):
def __init__(self, maxsize=BUFFER_SIZE):
Queue.__init__(self, maxsize)
self._runlevel = 0
self._writable_runlevel = 0
self.on_initialize = noop
self.on_begin = noop
self.on_end = noop
self.on_finalize = noop
def put(self, data, block=True, timeout=None):
# Begin token is a metadata to raise the input runlevel.
if data == BEGIN:
if not self._runlevel:
self.on_initialize()
self._runlevel += 1
self._writable_runlevel += 1
# callback
self.on_begin()
return
# Check we are actually able to receive data.
if self._writable_runlevel < 1:
raise InactiveWritableError("Cannot put() on an inactive {}.".format(Writable.__name__))
if data == END:
self._writable_runlevel -= 1
return Queue.put(self, data, block, timeout)
def _decrement_runlevel(self):
if self._runlevel == 1:
self.on_finalize()
self._runlevel -= 1
self.on_end()
def get(self, block=True, timeout=None):
if not self.alive:
raise InactiveReadableError("Cannot get() on an inactive {}.".format(Readable.__name__))
data = Queue.get(self, block, timeout)
if data == END:
self._decrement_runlevel()
if not self.alive:
raise InactiveReadableError(
"Cannot get() on an inactive {} (runlevel just reached 0).".format(Readable.__name__)
)
return self.get(block, timeout)
return data
def shutdown(self):
while self._runlevel >= 1:
self._decrement_runlevel()
def empty(self):
self.mutex.acquire()
while self._qsize() and self.queue[0] == END:
self._runlevel -= 1
Queue._get(self)
self.mutex.release()
return Queue.empty(self)
@property
def alive(self):
return self._runlevel > 0
class AioInput(AioQueue):
pass
|
python-bonobo/bonobo
|
bonobo/structs/inputs.py
|
inputs.py
|
py
| 2,922 |
python
|
en
|
code
| 1,564 |
github-code
|
6
|
38760730621
|
import dictionary
print(dictionary.d)
text = "I drive a red car in the city with a friend to go to the cinema"
translate = ""
words = text.split()
for w in words:
translate = translate + dictionary.d[w]
translate = translate + " "
print(translate)
|
marnace/Homework9thMarch
|
Homework9thMarch.py
|
Homework9thMarch.py
|
py
| 262 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20880620842
|
#@title Установка модуля УИИ
from PIL import Image
from pathlib import Path
from tensorflow.keras.preprocessing import image
from tensorflow.keras.layers import Input, Dense, Conv2D, Flatten
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from IPython import display as ipd
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import seaborn as sns
import gdown
import zipfile
import os
import random
import time
import gc
sns.set(style='darkgrid')
seed_value = 12
random.seed(seed_value)
np.random.seed(seed_value)
tf.random.set_seed(seed_value)
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class AccuracyCallback(tf.keras.callbacks.Callback):
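    # Keras callback that records per-epoch accuracy and loss, prints a colour-coded summary
    # (best validation epoch highlighted) and plots the accuracy curves when training ends.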
def __init__(self):
self.train_acc = []
self.val_acc = []
self.train_loss = []
self.val_loss = []
self.times = []
def plot_graph(self):
plt.figure(figsize=(20, 14))
plt.subplot(2, 2, 1)
plt.title('Точность', fontweight='bold')
plt.plot(self.train_acc, label='Точность на обучащей выборке')
plt.plot(self.val_acc, label='Точность на проверочной выборке')
        plt.xlabel('Эпоха обучения')
plt.ylabel('Доля верных ответов')
plt.legend()
plt.show()
def on_epoch_begin(self, epoch, logs=None):
self.start_time = time.time()
def on_epoch_end(self, epoch, logs=None):
self.train_acc.append(logs['accuracy'])
self.val_acc.append(logs['val_accuracy'])
self.train_loss.append(logs['loss'])
self.val_loss.append(logs['val_loss'])
t = round(time.time() - self.start_time, 1)
self.times.append(t)
if logs['val_accuracy'] > self.accuracymax:
self.accuracymax = logs['val_accuracy']
self.idxmax = epoch
        print(f'Эпоха {epoch+1}'.ljust(10)+ f'Время обучения: {t}c'.ljust(25) + f'Точность на обучающей выборке: {bcolors.OKBLUE}{round(logs["accuracy"]*100,1)}%{bcolors.ENDC}'.ljust(50) +f'Точность на проверочной выборке: {bcolors.OKBLUE}{round(logs["val_accuracy"]*100,1)}%{bcolors.ENDC}')
self.cntepochs += 1
def on_train_begin(self, logs):
self.idxmax = 0
self.accuracymax = 0
self.cntepochs = 0
def on_train_end(self, logs):
ipd.clear_output(wait=True)
for i in range(self.cntepochs):
if i == self.idxmax:
                print('\33[102m' + f'Эпоха {i+1}'.ljust(10)+ f'Время обучения: {self.times[i]}c'.ljust(25) + f'Точность на обучающей выборке: {round(self.train_acc[i]*100,1)}%'.ljust(41) +f'Точность на проверочной выборке: {round(self.val_acc[i]*100,1)}%'+ '\033[0m')
else:
                print(f'Эпоха {i+1}'.ljust(10)+ f'Время обучения: {self.times[i]}c'.ljust(25) + f'Точность на обучающей выборке: {bcolors.OKBLUE}{round(self.train_acc[i]*100,1)}%{bcolors.ENDC}'.ljust(50) +f'Точность на проверочной выборке: {bcolors.OKBLUE}{round(self.val_acc[i]*100,1)}%{bcolors.ENDC}' )
self.plot_graph()
class TerraDataset:
bases = {
'Молочная_продукция' : {
'url': 'https://storage.yandexcloud.net/terraai/sources/milk.zip',
'info': 'Вы скачали базу с изображениями бутылок молока. База содержит 1500 изображений трех категорий: «Parmalat», «Кубанская буренка», «Семейный формат»',
'dir_name': 'milk_ds',
'task_type': 'img_classification',
'size': (96, 53),
},
'Пассажиры_автобуса' : {
'url': 'https://storage.yandexcloud.net/terraai/sources/bus.zip',
'info': 'Вы скачали базу с изображениями пассажиров автобуса. База содержит 9081 изображение двух категорий: «Входящие пассажиры», «Выходящие пасажиры»',
'dir_name': 'passengers',
'task_type': 'img_classification',
'size': (128, 64),
},
'Возгорания' : {
'url': 'https://storage.yandexcloud.net/terraai/sources/fire.zip',
'info': 'Вы скачали базу с изображениями возгораний. База содержит 6438 изображение двух категорий: «Есть возгорание», «Нет возгорания»',
'dir_name': 'fire',
'task_type': 'img_classification',
'size': (96, 76),
},
'авто' : {
'url': 'https://storage.yandexcloud.net/aiueducation/Intensive/cars.zip',
'info': 'Вы скачали базу с изображениями марок авто. База содержит 3427 изображений трех категорий: «Феррари», «Мерседес», «Рено»',
'dir_name': 'car',
'task_type': 'img_classification',
'size': (54, 96),
},
'майонез' : {
'url': 'https://storage.yandexcloud.net/aiueducation/Intensive/mayonnaise.zip',
'info': 'Вы скачали базу с изображениями брендов майонеза. База содержит 150 изображений трех категорий: «ЕЖК», «Махеев», «Ряба»',
'dir_name': 'mayonesse',
'task_type': 'img_classification',
'size': (96, 76),
},
}
def __init__(self, name):
'''
parameters:
        name - the dataset name
'''
self.base = self.bases[name]
self.sets = None
self.classes = None
def load(self):
'''
        Download and unpack the dataset
'''
print(f'{bcolors.BOLD}Загрузка датасета{bcolors.ENDC}',end=' ')
        # Download the dataset archive from the cloud storage
fname = gdown.download(self.base['url'], None, quiet=True)
if Path(fname).suffix == '.zip':
            # Unpack the archive
with zipfile.ZipFile(fname, 'r') as zip_ref:
zip_ref.extractall(self.base['dir_name'])
            # Delete the downloaded archive
os.remove(fname)
        # Print the info block
print(f'{bcolors.OKGREEN}Ok{bcolors.ENDC}')
        print(f'{bcolors.OKBLUE}Инфо:{bcolors.ENDC}')
print(f' {self.base["info"]}')
return self.base['task_type']
def samples(self):
'''
        Show a few random sample images from each class
'''
        # Visualize an image dataset for the classification task
if self.base['task_type'] == 'img_classification':
            # The class list is the set of folder names inside the dataset directory
self.classes = sorted(os.listdir(self.base['dir_name']))
            # Build the visualization canvas
f, ax = plt.subplots(len(self.classes), 5, figsize=(24, len(self.classes) * 4))
for i, class_ in enumerate(self.classes):
                # Pick random images of the current class
for j in range(5):
random_image = random.choice(
os.listdir(os.path.join(
self.base['dir_name'],
class_)))
img = Image.open(os.path.join(
self.base['dir_name'],
class_,
random_image))
ax[i, j].imshow(img)
ax[i, j].axis('off')
ax[i, j].set_title(class_)
plt.show()
def create_sets(self):
'''
        Build the training and test sets
'''
x_train = []
y_train = []
x_test = []
y_test = []
print(f'{bcolors.BOLD}Создание наборов данных для обучения модели{bcolors.ENDC}', end=' ')
        # Build the sets for the image-classification task
if self.base['task_type'] == 'img_classification':
            # Get the list of class directories
self.classes = sorted(os.listdir(self.base['dir_name']))
counts = []
            # Iterate over every class folder
for j, d in enumerate(self.classes):
                # List all images of the current class
files = sorted(os.listdir(os.path.join(self.base['dir_name'], d)))
                # Train/test split threshold (90% of each class goes to training)
counts.append(len(files))
count = counts[-1] * .9
                # Iterate over all images of the current class
for i in range(len(files)):
                    # Load the next image
sample = np.array(image.load_img(os.path.join(
self.base['dir_name'],
d,
files[i]), target_size=self.base['size']))
                    # Add the sample to the training or the test set
if i<count:
x_train.append(sample)
y_train.append(j)
else:
x_test.append(sample)
y_test.append(j)
self.sets = (np.array(x_train)/255., np.array(y_train)), (np.array(x_test)/255., np.array(y_test))
            # Print the final summary
print(f'{bcolors.OKGREEN}Ok{bcolors.ENDC}')
print()
print(f'Размер созданных выборок:')
print(f' Обучающая выборка: {self.sets[0][0].shape}')
print(f' Метки обучающей выборки: {self.sets[0][1].shape}')
print(f' Проверочная выборка: {self.sets[1][0].shape}')
print(f' Метки проверочной выборки: {self.sets[1][1].shape}')
print()
print(f'Распределение по классам:')
f, ax =plt.subplots(1,2, figsize=(16, 5))
ax[0].bar(self.classes, np.array(counts)*0.9)
ax[0].set_title('Обучающая выборка')
ax[1].bar(self.classes, np.array(counts)*0.1, color='g')
ax[1].set_title('Проверочная выборка')
plt.show()
class TerraModel:
def __init__(self, task_type, trds):
self.model = None
self.task_type = task_type
self.trds = trds
@staticmethod
def create_layer(params):
'''
        Create a single layer from its text description
'''
activation = 'relu'
params = params.split('-')
        # Input layer
        if params[0].lower() == 'входной':
return Input(shape=eval(params[1]))
        # Dense (fully connected) layer
if params[0].lower() == 'полносвязный':
if len(params)>2:
activation = params[2]
return Dense(eval(params[1]), activation=activation)
        # Flatten layer
if params[0].lower() == 'выравнивающий':
return Flatten()
        # Convolutional layer (Conv2D)
if params[0].lower() == 'сверточный2д':
if len(params)>3:
activation = params[3]
return Conv2D(eval(params[1]), eval(params[2]), activation=activation, padding='same')
def create_model(self, layers):
'''
        Build the neural network from a text description of its layers
parameters:
        layers - the layers, described as text
'''
if self.task_type=='img_classification':
layers += '-softmax'
layers = layers.split()
        # Create the input layer
        inp = self.create_layer(f'входной-{self.trds.sets[0][0].shape[1:]}')
        # Create the first hidden layer
x = self.create_layer(layers[0]) (inp)
        # Create the remaining layers
for layer in layers[1:]:
x = self.create_layer(layer) (x)
self.model = Model(inp, x)
def train_model(self, epochs, use_callback=True):
'''
        Train the neural network
parameters:
        epochs - the number of training epochs
'''
        # Train the image-classification model
if self.task_type=='img_classification':
self.model.compile(loss='sparse_categorical_crossentropy', optimizer = Adam(0.0001), metrics=['accuracy'])
accuracy_callback = AccuracyCallback()
callbacks = []
if use_callback:
callbacks = [accuracy_callback]
history = self.model.fit(self.trds.sets[0][0], self.trds.sets[0][1],
batch_size = self.trds.sets[0][0].shape[0]//25,
validation_data=(self.trds.sets[1][0], self.trds.sets[1][1]),
epochs=epochs,
callbacks=callbacks,
verbose = 0)
return history
def test_model(self):
'''
        Run the trained model on random test samples
'''
        # Test the image-classification model
if self.task_type=='img_classification':
for i in range(10):
number = np.random.randint(self.trds.sets[1][0].shape[0])
sample = self.trds.sets[1][0][number]
print('Тестовое изображение:')
                plt.imshow(sample) # Show the test image with the chosen index
                plt.axis('off') # Hide the axes
plt.show()
pred = self.model.predict(sample[None, ...])[0]
max_idx = np.argmax(pred)
print()
print('Результат предсказания модели:')
for i in range(len(self.trds.classes)):
if i == max_idx:
print(bcolors.BOLD, end='')
print(f'Модель распознала класс «{self.trds.classes[i]}» на {round(100*pred[i],1)}%{bcolors.ENDC}')
print('---------------------------')
print('Правильный ответ: ',end='')
if max_idx == self.trds.sets[1][1][number]:
print(bcolors.OKGREEN, end='')
else:
print(bcolors.FAIL, end='')
print(self.trds.classes[self.trds.sets[1][1][number]],end=f'{bcolors.ENDC}\n')
print('---------------------------')
print()
print()
class TerraIntensive:
def __init__(self):
self.trds = None
self.trmodel = None
self.task_type = None
def load_dataset(self, ds_name):
self.trds = TerraDataset(ds_name)
self.task_type = self.trds.load()
def samples(self):
self.trds.samples()
def create_sets(self):
self.trds.create_sets()
def create_model(self, layers):
print(f'{bcolors.BOLD}Создание модели нейронной сети{bcolors.ENDC}', end=' ')
self.trmodel = TerraModel(self.task_type, self.trds)
self.trmodel.create_model(layers)
print(f'{bcolors.OKGREEN}Ok{bcolors.ENDC}')
def train_model(self, epochs):
self.trmodel.train_model(epochs)
def test_model(self):
self.trmodel.test_model()
def train_model_average(self, layers, cnt=10):
if self.task_type == 'img_classification':
print(f'{bcolors.BOLD}Определение среднего показателя точности модели на {cnt} запусках{bcolors.ENDC}')
print()
average_accuracy = []
average_val_accuracy = []
times=[]
for i in range(cnt):
start_time = time.time()
self.trmodel.create_model(layers)
history = self.trmodel.train_model(20, False).history
average_accuracy.append(np.max(history['accuracy']))
average_val_accuracy.append(np.max(history['val_accuracy']))
t = round(time.time() - start_time, 1)
times.append(t)
print(f'Запуск {i+1}'.ljust(10)+ f'Время обучения: {t}c'.ljust(25) + f'Точность на обучающей выборке: {bcolors.OKBLUE}{round(average_accuracy[-1]*100,1)}%{bcolors.ENDC}'.ljust(50) +f'Точность на проверочной выборке: {bcolors.OKBLUE}{round(average_val_accuracy[-1]*100,1)}%{bcolors.ENDC}')
gc.collect()
ipd.clear_output(wait=True)
print(f'{bcolors.BOLD}Определение среднего показателя точности модели на {cnt} запусках{bcolors.ENDC}')
print()
argmax_idx = np.argmax(average_val_accuracy)
for i in range(cnt):
if i == argmax_idx:
print('\33[102m' + f'Запуск {i+1}'.ljust(10)+ f'Время обучения: {times[i]}c'.ljust(25) + f'Точность на обучающей выборке: {round(average_accuracy[i]*100,1)}%'.ljust(41) +f'Точность на проверочной выборке: {round(average_val_accuracy[i]*100,1)}%'+ '\033[0m')
else:
print(f'Запуск {i+1}'.ljust(10)+ f'Время обучения: {times[i]}c'.ljust(25) + f'Точность на обучающей выборке: {bcolors.OKBLUE}{round(average_accuracy[i]*100,1)}%{bcolors.ENDC}'.ljust(50) +f'Точность на проверочной выборке: {bcolors.OKBLUE}{round(average_val_accuracy[i]*100,1)}%{bcolors.ENDC}' )
print()
print(f'{bcolors.BOLD}Средняя точность на обучающей выборке: {bcolors.ENDC}{round(np.mean(average_accuracy[i])*100,1)}%')
print(f'{bcolors.BOLD}Максимальная точность на обучающей выборке: {bcolors.ENDC}{round(np.max(average_accuracy[i])*100,1)}%')
print(f'{bcolors.BOLD}Средняя точность на проверочной выборке: {round(np.mean(average_val_accuracy[i])*100,1)}%')
print(f'{bcolors.BOLD}Максимальная точность на проверочной выборке: {round(np.max(average_val_accuracy[i])*100,1)}%')
terra_ai = TerraIntensive()
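# Hypothetical usage sketch (not part of the original notebook). The dataset name and the
# layer string below are example values only; the layer keywords follow the Russian
# mini-DSL parsed by TerraModel.create_layer, and a softmax output layer is appended
# automatically for classification tasks.
# terra_ai.load_dataset('Молочная_продукция')
# terra_ai.samples()
# terra_ai.create_sets()
# terra_ai.create_model('сверточный2д-16-(3,3) выравнивающий полносвязный-100 полносвязный-3')
# terra_ai.train_model(epochs=20)
# terra_ai.test_model()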
|
alexfeklin1234/neural_network
|
yandex_milk_data/yandex_milk.py
|
yandex_milk.py
|
py
| 26,223 |
python
|
uk
|
code
| 0 |
github-code
|
6
|
75188737786
|
import sys
sys.setrecursionlimit(250000)
# This is a graph-theory style problem: figure out how many previously learned skills each target skill depends on
# T: for each skill index, the skill that must be learned before it
# A: the collection of skills that need to be learned
# Return the minimum number of skills that must be learned to acquire every skill in A of skill tree T
# 1
# Walk backwards (DFS-like) through the prerequisites until an already-learned skill is reached
def solution(T, A):
N = len(T)
    learned = [False] * N # whether each skill has already been learned (visited flag)
answer = 0
for target in A:
skill = target
        # if this skill and all of its prerequisites are already learned, move on to the next target
while not learned[skill]:
answer += 1
learned[skill] = True
            skill = T[skill] # check whether the parent (prerequisite) skill has been learned
return answer
# 2
# Using a set
# def solution(T, A):
# skills = set()
# for skill in A:
# while skill not in skills:
# skills.add(skill)
# skill = T[skill]
# return len(skills)
T1 = [0, 0, 1, 1]
A1 = [2]
T2 = [0, 0, 0, 0, 2, 3, 3]
A2 = [2, 5, 6]
T3 = [0, 0, 1, 2]
A3 = [1, 2]
T4 = [0, 3, 0, 0, 5, 0, 5]
A4 = [4, 2, 6, 1, 0]
print(solution(T1, A1)) # 3
print(solution(T2, A2)) # 5
print(solution(T3, A3)) # 3
print(solution(T4, A4)) # 7
|
zacinthepark/Problem-Solving-Notes
|
programmers/스킬트리.py
|
스킬트리.py
|
py
| 1,378 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
30265323373
|
import arcade
SPACING = 20
MARGIN = 110
arcade.open_window(400, 400, "Square of diamonds")
arcade.set_background_color(arcade.color.AMARANTH_PINK)
arcade.start_render()
for row in range(10):
for column in range(10):
if (row%2==0 and column%2==0) or (row%2==1 and column%2==1):
x = column * SPACING + MARGIN
y = row * SPACING + MARGIN
arcade.draw_rectangle_filled(x,y,10,10,arcade.color.LEMON,45)
elif (row%2==0 and column%2==1) or (row%2==1 and column%2==0):
x = column * SPACING + MARGIN
y = row * SPACING + MARGIN
arcade.draw_rectangle_filled(x,y,10,10,arcade.color.GREEN_YELLOW,45)
arcade.finish_render()
arcade.run()
|
maryamsaeedi17/PyLearningWorks1
|
assignment13/drawsquare.py
|
drawsquare.py
|
py
| 723 |
python
|
en
|
code
| 6 |
github-code
|
6
|
39593057083
|
import sys
infile = sys.argv[1]
date = sys.argv[2]
time = sys.argv[3]
ping = "null"
server = "null"
download = "null"
upload = "null"
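# Expected input: a text report captured from speedtest-cli. Illustrative lines only,
# since the exact wording can differ between speedtest-cli versions:
#   Hosted by Example ISP (Sydney) [2.15 km]: 12.345 ms
#   Download: 94.52 Mbit/s
#   Upload: 38.11 Mbit/s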
with open(infile, 'r') as f:
for line in f:
if "Hosted by " in line:
server = line.replace("Hosted by ","")
server = server.split(":")
ping = server[1].strip()
ping = ping.replace(" ms","")
server = server[0].strip()
if "Download: " in line:
download = line.replace("Download: ","")
download = download.replace(" Mbit/s","")
download = download.strip()
download = download.replace(" .","")
if download.startswith("."):
download = download[1:]
if "Upload:" in line:
upload = line.replace("Upload:","")
upload = upload.replace(" Mbit/s","")
upload = upload.strip()
upload = upload.replace(" .","")
if upload.startswith("."):
upload = upload[1:]
#Discard implausible values (treat the measurement as failed)
if ping != "null":
if float(ping) >= 1800000.00:
ping = "null"
if upload != "null":
if float(upload) <= 00.00:
upload = "null"
if download != "null":
if float(download) <= 00.00:
download = "null"
print (str(date) + "|" + str(time) + "|" + str(server) + "|" + str(ping) + "|" + str(download) + "|" + str(upload))
|
rajdor/speedtest
|
speedtest_parser.py
|
speedtest_parser.py
|
py
| 1,437 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10377701341
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 24 12:15:41 2018
@author: yannis
"""
#Write a program to calculate the credit card balance after one year if a person only pays
#the minimum monthly payment required by the credit card company each month.
#The following variables contain values as described below:
#balance - the outstanding balance on the credit card
#annualInterestRate - annual interest rate as a decimal
#monthlyPaymentRate - minimum monthly payment rate as a decimal
#For each month, calculate statements on the monthly payment and remaining balance. At the end
#of 12 months, print out the remaining balance. Be sure to print out no more than two decimal
#digits of accuracy
#Monthly interest rate= (Annual interest rate) / 12.0
#Minimum monthly payment = (Minimum monthly payment rate) x (Previous balance)
#Monthly unpaid balance = (Previous balance) - (Minimum monthly payment)
#Updated balance each month = (Monthly unpaid balance) + (Monthly interest rate x Monthly unpaid balance)
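#Sample values for running the script on its own; in the grader these three variables
#are already defined, so the numbers below are illustrative assumptions only
balance = 42
annualInterestRate = 0.20
monthlyPaymentRate = 0.04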
#The interest rate each month
monthlyInterestRate = annualInterestRate / 12.0
#Loop over the months and calculate...
for i in range(12):
#The minimum payment each month
minimumMonthlyPayment = monthlyPaymentRate * balance
#What portion of the current balance will be unpaid after the minimum payment
monthlyUnpaidBalance = balance - minimumMonthlyPayment
#The new balance after the interest has been applied to it
updatedBalance = monthlyUnpaidBalance + (monthlyInterestRate * monthlyUnpaidBalance)
#The balance for next month will be the new updated balance after interest
balance = updatedBalance
#Print results
print( "Remaining balance: " + str(round(updatedBalance,2)))
|
2057536a/Programming_Python_MIT_EdX
|
week2/problemSet2/problem1.py
|
problem1.py
|
py
| 1,790 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8019970208
|
import json
from server.nordic import COMMANDS
import static.app.instructions.translations as tr
# from static.app.instructions.translations import _yes
from static.app.instructions.helpers import TXT, NumberedText
class NextPrevious:
def __init__(self, button_text, goto, active):
self.caption = button_text
self.goto = goto
self.active = active
class Next(NextPrevious):
def __init__(self, button_text="next", goto=1, active=True):
NextPrevious.__init__(self, button_text, goto, active)
class Previous(NextPrevious):
def __init__(self, button_text="previous", goto=-1, active=True):
NextPrevious.__init__(self, button_text, goto, active)
class DelayedCommand:
def __init__(self, command, delay):
self.command = command
self.delay = delay
# class ApiCommand:
# def __init__(self, commands, delay=None):
# for command in commands:
# if command not in COMMANDS.keys():
# raise UserWarning("{} not a valid nordic command".format(command))
# self.commands = {'commands': commands}
# if delay is not None:
# # self.delay = delay
# self.commands['delay'] = delay
class Commands:
def __init__(self, first_command, *next_commands):
if first_command not in COMMANDS.keys():
raise UserWarning("{} not a valid nordic command".format(first_command))
for _command in next_commands:
if _command.command not in COMMANDS.keys():
raise UserWarning("{} not a valid nordic command".format(_command.command))
self.commands=[first_command] + [cmd for cmd in next_commands]
# self.command = first_command
# self.commands = next_commands
class ToJson(json.JSONEncoder):
def __init__(self, lang='en'):
json.JSONEncoder.__init__(self, sort_keys=True)
self.lang = lang
def default(self, o):
if isinstance(o, TXT) or isinstance(o, NumberedText):
return o.get_text(self.lang)
# return getattr(o, self.lang)
else:
return o.__dict__
class Instruction:
def __init__(self, version):
self.version = version
self.products = []
def create_file(self, language):
return json
'''
product = {'type': 'dict',
'schema': {'title': {'type': 'string', 'required': True},
'steps': {'type': 'list', 'schema': step}}}
'''
class Product:
def __init__(self, title, steps):
self.title = title,
self.steps = steps
class Step:
def __init__(self, title, instructions, confirm=None, nav_next=Next(), nav_previous=Previous(), id=None):
for instruction in instructions:
if not isinstance(instruction, Row):
raise UserWarning("instruction is not of type Row.")
self.title = title
self.instructions = instructions
self.confirm = confirm
self.next = nav_next
self.previous = nav_previous
self.id = id
'''
instruction = {'type': 'dict', 'schema': {
'col1': col, 'col2': col, 'col3': col, 'col4': col}}
'''
'''
confirm = {'img': {'type': 'string', 'required': True},
'text': {'type': 'string', 'required': True},
'yes': {'type': 'integer'},
'no': {'type': 'integer'}}
'''
class Confirm:
def __init__(self, img, text, yes_text=tr._yes, no_text=tr._no, yes=1, no=0):
self.img = img
self.text = text
self.yes = yes
self.no = no
self.yes_text = yes_text
self.no_text = no_text
class UiElement:
def __init__(self, width):
if isinstance(width, int):
self.width = str(width) + '%'
else:
raise UserWarning("not an integer : {}".format(width))
class NavigationCommand:
def __init__(self,goto):
self.goto=goto
# class OkayCommand(Commands):
# def __init__(self,first_command=None, goto=None, *next_commands):
# if first_command is not None:
# Commands.__init__(self,first_command,*next_commands)
# #ApiCommand.__init__(self, commands, delay)
# if goto is not None:
# self.goto = goto
class Spacer(UiElement):
def __init__(self, width):
UiElement.__init__(self, width)
'''
pv_keypad = {'width': {'type': 'string', 'regex': '\d{1,2}%'},
'type': {'type': 'string', 'required': True, 'allowed': 'pv-keypad'},
'active_buttons': {'type': 'list',
'allowed': ['open', 'close', 'stop', 'tiltup', 'tiltdown', 'okay', 'cancel'],
'required': True},
'confirm': {'type': 'string',
'allowed': ['open', 'close', 'stop', 'tiltup', 'tiltdown', 'okay', 'cancel']},
'okay': {'type': 'dict',
'schema': {'commands': {'type': 'dict', 'schema': api_commands}, 'goto': {'type': 'integer'}}},
'cancel': {'type': 'integer', 'required': True}}
'''
class PvKeypad(UiElement):
allowed = ['open', 'close', 'tiltup', 'tiltdown', 'stop', 'okay', 'cancel']
def __init__(self, width, active_buttons, confirm=None, okay=None, cancel=None):
'''
:param width: defines the width in percentage of the element.
:param active_buttons: which buttons to activate
:param confirm: which button will have an "open confirm dialog" method to it.
:param okay: what actions should be taken when ok is clicked.
:param cancel: where should cancel take you ?
'''
UiElement.__init__(self, width)
self.type = 'pv-keypad'
self.active_buttons = active_buttons
for button in active_buttons:
if button not in PvKeypad.allowed:
raise UserWarning("'{}' not allowed as pvkeypad button".format(button))
if confirm is not None:
if confirm not in active_buttons:
raise UserWarning("'{}' not allowed as it is not an active button".format(confirm))
self.confirm = confirm
if okay is not None:
if 'okay' not in PvKeypad.allowed:
raise UserWarning("'okay' defined but not defined as an active button.")
self.okay = okay
if cancel is not None:
if 'cancel' not in PvKeypad.allowed:
raise UserWarning("'cancel' defined but not defined as an active button.")
self.cancel = cancel
'''
text = {'width': {'type': 'string', 'regex': '\d{1,2}%'},
'type': {'type': 'string', 'required': True, 'allowed': ['text']},
'content': {'type': 'string', 'required': True}}
'''
class Text(UiElement):
def __init__(self, width_percentage, content):
UiElement.__init__(self, width_percentage)
self.type = 'text'
self.content = content
'''
image = {'width': {'type': 'string', 'regex': '\d{1,2}%'},
'type': {'type': 'string', 'required': True, 'allowed': ['image']},
'src': {'type': 'string', 'required': True}}
'''
class Image(UiElement):
def __init__(self, width, src):
UiElement.__init__(self, width)
self.type = "image"
self.src = src
'''
next_prev_buttons = [{'type': 'boolean'},
{'type': 'dict',
'schema': {'caption': {'type': 'string', 'required': True},
'goto': {'type': 'integer'}}}]
'''
class Row:
allowed = [PvKeypad, Text, Image, Spacer]
def __init__(self, col1, col2=None, col3=None, col4=None):
self._check(col1)
self.col1 = col1
if col2 is not None:
self._check(col2)
self.col2 = col2
if col3 is not None:
self._check(col3)
self.col3 = col3
if col4 is not None:
self._check(col4)
self.col4 = col4
def _check(self, instance):
for _allowed in Row.allowed:
if isinstance(instance, _allowed):
return
raise UserWarning("not allowed: {} {}".format(repr(instance), repr(_allowed)))
|
sander76/nordic
|
static/app/instructions/components.py
|
components.py
|
py
| 8,161 |
python
|
en
|
code
| 0 |
github-code
|
6
|
618459417
|
from typing import List
import numpy as np
from hakaton.prediction.model import SkyhacksModel
from hakaton.util import model_util
class WagonDetectorSkyhacksModel(SkyhacksModel):
MODEL_STRUCTURE_FILE = "storedmodel/model-next-wagon-structure.json"
MODEL_WEIGHTS_FILE = "storedmodel/model-next-wagon-weights.h5"
def __init__(self, frame_cnt_required=3):
self._model = model_util.load(self.MODEL_STRUCTURE_FILE, self.MODEL_WEIGHTS_FILE)
self._frame_cnt_required = frame_cnt_required
def predict(self, train_images: List[np.ndarray], batch_size=None) -> List[object]:
x = np.asarray(train_images)
x = x.reshape(x.shape[0], -1)
predicted = self._model.predict(x, batch_size)
labels = self._parse_next_wagon_prediction(predicted, self._frame_cnt_required)
return labels.tolist()
def _parse_next_wagon_prediction(self, predicted: np.ndarray, frame_cnt_required=2):
wagon_numbers = list()
current_wagon_num = 0
frame_cnt = 0
found_locomotive = False
for i, label in enumerate(predicted):
if (label == 1):
frame_cnt += 1
else:
frame_cnt = 0
if (frame_cnt == frame_cnt_required):
if found_locomotive:
current_wagon_num += 1
wagon_numbers[-frame_cnt_required + 1:] = [current_wagon_num for i in range(frame_cnt_required - 1)]
else:
found_locomotive = True
wagon_numbers.append(current_wagon_num)
return np.array(wagon_numbers)
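    # Worked example of the numbering above (illustrative, frame_cnt_required=2):
    # predicted = [0, 1, 1, 0, 1, 1] -> the first run of two 1-frames marks the locomotive,
    # the next run of two 1-frames starts wagon 1, giving wagon numbers [0, 0, 0, 0, 1, 1].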
|
karynabierz/hakaton
|
hakaton/prediction/wagondetector_skyhacks_model.py
|
wagondetector_skyhacks_model.py
|
py
| 1,623 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26498728774
|
import sys
from typing import Set, TextIO, Any, Tuple
from collections import *
from functools import *
from itertools import *
Data = Tuple[Set[Tuple[int, int]], Set[Tuple[int, int]], int, int]
Result = int
def parse_input(buffer: TextIO) -> Data:
east = set()
south = set()
lines = [line.strip() for line in buffer.readlines() if line.strip()]
for y, line in enumerate(lines):
for x, c in enumerate(line):
if c == ">":
east.add((x, y))
elif c == "v":
south.add((x, y))
return (east, south, len(lines[0]), len(lines))
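# Illustrative input fragment (assumed puzzle format): '>' is an east-moving sea cucumber,
# 'v' a south-moving one, '.' empty sea floor; both herds wrap around the grid edges, e.g.
#   v...>>.vv>
#   .vv>>.vv..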
def part_1(data: Data) -> Result:
east, south, width, height = data
moved = True
steps = 0
while moved:
moved = False
new_east = set()
new_south = set()
for x, y in east:
if not ((x + 1) % width, y) in south and not ((x + 1) % width, y) in east:
new_east.add(((x + 1) % width, y))
moved = True
else:
new_east.add((x, y))
east = new_east
for x, y in south:
if not (x, (y + 1) % height) in east and not (x, (y + 1) % height) in south:
new_south.add((x, (y + 1) % height))
moved = True
else:
new_south.add((x, y))
south = new_south
steps += 1
return steps
if __name__ == "__main__":
if sys.stdin.isatty():
import os
data = parse_input(
open(os.path.join(os.path.dirname(__file__), "test_input.txt"))
)
else:
data = parse_input(sys.stdin)
print(f"Part 1: {part_1(data)}")
print(f"Part 2: 🎄")
|
arjandepooter/advent-of-code-2021
|
python/25/solution.py
|
solution.py
|
py
| 1,697 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10251263267
|
import collections
import random
import sys
from os import path as osp
import json
import pandas as pd
from fol import beta_query_v2
from fol.foq_v2 import parse_formula
from utils.util import read_indexing, load_graph, load_data_with_indexing
sys.path.append(osp.dirname(osp.dirname(__file__)))
stanford_data_path = 'data/FB15k-237-betae'
def random_e_ground(foq_formula):
for i, c in enumerate(foq_formula):
if c == 'e':
return foq_formula[:i] + "{" + str(random.randint(0, 99)) + "}" + foq_formula[i + 1:]
raise ValueError("Nothing to gound")
def random_p_ground(foq_formula):
for i, c in enumerate(foq_formula):
if c == 'p':
return foq_formula[:i] + "[" + str(random.randint(0, 99)) + "]" + foq_formula[i + 1:]
raise ValueError("Nothing to gound")
def complete_ground(foq_formula):
while 1:
try:
foq_formula = random_e_ground(foq_formula)
except:
break
while 1:
try:
foq_formula = random_p_ground(foq_formula)
except:
break
return foq_formula
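# Illustrative only (formula syntax assumed from the replacements above): grounding a
# formula string such as "(p,(e))" first rewrites the 'e' into a random entity id and
# then the 'p' into a random relation id, e.g. "(p,(e))" -> "(p,({17}))" -> "([42],({17}))".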
def test_parse_formula():
for k, v in beta_query_v2.items():
obj = parse_formula(v)
assert obj.formula == v, print(obj.formula, v)
oobj = parse_formula(obj.formula)
assert oobj.formula == obj.formula
print(k, obj, obj.formula)
# we don't need this any more
def test_parse_grounded_formula():
for k, v in beta_query_v2.items():
gv = random_p_ground(random_e_ground(v))
obj = parse_formula(v)
gobj = parse_formula(gv)
oobj = parse_formula(obj.formula)
assert gobj.formula == oobj.formula
'''
ogobj = parse_formula(gobj.ground_formula)
assert gobj.ground_formula == ogobj.ground_formula
'''
def test_additive_ground():
for k, v in beta_query_v2.items():
obj = parse_formula(v)
for _ in range(10):
gv = random_p_ground(random_e_ground(obj.dumps))
obj.additive_ground(json.loads(gv))
assert obj.formula == obj.formula
'''
def test_embedding_estimation():
for k, v in beta_query_v2.items():
cg_formula = complete_ground(v)
obj = parse_formula(cg_formula)
for _ in range(10):
cg_formula = complete_ground(v)
obj.additive_ground(cg_formula)
print(f"multi-instantiation for formula {obj.ground_formula}")
obj.embedding_estimation(estimator=TransEEstimator())
'''
def test_sample():
ent2id, rel2id, proj_train, reverse_train, proj_valid, reverse_valid, proj_test, reverse_test = \
load_data_with_indexing(stanford_data_path)
for name in beta_query_v2:
query_structure = beta_query_v2[name]
ansclass = parse_formula(query_structure)
ans_sample = ansclass.random_query(proj_train, cumulative=True)
ans_check_sample = ansclass.deterministic_query(proj_train)
assert ans_sample == ans_check_sample
query_dumps = ansclass.dumps
brand_new_instance = parse_formula(query_structure)
brand_new_instance.additive_ground(json.loads(query_dumps))
ans_another = brand_new_instance.deterministic_query(proj_train)
assert ans_another == ans_sample
print(ansclass.dumps)
def test_backward_sample():
ent2id, rel2id, proj_train, reverse_train, proj_valid, reverse_valid, proj_test, reverse_test = \
load_data_with_indexing(stanford_data_path)
for name in beta_query_v2:
query_structure = beta_query_v2[name]
ansclass = parse_formula(query_structure)
ans_back_sample = ansclass.backward_sample(proj_train, reverse_train, requirement=None,
cumulative=True, meaningful_difference=False)
ans_check_back_sample = ansclass.deterministic_query(proj_train)
assert ans_check_back_sample == ans_back_sample
query_dumps = ansclass.dumps
check_instance = parse_formula(query_structure)
check_instance.additive_ground(json.loads(query_dumps))
ans_another = check_instance.deterministic_query(proj_train)
assert ans_another == ans_check_back_sample
print(name, ansclass.dumps)
for name in beta_query_v2:
query_structure = beta_query_v2[name]
ansclass = parse_formula(query_structure)
ans_back_sample = ansclass.backward_sample(proj_train, reverse_train, requirement=None,
cumulative=True, meaningful_difference=True)
ans_check_back_sample = ansclass.deterministic_query(proj_train)
assert ans_check_back_sample == ans_back_sample
query_dumps = ansclass.dumps
check_instance = parse_formula(query_structure)
check_instance.additive_ground(json.loads(query_dumps))
ans_another = check_instance.deterministic_query(proj_train)
assert ans_another == ans_check_back_sample
print(name, ansclass.dumps)
def test_benchmark_backward_sample():
ent2id, rel2id, proj_train, reverse_train, proj_valid, reverse_valid, proj_test, reverse_test = \
load_data_with_indexing(stanford_data_path)
formula_file = "outputs/test_generated_formula_anchor_node=3.csv"
df = pd.read_csv(formula_file)
for i, query_structure in enumerate(df['original']):
ansclass = parse_formula(query_structure)
ans_back_sample = ansclass.backward_sample(proj_train, reverse_train, requirement=None,
cumulative=True, meaningful_difference=True)
ans_check_back_sample = ansclass.deterministic_query(proj_train)
assert ans_check_back_sample == ans_back_sample
query_dumps = ansclass.dumps
check_instance = parse_formula(query_structure)
check_instance.additive_ground(json.loads(query_dumps))
ans_another = check_instance.deterministic_query(proj_train)
assert ans_another == ans_check_back_sample
print(i, ansclass.dumps)
if __name__ == "__main__":
test_parse_formula()
test_sample()
test_backward_sample()
test_benchmark_backward_sample()
# test_additive_ground()
# test_embedding_estimation()
# test_parse_grounded_formula()
# test_gen_foq_meta_formula()
|
HKUST-KnowComp/EFO-1-QA-benchmark
|
fol/test_foq_v2.py
|
test_foq_v2.py
|
py
| 6,319 |
python
|
en
|
code
| 17 |
github-code
|
6
|
13109904746
|
import pandas as pd
import yfinance as yf
import json
#csv_list = pd.read_csv('japan_all_stock.csv')
success_list = []
for num in range(1301, 10000):
    try:
        stock_data = yf.download(f'{num}.T', period='1d', interval='1d')
        # yf.download typically does not raise for unknown tickers; it returns an
        # empty DataFrame, so only record the ticker when data actually came back
        if not stock_data.empty:
            success_list.append(f'{num}.T')
    except Exception:
        continue
with open('japanese_success_stock.json', 'w') as json_file:
json.dump(success_list, json_file)
|
39xdgy/Playground_py
|
japan_stock_data.py
|
japan_stock_data.py
|
py
| 404 |
python
|
en
|
code
| 0 |
github-code
|
6
|
45308723306
|
import csv
import tqdm
import zoomeye.sdk as zoomeye
import json
import os
# Step 1: read the input file
# Get the IP addresses
# Query each IP through the ZoomEye SDK
# Save the results
# Filter the results
INPUT_FILE_NAME = '../csv/firewall_ip.csv'
# OUTPUT_FILE_NAME = 'csv/result.csv'
def read_csv(ip_list, csv_name):
with open(csv_name) as f:
f_csv = csv.reader(f)
        # Read the header row
headers = next(f_csv)
        # Loop over every remaining row
for row in f_csv:
ip_list.append(row[0])
def init_zoomeye():
zm = zoomeye.ZoomEye()
zm.username = '********'
zm.password = '********'
zm.login()
return zm
def zoomeye_sdk(ip, zm):
data = zm.dork_search(ip, resource="host", facets=None)
return data
def write_file(data, ip, file_name):
# f = open(file_name, "w")
# json_data
json_data = {}
# key
ip = ip
# value
value = {}
for service in data:
        # Build a dict of the form {ip: {service: {content}, service: {content}}}
# key
key = service['portinfo']['service']
# value
content = service
# item service:{content}
item = {key: content}
value.update(item)
json_data = {"ip": ip, "result": value}
with open("../result_5688/"+file_name, "w") as f:
json.dump(json_data, f)
print("写入"+file_name+"文件")
def search_ip(ip_list):
dir_list = []
read_result_5688(dir_list)
# print(len(dir_list))
zm = init_zoomeye()
for ip in tqdm.tqdm(ip_list):
print("正在处理的IP为", ip)
if ip not in dir_list:
data = zoomeye_sdk(ip, zm)
for i in data:
print(i)
print("----------------")
#write_file(data, ip, ip+".json")
print(data)
else:
print(ip+"存在文件中")
def read_result_5688(dir_list):
path = "../result_5688/"
files = os.listdir(path)
for filename in files:
dir_list.append(os.path.splitext(filename)[0])
def main():
    # Holds the IPs to look up
ip_list = []
csv_name = INPUT_FILE_NAME
read_csv(ip_list, csv_name)
print("准备查找的列表为:"+str(ip_list))
# print(dir_list)
search_ip(ip_list)
if __name__ == '__main__':
main()
|
Judgegao/bitcoin_data
|
code/Cyberspace search engine/main.py
|
main.py
|
py
| 2,283 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36781902171
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
__author__ = 'ktulhy'
# TODO: remove code duplication
ERROR = "\x1b[31m[---ERROR--] \x1b[0m"
SYSTEM = "\x1b[34m[--SYSTEM--] \x1b[0m"
INFO = "[---INFO---] "
WARNING = "\x1b[33m[--WARNING-] \x1b[0m"
test_types = []
from lxml import etree
def parse_inp(var):
attrib = var.attrib
v_type = attrib.get('type', None)
text = var.text
if None is v_type:
v_type = ""
typecast = ""
else:
typecast = "(" + v_type + ") "
if "char" in v_type:
text = '"' + text + '"'
return typecast + text
def parse_out(var):
return parse_inp(var)
def parse_file(conf, file):
includes = []
test_file = conf['folder'] + "tests/" + conf["test_file_prefix"] + file['name']
try:
f = open(test_file)
except FileNotFoundError:
print(ERROR + "Test file for '%s' not found , terminate" % file['name'])
return None, None
try:
xml_doc = etree.parse(f)
except etree.XMLSyntaxError:
print(ERROR + "Error parsing file '%s', terminate" % file['name'])
return None, None
xml_root = xml_doc.getroot()
xml_libraries = xml_root.find("libraries")
if (None != xml_libraries) and (None != xml_libraries.text):
for lib in xml_libraries.text.split(','):
includes.append(lib.rstrip().lstrip())
xml_tests = xml_root.find("tests")
if None == xml_tests:
print(WARNING + "Tests for file '%s' not written, please, check test file '%s'" % (file['name'], test_file))
print(ERROR + "Terminate")
return None, None
tests = []
for test in xml_tests.getiterator("test"):
t_attrib = test.attrib
t_type = t_attrib.get('type', None)
if t_type == 'IS_EQ_INT64':
pass
t_func = t_attrib.get('func', None)
if t_func is None:
print(WARNING + "In file '%s': Func does not contains, continue" % test_file)
continue
if t_type not in test_types:
print(WARNING + "In file '%s': Test type '%s' is not recognized, continue" % (test_file, t_type))
continue
_t_variables = test.find('variables')
if _t_variables is not None:
t_variables = _t_variables.text
if t_variables == None:
t_variables = ''
t_input = []
for inp in test.getiterator("inp"):
t_input.append(parse_inp(inp))
_t_output = test.find("out")
        if _t_output is None:
            print(WARNING + "Test for file '%s' has no output, continue" % file['name'])
            continue
        t_output = parse_out(_t_output)
tests.append({"type": t_type, "func": t_func, "variables": t_variables, "input": t_input, "output": t_output})
return tests, includes
class Test():
def __init__(self, string, libs):
self.string = string
self.libs = "\n".join(["#include <" + lib + ">" for lib in libs])
def __str__(self):
return self.string
def get_vars(self, t_index, var_index):
pass
def get_test(self, output):
pass
def get_out(self):
pass
test_types = {}
# ================ IS_[NOT_]_EQ_[U]INT[Ø,8,16,32,64] =================
class TestInt(Test):
def __init__(self, string, libs, _int_type, _compare, _print_int_type):
Test.__init__(self, string, libs)
self.int_type = _int_type
self.compare = _compare
self.print_int_type = _print_int_type
def get_vars(self, t_index, var_index):
self.var = "tFuncOutT%dV%d" % (t_index, var_index)
return "%s %s;" % (self.int_type, self.var), var_index + 1
def get_test(self, output):
return "(%s %s %s)" % (output, self.compare, self.var)
def get_out(self):
return '"%%%s\\n", %s' % (self.print_int_type, self.var)
for int_bits in ["", "8", "16", "32", "64"]:
for is_unsigned in [0, 1]:
for is_not_eq in [0, 1]:
int_type = "int"
int_type += (int_bits + "_t") if (int_bits != "") else ""
int_type = ("unsigned " if ("" == int_bits) else "u") + int_type
comp = "!=" if is_not_eq else "=="
print_int_type = "u" if is_unsigned else "d"
if int_bits != "":
print_int_type = "\" PRI" + print_int_type + int_bits + " \""
_is_eq_int = TestInt("IS_%sEQ_%sINT%s" % (
"NOT_" if is_not_eq else "",
"U" if is_unsigned else "", int_bits),
["inttypes.h", "stdlib.h"],
int_type,
comp,
print_int_type)
test_types[str(_is_eq_int)] = _is_eq_int
print_int_type = None
_is_eq_int = None
int_type = None
int_bits = None
is_unsigned = None
is_not_eq = None
# ============== IS_[NOT_]_EQ_STR ================
class TestStr(Test):
def __init__(self, string, libs, compare):
Test.__init__(self, string, libs)
self.compare = compare
def get_vars(self, t_index, var_index):
self.var = "tFuncOutT%dV%d" % (t_index, var_index)
return "char *%s;" % self.var, var_index + 1
def get_test(self, output):
return "(0 %s strcmp(%s, %s))" % (self.compare, output, self.var)
def get_out(self):
return '"%%s\\n", %s' % self.var
for is_not_eq in [0,1]:
_is_eq_int = TestStr("IS_%sEQ_STR" % ("NOT_" if is_not_eq else ""),
["string.h"],
"!=" if is_not_eq else "==")
test_types[str(_is_eq_int)] = _is_eq_int
_is_eq_int = None
is_not_eq = None
def generate_test_code(conf, file, tests, includes):
code = """
FILE *f = fopen("%s","wt");
if (NULL == f)
return 1488;\n""" % ("./" + conf["test_result_file_prefix"] + file['name'])
variables = ""
t_index = 0
var_index = 0
for test in tests:
t_type = test_types.get(test['type'], None)
if None is t_type:
continue
var_index = 0
_var_init, var_index = t_type.get_vars(t_index, var_index)
var_name = t_type.var
variables += " " + test['variables'].rstrip().lstrip().lstrip() + "\n"
variables += " " + _var_init + "\n"
code += """
/* TEST #%d for func '%s'*/
%s = %s(%s);
if %s
fprintf(f,"OK:");
else
fprintf(f,"WR:");
fprintf(f,%s);
fflush(f);
""" % (t_index, test['func'],
var_name, test['func'], ", ".join(test['input']),
t_type.get_test(test['output']),
t_type.get_out())
t_index += 1
includes.append("stdio.h")
return "\n\n/* === TESTED === */\n" +\
"\n".join(["#include <" + lib + ">" for lib in includes]) +\
"\n\nint main(void) {\n" +\
"/* Variables */" + \
variables +\
"/* Tests */" + \
code +\
"\n fclose(f);\n return 0;\n}"
|
AzaubaevViktor/c_tested
|
lib_tested.py
|
lib_tested.py
|
py
| 6,966 |
python
|
en
|
code
| 1 |
github-code
|
6
|
42807499367
|
"""""""""""""""""""""""""""""""""""""""
Lab 5 - Find Similarity
04/1/2019 - Ken M. Amamori
CS2302 MW 10:30 - 11:50
Professor: Olac Fuentes
TA: Anindita Nath, Maliheh Zargaran
"""""""""""""""""""""""""""""""""""""""
import numpy as np
import time
import math
""""""""""""
class HashTableC(object):
# Builds a hash table of size 'size'
# Item is a list of (initially empty) lists
# Constructor
def __init__(self,size):
self.item = []
self.num_items = 0
for i in range(size):
self.item.append([])
""""""""""""
class BST(object):
# Constructor
def __init__(self, item=[], left=None, right=None):
self.item = item
self.left = left
self.right = right
""""""""""""
#Build HashTable
def ht(f, f2):
print("\nBuilding hush table.\n")
print("Hash Table stats:")
    H = HashTableC(19) #create Hash Table of size 19
    print("Initial table size", len(H.item))
start = int(time.time()) #starting time
for line in f: #read line by line, glove
data = line.split(' ')
if data[0].isalpha():
H = InsertC(H, data) #insert data
end = int(time.time()) #ending time
print("Total elements: ", H.num_items)
print("Final table size: ", len(H.item))
print("Load factor: ", H.num_items/len(H.item))
c, d = infolist(H)
print("Percentage of empty lists:", c/len(H.item)*100)
print("Standard deviation of the lengths of the lists:", d)
print(H.item[int(d)+1])
print("Running time for Hash Table construction:", (end-start))
print("\nReading word file to determine similarities.\n")
start2 = int(time.time()*1000)
for line2 in f2: #read line by line, word_pair
data2 = line2.split(',')
e0 = FindC(H, data2[0]) #return array if string found
e1 = FindC(H, data2[1]) #return array if string found
print("Similarity", data2[0:2], " = ", round(np.sum(e0*e1)/(math.sqrt(np.sum(e0*e0))*math.sqrt(np.sum(e1*e1))),4)) #compute the similarity
end2 = int(time.time()*1000) #ending time
print("\nRunning time for hash table query processing: ", (end2-start2))
#HT: return # of empty list and standard deviation of lengths of lists
def infolist(H):
c=0
m = H.num_items/len(H.item)
k=0
for a in H.item:
k += (len(a)-m)*(len(a)-m)
if a==[]: #[] found
c+=1
return c, math.sqrt(k*(1/(len(H.item))))
#HT:double the size of hashtable
def doubleSize(H):
H2 = HashTableC(2*len(H.item)+1) #size = 2*length+1
for a in H.item: #traverse table
if a!=[]: #not empty
for i in a: #traverse node since chaining
H2.item[h(i[0], len(H2.item))].append([i[0], i[1]])
H2.num_items+=1
return H2
#HT: insert k in H
def InsertC(H,k):
# Inserts k in appropriate bucket (list)
# Does nothing if k is already in the table
if H.num_items//len(H.item)==1: #recize table
H = doubleSize(H)
b = h(k[0],len(H.item)) #get the right index
H.item[b].append([k[0], np.array(k[1:]).astype(np.float)])
H.num_items+=1 #keep up with elements
return H
#HT: return the index to insert
def h(s,n):
r = 0
t=0
"""for p in s:
t += (n*math.sin(math.radians(3.599999*(ord(p)-97))))
return int(t//len(s))"""
for c in s:
r = (r*60 + ord(c))% n
return r
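#HT: worked example (illustrative): h("cat", 19) -> ((0*60 + 99) % 19) = 4,
# then (4*60 + 97) % 19 = 14, then (14*60 + 116) % 19 = 6, so "cat" goes to bucket 6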
#HT: find k and return array if found
def FindC(H,k):
# Returns bucket (b) and index (i)
# If k is not in table, i == -1
b = h(k,len(H.item)) #get index
for i in range(len(H.item[b])): #traverse the node
if H.item[b][i][0] == k: #found
return H.item[b][i][1] #return array
return -1
#Build BST
def bst(f, f2):
print("\nBuilding binary search tree.\n")
T = None
start = int(time.time()) #starting time
for line in f: #get line by line
data = line.split(' ') #array separated by ' '
if data[0].isalpha():
T = Insert(T, [data[0], np.array(data[1:]).astype(np.float)]) #insert word+embeddings
end = int(time.time()) #ending time
print("Binary Search Tree stats:")
print("Number of nodes: ", count_nodes(T)) #num of nodes
print("Height: ", find_height(T)) #num of height
print("Running time for binary search tree construction:", (end-start))
print("\nReading word file to determine similarities.\n")
start = int(time.time()*1000) #starting time
for line2 in f2: #word pairs
data2 = line2.split(',') #words pair separated by ','
e0 = search_word(T, data2[0]) #search the 1st word, return array
e1 = search_word(T, data2[1]) #search the 2nd word, return array
print("Similarity", data2[0:2], " = ", round(np.sum(e0*e1)/(math.sqrt(np.sum(e0*e0))*math.sqrt(np.sum(e1*e1))),4)) #compute the similarity
end = int(time.time()*1000) #ending time
print("\nRunning time for binary search tree query processing: ", (end-start))
#BST: insert newitem into T
def Insert(T, newItem):
if T == None:
T = BST(newItem)
elif T.item[0] > newItem[0]:
T.left = Insert(T.left, newItem)
else:
T.right = Insert(T.right, newItem)
return T
#BST: find the height of a tree
def find_height(T):
if T is not None: #base case
return (1+max([(find_height(T.left)), find_height(T.right)])) #1 + (the higher number)
else:
return -1
#BST: count the number of nodes in T
def count_nodes(T):
if T is not None:
return 1 + count_nodes(T.left) + count_nodes(T.right)
return 0
#BST: search a string in the tree T, return array if it was found, None otherwise
def search_word(T, k):
temp = T #temporary variable for T
while temp is not None: #iterate through necessary nodes
if temp.item[0] == k: #found
temp.item[1]
return temp.item[1]
elif temp.item[0] > k: #smaller
temp = temp.left
else: #larger
temp = temp.right
return None #not found
c = input("Type 1 for binary search tree or 2 for hush table with chaining\nChoice: ")
f = open('glove.6b/glove.6B.50d.txt', encoding='utf-8') #file with vectors
f2 = open('word_pair.txt', encoding='utf-8') #file with pairs
if c=='1': #binary search tree
bst(f, f2)
elif c=='2': #hash table
ht(f, f2)
f.close()
f2.close()
print()
|
kmamamori/CS2302
|
lab5.py
|
lab5.py
|
py
| 5,992 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71522207867
|
from django.db import models
class Home(models.Model):
title = models.CharField(max_length = 100)
body = models.TextField()
decriptions = models.TextField(blank=True)
author = models.CharField(max_length = 200,blank=True)
img = models.ImageField(upload_to='posts',blank=True)
created = models.DateTimeField()
def __str__(self):
return self.title
class Comment(models.Model):
post = models.ForeignKey(Home,on_delete=models.CASCADE,related_name='comments')
name = models.CharField(max_length=80)
email = models.EmailField()
body = models.TextField()
created_on = models.DateTimeField(auto_now_add=True)
active = models.BooleanField(default=False)
class Meta:
ordering = ['created_on']
def __str__(self):
return 'Comment {} by {}'.format(self.body, self.name)
class Aboutme(models.Model):
name = models.CharField(max_length = 200,blank=True)
text = models.TextField()
img = models.ImageField(blank=True,upload_to='posts')
created = models.DateTimeField(auto_now_add=True)
|
linux-coffee/web
|
home/models.py
|
models.py
|
py
| 1,075 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12894651014
|
#!/usr/bin/env python
# coding: utf-8
#
# # Task 1- Prediction using Supervised ML
#
# ### Task: Predict the percentage of a student based on the no. of study hours.
# ## The Sparks Foundation(GRIP), July 2021
# #### By: Rishi Raj Dhar
# In[11]:
#importing the required libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# In[12]:
#Reading the data
url="https://raw.githubusercontent.com/AdiPersonalWorks/Random/master/student_scores%20-%20student_scores.csv"
# In[13]:
data=pd.read_csv(url)
# In[14]:
print(data)
# In[15]:
#See the first 5 rows of the data
data.head(5)
# In[16]:
#See the last 5 rows of the data
data.tail(5)
# In[17]:
data.shape
# In[18]:
data.info()
# In[19]:
data.describe()
# In[20]:
#Check for the null values if any.
data.isnull().sum()
# ### As there are no null values, we can now visualize our data.
# In[21]:
# Plotting the distribution of scores
sns.scatterplot(y=data['Scores'], x=data['Hours'])
plt.title('Marks vs Study hours', size=18)
plt.ylabel('Marks Percentage', size=15)
plt.xlabel('Hours Studied', size=15)
plt.show()
# #### From the above scatterplot, we can clearly see that there is a positive linear relation between the "Number of hours studied" and "Percentage of score". Now plotting a regression line to confirm the correlation.
#
# In[22]:
#plotting the regression line
sns.regplot(x=data['Hours'],y=data['Scores'])
plt.title('Regression Plot', size=20)
plt.ylabel('Marks Percentage', size=12)
plt.xlabel('Hours Studied', size=12)
plt.show()
#Correlation
print(data.corr())
# ### From the above output it is confirmed that the variables are positively correlated.
# # Preparing the data
# # The next step is to divide the data into "attributes"(inputs) and "labels"(outputs)
# In[23]:
#x- attributes, y- labels
x= data.iloc[:,:-1].values
y= data.iloc[:, 1].values
# ### Doing this by using Scikit-Learn's built-in train_test_split() method.
# In[24]:
#Splitting the data(Training & Test datasets)
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x,y, test_size=0.25, random_state=0)
# In[25]:
#We have split the dataset as 75% training data and 25% test data.
# #### Training the model
# ##### We will be using the Linear Regression which is a supervised machine learning algortithm
# In[26]:
from sklearn.linear_model import LinearRegression
lr= LinearRegression()
lr.fit(x_train, y_train)
print("Training complete.")
# # Making Predictions
# In[27]:
# Predicting the scores
y_pred=lr.predict(x_test)
y_pred
# In[28]:
df=pd.DataFrame({'Hours': [i[0] for i in x_test], 'Predicted Marks' : [k for k in y_pred]})
df
# In[29]:
# Comparing the Actual marks and the predicted marks
compare_scores = pd.DataFrame({'Actual Marks': y_test, 'Predicted Marks': y_pred})
compare_scores
# In[30]:
plt.scatter(x=x_test, y=y_test, color='blue')
plt.plot(x_test, y_pred, color='Black')
plt.title('Actual vs Predicted', size=20)
plt.ylabel('Actual Marks', size=15)
plt.xlabel('Predicted Marks', size=15)
plt.show()
# # Evaluating the model
# In[31]:
from sklearn import metrics as m
print('Accuracy of Actual and Predicted Scores R-Squared is:', m.r2_score(y_test,y_pred))
MSE= m.mean_squared_error(y_test, y_pred)
RMSE= np.sqrt(MSE)
MAE= m.mean_absolute_error(y_test,y_pred)
print('Mean Squared Error:', MSE)
print('Root Mean Squared Error:', RMSE)
print('Mean Absolute Error:', MAE)
# In[32]:
hours = [9.5]
answer = lr.predict([hours])
print('Score: {}'.format(round(answer[0],3)))
# ###### The accuracy is around 94%, and the small error metrics indicate that the chance of a badly wrong forecast from the model is very low.
# ## ................................. END OF TASK 1..................................................
# In[ ]:
|
Rishirajdhar/griptask1
|
GRIP_TASK_1_Student_Scores.py
|
GRIP_TASK_1_Student_Scores.py
|
py
| 4,112 |
python
|
en
|
code
| 0 |
github-code
|
6
|