| seq_id (string, 7–11 chars) | text (string, 156–1.7M chars) | repo_name (string, 7–125 chars) | sub_path (string, 4–132 chars) | file_name (string, 4–77 chars) | file_ext (string, 6 classes) | file_size_in_byte (int64, 156–1.7M) | program_lang (string, 1 class) | lang (string, 38 classes) | doc_type (string, 1 class) | stars (int64, 0–24.2k, nullable) | dataset (string, 1 class) | pt (string, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 9021010670 |
import json
import time
import datetime
import requests

urla = input("Enter the transaction hash to query: ")
urlb = "https://apilist.tronscan.org/api/transaction-info?hash="
url = urlb + urla
resp = requests.get(url, verify=False)
#print(f"Current response status: {resp}")
resp_dict = resp.json()
#print(resp_dict.keys())
print()
pepo = resp_dict['contractData']
popo = pepo['owner_address']
popc = pepo['contract_address']
times = resp_dict['timestamp']
# Transfer amount and network protocol
quantity = resp_dict['tokenTransferInfo']
quantit = quantity['amount_str']
ssxinxi = quantity['symbol']
ssxxin = quantity['tokenType']
num_quantit = float(quantit)
print("Transfer amount:", num_quantit / 1000000, "\nToken:", ssxinxi, "\nNetwork:", ssxxin)
# Transaction information
# Convert the millisecond timestamp into a local time string
timestamp = times
time_local = time.localtime(timestamp / 1000)
datr = time.strftime("%Y-%m-%d %H:%M:%S", time_local)
print("Transaction time:", datr)
print()
print(f"Owner address: {popo}\nContract address: {popc}\n")
# Address check
if popo != "TLX1LF1mjiEnqniWxQ6mELSqJmGMXoYHZG":
    print("This hash does not match the expected owner address, please enter the correct hash")
elif num_quantit < 58:
    print("Correct")
else:
    print("Wrong amount, please contact your dad to handle it")
| dirb-cll/Odin | tronscan_api.py | tronscan_api.py | py | 1,270 | python | en | code | 0 | github-code | 6 |
| 19850130214 |
# 5.11.2.py
# Program takes a 5 point score and converts it into a letter grade
def main():
    print("This program takes a 5 point score and converts it into a Letter Grade.")
    score = input("What is your score: ")
    # List of letter grades corresponding to the scores 0-5
    grades = ["F", "F", "D", "C", "B", "A"]
    letScore = grades[int(score)]
    # Output the letter grade
    print(f"The score {score} is a grade {letScore}.")
main()
| mochapup/Python-Programming-2nd-edition-John-Zelle | Ch5/5.11.2.py | 5.11.2.py | py | 440 | python | en | code | 1 | github-code | 6 |
| 3373952782 |
def get_divisors(number):
    divisors = []
    for x in range(1, number + 1):
        if number % x == 0:
            divisors.append(x)
    return divisors

# raw_input() is Python 2 only; input() is the Python 3 equivalent
number = int(input("Enter a number: "))
divisors = get_divisors(number)
# print(divisors)
# A prime has exactly two divisors: 1 and itself (this also rejects 1 correctly)
if len(divisors) == 2:
    print("This number is prime.")
else:
    print("This number is not prime.")
| usman-tahir/python-snippets | python-web-tutorial/check_primality_functions.py | check_primality_functions.py | py | 342 | python | en | code | 0 | github-code | 6 |
| 4817221286 |
import os
import threading
#Main function picture
from main_function_image import open_picture
from main_function_image import show_picture
from main_function_image import save_picture
from main_function_image import to_crop
from main_function_image import write_position
#Background
from picture_operation.background import main_background
#multiple objects
from picture_operation.multiple_objects import take_features_multi_obj
#rotation objects
from picture_operation.picture_orientation import take_features_position
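# Pipeline overview (summarised from the step functions below, for readability):
#   step_one    - load an input image, remove its background and split it into per-object crops
#   step_two    - run the existing models on each crop and collect detections
#   step_three  - scrape object categories for the detections and download example pictures
#   step_fourth - preprocess the downloaded dataset (background removal, cropping, orientation)
#   step_five   - write the data needed for training via the ecriture helpers (threaded)
#   step_six    - build a CSV per category and train one model per object
#   step_seven  - re-run detection with the newly trained models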
def step_one():
"""
Input image
"""
path_picture = "dataset/image/current/current.jpg"
path_current = "dataset/image/current/"
    oInput = input("Enter an image")
    # NOTE: the user input is immediately overridden by a hard-coded test image path
    oInput = r"C:\Users\jeanbaptiste\Desktop\assiette\v2\dataset\image\test\assiette1.jpg"
img = open_picture(oInput)
save_picture(path_picture, img)
print("Treatement in progress... \n")
print("Treatment Background in progress ...")
img = main_background(path_picture)
save_picture(path_picture, img)
print("Treatment Background finish")
show_picture("img", img, 1, "y")
print("Separate objects...")
img = take_features_multi_obj(path_picture, "")
liste = os.listdir(path_current)
for i in liste:
im = path_current + i
img = open_picture(im)
if i not in ("current.jpg", "current_copy"):
print("Recuperate position")
_, positionx, positiony = to_crop(img)
write_position(positionx, positiony, str(im))
show_picture("display", img, 1, "y")
print("Separate objects finish")
print("Reposition of objects... ")
liste = os.listdir(path_current)
for i in liste:
if i not in ("current.jpg", "current_copy"):
img = path_current + i
img = take_features_position(img)
img, _, _ = to_crop(img)
save_picture(str(path_current + i), img)
show_picture("display", img, 1, "y")
print("Reposition of objects finsh")
print("\nTreatment finish")
#Detection
from object_detection.objects_detection import detection
#Label
from dataset.information_data.labels_function import treatment_read
from dataset.information_data.labels_function import read
#Main function
from main_function_image import recup_position
from main_function_image import draw
def step_two():
print("Detection in progress ...")
path_current = "dataset/image/current/"
path_models = "training/models/models/"
path_label = "dataset/information_data/label.py"
liste_picture = os.listdir(path_current)
model_list = os.listdir(path_models)
detections = []
images = []
for picture in liste_picture:
if picture != "current.jpg":
image = path_current + str(picture)
img = open_picture(image)
for models in model_list:
model = path_models + str(models)
labels = read(path_label, str(models))
for lab in labels:
information = treatment_read([lab])
w = int(information["dimension"][0])
h = int(information["dimension"][1])
                    try:
                        prediction = detection(model, w, h, img)
                    except Exception:
                        # if detection fails, skip this label instead of reusing a stale prediction
                        continue
if prediction == int(information["label"]):
detections.append([information["name"],
recup_position(image)])
images.append(image)
break
else:
detections.append(["", recup_position(image)])
images.append(image)
break
#show_picture("picture", img, 1, "y")
#print(detections)
for nb, i in enumerate(detections):
if i[1] == None:pass
else:
img = draw(i, nb, images[nb])
show_picture("display", img, 1, "y")
save_picture("dataset/image/current/current_copy.jpg", img)
return detections
from scraping.object_category import main_scrap
from scraping.download_data import download_picture
def step_three(detection):
#Scrap
liste = []
for i in detection:
items = main_scrap(i)
for it in items:
liste.append(it)
    liste_path = os.listdir("dataset/image/dataset")
    # keep only the items that are not already in the dataset folder
    # (avoids removing elements from the list while iterating over it)
    liste = [i for i in liste if i not in liste_path]
#Download
for i in liste:
path = "dataset/image/dataset/"
download_picture(i, path.format(i))
return liste
from picture_operation.delete import main_deleting
from picture_operation.multiple_objects import take_features_multi_obj
from picture_operation.picture_orientation import take_features_position
from picture_operation.background import main_background
def step_fourth(objects):
path_data = "dataset/image/dataset"
path_folder = "dataset/image/dataset/{}"
path_image = "dataset/image/dataset/{}/{}"
liste_path = os.listdir(path_data)
print(liste_path)
for i in liste_path:
picture_folder = os.listdir(path_folder.format(i))
print(path_folder.format(i))
for j in picture_folder:
print(path_image.format(i, j))
img = main_background(path_image.format(i, j))
save_picture(path_image.format(i, j), img)
for j in picture_folder:
print(path_image.format(i, j))
img = take_features_multi_obj(path_image.format(i, j))
for j in picture_folder:
print(path_image.format(i, j))
img = take_features_position(path_image.format(i, j))
save_picture(path_image.format(i, j), img)
for j in picture_folder:
print(path_image.format(i, j))
delete = main_deleting(path_image.format(i, j))
if delete is True:
os.remove(path_image.format(i, j))
from training.training import head_writting
from training.training import picture_writting
from training.training import train
from ecriture.write import writtte
import importlib
def step_five():
path_data = "dataset/image/dataset"
liste_path = os.listdir(path_data)
print(len(liste_path))
liste = []
write = writtte(len(liste_path))
if write:
from ecriture.to_thread import main_threading
liste = main_threading()
print(liste)
return liste
from main_function_image import define_size
from main_function_image import negativ_training
from training.training import head_writting
from training.training import picture_writting
from training.training import train
from dataset.information_data.labels_function import write_labels
def step_six(liste):
#Verify csv
path_data = "dataset/image/dataset"
path_folder = "dataset/image/dataset/{}"
path_image = "dataset/image/dataset/{}/{}"
liste_path = os.listdir(path_data)
path_label = "dataset/information_data/label.py"
for i in liste_path:
print(i)
picture_folder = os.listdir(path_folder.format(i))
if len(picture_folder) > 10 and i != "assiette":
for info_size in liste:
if info_size[2] == path_folder.format(i):
size = define_size(info_size)
number_pix = size[0] * size[1]
write_labels(path_label, "None", str(i),
"None", str(size[0]), str(size[1]), "None")
csv_name = "training/csv/in_training/" + str(i) + ".csv"
model_name = "training/models/in_training/" + str(i)
head_writting(csv_name, number_pix)
picture_writting(csv_name,
path_folder.format(i),
"",
size[0], size[1], "1")
negativ_training(i, csv_name, size)
train(csv_name, model_name)
def step_seven():
print("Detection in progress ...")
path_current = "dataset/image/current/"
path_models = "training/models/in_training/"
path_label = "dataset/information_data/label.py"
liste_picture = os.listdir(path_current)
model_list = os.listdir(path_models)
detections = []
images = []
for picture in liste_picture:
if picture != "current.jpg" and\
picture != "current_copy.jpg":
image = path_current + str(picture)
img = open_picture(image)
for models in model_list:
model = path_models + str(models)
labels = read(path_label, str(None))
for lab in labels:
information = treatment_read([lab])
w = int(information["dimension"][0])
h = int(information["dimension"][1])
                    try:
                        prediction = detection(model, w, h, img)
                        print(models, prediction)
                    except Exception:
                        # skip models that fail rather than reusing a stale prediction
                        continue
if prediction == 1:
detections.append([information["name"], models])
images.append(image)
else:
pass
show_picture("picture", img, 1, "y")
print(detections)
def main():
#step_one()
#detection = step_two()
## via = []
## for i in detection:
## if i[0] not in ("?") and\
## i[1] != None :
## via.append(i[0])
#liste = step_three(via)
#print("\n We need to search this in a first time: ", liste)
#step_fourth(objects)
#liste = step_five()
liste = [[4.385468750000137, 2.9368750000000916, 'dataset/image/dataset/aliment'], [3.3115911458334373, 2.2479661458334026, 'dataset/image/dataset/bol'], [4.904211387434708, 1.5215619546248298, 'dataset/image/dataset/Couteau'], [4.7288758680557015, 1.298111979166708, 'dataset/image/dataset/Cuillere'], [4.772159391534541, 0.9914175485009131, 'dataset/image/dataset/Fourchette'], [4.148116883117012, 2.142094155844224, 'dataset/image/dataset/Paille'], [2.9227008928572342, 2.185962301587369, 'dataset/image/dataset/tasse'], [3.5509643817205414, 2.5156754032258855, 'dataset/image/dataset/verre']]
step_six(liste)
#step_seven()
if __name__ == "__main__":
main()
| LeGrosLezard/qu-est-ce-qu-il-y-a-dans-une-salle-a-manger- | v2/main.py | main.py | py | 10,897 | python | en | code | 0 | github-code | 6 |
| 15134665057 |
from _base import *
from os import path
from psychopy.app.builder.experiment import Param
thisFolder = path.abspath(path.dirname(__file__))  # the absolute path to the folder containing this file
iconFile = path.join(thisFolder,'code.png')
tooltip = _('Code: insert python commands into an experiment')
_localized = {'Begin Experiment': _('Begin Experiment'),
'Begin Routine': _('Begin Routine'),
'Each Frame': _('Each Frame'),
'End Routine': _('End Routine'),
'End Experiment': _('End Experiment')}
class CodeComponent(BaseComponent):
categories = ['Custom']#an attribute of the class, determines the section in the components panel
"""An event class for inserting arbitrary code into Builder experiments"""
def __init__(self, exp, parentName, name='code',
beginExp="",beginRoutine="",eachFrame="",endRoutine="",endExperiment=""):
super(CodeComponent, self).__init__(exp, parentName, name)
self.type='Code'
self.url="http://www.psychopy.org/builder/components/code.html"
#params
self.categories=['misc']
self.order = ['name', 'Begin Experiment', 'Begin Routine', 'Each Frame', 'End Routine', 'End Experiment'] # want a copy, else codeParamNames list gets mutated
self.params['Begin Experiment']=Param(beginExp, valType='extendedCode', allowedTypes=[],
updates='constant', allowedUpdates=[],
hint=_("Code at the start of the experiment (initialization); right-click checks syntax"),
label=_localized['Begin Experiment'])
self.params['Begin Routine']=Param(beginRoutine, valType='extendedCode', allowedTypes=[],
updates='constant', allowedUpdates=[],
hint=_("Code to be run at the start of each repeat of the Routine (e.g. each trial); right-click checks syntax"),
label=_localized['Begin Routine'])
self.params['Each Frame']=Param(eachFrame, valType='extendedCode', allowedTypes=[],
updates='constant', allowedUpdates=[],
hint=_("Code to be run on every video frame during for the duration of this Routine; right-click checks syntax"),
label=_localized['Each Frame'])
self.params['End Routine']=Param(endRoutine, valType='extendedCode', allowedTypes=[],
updates='constant', allowedUpdates=[],
hint=_("Code at the end of this repeat of the Routine (e.g. getting/storing responses); right-click checks syntax"),
label=_localized['End Routine'])
        self.params['End Experiment']=Param(endExperiment, valType='extendedCode', allowedTypes=[],
updates='constant', allowedUpdates=[],
hint=_("Code at the end of the entire experiment (e.g. saving files, resetting computer); right-click checks syntax"),
label=_localized['End Experiment'])
# these inherited params are harmless but might as well trim:
for p in ['startType', 'startVal', 'startEstim', 'stopVal', 'stopType', 'durationEstim']:
del self.params[p]
def writeInitCode(self,buff):
buff.writeIndentedLines(unicode(self.params['Begin Experiment'])+'\n')
def writeRoutineStartCode(self,buff):
buff.writeIndentedLines(unicode(self.params['Begin Routine'])+'\n')
def writeFrameCode(self,buff):
buff.writeIndentedLines(unicode(self.params['Each Frame'])+'\n')
def writeRoutineEndCode(self,buff):
buff.writeIndentedLines(unicode(self.params['End Routine'])+'\n')
def writeExperimentEndCode(self,buff):
buff.writeIndentedLines(unicode(self.params['End Experiment'])+'\n')
| honeymustard33/experiment_riskdetection | project/psycho/psychopy/app/builder/components/code.py | code.py | py | 3,650 | python | en | code | 0 | github-code | 6 |
| 71971104189 |
import kfserving
import os
from typing import Dict
import torch
import importlib
import sys
PYTORCH_FILE = "model.pt"
class PyTorchModel(kfserving.KFModel):
def __init__(self, name: str, model_class_name: str, model_dir: str):
super().__init__(name)
self.name = name
self.model_class_name = model_class_name
self.model_dir = model_dir
self.ready = False
self.model = None
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
def load(self) -> bool:
model_file_dir = kfserving.Storage.download(self.model_dir, self.name)
model_file = os.path.join(model_file_dir, PYTORCH_FILE)
py_files = []
for filename in os.listdir(model_file_dir):
if filename.endswith('.py'):
py_files.append(filename)
if len(py_files) == 1:
model_class_file = os.path.join(model_file_dir, py_files[0])
elif len(py_files) == 0:
raise Exception('Missing PyTorch Model Class File.')
else:
raise Exception('More than one Python file is detected',
'Only one Python file is allowed within model_dir.')
model_class_name = self.model_class_name
# Load the python class into memory
sys.path.append(os.path.dirname(model_class_file))
modulename = os.path.basename(model_class_file).split('.')[0].replace('-', '_')
model_class = getattr(importlib.import_module(modulename), model_class_name)
# Make sure the model weight is transform with the right device in this machine
self.model = model_class().to(self.device)
self.model.load_state_dict(torch.load(model_file, map_location=self.device))
self.model.eval()
self.ready = True
return self.ready
def predict(self, request: Dict) -> Dict:
inputs = []
with torch.no_grad():
try:
inputs = torch.tensor(request["instances"]).to(self.device)
except Exception as e:
raise TypeError(
"Failed to initialize Torch Tensor from inputs: %s, %s" % (e, inputs))
try:
return {"predictions": self.model(inputs).tolist()}
except Exception as e:
raise Exception("Failed to predict %s" % e)
| kubeflow/kfserving-lts | python/pytorchserver/pytorchserver/model.py | model.py | py | 2,366 | python | en | code | 10 | github-code | 6 |
| 26829719818 |
#!/usr/bin/python
__author__ = "Michael Lienhardt and Jacopo Mauro"
__copyright__ = "Copyright 2017, Michael Lienhardt and Jacopo Mauro"
__license__ = "GPL3"
__version__ = "0.5"
__maintainer__ = "Michael Lienhardt"
__email__ = "michael [email protected]"
__status__ = "Prototype"
def identity(x): return x
######################################################################
# SPL CORE FUNCTIONS
######################################################################
def parse_package_name(package_name):
"""
this function splits a portage package name into relevant information
:param package_name: the package name to be split
:return: a tuple containing the group name of this package, its full version and its core version
"""
filename_split = package_name.split("-")
version_full, version = None, None
end = None
if len(filename_split) > 1:
check1 = filename_split[-1]
check2 = filename_split[-2]
if check1[0] == 'r' and check2[0].isdigit():
revision = check1
version = check2
version_full = version + "-" + revision
end = -2
elif check1[0].isdigit():
version = check1
version_full = version
end = -1
package_group = "-".join(filename_split[:end])
return package_group, version_full, version
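# Example (for illustration): parse_package_name("sys-apps/portage-2.3.8-r2")
#   returns ("sys-apps/portage", "2.3.8-r2", "2.3.8")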
def spl_core_create(package_group, version_full, version, slot, subslot):
return package_group, version_full, version, slot, subslot
def spl_core_get_spl_group_name(spl_core):
return spl_core[0]
def spl_core_get_version_full(spl_core):
return spl_core[1]
def spl_core_get_version(spl_core):
return spl_core[2]
def spl_core_get_slot(spl_core):
return spl_core[3]
def spl_core_get_subslot(spl_core):
return spl_core[4]
######################################################################
# TRANSLATE ATOMS INTO HASHABLE PATTERNS
######################################################################
def pattern_create_from_atom(atom):
"""
creates a pattern from a portage atom.
Note that we don't need to distinguish between `=` and `~` slot operation,
as they are only used to trigger compilation.
:param atom: the string of the atom
:return: the corresponding pattern
"""
# 1. version operator
vop = None
begin = 0
if atom[0] in "=<>~":
if atom[1] == "=":
vop = atom[:2]
begin = 2
else:
vop = atom[0]
begin = 1
# 2. slots
sop = None
slot = None
subslot = None
slot_position = atom.find(":")
if slot_position != -1:
slots = atom[slot_position+1:].split("/")
atom = atom[begin:slot_position]
slot = slots[0]
if slot == "*":
sop = "*"
slot = None
elif slot == "=":
sop = "="
slot = None
elif slot[-1] == "=":
sop = "="
slot = slot[:-1]
elif len(slots) > 1:
subslot = slots[1]
else:
atom = atom[begin:]
# 3. version
if vop is None: package_group, version_full, version = atom, None, None
else: package_group, version_full, version = parse_package_name(atom)
has_star = False
if (version_full is not None) and (version_full[-1] == "*"):
has_star = True
version_full = version_full[:-1]
if version[-1] == "*":
version = version[:-1]
# return vop, package_group, version_full, version, has_star, slot, subslot, sop
return vop, package_group, version_full, version, has_star, slot, subslot
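# Example (for illustration): pattern_create_from_atom(">=sys-apps/portage-2.3.8:0/2.3")
#   returns (">=", "sys-apps/portage", "2.3.8", "2.3.8", False, "0", "2.3")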
def pattern_get_package_group(pattern):
"""
return the spl group name at the core of this pattern
:param pattern: the input pattern
:return: the spl group name of the input pattern
"""
return pattern[1]
def pattern_to_atom(pattern):
"""
translates a pattern into its corresponding portage atom
:param pattern: the pattern to translate
:return: the equivalent atom string
"""
atom = ""
if pattern[0]: atom += pattern[0]
atom += pattern[1]
if pattern[2]: atom += "-" + pattern[2]
if pattern[4]: atom += "*"
if pattern[5] or pattern[6]:# or pattern[7]:
atom += ":"
if pattern[5]: atom += pattern[5]
if pattern[6]: atom += "/" + pattern[6]
# if pattern[7]: atom += pattern[7]
return atom
def pattern_to_save_format(pattern):
"""
translates a pattern into a json-friendly dictionary
:param pattern: the pattern to translate
:return: the equivalent dictionary
"""
return {
'vop': pattern[0],
'package_group': pattern[1],
'version_full': pattern[2],
'version': pattern[3],
'has_star': pattern[4],
'slot': pattern[5],
'subslot': pattern[6],
'sop': pattern[7]
}
def pattern_from_save_format(save_format):
"""
translates a json-friendly representation of a pattern into a real pattern
:param save_format: the dictionary to translate
:return: the corresponding pattern
"""
return (
save_format['vop'], save_format['package_group'], save_format['version_full'], save_format['version'],
save_format['has_star'], save_format['slot'], save_format['subslot'], save_format['sop']
)
######################################################################
# MATCHING FUNCTIONS
######################################################################
def compare_extra_len(s):
c = s[0]
if c == '_': return (s[1] == 'p') and ((len(s) < 3) or (s[2] != 'r'))
elif c == "r": return False
else: return True
def get_int_len(s, lens, start):
i = start
while (i < lens) and s[i].isdigit(): i = i + 1
return i
def compare_version(s1, s2):
"""
Returns a positive number if s1 > 2, 0 if the two versions are equal and a negative number if s2 > s1
This function assumes that the parameters are valid portage versions
:param s1: the first version
:param s2: the second version
:return: a positive number if s1 > 2, 0 if the two versions are equal and a negative number if s2 > s1
"""
i = 0
len1 = len(s1)
len2 = len(s2)
maximum = min(len1, len2)
dot = False
number_alternative_mode = False
while (i < maximum) and (s1[i] == s2[i]):
# computes if the number should be managed as integers or fractional part
if s1[i] == '.':
dot = True
number_alternative_mode = False
elif dot:
if s1[i] == '0': number_alternative_mode = True
dot = False
elif number_alternative_mode and (not s1[i].isdigit()):
number_alternative_mode = False
i = i + 1
if i == maximum:
if len1 == len2: return 0
if len1 < len2: return -1 if compare_extra_len(s2[i:]) else 1
return 1 if compare_extra_len(s1[i:]) else -1
else:
if s1[i].isdigit() and s2[i].isdigit():
# 1. check the factional part, that starts with a 0
if s1[i] == '0': return -1
elif s2[i] == '0': return 1
elif number_alternative_mode: return ord(s1[i]) - ord(s2[i])
else: # 2. check the integer part
int_len_1 = get_int_len(s1, len1, i)
int_len_2 = get_int_len(s2, len2, i)
if int_len_1 < int_len_2: return -1
elif int_len_1 > int_len_2: return 1
else: return ord(s1[i]) - ord(s2[i])
else:
# captures "1*" > ".*", "1*" > "_*" and "1*" > "-*", in any context
if s1[i].isdigit(): return 1
elif s2[i].isdigit(): return -1
# captures ".*" > "_*" and ".*" > "-*", in any context
elif s1[i] == '.': return 1
elif s2[i] == '.': return -1
# captures "-*" > "_alpha* -- _rc*" and "_p-*" > "_pre*", in any context
elif s1[i] == '-':
cond = (s2[i] == "r") or (not ((s2[i] == "_") and s2[i+1] == 'p') and ((len2 < i+3) or (s2[i+2] != 'r')))
return 1 if cond else -1
elif s2[i] == '-':
cond = (s1[i] == "r") or (not ((s1[i] == "_") and s1[i+1] == 'p') and ((len1 < i+3) or (s1[i+2] != 'r')))
return -1 if cond else 1
# captures comparison between alpha characters
return ord(s1[i]) - ord(s2[i])
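# Examples (for illustration):
#   compare_version("1.0", "1.1") < 0
#   compare_version("1.10", "1.9") > 0   # integer components without a leading 0 compare numerically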
def match_only_package_group(pattern, package_group):
pattern_package_group = pattern_get_package_group(pattern)
if pattern_package_group == "*/*":
return True
elif (pattern_package_group[0] != "*") and (pattern_package_group[-1] != "*"):
return pattern_package_group == package_group
elif pattern_package_group[0] != "*":
pattern_subgroup = pattern_package_group[2:]
els = package_group[1].split("/")
return pattern_subgroup == els[-1]
else:
pattern_category = pattern_package_group[:-2]
els = package_group[1].split("/")
return pattern_category == els[-2]
def match_only_package_version(pattern, version_full, version):
pattern_vop, pattern_version_full, pattern_version, pattern_has_star = pattern[0], pattern[2], pattern[3], pattern[4]
if (pattern_version_full is None) or (pattern_vop is None):
return True
compare = compare_version(version_full, pattern_version_full)
if pattern_vop == ">=":
if compare < 0:
return False
elif pattern_vop == ">":
if compare <= 0:
return False
elif pattern_vop == "~":
if pattern_version != version:
return False
elif pattern_vop == "=":
if pattern_has_star:
if not version_full.startswith(pattern_version_full):
return False
else:
if compare != 0:
return False
elif pattern_vop == "<=":
if compare > 0:
return False
elif pattern_vop == "<":
if compare >= 0:
return False
return True
def match_only_slot(pattern, slot, subslot):
pattern_slot, pattern_subslot = pattern[5], pattern[6]
if pattern_slot:
if pattern_slot != slot:
return False
if pattern_subslot:
if pattern_subslot != subslot:
return False
return True
def match_package_path(pattern, package_name):
package_group, version_full, version = parse_package_name(package_name)
return match_only_package_group(pattern, package_group) and match_only_package_version(pattern, version_full, version)
def match_spl_full(pattern, spl_core):
return match_only_package_group(pattern, spl_core_get_spl_group_name(spl_core)) and match_spl_simple(pattern, spl_core)
def match_spl_simple(pattern, spl):
return (
match_only_package_version(pattern, spl_core_get_version_full(spl), spl_core_get_version(spl))
and match_only_slot(pattern, spl_core_get_slot(spl), spl_core_get_subslot(spl))
)
######################################################################
# DICTIONARY TO SET BASE CLASS
######################################################################
class dictSet(dict):
"""
This class is used as a container for
- required patterns (mapping between set names to sets of patterns)
- installed packages (mapping between package names to use flag selection)
"""
def add(self, key, val):
if key in self:
self[key].add(val)
else:
self[key] = {val}
def add_key(self, key):
if key not in self: self[key] = set()
def set(self, key, values):
self[key] = values
def update_set(self, dict_set):
for k, v in dict_set.iteritems():
if k in self:
self[k].update(v)
else: self[k] = v
def remove_with_key(self, key, val):
if len(self[key]) == 1: self.pop(key)
else: self[key].remove(val)
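# Example (for illustration):
#   d = dictSet()
#   d.add("@world", "dev-lang/python")
#   d.add("@world", "sys-apps/portage")
#   d["@world"] is now {"dev-lang/python", "sys-apps/portage"}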
######################################################################
# SET MANIPULATION STRUCTURE
######################################################################
class SetManipulation(object):
"""
this class is used for encoding
- use flag manipulation (use in make.default, use.force, use.mask, use.stable.force and use.stable.mask)
- keyword list manipulation
As checked in the tests, "-*" is a valid atom in these files, and so it is included in this class
"""
def __init__(self):
self.positive = set()
self.negative = set()
self.remove_all = False
def add(self, element):
if element[0] == "-":
if element[1] != "*":
element = element[1:]
self.positive.discard(element)
if not self.remove_all:
self.negative.add(element)
else:
self.positive.clear()
self.negative.clear()
self.remove_all = True
else:
self.positive.add(element)
self.negative.discard(element)
def add_all(self, elements):
for element in elements: self.add(element)
return self
def update(self, set_manipulation):
if set_manipulation.remove_all:
self.positive = set_manipulation.positive
self.negative.clear()
self.remove_all = True
else:
self.positive.difference_update(set_manipulation.negative)
self.positive.update(set_manipulation.positive)
if not self.remove_all:
self.negative.update(set_manipulation.negative)
self.negative.difference_update(self.positive)
def get_elements(self): return self.positive | self.negative
def apply(self, s):
if self.remove_all:
s.clear()
s.update(self.positive)
else:
s.difference_update(self.negative)
s.update(self.positive)
def init(self):
return self.positive.copy()
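# Example (for illustration): "-*" clears the set before the positive entries are applied
#   m = SetManipulation()
#   m.add_all(["-*", "ssl", "-doc"])
#   s = {"doc", "gtk"}
#   m.apply(s)   # s is now {"ssl"}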
class SetManipulationPattern(object):
"""
this class is used for all the set manipulation files that are guarded by a specific pattern
- use flag manipulation for specific patterns (package.use, package.use.force, etc)
- keyword manipulation for specific patterns (package.keywords, package.accept_keywords)
"""
def __init__(self):
self.list = []
def add(self, pattern, set_manipulation):
self.list.append( (pattern, set_manipulation) )
def update(self, set_manipulation_pattern):
self.list.extend(set_manipulation_pattern.list)
def apply(self, spl_core, s):
for pattern, set_manipulation in self.list:
if match_spl_full(pattern, spl_core):
set_manipulation.apply(s)
def init(self, spl_core):
res = set()
self.apply(spl_core, res)
return res
class PatternListManipulation(list):
"""
this class is used for simple list of patterns:
- package masking (package.mask, package.unmask)
"""
def add(self, string):
if string[0] == "-":
self.append( (False, pattern_create_from_atom(string[1:])) )
else:
self.append( (True, pattern_create_from_atom(string)) )
def add_all(self, elements):
for element in elements: self.add(element)
return self
update = list.extend
def contains(self, spl_core):
res = False
for add, pattern in self:
if match_spl_full(pattern, spl_core):
res = add
return res
######################################################################
# CONFIGURATION CLASS
######################################################################
class UseSelectionConfig(object):
"""
This class stores all the kind of use flag manipulation in portage:
- use variable in make.default
- use.force, use.mask, use.stable.force, use.stable.mask
- package.use, package.use.force, package.use.mask, package.use.stable.force and package.use.stable.mask
    Additionally, this class has a method to apply these manipulations to a use flag selection.
"""
def __init__(
self,
use=SetManipulation(), use_force=SetManipulation(), use_mask=SetManipulation(),
use_stable_force=SetManipulation(), use_stable_mask=SetManipulation(),
pattern_use=SetManipulationPattern(), pattern_use_force=SetManipulationPattern(),
pattern_use_mask=SetManipulationPattern(),
pattern_use_stable_force=SetManipulationPattern(), pattern_use_stable_mask=SetManipulationPattern()):
self.use = use
self.use_force = use_force
self.use_mask = use_mask
self.use_stable_force = use_stable_force
self.use_stable_mask = use_stable_mask
self.pattern_use = pattern_use
self.pattern_use_force = pattern_use_force
self.pattern_use_mask = pattern_use_mask
self.pattern_use_stable_force = pattern_use_stable_force
self.pattern_use_stable_mask = pattern_use_stable_mask
def update(self, config):
self.use.update(config.use)
self.use_force.update(config.use_force)
self.use_mask.update(config.use_mask)
self.use_stable_force.update(config.use_stable_force)
self.use_stable_mask.update(config.use_stable_mask)
self.pattern_use.update(config.pattern_use)
self.pattern_use_force.update(config.pattern_use_force)
self.pattern_use_mask.update(config.pattern_use_mask)
self.pattern_use_stable_force.update(config.pattern_use_stable_force)
self.pattern_use_stable_mask.update(config.pattern_use_stable_mask)
def get_use_force_mask(self, spl_core, is_stable):
force = self.use_force.init()
self.pattern_use_force.apply(spl_core, force)
if is_stable:
tmp = self.use_stable_force.init()
self.pattern_use_stable_force.apply(spl_core, tmp)
force.update(tmp)
mask = self.use_mask.init()
if is_stable:
tmp = self.use_stable_mask.init()
self.pattern_use_stable_mask.apply(spl_core, tmp)
mask.update(tmp)
return force, mask
def apply(self, spl_core, is_stable, selection):
self.use.apply(selection)
self.pattern_use.apply(spl_core, selection)
force = self.use_force.init()
self.pattern_use_force.apply(spl_core, force)
force, mask = self.get_use_force_mask(spl_core, is_stable)
selection.update(force)
selection.difference_update(mask)
def init(self, spl_core, is_stable):
res = set()
self.apply(spl_core, is_stable, res)
return res
def __eq__(self, o):
if isinstance(o, self.__class__):
return (
self.use == o.use and
self.use_force == o.use_force and
self.use_mask == o.use_mask and
self.use_stable_force == o.use_stable_force and
self.use_stable_mask == o.use_stable_mask and
self.pattern_use == o.pattern_use and
self.pattern_use_force == o.pattern_use_force and
self.pattern_use_mask == o.pattern_use_mask and
self.pattern_use_stable_force == o.pattern_use_stable_force and
self.pattern_use_stable_mask == o.pattern_use_stable_mask
)
else: return False
class MSPLConfig(object):
"""
This class contains all the information related to package/spl configuration
- architecture for the hardware
- use flag implicit declaration (for eapi4 and less, and for eapi5 and more)
- use flags that are hidden from the user (can be useful for consistent output to the user)
- use flag manipulation (before and after the manipulation specified in the package itself)
- package requirement
- packages provided outside of portage
- keywords and visibility (mask) manipulation
    This class also contains a method "combine" to add the information contained in another MSPLConfig object into this
    one; it is used to compute this data incrementally from the different configuration files in portage.
    Finally, the "apply" method computes the relevant data (visibility and use flag selection) for a package.
"""
def __init__(
self, arch=None,
use_declaration_eapi4=set(), use_declaration_eapi5=set(), use_declaration_hidden_from_user=set(),
use_selection_config=UseSelectionConfig(),
pattern_required=dictSet(), pattern_provided=set(),
pattern_mask=PatternListManipulation(), pattern_unmask=PatternListManipulation(),
accept_keywords=SetManipulation(), pattern_keywords=SetManipulationPattern(), pattern_accept_keywords=SetManipulationPattern()):
self.arch = arch
# sets of USE flags
self.use_declaration_eapi4 = use_declaration_eapi4
self.use_declaration_eapi5 = use_declaration_eapi5
self.use_declaration_hidden_from_user = use_declaration_hidden_from_user
# set manipulation (with equivalent pattern set manipulation)
self.use_selection_config = use_selection_config
self.use_selection_config_init = None
# mapping from package set name and set of pattern
self.pattern_required = pattern_required
self.pattern_required_flat = None
# sets of pattern
self.pattern_provided = pattern_provided
# set manipulation (with equivalent pattern set manipulation)
self.pattern_mask = pattern_mask
self.pattern_unmask = pattern_unmask
self.accept_keywords = accept_keywords
self.pattern_keywords = pattern_keywords
self.pattern_accept_keywords = pattern_accept_keywords
self.accept_keywords_full = None
# form incremental updates
self.new_masks = True
self.new_use_declaration_eapi4 = True
self.new_use_declaration_eapi5 = True
self.new_keywords_config = True
self.new_licenses_config = True
self.new_use_flag_config = True
def update(self, config):
if config.arch:
self.arch = config.arch
self.use_declaration_eapi4.update(config.use_declaration_eapi4)
self.use_declaration_eapi5.update(config.use_declaration_eapi5)
self.use_declaration_hidden_from_user.update(config.use_declaration_hidden_from_user)
self.use_selection_config.update(config.use_selection_config)
self.pattern_required.update_set(config.pattern_required)
self.pattern_provided.update(config.pattern_provided)
self.pattern_mask.update(config.pattern_mask)
self.pattern_unmask.update(config.pattern_unmask)
self.accept_keywords.update(config.accept_keywords)
self.pattern_keywords.update(config.pattern_keywords)
self.pattern_accept_keywords.update(config.pattern_accept_keywords)
def update_pattern_use(self, pattern_use): self.use_selection_config.pattern_use.update(pattern_use)
def update_pattern_accept_keywords(self, pattern_accept_keywords):
self.pattern_accept_keywords.update(pattern_accept_keywords)
def update_pattern_keywords(self, pattern_keywords): self.pattern_keywords.update(pattern_keywords)
def update_pattern_mask(self, pattern_mask): self.pattern_mask.update(pattern_mask)
def update_pattern_unmask(self, pattern_unmask): self.pattern_unmask.update(pattern_unmask)
def update_pattern_required(self, pattern_required): self.pattern_required.update_set(pattern_required)
def close_init_phase(self):
self.use_selection_config_init = UseSelectionConfig()
self.use_selection_config_init.use = self.use_selection_config.use
self.use_selection_config.use = SetManipulation()
self.use_selection_config_init.pattern_use = self.use_selection_config.pattern_use
self.use_selection_config.pattern_use = SetManipulationPattern()
def get_unmasked(self, spl_core):
if self.pattern_mask.contains(spl_core):
return self.pattern_unmask.contains(spl_core)
else: return True
def get_stability_status(self, spl_core, unmasked, keyword_set, license): # TODO: add license management
keywords = keyword_set.copy()
self.pattern_keywords.apply(spl_core, keywords)
accept_keywords = self.accept_keywords_full.copy()
self.pattern_accept_keywords.apply(spl_core, accept_keywords)
matched = keywords & accept_keywords
keyword_unmask = bool(matched)
if unmasked:
installable = keyword_unmask
is_stable = not bool(filter(lambda x: x[0] == '~', matched))
else:
installable = False
is_stable = False
return keyword_unmask, True, installable, is_stable # TODO: filled license_unmasked with True for now
def get_use_force_mask(self, spl_core, is_stable):
return self.use_selection_config.get_use_force_mask(spl_core, is_stable)
def get_use_flags(self, spl_core, unmasked, is_stable, use_manipulation):
if unmasked:
use_flags = self.use_selection_config_init.init(spl_core, is_stable)
use_manipulation.apply(use_flags)
self.use_selection_config.apply(spl_core, is_stable, use_flags)
else:
use_flags = set()
return use_flags
#def apply(self, spl_core, use_manipulation, keywords_default):
# # 1. check if the package is masked
# unmasked = self.get_unmasked(spl_core)
# # 2. check if installable and stable
# keyword_mask, installable, is_stable = self.get_stability_status(spl_core, unmasked, keywords_default)
# # 3. compute the USE flag configuration (i.e., product)
# use_flags = self.get_use_flags(spl_core, unmasked, is_stable, use_manipulation)
# # 4. return the result
# return unmasked, installable, is_stable, use_flags
def close(self):
self.pattern_required_flat = {el for k, v in self.pattern_required.iteritems() for el in v}
self.accept_keywords_full = {self.arch} if self.arch else set()
self.accept_keywords.apply(self.accept_keywords_full)
def set_old_config(self, old_config):
self.new_masks = (self.pattern_mask != old_config.pattern_mask) or (self.pattern_unmask != old_config.pattern_unmask)
self.new_use_declaration_eapi4 = (self.use_declaration_eapi4 != old_config.use_declaration_eapi4)
self.new_use_declaration_eapi5 = (self.use_declaration_eapi5 != old_config.use_declaration_eapi5)
self.new_keywords_config = (self.arch != old_config.arch)
if not self.new_keywords_config:
self.new_keywords_config = (self.accept_keywords != old_config.accept_keywords)
if not self.new_keywords_config:
self.new_keywords_config = (self.pattern_keywords != old_config.pattern_keywords)
if not self.new_keywords_config:
self.new_keywords_config = (self.pattern_accept_keywords != old_config.pattern_accept_keywords)
self.new_use_flag_config = (self.use_selection_config != old_config.use_selection_config)
if not self.new_use_flag_config:
self.new_use_flag_config = (self.use_selection_config_init != old_config.use_selection_config_init)
######################################################################
# MAIN SYSTEM CLASS
######################################################################
class Config(object):
"""
This class contains the full configuration of portage.
In addition to the MSPL configuration, this class contains:
- the list of keywords declared in portage
- the list of installed package, with their use flag selection
- the world list, that lists all the patterns required by the user,
accumulated during his previous "emerge" calls.
- the use flag manipulation stated by the user in the environment
    Consequently, this class has an "apply" wrapper method around the "apply" method of the MSPL configuration,
    which also applies the use flag manipulation from the environment.
"""
def __init__(self):
self.mspl_config = MSPLConfig()
self.keyword_list = None
self.installed_packages = None
self.world = None
self.use_manipulation_env = None
self.pattern_required_flat = None
def set_use_manipulation_env(self, use_flag_manipulation):
self.use_manipulation_env = SetManipulation()
self.use_manipulation_env.add_all(use_flag_manipulation)
def close_init_phase(self): self.mspl_config.close_init_phase()
def close(self):
self.mspl_config.close()
self.pattern_required_flat = self.mspl_config.pattern_required_flat | self.world
| HyVar/gentoo_to_mspl | guest/hyvar/core_data.py | core_data.py | py | 25,599 | python | en | code | 10 | github-code | 6 |
| 4127628980 |
import math

# A strong number equals the sum of the factorials of its digits (e.g. 145 = 1! + 4! + 5!)
# Renamed sum/list to total/digits to avoid shadowing the built-ins
total = 0
number = input("Enter the number: ")
digits = []
for i in number:
    digits.append(i)
for i in digits:
    total = total + math.factorial(int(i))
print(total)
if total == int(number):
    print("It's a strong number")
else:
    print("It's not a strong number")
| vijama1/codevita | strong.py | strong.py | py | 260 | python | en | code | 0 | github-code | 6 |
| 26383863194 |
from typing import Tuple
import gym
import numpy as np
class PreprocessEnv(gym.Wrapper): # environment wrapper
def __init__(self, env, if_print=True):
self.env = gym.make(env) if isinstance(env, str) else env
super().__init__(self.env)
self.step = self.step_type
(self.env_name, self.state_dim, self.action_dim, self.action_max, self.max_step,
self.if_discrete, self.target_reward) = get_gym_env_info(self.env, if_print)
def reset(self) -> np.ndarray:
state = self.env.reset()
return state.astype(np.float32)
def step_type(self, action) -> Tuple[np.ndarray, float, bool, dict]:
state, reward, done, info = self.env.step(action * self.action_max)
return state.astype(np.float32), reward, done, info
def get_gym_env_info(env, if_print) -> Tuple[str, int, int, int, int, bool, float]:
import gym # gym of OpenAI is not necessary for ElegantRL (even RL)
gym.logger.set_level(40) # Block warning: 'WARN: Box bound precision lowered by casting to float32'
assert isinstance(env, gym.Env)
env_name = env.unwrapped.spec.id
state_shape = env.observation_space.shape
state_dim = state_shape[0] if len(state_shape) == 1 else state_shape # sometimes state_dim is a list
target_reward = getattr(env, 'target_reward', None)
target_reward_default = getattr(env.spec, 'reward_threshold', None)
if target_reward is None:
target_reward = target_reward_default
if target_reward is None:
target_reward = 2 ** 16
max_step = getattr(env, 'max_step', None)
max_step_default = getattr(env, '_max_episode_steps', None)
if max_step is None:
max_step = max_step_default
if max_step is None:
max_step = 2 ** 10
if_discrete = isinstance(env.action_space, gym.spaces.Discrete)
if if_discrete: # make sure it is discrete action space
action_dim = env.action_space.n
action_max = int(1)
elif isinstance(env.action_space, gym.spaces.Box): # make sure it is continuous action space
action_dim = env.action_space.shape[0]
action_max = float(env.action_space.high[0])
else:
raise RuntimeError('| Please set these value manually: if_discrete=bool, action_dim=int, action_max=1.0')
print(f"\n| env_name: {env_name}, action space if_discrete: {if_discrete}"
f"\n| state_dim: {state_dim}, action_dim: {action_dim}, action_max: {action_max}"
f"\n| max_step: {max_step} target_reward: {target_reward}") if if_print else None
return env_name, state_dim, action_dim, action_max, max_step, if_discrete, target_reward
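# Usage sketch (assumes a standard Gym environment id such as "Pendulum-v0"):
#   env = PreprocessEnv("Pendulum-v0")
#   state = env.reset()
#   next_state, reward, done, info = env.step(env.action_space.sample())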
| sbl1996/hrl | hrl/elegant/env.py | env.py | py | 2,648 | python | en | code | 0 | github-code | 6 |
| 73733786108 |
# views.py
import os
from flask import Flask, request, render_template, flash, redirect, url_for, get_flashed_messages, session, abort
from .forms import LoginForm, RegistrationForm, ShoppingListForm, additemsForm
from . import app
from app.modals import User
@app.route('/', methods= ['GET', 'POST'])
def index():
if request.method == 'POST':
if 'user_id' in session:
return redirect(url_for ('viewitems'))
else:
return redirect(url_for ('login'))
@app.route('/signin', methods=['GET', 'POST'])
def login():
form = LoginForm()
if request.method == 'POST':
if form.validate_on_submit():
if 'user_id' in session:
return redirect(url_for('viewitems'))
return redirect(url_for('showsignup'))
else:
if form.validate() == False:
flash('All fields are required.')
return render_template('signin.html', form = form)
return render_template('signin.html',
title='Sign In',
form = form)
@app.route('/signup', methods= ['GET', 'POST'])
def showsignup():
form = RegistrationForm(request.form)
if request.method == 'POST':
if form.validate_on_submit():
#return error if email is already registered
user_dict = User.users.items()
existing_user = {k:v for k,v in user_dict if form.Email.data in v['email']}
if existing_user:
flash('email exists please log in instead')
return render_template('signin.html', form = LoginForm())
else:
                    #if the email does not exist, register the user
new_user = User(form.Username.data, form.Email.data, form.Password.data, form.Password2.data)
new_user.create_user()
#assign a user_id
#create a session
for key, value in user_dict: # gets id, eg 2
if form.Email.data in value['email']:
session['user_id'] = key
flash('You have successfully signed up')
return redirect(url_for('login', form = LoginForm()))
elif form.errors:
if form.Password.data != form.Password2.data:
flash({"message": "Your passwords don't match!"})
return render_template('signup.html', form = form)
elif request.method == 'GET':
return render_template('signup.html',
title='Sign up',
form=form)
@app.route('/create_shoppinglist', methods= ['GET', 'POST'])
def createshoppinglist():
form = ShoppingListForm()
if request.method == 'POST':
if form.validate() == False:
            flash('All fields are required')
return render_template('create_list.html', form = form)
else:
flash('You have successfully created a list')
return redirect('/additem')
elif request.method == 'GET':
return render_template('create_shoppinglist.html',
title='items',
form=form)
@app.route('/additem', methods= ['GET', 'POST'])
def additem():
if request.method == "GET":
form = additemsForm(request.form)
return render_template('additem.html', form = form)
flash("You have added an item to your shopping list")
elif request.method == 'POST':
return redirect('/viewitems')
@app.route('/viewitems', methods= ['GET', 'POST'])
def viewitems():
form = additemsForm(request.form)
if request.method == "POST":
itemname = request.form['itemname']
Quantity = request.form['Quantity']
Price = request.form['Price']
return render_template('viewitems1.html', itemname = itemname, Quantity = Quantity, Price = Price)
elif request.method == "GET":
return redirect('/additem')
| Basemera/trailapp | app/views.py | views.py | py | 4,234 | python | en | code | 0 | github-code | 6 |
| 15932161401 |
import datetime
import time
import json
import six
from ..exceptions import HydraError, ResourceNotFoundError
from . import scenario, rules
from . import data
from . import units
from .objects import JSONObject
from ..util.permissions import required_perms
from hydra_base.lib import template, attributes
from ..db.model import Project, Network, Scenario, Node, Link, ResourceGroup,\
ResourceAttr, Attr, ResourceType, ResourceGroupItem, Dataset, Metadata, DatasetOwner,\
ResourceScenario, TemplateType, TypeAttr, Template, NetworkOwner, User, Rule
from sqlalchemy.orm import noload, joinedload
from .. import db
from sqlalchemy import func, and_, or_, distinct
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.orm import aliased
from ..util import hdb
from sqlalchemy import case
from sqlalchemy.sql import null
from collections import namedtuple
from hydra_base import config
import logging
log = logging.getLogger(__name__)
# Python 2 and 3 compatible string checking
# TODO remove this when Python2 support is dropped.
try:
unicode
except NameError:
unicode = str
def _update_attributes(resource_i, attributes):
if attributes is None:
return dict()
attrs = {}
resource_attribute_qry = db.DBSession.query(ResourceAttr)
if resource_i.ref_key == 'NETWORK':
resource_attribute_qry = resource_attribute_qry.filter(ResourceAttr.network_id==resource_i.id)
elif resource_i.ref_key == 'NODE':
resource_attribute_qry = resource_attribute_qry.filter(ResourceAttr.node_id==resource_i.id)
elif resource_i.ref_key == 'LINK':
resource_attribute_qry = resource_attribute_qry.filter(ResourceAttr.link_id==resource_i.link_id)
elif resource_i.ref_key == 'GROUP':
resource_attribute_qry = resource_attribute_qry.filter(ResourceAttr.group_id==resource_i.group_id)
resource_attributes = resource_attribute_qry.all()
attr_id_map = dict([(ra_i.id, ra_i) for ra_i in resource_attributes])
#ra is for ResourceAttr
for ra in attributes:
if ra.id < 0:
ra_i = resource_i.add_attribute(ra.attr_id, ra.attr_is_var)
else:
ra_i = attr_id_map[ra.id]
ra_i.attr_is_var = ra.attr_is_var
attrs[ra.id] = ra_i
return attrs
def get_scenario_by_name(network_id, scenario_name,**kwargs):
try:
        scen = db.DBSession.query(Scenario).filter(and_(Scenario.network_id == network_id, func.lower(Scenario.name) == scenario_name.lower())).one()
return scen.id
except NoResultFound:
log.info("No scenario in network %s with name %s"\
% (network_id, scenario_name))
return None
def get_timing(time):
return datetime.datetime.now() - time
def _get_all_attributes(network):
"""
Get all the complex mode attributes in the network so that they
can be used for mapping to resource scenarios later.
"""
attrs = network.attributes
for n in network.nodes:
attrs.extend(n.attributes)
for l in network.links:
attrs.extend(l.attributes)
for g in network.resourcegroups:
attrs.extend(g.attributes)
return attrs
def _check_ra_duplicates(all_resource_attrs, resource_id_name_map):
"""
Check for any duplicate resource attributes before inserting
    into the DB. This just helps to prevent an ugly DB constraint error
"""
unique_ra_check = {}
for ra in all_resource_attrs:
k = (_get_resource_id(ra), ra['attr_id'])
if unique_ra_check.get(k) is None:
unique_ra_check[k] = ra
else:
ref_key = ra['ref_key']
if ref_key == 'NODE':
ref_id = ra['node_id']
elif ref_key == 'LINK':
ref_id = ra['link_id']
elif ref_key == 'GROUP':
ref_id = ra['group_id']
elif ref_key == 'NETWORK':
ref_id = ra['network_id']
resource_name = resource_id_name_map[ref_id]
attr_id = ra['attr_id']
attr_name = db.DBSession.query(Attr.name).filter(Attr.id==attr_id).one()
raise HydraError(f"Duplicate Resource Attr specified: {resource_name} {attr_name}")
def _bulk_add_resource_attrs(network_id, ref_key, resources, resource_name_map, template_lookup=None):
log.info("Bulk adding resource attributes")
if template_lookup is None:
template_lookup = {}
start_time = datetime.datetime.now()
#List of resource attributes
resource_attrs = {}
#Default ra / dataset pairings.
defaults = {}
attr_lookup = {}
all_attrs = db.DBSession.query(Attr).all()
for a in all_attrs:
attr_lookup[a.id] = a
#First get all the attributes assigned from the csv files.
t0 = datetime.datetime.now()
for resource in resources:
#cast name as string here in case the name is a number
resource_i = resource_name_map[str(resource.name)]
resource_attrs[resource.id] = []
if resource.attributes is not None:
for ra in resource.attributes:
if attr_lookup.get(ra.attr_id) is None:
raise HydraError(f"Unable to process attribute {ra.attr_id} on resource {resource.name} as it does not exist")
resource_attrs[resource.id].append({
'ref_key' : ref_key,
'node_id' : resource_i.id if ref_key == 'NODE' else None,
'link_id' : resource_i.id if ref_key == 'LINK' else None,
'group_id' : resource_i.id if ref_key == 'GROUP' else None,
'network_id' : resource_i.id if ref_key == 'NETWORK' else None,
'attr_id' : ra.attr_id,
'attr_is_var' : ra.attr_is_var,
})
logging.info("Resource attributes from resources added in %s",
(datetime.datetime.now() - t0))
#Now get all the attributes supposed to be on the resources based on the types.
t0 = time.time()
##the current user is validated, but some checks require admin permissions,
##so call as a user with all permissions
admin_id = config.get('DEFAULT', 'ALL_PERMISSION_USER', 1)
# template_lookup = {} #a lookup of all the templates used by the resource
typeattr_lookup = {} # a lookup from type ID to a list of typeattrs
#A lookup from type ID to the child template that it should be using.
#We assume that a resource can't have 2 type IDS from the same network.
type_child_template_id_lookup = {}
#Holds all the attributes supposed to be on a resource based on its specified
#type
resource_resource_types = []
resource_id_name_map = {}
network_child_template_id = None
checked_for_child_template = False
for resource in resources:
#cast name as string here in case the name is a number
resource_i = resource_name_map[str(resource.name)]
resource_id_name_map[resource_i.id] = str(resource.name)
existing_attrs = [ra['attr_id'] for ra in resource_attrs[resource.id]]
if resource.types is not None:
for resource_type in resource.types:
#Go through all the resource types and add the appropriate resource
#type entries
resource_type_id = resource_type.id
if resource_type.child_template_id is None:
if type_child_template_id_lookup.get(resource_type_id) is None:
if network_child_template_id is None and checked_for_child_template == False:
network_child_template_id = template.get_network_template(network_id, resource_type.id)#TODO this should be type_id
checked_for_child_template = True
#ok, so no child ID found. We need to just use the template
#ID of the type which was given
if network_child_template_id is None:
tt = template.get_templatetype(resource_type.id, user_id=admin_id)
network_child_template_id = tt.template_id
type_child_template_id_lookup[resource_type_id] = network_child_template_id
resource_type.child_template_id = type_child_template_id_lookup[resource_type_id]
ref_id = resource_i.id
if resource_type.id is None:
raise HydraError(f"Resource type on resource {resource_i.name} has no ID")
resource_resource_types.append(
{
'ref_key' : ref_key,
'node_id' : resource_i.id if ref_key == 'NODE' else None,
'link_id' : resource_i.id if ref_key == 'LINK' else None,
'group_id' : resource_i.id if ref_key == 'GROUP' else None,
'network_id' : resource_i.id if ref_key == 'NETWORK' else None,
'type_id' : resource_type.id,#TODO this should be type_id
'child_template_id' : resource_type.child_template_id
}
)
#Go through all types in the resource and add attributes from these types
template_j = template_lookup.get(resource_type.child_template_id)
if template_j is None:
#it's OK to use user ID 1 here because the calling function has been
#validated for the calling user's permission to get the network
tt = template.get_templatetype(resource_type.id, user_id=admin_id)
template_j = template.get_template(resource_type.child_template_id, user_id=admin_id)
template_lookup[template_j.id] = template_j
for tt in template_j.templatetypes:
typeattr_lookup[tt.id] = tt.typeattrs
typeattrs = typeattr_lookup.get(resource_type.id, []) #TODO this should be type_id
for ta in typeattrs:
if ta.attr_id not in existing_attrs:
resource_attrs[resource.id].append({
'ref_key' : ref_key,
'node_id' : resource_i.id if ref_key == 'NODE' else None,
'link_id' : resource_i.id if ref_key == 'LINK' else None,
'group_id' : resource_i.id if ref_key == 'GROUP' else None,
'network_id' : resource_i.id if ref_key == 'NETWORK' else None,
'attr_id' : ta.attr_id,
'attr_is_var' : ta.attr_is_var,
})
existing_attrs.append(ta.attr_id)
if ta.default_dataset_id is not None:
defaults[(ref_id, ta.attr_id)] = {'dataset_id':ta.default_dataset_id}
if len(resource_resource_types) > 0:
db.DBSession.bulk_insert_mappings(ResourceType, resource_resource_types)
logging.info("%s ResourceTypes inserted in %s secs", \
len(resource_resource_types), str(time.time() - t0))
logging.info("Resource attributes from types added in %s",
(datetime.datetime.now() - start_time))
if len(resource_attrs) > 0:
all_resource_attrs = []
for na in resource_attrs.values():
all_resource_attrs.extend(na)
_check_ra_duplicates(all_resource_attrs, resource_id_name_map)
if len(all_resource_attrs) > 0:
db.DBSession.bulk_insert_mappings(ResourceAttr, all_resource_attrs)
logging.info("ResourceAttr insert took %s secs", str(time.time() - t0))
else:
logging.warning("No attributes on any %s....", ref_key.lower())
logging.info("Resource attributes insertion from types done in %s",\
(datetime.datetime.now() - start_time))
#Now that the attributes are in, we need to map the attributes in the DB
#to the attributes in the incoming data so that the resource scenarios
#know what to refer to.
res_qry = db.DBSession.query(ResourceAttr)
if ref_key == 'NODE':
res_qry = res_qry.join(Node).filter(Node.network_id == network_id)
elif ref_key == 'GROUP':
res_qry = res_qry.join(ResourceGroup).filter(ResourceGroup.network_id == network_id)
elif ref_key == 'LINK':
res_qry = res_qry.join(Link).filter(Link.network_id == network_id)
elif ref_key == 'NETWORK':
res_qry = res_qry.filter(ResourceAttr.network_id == network_id)
real_resource_attrs = res_qry.all()
logging.info("retrieved %s entries in %s",
len(real_resource_attrs), (datetime.datetime.now() - start_time))
resource_attr_dict = {}
for resource_attr in real_resource_attrs:
if ref_key == 'NODE':
ref_id = resource_attr.node_id
elif ref_key == 'GROUP':
ref_id = resource_attr.group_id
elif ref_key == 'LINK':
ref_id = resource_attr.link_id
elif ref_key == 'NETWORK':
ref_id = resource_attr.network_id
resource_attr_dict[(ref_id, resource_attr.attr_id)] = resource_attr
if defaults.get((ref_id, resource_attr.attr_id)):
defaults[(ref_id, resource_attr.attr_id)]['id'] = resource_attr.id
logging.info("Processing Query results took %s",
(datetime.datetime.now() - start_time))
resource_attrs = {}
for resource in resources:
iface_resource = resource_name_map[str(resource.name)]
if ref_key == 'NODE':
ref_id = iface_resource.node_id
elif ref_key == 'GROUP':
ref_id = iface_resource.group_id
elif ref_key == 'LINK':
ref_id = iface_resource.link_id
elif ref_key == 'NETWORK':
ref_id = iface_resource.id
if resource.attributes is not None:
for ra in resource.attributes:
resource_attrs[ra.id] = resource_attr_dict[(ref_id, ra.attr_id)]
logging.info("Resource attributes added in %s",\
(datetime.datetime.now() - start_time))
logging.debug(" resource_attrs size: %s",\
len(resource_attrs))
return resource_attrs, defaults, template_lookup
def _add_nodes_to_database(net_i, nodes):
#First add all the nodes
log.info("Adding nodes to network %s", net_i.id)
node_list = []
for node in nodes:
node_dict = {'network_id' : net_i.id,
'name' : node.name,
'description': node.description,
'layout' : node.get_layout(),
'x' : node.x,
'y' : node.y,
}
node_list.append(node_dict)
t0 = time.time()
if len(node_list):
db.DBSession.bulk_insert_mappings(Node, node_list)
db.DBSession.flush()
logging.info("Node insert took %s secs"% str(time.time() - t0))
def _add_nodes(net_i, nodes, template_lookup):
#check_perm(user_id, 'edit_topology')
start_time = datetime.datetime.now()
#List of resource attributes
node_attrs = {}
#Maps temporary node_ids to real node_ids
node_id_map = dict()
if nodes is None or len(nodes) == 0:
return node_id_map, node_attrs, {}
_add_nodes_to_database(net_i, nodes)
iface_nodes = dict()
for n_i in net_i.nodes:
if iface_nodes.get(n_i.name) is not None:
raise HydraError("Duplicate Node Name: %s"%(n_i.name))
iface_nodes[n_i.name] = n_i
for node in nodes:
#cast node.name as str here as a node name can sometimes be a number
node_id_map[node.id] = iface_nodes[str(node.name)]
node_attrs, defaults, template_lookup = _bulk_add_resource_attrs(net_i.id, 'NODE', nodes, iface_nodes, template_lookup)
log.info("Nodes added in %s", get_timing(start_time))
return node_id_map, node_attrs, defaults
def _add_links_to_database(net_i, links, node_id_map):
log.info("Adding links to network")
link_dicts = []
for link in links:
node_1 = node_id_map.get(link.node_1_id)
node_2 = node_id_map.get(link.node_2_id)
if node_1 is None or node_2 is None:
raise HydraError("Node IDS (%s, %s)are incorrect!"%(node_1, node_2))
link_dicts.append({'network_id' : net_i.id,
'name' : link.name,
'description' : link.description,
'layout' : link.get_layout(),
'node_1_id' : node_1.id,
'node_2_id' : node_2.id
})
if len(link_dicts) > 0:
db.DBSession.bulk_insert_mappings(Link, link_dicts)
def _add_links(net_i, links, node_id_map, template_lookup):
#check_perm(user_id, 'edit_topology')
start_time = datetime.datetime.now()
#List of resource attributes
link_attrs = {}
#Map negative IDS to their new, positive, counterparts.
link_id_map = dict()
if links is None or len(links) == 0:
return link_id_map, link_attrs, {}
#check for duplicate names:
link_names = []
duplicate_link_names = []
for link in links:
if link.name in link_names:
duplicate_link_names.append(link.name)
else:
link_names.append(link.name)
if len(duplicate_link_names) > 0:
raise HydraError(f"Duplicate link names: {duplicate_link_names}")
#Then add all the links.
#################################################################
_add_links_to_database(net_i, links, node_id_map)
###################################################################
log.info("Links added in %s", get_timing(start_time))
iface_links = {}
for l_i in net_i.links:
iface_links[str(l_i.name)] = l_i
log.info("Link Map created %s", get_timing(start_time))
for link in links:
link_id_map[link.id] = iface_links[str(link.name)]
log.info("Link ID Map created %s", get_timing(start_time))
link_attrs, defaults, template_lookup = _bulk_add_resource_attrs(net_i.id, 'LINK', links, iface_links, template_lookup)
log.info("Links added in %s", get_timing(start_time))
return link_id_map, link_attrs, defaults
def _add_resource_groups(net_i, resourcegroups, template_lookup):
start_time = datetime.datetime.now()
#List of resource attributes
group_attrs = {}
#Map negative IDS to their new, positive, counterparts.
group_id_map = dict()
if resourcegroups is None or len(resourcegroups)==0:
return group_id_map, group_attrs, {}
#Then add all the groups.
log.info("Adding groups to network")
group_dicts = []
if resourcegroups:
for group in resourcegroups:
group_dicts.append({'network_id' : net_i.id,
'name' : group.name,
'description' : group.description,
})
iface_groups = {}
if len(group_dicts) > 0:
db.DBSession.bulk_insert_mappings(ResourceGroup, group_dicts)
log.info("Resource Groups added in %s", get_timing(start_time))
for g_i in net_i.resourcegroups:
if iface_groups.get(g_i.name) is not None:
raise HydraError("Duplicate Resource Group: %s"%(g_i.name))
iface_groups[g_i.name] = g_i
for group in resourcegroups:
if group.id not in group_id_map:
group_i = iface_groups[group.name]
group_attrs[group.id] = []
for ra in group.attributes:
group_attrs[group.id].append({
'ref_key' : 'GROUP',
'group_id' : group_i.id,
'attr_id' : ra.attr_id,
'attr_is_var' : ra.attr_is_var,
})
group_id_map[group.id] = group_i
group_attrs, defaults, template_lookup = _bulk_add_resource_attrs(net_i.id, 'GROUP', resourcegroups, iface_groups, template_lookup)
log.info("Groups added in %s", get_timing(start_time))
return group_id_map, group_attrs, defaults
@required_perms("add_network")
def add_network(network, **kwargs):
"""
Takes an entire network complex model and saves it to the DB. This
complex model includes links & scenarios (with resource data). Returns
the network's complex model.
    As links connect two nodes using node IDs, new nodes will not yet have
    node IDs of their own. In this case, use negative IDs as temporary IDs
    until the node has been given a permanent ID.
    All inter-object referencing of new objects should be done using
    negative IDs in the client.
    The returned object will have positive IDs.
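    Example (illustrative sketch only -- the project ID and user ID below are
    assumptions, and a real network would normally also carry nodes, links and
    scenarios)::

        network = JSONObject({'project_id': 1,
                              'name': 'demo network',
                              'description': 'created via add_network',
                              'nodes': [], 'links': [],
                              'resourcegroups': [], 'scenarios': []})
        net_i = add_network(network, user_id=1)
        # net_i.id is populated once the session has been flushed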
"""
db.DBSession.autoflush = False
start_time = datetime.datetime.now()
log.debug("Adding network")
insert_start = datetime.datetime.now()
proj_i = db.DBSession.query(Project)\
.filter(Project.id == network.project_id).first()
if proj_i is None:
raise HydraError("Project ID is none. A project ID must be specified on the Network")
existing_net = db.DBSession.query(Network)\
.filter(Network.project_id == network.project_id,
Network.name == network.name).first()
if existing_net is not None:
raise HydraError(f"A network with the name {network.name} is already"
" in project {network.project_id}")
user_id = kwargs.get('user_id')
proj_i.check_write_permission(user_id)
net_i = Network()
net_i.project_id = network.project_id
net_i.name = network.name
net_i.description = network.description
net_i.created_by = user_id
net_i.projection = network.projection
net_i.layout = network.get_json('layout')
net_i.appdata = network.get_json('appdata')
    db.DBSession.add(net_i)
    db.DBSession.flush()
    network.id = net_i.id
#These two lists are used for comparison and lookup, so when
#new attributes are added, these lists are extended.
#List of all the resource attributes
all_resource_attrs = {}
name_map = {network.name:net_i}
network_attrs, network_defaults, template_lookup = _bulk_add_resource_attrs(net_i.id, 'NETWORK', [network], name_map)
hdb.add_resource_types(net_i, network.types)
all_resource_attrs.update(network_attrs)
log.info("Network attributes added in %s", get_timing(start_time))
node_id_map, node_attrs, node_datasets = _add_nodes(net_i, network.nodes, template_lookup)
all_resource_attrs.update(node_attrs)
link_id_map, link_attrs, link_datasets = _add_links(net_i, network.links, node_id_map, template_lookup)
all_resource_attrs.update(link_attrs)
grp_id_map, grp_attrs, grp_datasets = _add_resource_groups(net_i, network.resourcegroups, template_lookup)
all_resource_attrs.update(grp_attrs)
defaults = list(grp_datasets.values()) + list(link_datasets.values()) \
+ list(node_datasets.values()) + list(network_defaults.values())
start_time = datetime.datetime.now()
scenario_names = []
if network.scenarios is not None:
log.info("Adding scenarios to network")
for s in network.scenarios:
log.info("Adding scenario %s", s.name)
if s.name in scenario_names:
raise HydraError("Duplicate scenario name: %s"%(s.name))
scen = Scenario()
scen.name = s.name
scen.description = s.description
scen.layout = s.get_layout()
scen.start_time = s.start_time
scen.end_time = s.end_time
scen.time_step = s.time_step
scen.created_by = user_id
scenario_names.append(s.name)
#extract the data from each resourcescenario
incoming_datasets = []
scenario_resource_attrs = []
for r_scen in s.resourcescenarios:
if all_resource_attrs.get(r_scen.resource_attr_id) is None:
raise HydraError(f"Couldn't find resource attribute {r_scen.resource_attr_id} "
f"as defined on resource scenario {r_scen}. "
f"Shot in the dark: "
f"Does the exporting network have duplicate attributes?")
ra = all_resource_attrs[r_scen.resource_attr_id]
incoming_datasets.append(r_scen.dataset)
scenario_resource_attrs.append(ra)
data_start_time = datetime.datetime.now()
for default in defaults:
scen.add_resource_scenario(JSONObject(default),
JSONObject({'id':default['dataset_id']}),
source=kwargs.get('app_name'))
datasets = data._bulk_insert_data(
incoming_datasets,
user_id,
kwargs.get('app_name')
)
log.info("Data bulk insert took %s", get_timing(data_start_time))
ra_start_time = datetime.datetime.now()
for i, ra in enumerate(scenario_resource_attrs):
scen.add_resource_scenario(ra, datasets[i], source=kwargs.get('app_name'))
log.info("Resource scenarios added in %s", get_timing(ra_start_time))
item_start_time = datetime.datetime.now()
if s.resourcegroupitems is not None:
for group_item in s.resourcegroupitems:
group_item_i = ResourceGroupItem()
group_item_i.group = grp_id_map[group_item.group_id]
group_item_i.ref_key = group_item.ref_key
if group_item.ref_key == 'NODE':
group_item_i.node = node_id_map[group_item.ref_id]
elif group_item.ref_key == 'LINK':
group_item_i.link = link_id_map[group_item.ref_id]
elif group_item.ref_key == 'GROUP':
group_item_i.subgroup = grp_id_map[group_item.ref_id]
else:
raise HydraError("A ref key of %s is not valid for a "
"resource group item."%group_item.ref_key)
scen.resourcegroupitems.append(group_item_i)
log.info("Group items insert took %s", get_timing(item_start_time))
net_i.scenarios.append(scen)
log.info("Scenario %s added", s.name)
log.info("Scenarios added in %s", get_timing(start_time))
net_i.set_owner(user_id)
db.DBSession.flush()
log.info("Insertion of network took: %s",(datetime.datetime.now()-insert_start))
return net_i
def _get_all_resource_attributes(network_id, template_id=None, include_non_template_attributes=False):
"""
Get all the attributes for the nodes, links and groups of a network.
Return these attributes as a dictionary, keyed on type (NODE, LINK, GROUP)
then by ID of the node or link.
args:
network_id (int) The ID of the network from which to retrieve the attributes
template_id (int): Optional ID of a template, which when specified only returns
attributes relating to that template
include_non_template_attributes (bool): If template_id is specified and any
            resource has attributes which are NOT associated to any
network template, this flag indicates whether to return them or not.
returns:
        A dictionary keyed on ref key ('NODE', 'LINK', 'GROUP', 'NETWORK'),
        where each value maps a resource ID to the list of that resource's
        attributes (sqlalchemy result proxy objects).
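        Illustrative shape of the returned dictionary (the IDs are made up)::

            {
                'NODE':    {5: [<ResourceAttr>, ...], 6: [...]},
                'LINK':    {9: [<ResourceAttr>, ...]},
                'GROUP':   {},
                'NETWORK': {1: [<ResourceAttr>, ...]},
            }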
"""
base_qry = db.DBSession.query(
ResourceAttr.id.label('id'),
ResourceAttr.ref_key.label('ref_key'),
ResourceAttr.cr_date.label('cr_date'),
ResourceAttr.attr_is_var.label('attr_is_var'),
ResourceAttr.node_id.label('node_id'),
ResourceAttr.link_id.label('link_id'),
ResourceAttr.group_id.label('group_id'),
ResourceAttr.network_id.label('network_id'),
ResourceAttr.attr_id.label('attr_id'),
Attr.name.label('name'),
Attr.dimension_id.label('dimension_id'),
).filter(Attr.id==ResourceAttr.attr_id)
all_node_attribute_qry = base_qry.join(Node).filter(Node.network_id == network_id)
all_link_attribute_qry = base_qry.join(Link).filter(Link.network_id == network_id)
all_group_attribute_qry = base_qry.join(ResourceGroup)\
.filter(ResourceGroup.network_id == network_id)
network_attribute_qry = base_qry.filter(ResourceAttr.network_id == network_id)
x = time.time()
logging.info("Getting all attributes using execute")
attribute_qry = all_node_attribute_qry.union(all_link_attribute_qry,
all_group_attribute_qry,
network_attribute_qry)
all_resource_attributes = attribute_qry.all()
log.info("%s attrs retrieved in %s", len(all_resource_attributes), time.time()-x)
logging.info("Attributes retrieved. Processing results...")
x = time.time()
rt_attribute_dict = {
'NODE' : {},
'LINK' : {},
'GROUP': {},
'NETWORK': {},
}
template_attr_lookup, all_network_typeattrs = _get_network_template_attribute_lookup(network_id)
for resource_attr in all_resource_attributes:
if template_id is not None:
#check if it's in the template. If not, it's either associated to another
#template or to no template
if resource_attr.attr_id not in template_attr_lookup.get(template_id, []):
#check if it's in any other template
if include_non_template_attributes is True:
#if it's associated to a template (but not this one because
#it wouldn't have reached this far) then ignore it
if resource_attr.attr_id in all_network_typeattrs:
continue
else:
#The attr is associated to another template.
continue
attr_dict = rt_attribute_dict[resource_attr.ref_key]
resourceid = _get_resource_id(resource_attr)
resourceattrlist = attr_dict.get(resourceid, [])
resourceattrlist.append(resource_attr)
attr_dict[resourceid] = resourceattrlist
logging.info("Attributes processed in %s", time.time()-x)
return rt_attribute_dict
def _get_resource_id(attr):
"""
return either the node, link, group or network ID of an attribute.
Whichever one is not None
"""
for resourcekey in ('node_id', 'link_id', 'network_id', 'group_id'):
if isinstance(attr, dict):
##this if statement is needed to continue the loop, rather than just
#returning attr.get(resourcekey)
if attr.get(resourcekey) is not None:
return attr[resourcekey]
else:
if getattr(attr, resourcekey) is not None:
return getattr(attr, resourcekey)
return None
def _get_network_template_attribute_lookup(network_id):
"""
Given a network ID, identify all the templates associated to the network
and build a dictionary of template_id: [attr_id, attr_id...]
"""
#First identify all templates associated to the network (assuming the network
    #types are 100% representative of all templates linked to this network)
network_types = db.DBSession.query(TemplateType)\
.join(ResourceType, ResourceType.type_id == TemplateType.id)\
.filter(ResourceType.network_id == network_id).all()
template_ids = [t.template_id for t in network_types]
#Now with access to all templates, get all type attributes for all the templates.
network_typeattrs = db.DBSession.query(TemplateType.template_id.label('template_id'),\
TemplateType.id.label('type_id'),\
TypeAttr.attr_id.label('attr_id'))\
.join(TypeAttr, TypeAttr.type_id == TemplateType.id)\
.filter(TemplateType.template_id.in_(template_ids)).all()
typeattr_lookup = {}
all_network_typeattrs = []
for typeattr in network_typeattrs:
if typeattr.template_id not in typeattr_lookup:
typeattr_lookup[typeattr.template_id] = [typeattr.attr_id]
else:
typeattr_lookup[typeattr.template_id].append(typeattr.attr_id)
all_network_typeattrs.append(typeattr.attr_id)
return typeattr_lookup, all_network_typeattrs
def _get_all_templates(network_id, template_id):
"""
Get all the templates for the nodes, links and groups of a network.
Return these templates as a dictionary, keyed on type (NODE, LINK, GROUP)
then by ID of the node or link.
"""
base_qry = db.DBSession.query(
ResourceType.ref_key.label('ref_key'),
ResourceType.node_id.label('node_id'),
ResourceType.link_id.label('link_id'),
ResourceType.group_id.label('group_id'),
ResourceType.network_id.label('network_id'),
ResourceType.child_template_id.label('child_template_id'),
Template.name.label('template_name'),
Template.id.label('template_id'),
TemplateType.id.label('type_id'),
TemplateType.parent_id.label('parent_id'),
TemplateType.layout.label('layout'),
TemplateType.name.label('type_name'),
).filter(TemplateType.id==ResourceType.type_id,
Template.id==TemplateType.template_id)
all_node_type_qry = base_qry.filter(Node.id==ResourceType.node_id,
Node.network_id==network_id)
all_link_type_qry = base_qry.filter(Link.id==ResourceType.link_id,
Link.network_id==network_id)
all_group_type_qry = base_qry.filter(ResourceGroup.id==ResourceType.group_id,
ResourceGroup.network_id==network_id)
network_type_qry = base_qry.filter(ResourceType.network_id==network_id)
#Filter the group attributes by template
if template_id is not None:
all_node_type_qry = all_node_type_qry.filter(Template.id==template_id)
all_link_type_qry = all_link_type_qry.filter(Template.id==template_id)
all_group_type_qry = all_group_type_qry.filter(Template.id==template_id)
x = time.time()
log.info("Getting all types")
type_qry = all_node_type_qry.union(all_link_type_qry, all_group_type_qry, network_type_qry)
all_types = type_qry.all()
log.info("%s types retrieved in %s", len(all_types), time.time()-x)
log.info("Attributes retrieved. Processing results...")
x = time.time()
node_type_dict = dict()
link_type_dict = dict()
group_type_dict = dict()
network_type_dict = dict()
#a lookup to avoid having to query for the same child type every time
child_type_lookup = {}
##the current user is validated, but some checks require admin permissions,
##so call as a user with all permissions
admin_id = config.get('DEFAULT', 'ALL_PERMISSION_USER', 1)
for t in all_types:
child_layout = None
child_name = None
#Load all the inherited columns like layout and name and set them
if t.parent_id is not None:
if t.type_id in child_type_lookup:
child_type = child_type_lookup[t.type_id]
else:
#no need to check for user credentials here as it's called from a
#function which has done that for us
child_type = template.get_templatetype(t.type_id, user_id=admin_id)
child_type_lookup[t.type_id] = child_type
#Now set the potentially missing columns
child_layout = child_type.layout
child_name = child_type.name
templatetype = JSONObject({'template_id' : t.template_id,
'id' : t.type_id,
'template_name' :t.template_name,
'layout' : child_layout if child_layout else t.layout,
'name' : child_name if child_name else t.type_name,
'child_template_id' : t.child_template_id})
if t.ref_key == 'NODE':
nodetype = node_type_dict.get(t.node_id, [])
nodetype.append(templatetype)
node_type_dict[t.node_id] = nodetype
elif t.ref_key == 'LINK':
linktype = link_type_dict.get(t.link_id, [])
linktype.append(templatetype)
link_type_dict[t.link_id] = linktype
elif t.ref_key == 'GROUP':
grouptype = group_type_dict.get(t.group_id, [])
grouptype.append(templatetype)
group_type_dict[t.group_id] = grouptype
elif t.ref_key == 'NETWORK':
nettype = network_type_dict.get(t.network_id, [])
nettype.append(templatetype)
network_type_dict[t.network_id] = nettype
all_types = {
'NODE' : node_type_dict,
'LINK' : link_type_dict,
'GROUP': group_type_dict,
'NETWORK': network_type_dict,
}
logging.info("Attributes processed in %s", time.time()-x)
return all_types
def _get_all_group_items(network_id):
"""
Get all the resource group items in the network, across all scenarios
returns a dictionary of dict objects, keyed on scenario_id
"""
base_qry = db.DBSession.query(ResourceGroupItem)
item_qry = base_qry.join(Scenario).filter(Scenario.network_id==network_id)
x = time.time()
logging.info("Getting all items")
all_items = item_qry.all()
log.info("%s groups jointly retrieved in %s", len(all_items), time.time()-x)
logging.info("items retrieved. Processing results...")
x = time.time()
item_dict = dict()
for item in all_items:
items = item_dict.get(item.scenario_id, [])
items.append(JSONObject(item))
item_dict[item.scenario_id] = items
logging.info("items processed in %s", time.time()-x)
return item_dict
def _get_nodes(network_id, template_id=None):
"""
Get all the nodes in a network
"""
extras = {'types':[], 'attributes':[]}
node_qry = db.DBSession.query(Node).filter(
Node.network_id == network_id,
Node.status == 'A').options(
noload(Node.network)
)
if template_id is not None:
node_qry = node_qry.filter(ResourceType.node_id == Node.id,
TemplateType.id == ResourceType.type_id,
TemplateType.template_id == template_id)
node_res = node_qry.all()
nodes = []
for n in node_res:
nodes.append(JSONObject(n, extras=extras))
return nodes
def _get_links(network_id, template_id=None):
"""
Get all the links in a network
"""
extras = {'types':[], 'attributes':[]}
link_qry = db.DBSession.query(Link).filter(
Link.network_id==network_id,
Link.status=='A').options(
noload(Link.network)
)
if template_id is not None:
link_qry = link_qry.filter(ResourceType.link_id==Link.id,
TemplateType.id==ResourceType.type_id,
TemplateType.template_id==template_id)
link_res = link_qry.all()
links = []
for l in link_res:
links.append(JSONObject(l, extras=extras))
return links
def _get_groups(network_id, template_id=None):
"""
Get all the resource groups in a network
"""
extras = {'types':[], 'attributes':[]}
group_qry = db.DBSession.query(ResourceGroup).filter(
ResourceGroup.network_id==network_id,
ResourceGroup.status=='A').options(
noload(ResourceGroup.network)
)
if template_id is not None:
group_qry = group_qry.filter(ResourceType.group_id == ResourceGroup.id,
TemplateType.id == ResourceType.type_id,
TemplateType.template_id == template_id)
group_res = group_qry.all()
groups = []
for g in group_res:
groups.append(JSONObject(g, extras=extras))
return groups
def _get_scenarios(network_id, include_data, include_results, user_id,
scenario_ids=None, include_metadata=False):
"""
Get all the scenarios in a network
"""
scen_qry = db.DBSession.query(Scenario).filter(
Scenario.network_id == network_id).options(
noload(Scenario.network)).filter(
Scenario.status == 'A')
if scenario_ids:
logging.info("Filtering by scenario_ids %s",scenario_ids)
scen_qry = scen_qry.filter(Scenario.id.in_(scenario_ids))
extras = {'resourcescenarios': [], 'resourcegroupitems': []}
scens_i = scen_qry.all()
scens = [JSONObject(s,extras=extras) for s in scens_i]
all_resource_group_items = _get_all_group_items(network_id)
#default to empty metadata
metadata = {}
for i, s in enumerate(scens):
s_i = scens_i[i]
s.resourcegroupitems = all_resource_group_items.get(s.id, [])
if include_data == True:
s.resourcescenarios = s_i.get_all_resourcescenarios(
user_id=user_id,
include_results=include_results,
include_metadata=include_metadata)
return scens
def get_network(network_id,
include_attributes=True,
include_data=False,
include_results=True,
scenario_ids=None,
template_id=None,
include_non_template_attributes=False,
include_metadata=False,
**kwargs):
"""
Return a whole network as a dictionary.
network_id: ID of the network to retrieve
    include_attributes (bool): include resource attributes in the result.
        Setting this to False reduces the amount of data returned.
include_data: (bool). Indicate whether scenario data is to be returned.
This has a significant speed impact as retrieving large amounts
of data can be expensive.
include_results: (bool). If data is requested, this flag allows results
data to be ignored (attr is var), as this can often be very large.
scenario_ids: list of IDS to be returned. Used if a network has multiple
scenarios but you only want one returned. Using this filter
will speed up this function call.
template_id: Return the network with only attributes associated with this
template on the network, groups, nodes and links.
    include_non_template_attributes (bool): Return attributes which are not associated to any template.
include_metadata (bool): If data is included, then this flag indicates whether to include metadata.
Setting this to True may have performance implications
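    Example (illustrative only; the network ID, scenario ID and user ID are
    assumptions)::

        net = get_network(42,
                          include_data=True,
                          include_results=False,
                          scenario_ids=[101],
                          user_id=1)
        # net.nodes, net.links, net.resourcegroups and net.scenarios are
        # populated according to the flags above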
"""
log.debug("getting network %s"%network_id)
user_id = kwargs.get('user_id')
network_id = int(network_id)
try:
log.debug("Querying Network %s", network_id)
net_i = db.DBSession.query(Network).filter(
Network.id == network_id).options(
noload(Network.scenarios)).options(
noload(Network.nodes)).options(
noload(Network.links)).options(
noload(Network.types)).options(
noload(Network.attributes)).options(
noload(Network.resourcegroups)).one()
net_i.check_read_permission(user_id)
net = JSONObject(net_i)
net.nodes = _get_nodes(network_id, template_id=template_id)
net.links = _get_links(network_id, template_id=template_id)
net.resourcegroups = _get_groups(network_id, template_id=template_id)
net.owners = net_i.get_owners()
if include_attributes in ('Y', True):
all_attributes = _get_all_resource_attributes(network_id,
template_id,
include_non_template_attributes)
log.info("Setting attributes")
net.attributes = all_attributes['NETWORK'].get(network_id, [])
for node_i in net.nodes:
node_i.attributes = all_attributes['NODE'].get(node_i.id, [])
log.info("Node attributes set")
for link_i in net.links:
link_i.attributes = all_attributes['LINK'].get(link_i.id, [])
log.info("Link attributes set")
for group_i in net.resourcegroups:
group_i.attributes = all_attributes['GROUP'].get(group_i.id, [])
log.info("Group attributes set")
log.info("Setting types")
all_types = _get_all_templates(network_id, template_id)
net.types = all_types['NETWORK'].get(network_id, [])
for node_i in net.nodes:
node_i.types = all_types['NODE'].get(node_i.id, [])
for link_i in net.links:
link_i.types = all_types['LINK'].get(link_i.id, [])
for group_i in net.resourcegroups:
group_i.types = all_types['GROUP'].get(group_i.id, [])
log.info("Getting scenarios")
net.scenarios = _get_scenarios(network_id,
include_data,
include_results,
user_id,
scenario_ids,
include_metadata=include_metadata)
except NoResultFound:
raise ResourceNotFoundError("Network (network_id=%s) not found." % network_id)
return net
def get_networks(network_ids, **kwargs):
"""
Get the list of networks specified in a list of network IDS
args:
network_ids (list(int)) : a list of network IDs
returns:
list(Network)
"""
user_id = kwargs.get('user_id')
networks = db.DBSession.query(Network).filter(
Network.id.in_(network_ids))
for n in networks:
n.check_read_permission(user_id)
return networks
def get_nodes(network_id, template_id=None, **kwargs):
"""
Get all the nodes in a network.
args:
network_id (int): The network in which to search
template_id (int): Only return nodes whose type is in this template.
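    Example (illustrative; the network, template and user IDs are assumptions)::

        nodes = get_nodes(42, template_id=3, user_id=1)
        node_names = [n.name for n in nodes]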
"""
user_id = kwargs.get('user_id')
try:
net_i = db.DBSession.query(Network).filter(Network.id == network_id).one()
net_i.check_read_permission(user_id=user_id)
except NoResultFound:
raise ResourceNotFoundError("Network %s not found"%(network_id))
node_qry = db.DBSession.query(Node).filter(
Node.network_id == network_id,
Node.status == 'A').options(
noload(Node.network)
).options(
joinedload(Node.types).joinedload(ResourceType.templatetype)
).options(
joinedload(Node.attributes).joinedload(ResourceAttr.attr)
)
if template_id is not None:
node_qry = node_qry.filter(ResourceType.node_id==Node.id,
TemplateType.id==ResourceType.type_id,
TemplateType.template_id==template_id)
nodes = node_qry.all()
return nodes
def get_links(network_id, template_id=None, **kwargs):
"""
Get all the links in a network.
args:
network_id (int): The network in which to search
template_id (int): Only return links whose type is in this template.
"""
user_id = kwargs.get('user_id')
try:
net_i = db.DBSession.query(Network).filter(Network.id == network_id).one()
net_i.check_read_permission(user_id=user_id)
except NoResultFound:
raise ResourceNotFoundError("Network %s not found"%(network_id))
link_qry = db.DBSession.query(Link).filter(
Link.network_id==network_id,
Link.status=='A').options(
noload(Link.network)
).options(
joinedload(Link.types).joinedload(ResourceType.templatetype)
).options(
joinedload(Link.attributes).joinedload(ResourceAttr.attr)
)
if template_id is not None:
link_qry = link_qry.filter(ResourceType.link_id==Link.id,
TemplateType.id==ResourceType.type_id,
TemplateType.template_id==template_id)
links = link_qry.all()
return links
def get_groups(network_id, template_id=None, **kwargs):
"""
Get all the resource groups in a network.
args:
network_id (int): The network in which to search
template_id (int): Only return resource groups whose type is in this template.
"""
user_id = kwargs.get('user_id')
try:
net_i = db.DBSession.query(Network).filter(Network.id == network_id).one()
net_i.check_read_permission(user_id=user_id)
except NoResultFound:
raise ResourceNotFoundError("Network %s not found"%(network_id))
group_qry = db.DBSession.query(ResourceGroup).filter(
ResourceGroup.network_id==network_id,
ResourceGroup.status=='A').options(
noload(ResourceGroup.network)
).options(
joinedload(ResourceGroup.types).joinedload(ResourceType.templatetype)
).options(
joinedload(ResourceGroup.attributes).joinedload(ResourceAttr.attr)
)
if template_id is not None:
group_qry = group_qry.filter(ResourceType.group_id==ResourceGroup.id,
TemplateType.id==ResourceType.type_id,
TemplateType.template_id==template_id)
groups = group_qry.all()
return groups
def get_network_simple(network_id,**kwargs):
try:
n = db.DBSession.query(Network).filter(Network.id==network_id).options(joinedload(Network.attributes).joinedload(ResourceAttr.attr)).one()
n.types
for t in n.types:
t.templatetype.typeattrs
return n
except NoResultFound:
raise ResourceNotFoundError("Network %s not found"%(network_id,))
def get_node(node_id, scenario_id=None, **kwargs):
try:
n = db.DBSession.query(Node).filter(Node.id==node_id).options(joinedload(Node.attributes).joinedload(ResourceAttr.attr)).one()
n.types
for t in n.types:
t.templatetype.typeattrs
t.templatetype.template
#set this for easy access later by client
#t.templatetype.template_name = t.templatetype.template.name
for ta in t.templatetype.typeattrs:
if ta.default_dataset_id:
ta.default_dataset
ta.default_dataset.metadata
ta.default_dataset.unit
except NoResultFound:
raise ResourceNotFoundError("Node %s not found"%(node_id,))
n = JSONObject(n)
if scenario_id is not None:
res_scens = scenario.get_resource_data('NODE', node_id, scenario_id, None, **kwargs)
rs_dict = {}
for rs in res_scens:
rs_dict[rs.resource_attr_id] = JSONObject(rs)
for ra in n.attributes:
if rs_dict.get(ra.id):
ra.resourcescenario = rs_dict[ra.id]
return n
def get_link(link_id, scenario_id=None, **kwargs):
try:
l = db.DBSession.query(Link).filter(Link.id==link_id).options(joinedload(Link.attributes).joinedload(ResourceAttr.attr)).one()
l.types
for t in l.types:
#lazy load the type's template
t.templatetype.template
#set the template name on the type
t.templatetype.template_name = t.templatetype.template.name
t.templatetype.typeattrs
for ta in t.templatetype.typeattrs:
if ta.default_dataset_id:
ta.default_dataset
ta.default_dataset.metadata
ta.default_dataset.unit
except NoResultFound:
raise ResourceNotFoundError("Link %s not found"%(link_id,))
l = JSONObject(l)
if scenario_id is not None:
res_scens = scenario.get_resource_data('LINK', link_id, scenario_id, None, **kwargs)
rs_dict = {}
for rs in res_scens:
rs_dict[rs.resource_attr_id] = JSONObject(rs)
for ra in l.attributes:
if rs_dict.get(ra.id):
ra.resourcescenario = rs_dict[ra.id]
return l
def get_resourcegroup(group_id, scenario_id=None, **kwargs):
try:
rg = db.DBSession.query(ResourceGroup).filter(ResourceGroup.id==group_id).options(joinedload(ResourceGroup.attributes).joinedload(ResourceAttr.attr)).one()
rg.types
for t in rg.types:
#lazy load the type's template
t.templatetype.template
#set the template name on the type
t.templatetype.template_name = t.templatetype.template.name
t.templatetype.typeattrs
for ta in t.templatetype.typeattrs:
if ta.default_dataset_id is not None:
ta.default_dataset
ta.default_dataset.metadata
ta.default_dataset.unit
except NoResultFound:
raise ResourceNotFoundError("ResourceGroup %s not found"%(group_id,))
rg = JSONObject(rg)
if scenario_id is not None:
res_scens = scenario.get_resource_data('GROUP', group_id, scenario_id, None, **kwargs)
rs_dict = {}
for rs in res_scens:
rs_dict[rs.resource_attr_id] = JSONObject(rs)
for ra in rg.attributes:
if rs_dict.get(ra.id):
ra.resourcescenario = rs_dict[ra.id]
return rg
def get_node_by_name(network_id, node_name,**kwargs):
try:
n = db.DBSession.query(Node).filter(Node.name==node_name,
Node.network_id==network_id).\
                options(joinedload(Node.attributes).joinedload(ResourceAttr.attr)).one()
return n
except NoResultFound:
raise ResourceNotFoundError("Node %s not found in network %s"%(node_name, network_id,))
def get_link_by_name(network_id, link_name,**kwargs):
try:
l = db.DBSession.query(Link).filter(Link.name==link_name,
Link.network_id==network_id).\
options(joinedload(Link.attributes).joinedload(ResourceAttr.attr)).one()
return l
except NoResultFound:
raise ResourceNotFoundError("Link %s not found in network %s"%(link_name, network_id))
def get_resourcegroup_by_name(network_id, group_name,**kwargs):
try:
rg = db.DBSession.query(ResourceGroup).filter(ResourceGroup.name==group_name,
ResourceGroup.network_id==network_id).\
options(joinedload(ResourceGroup.attributes).joinedload(ResourceAttr.attr)).one()
return rg
except NoResultFound:
raise ResourceNotFoundError("ResourceGroup %s not found in network %s"%(group_name,network_id))
def get_network_by_name(project_id, network_name,**kwargs):
"""
Return a whole network as a complex model.
"""
try:
res = db.DBSession.query(Network.id).filter(func.lower(Network.name).like(network_name.lower()), Network.project_id == project_id).one()
net = get_network(res.id, 'Y', None, **kwargs)
return net
except NoResultFound:
raise ResourceNotFoundError("Network with name %s not found"%(network_name))
def network_exists(project_id, network_name,**kwargs):
"""
    Check whether a network with the given name exists in the given project. Returns 'Y' if it does, 'N' otherwise.
"""
try:
db.DBSession.query(Network.id).filter(func.lower(Network.name).like(network_name.lower()), Network.project_id == project_id).one()
return 'Y'
except NoResultFound:
return 'N'
@required_perms("edit_network")
def update_network(network,
update_nodes = True,
update_links = True,
update_groups = True,
update_scenarios = True,
**kwargs):
"""
Update an entire network
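    Example (illustrative sketch; assumes `net` is a complex model previously
    returned by get_network, modified in place, and that user 1 may edit it)::

        net.description = 'updated description'
        updated_net = update_network(net,
                                     update_scenarios=False,
                                     user_id=1)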
"""
log.info("Updating Network %s", network.name)
user_id = kwargs.get('user_id')
#check_perm('update_network')
try:
net_i = db.DBSession.query(Network).filter(Network.id == network.id).one()
except NoResultFound:
raise ResourceNotFoundError("Network with id %s not found"%(network.id))
net_i.project_id = network.project_id
net_i.name = network.name
net_i.description = network.description
net_i.projection = network.projection
net_i.layout = network.get_json('layout')
net_i.appdata = network.get_json('appdata')
all_resource_attrs = {}
new_network_attributes = _update_attributes(net_i, network.attributes)
all_resource_attrs.update(new_network_attributes)
hdb.add_resource_types(net_i, network.types)
#Maps temporary node_ids to real node_ids
node_id_map = dict()
if network.nodes is not None and update_nodes is True:
log.info("Updating nodes")
t0 = time.time()
#First add all the nodes
node_id_map = dict([(n.id, n) for n in net_i.nodes])
for node in network.nodes:
#If we get a negative or null node id, we know
#it is a new node.
if node.id is not None and node.id > 0:
n = node_id_map[node.id]
n.name = node.name
n.description = node.description
n.x = node.x
n.y = node.y
n.status = node.status
n.layout = node.get_layout()
else:
log.info("Adding new node %s", node.name)
n = net_i.add_node(node.name,
node.description,
node.get_layout(),
node.x,
node.y)
net_i.nodes.append(n)
                node_id_map[node.id] = n
all_resource_attrs.update(_update_attributes(n, node.attributes))
hdb.add_resource_types(n, node.types)
log.info("Updating nodes took %s", time.time() - t0)
link_id_map = dict()
if network.links is not None and update_links is True:
log.info("Updating links")
t0 = time.time()
        link_id_map = dict([(l.id, l) for l in net_i.links])
for link in network.links:
node_1 = node_id_map[link.node_1_id]
node_2 = node_id_map[link.node_2_id]
if link.id is None or link.id < 0:
log.info("Adding new link %s", link.name)
l = net_i.add_link(link.name,
link.description,
link.get_layout(),
node_1,
node_2)
net_i.links.append(l)
link_id_map[link.id] = l
else:
l = link_id_map[link.id]
l.name = link.name
                l.description = link.description
l.node_a = node_1
l.node_b = node_2
l.layout = link.get_layout()
all_resource_attrs.update(_update_attributes(l, link.attributes))
hdb.add_resource_types(l, link.types)
log.info("Updating links took %s", time.time() - t0)
group_id_map = dict()
#Next all the groups
if network.resourcegroups is not None and update_groups is True:
log.info("Updating groups")
t0 = time.time()
        group_id_map = dict([(g.id, g) for g in net_i.resourcegroups])
for group in network.resourcegroups:
#If we get a negative or null group id, we know
#it is a new group.
if group.id is not None and group.id > 0:
g_i = group_id_map[group.id]
g_i.name = group.name
g_i.description = group.description
g_i.status = group.status
else:
log.info("Adding new group %s", group.name)
g_i = net_i.add_group(group.name,
group.description,
group.status)
                net_i.resourcegroups.append(g_i)
                group_id_map[group.id] = g_i
all_resource_attrs.update(_update_attributes(g_i, group.attributes))
hdb.add_resource_types(g_i, group.types)
group_id_map[group.id] = g_i
log.info("Updating groups took %s", time.time() - t0)
errors = []
if network.scenarios is not None and update_scenarios is True:
for s in network.scenarios:
add_scenario = False
if s.id is not None:
if s.id > 0:
try:
scen_i = db.DBSession.query(Scenario).filter(Scenario.id==s.id).one()
if scen_i.locked == 'Y':
errors.append('Scenario %s was not updated as it is locked'%(s.id))
continue
scenario.update_scenario(s, flush=False, **kwargs)
except NoResultFound:
raise ResourceNotFoundError("Scenario %s not found"%(s.id))
else:
add_scenario = True
else:
add_scenario = True
if add_scenario is True:
log.info("Adding new scenario %s to network", s.name)
scenario.add_scenario(network.id, s, **kwargs)
db.DBSession.flush()
updated_net = get_network(network.id, summary=True, **kwargs)
return updated_net
@required_perms("edit_network")
def move_network(network_id, target_project_id, **kwargs):
"""
Move a network to the project with `target_project_id`
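    Example (illustrative; the network, project and user IDs are assumptions)::

        moved = move_network(42, target_project_id=7, user_id=1)
        # moved.project_id is now 7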
"""
log.info(f"Moving {network_id} to {target_project_id}")
user_id = kwargs.get('user_id')
try:
net_i = db.DBSession.query(Network).filter(Network.id == network_id).one()
except NoResultFound:
raise ResourceNotFoundError("Network with id %s not found"%(network_id))
net_i.check_write_permission(user_id)
net_i.project_id = target_project_id
db.DBSession.flush()
return JSONObject(net_i)
def update_resource_layout(resource_type, resource_id, key, value, **kwargs):
log.info("Updating %s %s's layout with {%s:%s}", resource_type, resource_id, key, value)
resource = get_resource(resource_type, resource_id, **kwargs)
if resource.layout is None:
layout = dict()
else:
layout = json.loads(resource.layout)
layout[key] = value
resource.layout = json.dumps(layout)
db.DBSession.flush()
return layout
def get_resource(resource_type, resource_id, **kwargs):
user_id = kwargs.get('user_id')
resource_type = resource_type.upper()
if resource_type == 'NODE':
return get_node(resource_id, **kwargs)
elif resource_type == 'LINK':
return get_link(resource_id, **kwargs)
elif resource_type == 'GROUP':
return get_resourcegroup(resource_id, **kwargs)
elif resource_type == 'NETWORK':
network = get_network_simple(resource_id, **kwargs)
return network
def set_network_status(network_id,status,**kwargs):
"""
    Set the status of a network, e.g. 'A' to activate it or 'X' to deactivate it.
"""
user_id = kwargs.get('user_id')
#check_perm(user_id, 'delete_network')
try:
net_i = db.DBSession.query(Network).filter(Network.id == network_id).one()
net_i.check_write_permission(user_id)
net_i.status = status
except NoResultFound:
raise ResourceNotFoundError("Network %s not found"%(network_id))
db.DBSession.flush()
return 'OK'
def get_network_extents(network_id,**kwargs):
"""
Given a network, return its maximum extents.
This would be the minimum x value of all nodes,
the minimum y value of all nodes,
the maximum x value of all nodes and
maximum y value of all nodes.
@returns NetworkExtents object
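    Illustrative return value (the coordinates are made up)::

        {'network_id': 42,
         'min_x': 0.0, 'max_x': 12.5,
         'min_y': -3.0, 'max_y': 8.2}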
"""
rs = db.DBSession.query(Node.x, Node.y).filter(Node.network_id==network_id).all()
if len(rs) == 0:
return dict(
network_id = network_id,
min_x=None,
max_x=None,
min_y=None,
max_y=None,
)
# Compute min/max extent of the network.
x = [r.x for r in rs if r.x is not None]
if len(x) > 0:
x_min = min(x)
x_max = max(x)
else:
# Default x extent if all None values
x_min, x_max = 0, 1
y = [r.y for r in rs if r.y is not None]
if len(y) > 0:
y_min = min(y)
y_max = max(y)
else:
# Default y extent if all None values
y_min, y_max = 0, 1
ne = JSONObject(dict(
network_id = network_id,
min_x=x_min,
max_x=x_max,
min_y=y_min,
max_y=y_max,
))
return ne
#########################################
def add_nodes(network_id, nodes,**kwargs):
"""
Add nodes to network
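    Example (illustrative sketch; the network and user IDs are assumptions and
    the node objects are assumed to carry name, description, layout, x and y)::

        new_nodes = [JSONObject({'id': -1, 'name': 'node A', 'description': '',
                                 'x': 0, 'y': 0, 'attributes': []}),
                     JSONObject({'id': -2, 'name': 'node B', 'description': '',
                                 'x': 1, 'y': 1, 'attributes': []})]
        added = add_nodes(42, new_nodes, user_id=1)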
"""
start_time = datetime.datetime.now()
names=[] # used to check uniqueness of node name
for n_i in nodes:
if n_i.name in names:
raise HydraError("Duplicate Node Name: %s"%(n_i.name))
names.append(n_i.name)
user_id = kwargs.get('user_id')
try:
net_i = db.DBSession.query(Network).filter(Network.id == network_id).one()
net_i.check_write_permission(user_id)
except NoResultFound:
raise ResourceNotFoundError("Network %s not found"%(network_id))
_add_nodes_to_database(net_i, nodes)
net_i.project_id = net_i.project_id
db.DBSession.flush()
node_s = db.DBSession.query(Node).filter(Node.network_id == network_id).all()
#Maps temporary node_ids to real node_ids
node_id_map = dict()
iface_nodes = dict()
for n_i in node_s:
iface_nodes[n_i.name] = n_i
for node in nodes:
node_id_map[node.id] = iface_nodes[node.name]
_bulk_add_resource_attrs(network_id, 'NODE', nodes, iface_nodes)
log.info("Nodes added in %s", get_timing(start_time))
return node_s
##########################################################################
def add_links(network_id, links,**kwargs):
'''
add links to network
'''
start_time = datetime.datetime.now()
user_id = kwargs.get('user_id')
names = [] # used to check uniqueness of link name before saving links to database
for l_i in links:
if l_i.name in names:
raise HydraError("Duplicate Link Name: %s"%(l_i.name))
names.append(l_i.name)
try:
net_i = db.DBSession.query(Network).filter(Network.id == network_id).one()
net_i.check_write_permission(user_id)
except NoResultFound:
raise ResourceNotFoundError("Network %s not found"%(network_id))
node_id_map=dict()
for node in net_i.nodes:
node_id_map[node.id] = node
_add_links_to_database(net_i, links, node_id_map)
net_i.project_id = net_i.project_id
db.DBSession.flush()
link_s = db.DBSession.query(Link).filter(Link.network_id == network_id).all()
iface_links = {}
for l_i in link_s:
iface_links[l_i.name] = l_i
_bulk_add_resource_attrs(net_i.id, 'LINK', links, iface_links)
log.info("Nodes added in %s", get_timing(start_time))
return link_s
#########################################
def add_node(network_id, node, **kwargs):
"""
    Add a node to a network.
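    Example (illustrative sketch; the network and user IDs are assumptions)::

        node = JSONObject({'name': 'new node', 'description': '',
                           'x': 0.5, 'y': 1.5,
                           'attributes': [], 'types': []})
        node_i = add_node(42, node, user_id=1)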
"""
user_id = kwargs.get('user_id')
try:
net_i = db.DBSession.query(Network).filter(Network.id == network_id).one()
net_i.check_write_permission(user_id)
except NoResultFound:
raise ResourceNotFoundError("Network %s not found"%(network_id))
new_node = net_i.add_node(node.name, node.description, node.layout, node.x, node.y)
hdb.add_resource_attributes(new_node, node.attributes)
db.DBSession.flush()
if node.types is not None and len(node.types) > 0:
res_types = []
res_attrs = []
res_scenarios = {}
for typesummary in node.types:
ra, rt, rs = template.set_resource_type(new_node,
typesummary.id,
network_id=network_id,
**kwargs)
if rt is not None:
res_types.append(rt)#rt is one object
res_attrs.extend(ra)#ra is a list of objects
res_scenarios.update(rs)
if len(res_types) > 0:
db.DBSession.bulk_insert_mappings(ResourceType, res_types)
if len(res_attrs) > 0:
db.DBSession.bulk_insert_mappings(ResourceAttr, res_attrs)
new_res_attrs = db.DBSession.query(ResourceAttr)\
.order_by(ResourceAttr.id.desc())\
.limit(len(res_attrs)).all()
all_rs = []
for ra in new_res_attrs:
ra_id = ra.id
if ra.attr_id in res_scenarios:
rs_list = res_scenarios[ra.attr_id]
for rs in rs_list:
rs_list[rs]['resource_attr_id'] = ra_id
all_rs.append(rs_list[rs])
if len(all_rs) > 0:
db.DBSession.bulk_insert_mappings(ResourceScenario, all_rs)
db.DBSession.refresh(new_node)
#lazy load attributes
new_node.attributes
return new_node
#########################################################################
def update_node(node, flush=True, **kwargs):
"""
Update a node.
If new attributes are present, they will be added to the node.
The non-presence of attributes does not remove them.
    The flush argument indicates whether dbsession.flush should be called. This
is set to False when update_node is called from another function which does
the flush.
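    Example (illustrative; assumes `node` was previously fetched with
    get_node and modified in place by the caller)::

        node.x, node.y = 10.0, 20.0
        node_i = update_node(node, user_id=1)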
"""
user_id = kwargs.get('user_id')
try:
node_i = db.DBSession.query(Node).filter(Node.id == node.id).one()
except NoResultFound:
raise ResourceNotFoundError("Node %s not found"%(node.id))
node_i.network.check_write_permission(user_id)
node_i.name = node.name if node.name is not None else node_i.name
node_i.x = node.x if node.x is not None else node_i.x
node_i.y = node.y if node.y is not None else node_i.y
node_i.description = node.description if node.description is not None else node_i.description
node_i.layout = node.get_layout() if node.layout is not None else node_i.layout
if node.attributes is not None:
_update_attributes(node_i, node.attributes)
if node.types is not None:
hdb.add_resource_types(node_i, node.types)
if flush is True:
db.DBSession.flush()
return node_i
def update_nodes(nodes,**kwargs):
"""
Update multiple nodes.
If new attributes are present, they will be added to the node.
The non-presence of attributes does not remove them.
    %TODO: merge this with the 'update_nodes' functionality in the 'update_network'
function, so we're not duplicating functionality. D.R.Y!
returns: a list of updated nodes
"""
user_id = kwargs.get('user_id')
updated_nodes = []
for n in nodes:
updated_node_i = update_node(n, flush=False, user_id=user_id)
updated_nodes.append(updated_node_i)
db.DBSession.flush()
return updated_nodes
def set_node_status(node_id, status, **kwargs):
"""
    Set the status of a node and of all the links connected to it.
"""
user_id = kwargs.get('user_id')
try:
node_i = db.DBSession.query(Node).filter(Node.id == node_id).one()
except NoResultFound:
raise ResourceNotFoundError("Node %s not found"%(node_id))
node_i.network.check_write_permission(user_id)
node_i.status = status
for link in node_i.links_to:
link.status = status
for link in node_i.links_from:
link.status = status
db.DBSession.flush()
return node_i
def _unique_data_qry(count=1):
rs = aliased(ResourceScenario)
subqry = db.DBSession.query(
rs.dataset_id,
func.count(rs.dataset_id).label('dataset_count')).\
group_by(rs.dataset_id).\
having(func.count(rs.dataset_id) == count).\
subquery()
unique_data = db.DBSession.query(rs).\
join(subqry,
and_(rs.dataset_id==subqry.c.dataset_id)
).\
filter(
rs.resource_attr_id == ResourceAttr.id
)
return unique_data
def delete_network(network_id, purge_data,**kwargs):
"""
Call the original purge network call for backward compatibility
"""
return purge_network(network_id, purge_data, **kwargs)
def purge_network(network_id, purge_data,**kwargs):
"""
Remove a network from DB completely
Use purge_data to try to delete the data associated with only this network.
If no other resources link to this data, it will be deleted.
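    Example (illustrative; the network and user IDs are assumptions)::

        purge_network(42, purge_data='Y', user_id=1)
        # returns 'OK' on success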
"""
user_id = kwargs.get('user_id')
try:
net_i = db.DBSession.query(Network).filter(Network.id == network_id).one()
except NoResultFound:
raise ResourceNotFoundError("Network %s not found"%(network_id))
log.info("Deleting network %s, id=%s", net_i.name, network_id)
net_i.check_write_permission(user_id)
db.DBSession.delete(net_i)
db.DBSession.flush()
return 'OK'
def _purge_datasets_unique_to_resource(ref_key, ref_id):
"""
    Find the number of times a resource and dataset combination
occurs. If this equals the number of times the dataset appears, then
we can say this dataset is unique to this resource, therefore it can be deleted
"""
count_qry = db.DBSession.query(ResourceScenario.dataset_id,
func.count(ResourceScenario.dataset_id)).group_by(
ResourceScenario.dataset_id).filter(
ResourceScenario.resource_attr_id==ResourceAttr.id)
    if ref_key == 'NODE':
        count_qry = count_qry.filter(ResourceAttr.node_id == ref_id)
    elif ref_key == 'LINK':
        count_qry = count_qry.filter(ResourceAttr.link_id == ref_id)
    elif ref_key == 'GROUP':
        count_qry = count_qry.filter(ResourceAttr.group_id == ref_id)
count_rs = count_qry.all()
for dataset_id, count in count_rs:
full_dataset_count = db.DBSession.query(ResourceScenario)\
.filter(ResourceScenario.dataset_id==dataset_id).count()
if full_dataset_count == count:
"""First delete all the resource scenarios"""
datasets_rs_to_delete = db.DBSession.query(ResourceScenario)\
.filter(ResourceScenario.dataset_id==dataset_id).all()
for dataset_rs in datasets_rs_to_delete:
db.DBSession.delete(dataset_rs)
"""Then delete all the datasets"""
dataset_to_delete = db.DBSession.query(Dataset)\
.filter(Dataset.id == dataset_id).one()
log.info("Deleting %s dataset %s (%s)",\
ref_key, dataset_to_delete.name, dataset_to_delete.id)
db.DBSession.delete(dataset_to_delete)
def delete_node(node_id, purge_data,**kwargs):
"""
Remove node from DB completely
If there are attributes on the node, use purge_data to try to
delete the data. If no other resources link to this data, it
will be deleted.
"""
user_id = kwargs.get('user_id')
try:
node_i = db.DBSession.query(Node).filter(Node.id == node_id).one()
except NoResultFound:
raise ResourceNotFoundError("Node %s not found"%(node_id))
group_items = db.DBSession.query(ResourceGroupItem).filter(
ResourceGroupItem.node_id==node_id).all()
for gi in group_items:
db.DBSession.delete(gi)
if purge_data == 'Y':
_purge_datasets_unique_to_resource('NODE', node_id)
log.info("Deleting node %s, id=%s", node_i.name, node_id)
node_i.network.check_write_permission(user_id)
db.DBSession.delete(node_i)
db.DBSession.flush()
return 'OK'
def add_link(network_id, link,**kwargs):
"""
Add a link to a network
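    Example (illustrative sketch; all IDs are assumptions and the two node IDs
    must belong to the target network)::

        link = JSONObject({'name': 'link A-B', 'description': '',
                           'node_1_id': 5, 'node_2_id': 6,
                           'attributes': [], 'types': []})
        link_i = add_link(42, link, user_id=1)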
"""
user_id = kwargs.get('user_id')
#check_perm(user_id, 'edit_topology')
try:
net_i = db.DBSession.query(Network).filter(Network.id == network_id).one()
net_i.check_write_permission(user_id)
except NoResultFound:
raise ResourceNotFoundError("Network %s not found"%(network_id))
try:
node_1 = db.DBSession.query(Node).filter(Node.id==link.node_1_id).one()
node_2 = db.DBSession.query(Node).filter(Node.id==link.node_2_id).one()
except NoResultFound:
raise ResourceNotFoundError("Nodes for link not found")
link_i = net_i.add_link(link.name, link.description, link.layout, node_1, node_2)
hdb.add_resource_attributes(link_i, link.attributes)
db.DBSession.flush()
if link.types is not None and len(link.types) > 0:
res_types = []
res_attrs = []
res_scenarios = {}
for typesummary in link.types:
ra, rt, rs = template.set_resource_type(link_i,
typesummary.id,
network_id=network_id,
**kwargs)
res_types.append(rt)
res_attrs.extend(ra)
res_scenarios.update(rs)#rs is a dict
if len(res_types) > 0:
db.DBSession.bulk_insert_mappings(ResourceType, res_types)
if len(res_attrs) > 0:
db.DBSession.bulk_insert_mappings(ResourceAttr, res_attrs)
new_res_attrs = db.DBSession.query(ResourceAttr).order_by(ResourceAttr.id.desc()).limit(len(res_attrs)).all()
all_rs = []
for ra in new_res_attrs:
ra_id = ra.id
if ra.attr_id in res_scenarios:
rs_list = res_scenarios[ra.attr_id]
for rs in rs_list:
rs_list[rs]['resource_attr_id'] = ra_id
all_rs.append(rs_list[rs])
if len(all_rs) > 0:
db.DBSession.bulk_insert_mappings(ResourceScenario, all_rs)
db.DBSession.refresh(link_i)
#lazy load attributes
link_i.attributes
return link_i
@required_perms("edit_network")
def update_links(links, **kwargs):
log.info("Updating %s links", len(links))
for l in links:
update_link(l, flush=False, **kwargs)
db.DBSession.flush()
def update_link(link, flush=False, **kwargs):
"""
Update a link.
"""
user_id = kwargs.get('user_id')
#check_perm(user_id, 'edit_topology')
try:
link_i = db.DBSession.query(Link).filter(Link.id == link.id).one()
link_i.network.check_write_permission(user_id)
except NoResultFound:
raise ResourceNotFoundError("Link %s not found"%(link.id))
    #Each of these should be updatable independently
if link.name is not None:
link_i.name = link.name
if link.node_1_id is not None:
link_i.node_1_id = link.node_1_id
if link.node_2_id is not None:
link_i.node_2_id = link.node_2_id
if link.description is not None:
link_i.description = link.description
if link.layout is not None:
link_i.layout = link.get_layout()
if link.attributes is not None:
hdb.add_resource_attributes(link_i, link.attributes)
if link.types is not None:
hdb.add_resource_types(link_i, link.types)
if flush is True:
db.DBSession.flush()
return link_i
def set_link_status(link_id, status, **kwargs):
"""
Set the status of a link
"""
user_id = kwargs.get('user_id')
#check_perm(user_id, 'edit_topology')
try:
link_i = db.DBSession.query(Link).filter(Link.id == link_id).one()
except NoResultFound:
raise ResourceNotFoundError("Link %s not found"%(link_id))
link_i.network.check_write_permission(user_id)
link_i.status = status
db.DBSession.flush()
def delete_link(link_id, purge_data,**kwargs):
"""
Remove link from DB completely
If there are attributes on the link, use purge_data to try to
delete the data. If no other resources link to this data, it
will be deleted.
"""
user_id = kwargs.get('user_id')
try:
link_i = db.DBSession.query(Link).filter(Link.id == link_id).one()
except NoResultFound:
raise ResourceNotFoundError("Link %s not found"%(link_id))
group_items = db.DBSession.query(ResourceGroupItem).filter(
ResourceGroupItem.link_id==link_id).all()
for gi in group_items:
db.DBSession.delete(gi)
if purge_data == 'Y':
_purge_datasets_unique_to_resource('LINK', link_id)
log.info("Deleting link %s, id=%s", link_i.name, link_id)
link_i.network.check_write_permission(user_id)
db.DBSession.delete(link_i)
db.DBSession.flush()
def add_group(network_id, group,**kwargs):
"""
Add a resourcegroup to a network
"""
user_id = kwargs.get('user_id')
try:
net_i = db.DBSession.query(Network).filter(Network.id == network_id).one()
net_i.check_write_permission(user_id=user_id)
except NoResultFound:
raise ResourceNotFoundError("Network %s not found"%(network_id))
res_grp_i = net_i.add_group(group.name, group.description, group.status)
hdb.add_resource_attributes(res_grp_i, group.attributes)
db.DBSession.flush()
if group.types is not None and len(group.types) > 0:
res_types = []
res_attrs = []
res_scenarios = {}
for typesummary in group.types:
ra, rt, rs = template.set_resource_type(res_grp_i,
typesummary.id,
network_id=network_id,
**kwargs)
res_types.append(rt)
res_attrs.extend(ra)
res_scenarios.update(rs)#rs is a dict
if len(res_types) > 0:
db.DBSession.bulk_insert_mappings(ResourceType, res_types)
if len(res_attrs) > 0:
db.DBSession.bulk_insert_mappings(ResourceAttr, res_attrs)
new_res_attrs = db.DBSession.query(ResourceAttr).order_by(ResourceAttr.id.desc()).limit(len(res_attrs)).all()
all_rs = []
for ra in new_res_attrs:
ra_id = ra.id
if ra.attr_id in res_scenarios:
rs_list = res_scenarios[ra.attr_id]
for rs in rs_list:
rs_list[rs]['resource_attr_id'] = ra_id
all_rs.append(rs_list[rs])
if len(all_rs) > 0:
db.DBSession.bulk_insert_mappings(ResourceScenario, all_rs)
db.DBSession.refresh(res_grp_i)
#lazy load attributes
res_grp_i.attributes
return res_grp_i
def update_group(group,**kwargs):
"""
Update a group.
If new attributes are present, they will be added to the group.
The non-presence of attributes does not remove them.
"""
user_id = kwargs.get('user_id')
try:
group_i = db.DBSession.query(ResourceGroup).filter(ResourceGroup.id == group.id).one()
except NoResultFound:
raise ResourceNotFoundError("group %s not found"%(group.id))
group_i.network.check_write_permission(user_id)
    group_i.name = group.name if group.name is not None else group_i.name
group_i.description = group.description if group.description else group_i.description
if group.attributes is not None:
_update_attributes(group_i, group.attributes)
if group.types is not None:
hdb.add_resource_types(group_i, group.types)
db.DBSession.flush()
return group_i
def set_group_status(group_id, status, **kwargs):
"""
Set the status of a group
"""
user_id = kwargs.get('user_id')
try:
group_i = db.DBSession.query(ResourceGroup).filter(ResourceGroup.id == group_id).one()
except NoResultFound:
raise ResourceNotFoundError("ResourceGroup %s not found"%(group_id))
group_i.network.check_write_permission(user_id)
group_i.status = status
db.DBSession.flush()
return group_i
def delete_group(group_id, purge_data,**kwargs):
"""
Remove group from DB completely
If there are attributes on the group, use purge_data to try to
delete the data. If no other resources link to this data, it
will be deleted.
"""
user_id = kwargs.get('user_id')
try:
group_i = db.DBSession.query(ResourceGroup).filter(ResourceGroup.id == group_id).one()
except NoResultFound:
raise ResourceNotFoundError("Group %s not found"%(group_id))
group_items = db.DBSession.query(ResourceGroupItem).filter(
ResourceGroupItem.group_id==group_id).all()
for gi in group_items:
db.DBSession.delete(gi)
if purge_data == 'Y':
_purge_datasets_unique_to_resource('GROUP', group_id)
log.info("Deleting group %s, id=%s", group_i.name, group_id)
group_i.network.check_write_permission(user_id)
db.DBSession.delete(group_i)
db.DBSession.flush()
def get_scenarios(network_id,**kwargs):
"""
Get all the scenarios in a given network.
"""
user_id = kwargs.get('user_id')
try:
net_i = db.DBSession.query(Network).filter(Network.id == network_id).one()
net_i.check_read_permission(user_id=user_id)
except NoResultFound:
raise ResourceNotFoundError("Network %s not found"%(network_id))
return net_i.scenarios
def validate_network_topology(network_id,**kwargs):
"""
Check for the presence of orphan nodes in a network.
"""
user_id = kwargs.get('user_id')
try:
net_i = db.DBSession.query(Network).filter(Network.id == network_id).one()
net_i.check_write_permission(user_id=user_id)
except NoResultFound:
raise ResourceNotFoundError("Network %s not found"%(network_id))
nodes = []
for node_i in net_i.nodes:
if node_i.status == 'A':
nodes.append(node_i.node_id)
link_nodes = []
for link_i in net_i.links:
if link_i.status != 'A':
continue
if link_i.node_1_id not in link_nodes:
link_nodes.append(link_i.node_1_id)
if link_i.node_2_id not in link_nodes:
link_nodes.append(link_i.node_2_id)
nodes = set(nodes)
link_nodes = set(link_nodes)
isolated_nodes = nodes - link_nodes
return isolated_nodes
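# A minimal usage sketch for the orphan-node check above; the network and user ids below
# are hypothetical placeholders, not values taken from this module.
#
#   isolated = validate_network_topology(network_id=42, user_id=1)
#   if len(isolated) > 0:
#       log.warning("Network 42 has isolated nodes: %s", isolated)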
def get_resource(resource_type, resource_id, **kwargs):
user_id = kwargs.get('user_id')
resource_type = resource_type.upper()
if resource_type == 'NODE':
return get_node(resource_id, **kwargs)
elif resource_type == 'LINK':
return get_link(resource_id, **kwargs)
elif resource_type == 'GROUP':
return get_resourcegroup(resource_id, **kwargs)
elif resource_type == 'NETWORK':
network = get_network_simple(resource_id, **kwargs)
return network
def get_resources_of_type(network_id, type_id, **kwargs):
"""
Return the Nodes, Links and ResourceGroups which
have the type specified.
"""
#'set a ref key on the resources to easily distinguish them'
nodes_with_type = db.DBSession.query(Node).join(ResourceType).filter(Node.network_id==network_id, ResourceType.type_id==type_id).all()
for n in nodes_with_type:
n.ref_key = 'NODE'
links_with_type = db.DBSession.query(Link).join(ResourceType).filter(Link.network_id==network_id, ResourceType.type_id==type_id).all()
for l in links_with_type:
l.ref_key = 'LINK'
groups_with_type = db.DBSession.query(ResourceGroup).join(ResourceType).filter(ResourceGroup.network_id==network_id, ResourceType.type_id==type_id).all()
for g in groups_with_type:
g.ref_key = 'GROUP'
return nodes_with_type+links_with_type+groups_with_type
def clean_up_network(network_id, **kwargs):
"""
Purge any deleted nodes, links, resourcegroups and scenarios in a given network
"""
user_id = kwargs.get('user_id')
#check_perm(user_id, 'delete_network')
try:
log.debug("Querying Network %s", network_id)
net_i = db.DBSession.query(Network).filter(Network.id == network_id).\
options(noload(Network.scenarios)).options(noload(Network.nodes)).options(noload(Network.links)).options(
noload(Network.resourcegroups)).options(
joinedload(Network.types)\
.joinedload(ResourceType.templatetype)\
.joinedload(TemplateType.template)
).one()
net_i.attributes
#Define the basic resource queries
node_qry = db.DBSession.query(Node).filter(Node.network_id==network_id).filter(Node.status=='X').all()
link_qry = db.DBSession.query(Link).filter(Link.network_id==network_id).filter(Link.status=='X').all()
group_qry = db.DBSession.query(ResourceGroup).filter(ResourceGroup.network_id==network_id).filter(ResourceGroup.status=='X').all()
scenario_qry = db.DBSession.query(Scenario).filter(Scenario.network_id==network_id).filter(Scenario.status=='X').all()
for n in node_qry:
db.DBSession.delete(n)
for l in link_qry:
db.DBSession.delete(l)
for g in group_qry:
db.DBSession.delete(g)
for s in scenario_qry:
db.DBSession.delete(s)
except NoResultFound:
raise ResourceNotFoundError("Network %s not found"%(network_id))
db.DBSession.flush()
return 'OK'
def get_all_node_data(network_id, scenario_id, node_ids=None, include_metadata=False, **kwargs):
resource_scenarios = get_attributes_for_resource(network_id, scenario_id, 'NODE', ref_ids=node_ids, include_metadata='N', **kwargs)
node_data = []
for rs in resource_scenarios:
resource_attr = JSONObject({
'id': rs.resourceattr.id,
'attr_id' : rs.resourceattr.attr_id,
'attr_name' : rs.resourceattr.attr.name,
'resourcescenario': rs
})
node_data.append(resource_attr)
return node_data
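# Shape of each element returned by get_all_node_data above (and by the link/group variants
# that follow); the values shown are purely illustrative:
#   JSONObject({'id': 101, 'attr_id': 7, 'attr_name': 'flow', 'resourcescenario': <ResourceScenario>})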
def get_all_link_data(network_id, scenario_id, link_ids=None, include_metadata=False, **kwargs):
resource_scenarios = get_attributes_for_resource(network_id, scenario_id, 'LINK', ref_ids=link_ids, include_metadata='N', **kwargs)
link_data = []
for rs in resource_scenarios:
resource_attr = JSONObject({
'id': rs.resourceattr.id,
'attr_id' : rs.resourceattr.attr_id,
'attr_name' : rs.resourceattr.attr.name,
'resourcescenario': rs
})
link_data.append(resource_attr)
return link_data
def get_all_group_data(network_id, scenario_id, group_ids=None, include_metadata=False, **kwargs):
resource_scenarios = get_attributes_for_resource(network_id, scenario_id, 'GROUP', ref_ids=group_ids, include_metadata='N', **kwargs)
group_data = []
for rs in resource_scenarios:
resource_attr = JSONObject({
'id': rs.resourceattr.id,
'attr_id' : rs.resourceattr.attr_id,
'attr_name' : rs.resourceattr.attr.name,
'resourcescenario': rs
})
group_data.append(resource_attr)
return group_data
def get_attributes_for_resource(network_id, scenario_id, ref_key, ref_ids=None, include_metadata=False, **kwargs):
try:
db.DBSession.query(Network).filter(Network.id==network_id).one()
except NoResultFound:
raise HydraError("Network %s does not exist"%network_id)
try:
db.DBSession.query(Scenario).filter(Scenario.id==scenario_id, Scenario.network_id==network_id).one()
except NoResultFound:
raise HydraError("Scenario %s not found."%scenario_id)
rs_qry = db.DBSession.query(ResourceScenario).filter(
ResourceAttr.id==ResourceScenario.resource_attr_id,
ResourceScenario.scenario_id==scenario_id,
ResourceAttr.ref_key==ref_key)\
.join(ResourceScenario.dataset)
log.info("Querying %s data",ref_key)
if ref_ids is not None and len(ref_ids) < 999:
if ref_key == 'NODE':
rs_qry = rs_qry.filter(ResourceAttr.node_id.in_(ref_ids))
elif ref_key == 'LINK':
rs_qry = rs_qry.filter(ResourceAttr.link_id.in_(ref_ids))
elif ref_key == 'GROUP':
rs_qry = rs_qry.filter(ResourceAttr.group_id.in_(ref_ids))
all_resource_scenarios = rs_qry.all()
log.info("Data retrieved")
resource_scenarios = []
dataset_ids = []
if ref_ids is not None:
log.info("Pulling out requested info")
for rs in all_resource_scenarios:
ra = rs.resourceattr
if ref_key == 'NODE':
if ra.node_id in ref_ids:
resource_scenarios.append(rs)
if rs.dataset_id not in dataset_ids:
dataset_ids.append(rs.dataset_id)
elif ref_key == 'LINK':
if ra.link_id in ref_ids:
resource_scenarios.append(rs)
if rs.dataset_id not in dataset_ids:
dataset_ids.append(rs.dataset_id)
elif ref_key == 'GROUP':
if ra.group_id in ref_ids:
resource_scenarios.append(rs)
if rs.dataset_id not in dataset_ids:
dataset_ids.append(rs.dataset_id)
else:
resource_scenarios.append(rs)
log.info("Requested info pulled out.")
else:
resource_scenarios = all_resource_scenarios
log.info("Retrieved %s resource attrs", len(resource_scenarios))
if include_metadata is True:
metadata_qry = db.DBSession.query(Metadata).filter(
ResourceAttr.ref_key == ref_key,
ResourceScenario.resource_attr_id == ResourceAttr.id,
ResourceScenario.scenario_id == scenario_id,
Dataset.id == ResourceScenario.dataset_id,
Metadata.dataset_id == Dataset.id)
log.info("Querying node metadata")
all_metadata = metadata_qry.all()
log.info("Node metadata retrieved")
metadata = []
if ref_ids is not None:
for m in all_metadata:
if m.dataset_id in dataset_ids:
metadata.append(m)
else:
metadata = all_metadata
log.info("%s metadata items retrieved", len(metadata))
metadata_dict = {}
for m in metadata:
if metadata_dict.get(m.dataset_id):
metadata_dict[m.dataset_id].append(m)
else:
metadata_dict[m.dataset_id] = [m]
for rs in resource_scenarios:
d = rs.dataset
if d.hidden == 'Y':
try:
d.check_read_permission(kwargs.get('user_id'))
except:
d.value = None
d.metadata = []
else:
if include_metadata is True:
rs.dataset.metadata = metadata_dict.get(d.id, [])
return resource_scenarios
def get_all_resource_attributes_in_network(attr_id, network_id, include_resources=True, **kwargs):
"""
Find every resource attribute in the network matching the supplied attr_id
Args:
attr_id (int): The attribute on which to match
network_id (int): The ID of the network to search
include_resources (bool): A flag to indicate whether to return the
resource that the resource attribute belongs to.
Including resources can have a performance implication
Returns:
List of JSONObjects
Raises:
HydraError if the attr_id or network_id do not exist
"""
user_id = kwargs.get('user_id')
try:
a = db.DBSession.query(Attr).filter(Attr.id == attr_id).one()
except NoResultFound:
raise HydraError("Attribute %s not found"%(attr_id,))
ra_qry = db.DBSession.query(ResourceAttr).filter(
ResourceAttr.attr_id == attr_id,
or_(Network.id == network_id,
Node.network_id == network_id,
Link.network_id == network_id,
ResourceGroup.network_id == network_id)
).outerjoin(ResourceAttr.node)\
.outerjoin(ResourceAttr.link)\
.outerjoin(ResourceAttr.network)\
.outerjoin(ResourceAttr.resourcegroup)\
.options(joinedload(ResourceAttr.node))\
.options(joinedload(ResourceAttr.link))\
.options(joinedload(ResourceAttr.resourcegroup))\
.options(joinedload(ResourceAttr.network))
resourceattrs = ra_qry.all()
json_ra = []
#Load the metadata too
for ra in resourceattrs:
ra_j = JSONObject(ra, extras={'node':JSONObject(ra.node) if ra.node else None,
'link':JSONObject(ra.link) if ra.link else None,
'resourcegroup':JSONObject(ra.resourcegroup) if ra.resourcegroup else None,
'network':JSONObject(ra.network) if ra.network else None})
if ra_j.node is not None:
ra_j.resource = ra_j.node
elif ra_j.link is not None:
ra_j.resource = ra_j.link
elif ra_j.resourcegroup is not None:
ra_j.resource = ra_j.resourcegroup
elif ra.network is not None:
ra_j.resource = ra_j.network
json_ra.append(ra_j)
return json_ra
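# Usage sketch for the lookup above (hypothetical ids): fetch every resource attribute in
# network 5 referring to attribute 12, then read the owning resource from each result.
#
#   for ra in get_all_resource_attributes_in_network(attr_id=12, network_id=5, user_id=1):
#       print(ra.ref_key, ra.resource.name if ra.resource else None)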
def get_all_resource_data(scenario_id, include_metadata=False, page_start=None, page_end=None, **kwargs):
"""
A function which returns the data for all resources in a network.
-
"""
rs_qry = db.DBSession.query(
ResourceAttr.attr_id,
Attr.name.label('attr_name'),
ResourceAttr.id.label('resource_attr_id'),
ResourceAttr.ref_key,
ResourceAttr.network_id,
ResourceAttr.node_id,
ResourceAttr.link_id,
ResourceAttr.group_id,
ResourceAttr.project_id,
ResourceAttr.attr_is_var,
ResourceScenario.scenario_id,
ResourceScenario.source,
Dataset.id.label('dataset_id'),
Dataset.name.label('dataset_name'),
Dataset.value,
Dataset.unit_id,
Dataset.hidden,
Dataset.type,
null().label('metadata'),
case(
(ResourceAttr.node_id != None, Node.name),
(ResourceAttr.link_id != None, Link.name),
(ResourceAttr.group_id != None, ResourceGroup.name),
(ResourceAttr.network_id != None, Network.name),
).label('ref_name'),
).join(ResourceScenario, ResourceScenario.resource_attr_id==ResourceAttr.id)\
.join(Dataset, ResourceScenario.dataset_id==Dataset.id).\
join(Attr, ResourceAttr.attr_id==Attr.id).\
outerjoin(Node, ResourceAttr.node_id==Node.id).\
outerjoin(Link, ResourceAttr.link_id==Link.id).\
outerjoin(ResourceGroup, ResourceAttr.group_id==ResourceGroup.id).\
outerjoin(Network, ResourceAttr.network_id==Network.id).\
filter(ResourceScenario.scenario_id==scenario_id)
all_resource_data = rs_qry.all()
if page_start is not None and page_end is None:
all_resource_data = all_resource_data[page_start:]
elif page_start is not None and page_end is not None:
all_resource_data = all_resource_data[page_start:page_end]
log.info("%s datasets retrieved", len(all_resource_data))
if include_metadata is True:
metadata_qry = db.DBSession.query(
distinct(Metadata.dataset_id).label('dataset_id'),
Metadata.key,
Metadata.value).filter(
ResourceScenario.resource_attr_id == ResourceAttr.id,
ResourceScenario.scenario_id == scenario_id,
Dataset.id == ResourceScenario.dataset_id,
Metadata.dataset_id == Dataset.id)
log.info("Querying node metadata")
metadata = metadata_qry.all()
log.info("%s metadata items retrieved", len(metadata))
metadata_dict = {}
for m in metadata:
if metadata_dict.get(m.dataset_id):
metadata_dict[m.dataset_id].append(m)
else:
metadata_dict[m.dataset_id] = [m]
return_data = []
for ra in all_resource_data:
ra_dict = ra._asdict()
if ra.hidden == 'Y':
try:
d = db.DBSession.query(Dataset).filter(
Dataset.id == ra.dataset_id
).options(noload(Dataset.metadata)).one()
d.check_read_permission(kwargs.get('user_id'))
except:
ra_dict['value'] = None
ra_dict['metadata'] = []
else:
if include_metadata is True:
ra_dict['metadata'] = metadata_dict.get(ra.dataset_id, [])
return_data.append(namedtuple('ResourceData', ra_dict.keys())(**ra_dict))
log.info("Returning %s datasets", len(return_data))
return return_data
def clone_network(network_id,
recipient_user_id=None,
new_network_name=None,
new_network_description=None,
project_id=None,
project_name=None,
new_project=True,
include_outputs=False,
scenario_ids=[],
creator_is_owner=False,
**kwargs):
"""
Create an exact clone of the specified network for the specified user.
If project_id is specified, put the new network in there.
Otherwise create a new project with the specified name and put it in there.
creator_is_owner (Bool) : If False (the default), the user who creates the network is not
added as an owner (no entry in tNetworkOwner), so the network will not appear in their 'get_project' results.
"""
user_id = kwargs['user_id']
ex_net = db.DBSession.query(Network).filter(Network.id==network_id).one()
ex_net.check_read_permission(user_id)
if recipient_user_id is None:
recipient_user_id = user_id
if project_id is None and new_project == True:
log.info("Creating a new project for cloned network")
ex_proj = db.DBSession.query(Project).filter(Project.id==ex_net.project_id).one()
user = db.DBSession.query(User).filter(User.id==user_id).one()
project = Project()
if project_name is None or project_name=="":
project_name=ex_proj.name + " (Cloned by %s)" % user.display_name
#check a project with this name doesn't already exist:
ex_project = db.DBSession.query(Project).filter(Project.name == project_name,
Project.created_by == user_id).all()
#If it exists, use it.
if len(ex_project) > 0:
project=ex_project[0]
else:
project.name = project_name
project.created_by = user_id
if creator_is_owner is True and user_id != recipient_user_id:
project.set_owner(user_id)
if recipient_user_id is not None:
project.set_owner(recipient_user_id)
db.DBSession.add(project)
db.DBSession.flush()
project_id = project.id
elif project_id is None:
log.info("Using current project for cloned network")
project_id = ex_net.project_id
if new_network_name is None or new_network_name == "":
new_network_name = ex_net.name
log.info('Cloning Network...')
#Find if there's any projects with this name in the project already
ex_network = db.DBSession.query(Network).filter(Network.project_id == project_id,
Network.name.like(
f"{new_network_name}%")).all()
if len(ex_network) > 0:
new_network_name = f"{new_network_name} ({str(len(ex_network))})"
newnet = Network()
newnet.project_id = project_id
newnet.name = new_network_name
newnet.description = ex_net.description if new_network_description is None else new_network_description
newnet.layout = ex_net.layout
newnet.status = ex_net.status
newnet.projection = ex_net.projection
newnet.created_by = user_id
#if true, the creator will see this network in their project.networks.
if creator_is_owner is True and user_id != recipient_user_id:
newnet.set_owner(user_id)
#set the owner to the recipient. This can be either the requesting user id (user_id)
#or an explicitly defined user.
newnet.set_owner(recipient_user_id)
db.DBSession.add(newnet)
db.DBSession.flush()
newnetworkid = newnet.id
log.info('Cloning Nodes')
node_id_map = _clone_nodes(network_id, newnetworkid, user_id)
log.info('Cloning Links')
link_id_map = _clone_links(network_id, newnetworkid, node_id_map, user_id)
log.info('Cloning Groups')
group_id_map = _clone_groups(network_id,
newnetworkid,
node_id_map,
link_id_map,
user_id)
log.info("Cloning Resource Attributes")
ra_id_map = _clone_resourceattrs(network_id,
newnetworkid,
node_id_map,
link_id_map,
group_id_map,
newnet.project_id,
ex_net.project_id,
user_id)
log.info("Cloning Resource Types")
_clone_resourcetypes(network_id, newnetworkid, node_id_map, link_id_map, group_id_map)
log.info('Cloning Scenarios')
scenario_id_map = _clone_scenarios(network_id,
newnetworkid,
ra_id_map,
node_id_map,
link_id_map,
group_id_map,
user_id,
include_outputs=include_outputs,
scenario_ids=scenario_ids)
_clone_rules(
network_id,
newnetworkid,
node_id_map,
link_id_map,
group_id_map,
scenario_id_map,
user_id)
db.DBSession.flush()
return newnetworkid
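# Illustrative call of clone_network (all ids and names below are hypothetical): clone
# network 7 for user 3, keeping only scenario 21 and excluding result datasets.
#
#   new_net_id = clone_network(7,
#                              recipient_user_id=3,
#                              new_network_name="My network (copy)",
#                              include_outputs=False,
#                              scenario_ids=[21],
#                              user_id=3)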
def _clone_rules(old_network_id, new_network_id, node_id_map, link_id_map, group_id_map, scenario_id_map, user_id):
"""
"""
rules.clone_resource_rules('NETWORK',
old_network_id,
target_ref_key='NETWORK',
target_ref_id=new_network_id,
scenario_id_map=scenario_id_map,
user_id=user_id)
node_rules = db.DBSession.query(Rule).join(Node).filter(Node.network_id==old_network_id).all()
for node_rule in node_rules:
rules.clone_rule(node_rule.id,
target_ref_key='NODE',
target_ref_id=node_id_map[node_rule.node_id],
scenario_id_map=scenario_id_map,
user_id=user_id)
link_rules = db.DBSession.query(Rule).join(Link).filter(Link.network_id==old_network_id).all()
for link_rule in link_rules:
rules.clone_rule(link_rule.id,
target_ref_key='LINK',
target_ref_id=link_id_map[link_rule.link_id],
scenario_id_map=scenario_id_map,
user_id=user_id)
group_rules = db.DBSession.query(Rule).join(ResourceGroup).filter(ResourceGroup.network_id==old_network_id).all()
for group_rule in group_rules:
rules.clone_rule(group_rule.id,
target_ref_key='GROUP',
target_ref_id=group_id_map[group_rule.group_id],
scenario_id_map=scenario_id_map,
user_id=user_id)
def _clone_nodes(old_network_id, new_network_id, user_id):
nodes = db.DBSession.query(Node).filter(Node.network_id==old_network_id).all()
newnodes = []
old_node_name_map = {}
id_map = {}
for ex_n in nodes:
new_n = dict(
network_id=new_network_id,
name = ex_n.name,
description = ex_n.description,
x = ex_n.x,
y = ex_n.y,
layout = ex_n.layout,
status = ex_n.status,
)
old_node_name_map[ex_n.name] = ex_n.node_id
newnodes.append(new_n)
db.DBSession.bulk_insert_mappings(Node, newnodes)
db.DBSession.flush()
#map old IDS to new IDS
nodes = db.DBSession.query(Node).filter(Node.network_id==new_network_id).all()
for n in nodes:
old_node_id = old_node_name_map[n.name]
id_map[old_node_id] = n.node_id
return id_map
def _clone_links(old_network_id, new_network_id, node_id_map, user_id):
links = db.DBSession.query(Link).filter(Link.network_id==old_network_id).all()
newlinks = []
old_link_name_map = {}
id_map = {}
for ex_l in links:
new_l = dict(
network_id=new_network_id,
name = ex_l.name,
description = ex_l.description,
node_1_id = node_id_map[ex_l.node_1_id],
node_2_id = node_id_map[ex_l.node_2_id],
layout = ex_l.layout,
status = ex_l.status,
)
newlinks.append(new_l)
old_link_name_map[ex_l.name] = ex_l.id
db.DBSession.bulk_insert_mappings(Link, newlinks)
db.DBSession.flush()
#map old IDS to new IDS
links = db.DBSession.query(Link).filter(Link.network_id==new_network_id).all()
for l in links:
old_link_id = old_link_name_map[l.name]
id_map[old_link_id] = l.link_id
return id_map
def _clone_groups(old_network_id, new_network_id, node_id_map, link_id_map, user_id):
groups = db.DBSession.query(ResourceGroup).filter(ResourceGroup.network_id==old_network_id).all()
newgroups = []
old_group_name_map = {}
id_map = {}
for ex_g in groups:
new_g = dict(
network_id=new_network_id,
name = ex_g.name,
description = ex_g.group_description,
status = ex_g.status,
)
newgroups.append(new_g)
old_group_name_map[ex_g.name] = ex_g.id
db.DBSession.bulk_insert_mappings(ResourceGroup, newgroups)
db.DBSession.flush()
#map old IDS to new IDS
groups = db.DBSession.query(ResourceGroup).filter(ResourceGroup.network_id==new_network_id).all()
for g in groups:
old_group_id = old_group_name_map[g.name]
id_map[old_group_id] = g.group_id
return id_map
def _clone_attributes(network_id, newnetworkid, exnet_project_id, newnet_project_id, user_id):
"""
Clone the attributes scoped to a network and its project when cloning a network
"""
#first find any attributes which are scoped to the source network, and scope them to the parent project if the source
#and target are in the same project, otherwise clone all the scoped attributes.
#find any attributes scoped directly to the source
network_scoped_attrs = attributes.get_attributes(network_id=network_id, user_id=user_id)
project_scoped_attrs = []
#get all the attributes scoped to the project of the source network (if it's not the same project as the target)
if exnet_project_id != newnet_project_id:
new_attributes = []
exnet_project_scoped_attrs = attributes.get_attributes(project_id=exnet_project_id, user_id=user_id)
for a in exnet_project_scoped_attrs:
a.project_id = newnet_project_id
new_attributes.append(a)
for a in network_scoped_attrs:
#the networks are in different projects, so clone the attributes
a = JSONObject(a)
a.network_id = newnetworkid
new_attributes.append(a)
attributes.add_attributes(new_attributes, user_id=user_id)
else:
for a in network_scoped_attrs:
#the networks are in the same project, so re-scope the attribute
#to the project, so it is shared by the networks
a.network_id=None
a.project_id=exnet_project_id
attributes.update_attribute(a)
def _clone_resourceattrs(network_id, newnetworkid, node_id_map, link_id_map, group_id_map, exnet_project_id, newnet_project_id, user_id):
#clone any attributes which are scoped to a network or to the network's project (if the networks)
#are in different projects.
_clone_attributes(network_id, newnetworkid, exnet_project_id, newnet_project_id, user_id)
log.info("Cloning Network Attributes")
network_ras = db.DBSession.query(ResourceAttr).filter(ResourceAttr.network_id==network_id)
id_map = {}
new_ras = []
old_ra_name_map = {}
for ra in network_ras:
new_ras.append(dict(
network_id=newnetworkid,
node_id=None,
group_id=None,
link_id=None,
ref_key='NETWORK',
attr_id=ra.attr_id,
attr_is_var=ra.attr_is_var,
))
#key is (network_id, node_id, link_id, group_id, attr_id) -- only one of the resource ids can be non-null for a given row
old_ra_name_map[(newnetworkid, None, None, None, ra.attr_id)] = ra.id
log.info("Cloning Node Attributes")
node_ras = db.DBSession.query(ResourceAttr).filter(and_(ResourceAttr.node_id==Node.id, Node.network_id==network_id)).all()
for ra in node_ras:
new_ras.append(dict(
node_id=node_id_map[ra.node_id],
network_id=None,
link_id=None,
group_id=None,
attr_id=ra.attr_id,
attr_is_var=ra.attr_is_var,
ref_key=ra.ref_key,
))
old_ra_name_map[(None, node_id_map[ra.node_id], None, None, ra.attr_id)] = ra.id
log.info("Cloning Link Attributes")
link_ras = db.DBSession.query(ResourceAttr).filter(and_(ResourceAttr.link_id==Link.id, Link.network_id==network_id)).all()
for ra in link_ras:
new_ras.append(dict(
link_id=link_id_map[ra.link_id],
network_id=ra.network_id,
node_id=ra.node_id,
group_id=ra.group_id,
attr_id=ra.attr_id,
attr_is_var=ra.attr_is_var,
ref_key=ra.ref_key,
))
old_ra_name_map[(None, None, link_id_map[ra.link_id], None, ra.attr_id)] = ra.id
log.info("Cloning Group Attributes")
group_ras = db.DBSession.query(ResourceAttr).filter(and_(ResourceAttr.group_id==ResourceGroup.id, ResourceGroup.network_id==network_id)).all()
for ra in group_ras:
new_ras.append(dict(
group_id=group_id_map[ra.group_id],
network_id=ra.network_id,
link_id=ra.link_id,
node_id=ra.node_id,
attr_id=ra.attr_id,
attr_is_var=ra.attr_is_var,
ref_key=ra.ref_key,
))
old_ra_name_map[(None, None, None, group_id_map[ra.group_id], ra.attr_id)] = ra.id
log.info("Inserting new resource attributes")
db.DBSession.bulk_insert_mappings(ResourceAttr, new_ras)
db.DBSession.flush()
log.info("Insertion Complete")
log.info("Getting new RAs and building ID map")
new_network_ras = db.DBSession.query(ResourceAttr).filter(ResourceAttr.network_id==newnetworkid).all()
for ra in new_network_ras:
id_map[old_ra_name_map[(ra.network_id, ra.node_id, ra.link_id, ra.group_id, ra.attr_id)]] = ra.id
new_node_ras = db.DBSession.query(ResourceAttr).filter(and_(ResourceAttr.node_id==Node.id, Node.network_id==newnetworkid)).all()
for ra in new_node_ras:
id_map[old_ra_name_map[(ra.network_id, ra.node_id, ra.link_id, ra.group_id, ra.attr_id)]] = ra.id
new_link_ras = db.DBSession.query(ResourceAttr).filter(and_(ResourceAttr.link_id==Link.id, Link.network_id==newnetworkid)).all()
for ra in new_link_ras:
id_map[old_ra_name_map[(ra.network_id, ra.node_id, ra.link_id, ra.group_id, ra.attr_id)]] = ra.id
new_group_ras = db.DBSession.query(ResourceAttr).filter(and_(ResourceAttr.group_id==ResourceGroup.id, ResourceGroup.network_id==newnetworkid)).all()
for ra in new_group_ras:
id_map[old_ra_name_map[(ra.network_id, ra.node_id, ra.link_id, ra.group_id, ra.attr_id)]] = ra.id
log.info("ID map completed. Returning")
return id_map
def _clone_resourcetypes(network_id, newnetworkid, node_id_map, link_id_map, group_id_map):
log.info("Cloning Network Types")
network_rts = db.DBSession.query(ResourceType).filter(ResourceType.network_id==network_id)
new_ras = []
for rt in network_rts:
new_ras.append(dict(
ref_key=rt.ref_key,
network_id=newnetworkid,
node_id=rt.node_id,
link_id=rt.link_id,
group_id=rt.group_id,
type_id=rt.type_id,
child_template_id=rt.child_template_id,
))
log.info("Cloning Node Types")
node_rts = db.DBSession.query(ResourceType).filter(and_(ResourceType.node_id==Node.id, Node.network_id==network_id))
for rt in node_rts:
new_ras.append(dict(
ref_key=rt.ref_key,
network_id=rt.network_id,
node_id=node_id_map[rt.node_id],
link_id=rt.link_id,
group_id=rt.group_id,
type_id=rt.type_id,
child_template_id=rt.child_template_id,
))
log.info("Cloning Link Types")
link_rts = db.DBSession.query(ResourceType).filter(and_(ResourceType.link_id==Link.id, Link.network_id==network_id))
for rt in link_rts:
new_ras.append(dict(
ref_key=rt.ref_key,
network_id=rt.network_id,
node_id=rt.node_id,
link_id=link_id_map[rt.link_id],
group_id=rt.group_id,
type_id=rt.type_id,
child_template_id=rt.child_template_id,
))
log.info("Cloning Group Types")
group_rts = db.DBSession.query(ResourceType).filter(and_(ResourceType.group_id==ResourceGroup.id, ResourceGroup.network_id==network_id))
for rt in group_rts:
new_ras.append(dict(
ref_key=rt.ref_key,
network_id=rt.network_id,
node_id=rt.node_id,
link_id=rt.link_id,
group_id=group_id_map[rt.group_id],
type_id=rt.type_id,
child_template_id=rt.child_template_id,
))
log.info("Inserting new resource types")
db.DBSession.bulk_insert_mappings(ResourceType, new_ras)
db.DBSession.flush()
log.info("Insertion Complete")
def _clone_scenarios(network_id,
newnetworkid,
ra_id_map,
node_id_map,
link_id_map,
group_id_map,
user_id,
include_outputs=False,
scenario_ids=[]):
scenarios = db.DBSession.query(Scenario).filter(Scenario.network_id == network_id).all()
id_map = {}
for scenario in scenarios:
#if scenario_ids are specified (the list is not empty) then filter out
#the scenarios not specified.
if len(scenario_ids) > 0 and scenario.id not in scenario_ids:
log.info("Not cloning scenario %s", scenario.id)
continue
if scenario.status == 'A':
new_scenario_id = _clone_scenario(scenario,
newnetworkid,
ra_id_map,
node_id_map,
link_id_map,
group_id_map,
user_id,
include_outputs=include_outputs)
id_map[scenario.id] = new_scenario_id
return id_map
def _clone_scenario(old_scenario,
newnetworkid,
ra_id_map,
node_id_map,
link_id_map,
group_id_map,
user_id,
include_outputs=False):
log.info("Adding scenario shell to get scenario ID")
news = Scenario()
news.network_id = newnetworkid
news.name = old_scenario.name
news.description = old_scenario.description
news.layout = old_scenario.layout
news.start_time = old_scenario.start_time
news.end_time = old_scenario.end_time
news.time_step = old_scenario.time_step
news.parent_id = old_scenario.parent_id
news.created_by = user_id
db.DBSession.add(news)
db.DBSession.flush()
scenario_id = news.id
log.info("New Scenario %s created", scenario_id)
log.info("Getting old resource scenarios for scenario %s", old_scenario.id)
old_rscen_qry = db.DBSession.query(ResourceScenario).filter(
ResourceScenario.scenario_id == old_scenario.id,
ResourceAttr.id == ResourceScenario.resource_attr_id,
)
#Filter out output data unless explicitly requested not to.
if include_outputs is not True:
old_rscen_qry = old_rscen_qry.filter(ResourceAttr.attr_is_var == 'N')
old_rscen_rs = old_rscen_qry.all()
new_rscens = []
for old_rscen in old_rscen_rs:
new_rscens.append(dict(
dataset_id=old_rscen.dataset_id,
scenario_id=scenario_id,
resource_attr_id=ra_id_map[old_rscen.resource_attr_id],
))
log.info("Inserting new resource scenarios")
db.DBSession.bulk_insert_mappings(ResourceScenario, new_rscens)
log.info("Insertion Complete")
log.info("Getting old resource group items for scenario %s", old_scenario.id)
old_rgis = db.DBSession.query(ResourceGroupItem).filter(
ResourceGroupItem.scenario_id == old_scenario.id).all()
new_rgis = []
for old_rgi in old_rgis:
new_rgis.append(dict(
ref_key=old_rgi.ref_key,
node_id=node_id_map.get(old_rgi.node_id),
link_id=link_id_map.get(old_rgi.link_id),
subgroup_id=group_id_map.get(old_rgi.subgroup_id),
group_id=group_id_map.get(old_rgi.group_id),
scenario_id=scenario_id,
))
db.DBSession.bulk_insert_mappings(ResourceGroupItem, new_rgis)
return scenario_id
@required_perms("edit_network")
def apply_unit_to_network_rs(network_id, unit_id, attr_id, scenario_id=None, **kwargs):
"""
Set the unit on all the datasets in a network which have the same attribute
as the supplied resource_attr_id.
args:
unit_id (int): The unit ID to set on the network's datasets
attr_id (int): The attribute ID
scenario_id (int) (optional): Supplied if only datasets in a
specific scenario are to be affected
returns:
None
raises:
ValidationError if the supplied unit is incompatible with the attribute's dimension
"""
#Now get all the RS associated to both the attr and network.
network_rs_query = db.DBSession.query(ResourceScenario).filter(
Scenario.network_id == network_id,
ResourceScenario.scenario_id == Scenario.id,
ResourceScenario.resource_attr_id == ResourceAttr.id,
ResourceAttr.attr_id == attr_id)
if scenario_id is not None:
network_rs_query = network_rs_query.filter(Scenario.id == scenario_id)
network_rs_list = network_rs_query.all()
#Get the attribute in question so we can check its dimension
attr_i = db.DBSession.query(Attr).filter(Attr.id == attr_id).one()
#now check whether the supplied unit can be applied by comparing it to the attribute's dimension
units.check_unit_matches_dimension(unit_id, attr_i.dimension_id)
#set the unit ID for each of the resource scenarios
for network_rs in network_rs_list:
network_rs.dataset.unit_id = unit_id
| hydraplatform/hydra-base | hydra_base/lib/network.py | network.py | py | 127,911 | python | en | code | 8 | github-code | 6 |
8659872785 |
import argparse
import os
import sys
import time
from watchdog.observers import Observer
from watchdog.events import LoggingEventHandler
from watchdog.events import FileSystemEventHandler
#import multiprocessing as mp
from collections import OrderedDict
import re
import copy
import json
import subprocess
#import sched
import webbrowser
from shutil import copyfile, which
import dateutil
from datetime import datetime
from operator import itemgetter
from .version import __version__
from .statsparser import get_argument_parser as sp_get_argument_parser
from .statsparser import parse_args as sp_parse_args
from .helper import initLogger, resources_dir, get_script_dir, hostname, ArgHelpFormatter, r_file, r_dir, rw_dir, defaults, jinja_env
import threading
import logging
import queue
from pathlib import Path
from jinja2 import Environment, PackageLoader, select_autoescape
ALL_RUNS = {}
ALL_RUNS_LOCK = threading.RLock()
SP_DIRS = {}
SP_DIRS_LOCK = threading.RLock()
MUX_RESULTS = {}
MUX_RESULTS_LOCK = threading.RLock()
UPDATE_OVERVIEW = False
UPDATE_OVERVIEW_LOCK = threading.RLock()
logger = None
class parse_statsparser_args(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
to_test = values.split(' ')
argument_parser = sp_get_argument_parser()
args = sp_parse_args(argument_parser, to_test)
setattr(namespace,self.dest,to_test)
def parse_args():
parser = argparse.ArgumentParser(description='''A tool for monitoring and protocoling sequencing runs
performed on the Oxford Nanopore Technologies GridION
sequencer and for automated post processing and transmission
of generated data. It collects information on QC and
sequencing experiments and displays summaries of mounted
flow cells as well as comprehensive reports about currently
running and previously performed experiments.''',
formatter_class=ArgHelpFormatter,
add_help=False)
general_group = parser.add_argument_group('General arguments',
"arguments for advanced control of the program's behavior")
general_group.add_argument('-n', '--no_transfer',
action='store_true',
help='''no data transfer to the remote host''')
general_group.add_argument('-a', '--all_fast5',
action='store_true',
help='''also put fast5 files of reads removed by length and quality
filtering into barcode bins''')
general_group.add_argument('-p', '--pass_only',
action='store_true',
help='''use data from fastq_pass only''')
general_group.add_argument('-l', '--min_length',
type=int,
default=1000,
help='''minimal length to pass filter''')
general_group.add_argument('-r', '--min_length_rna',
type=int,
default=50,
help='''minimal length to pass filter for rna libraries''')
general_group.add_argument('-q', '--min_quality',
type=int,
default=5,
help='''minimal quality to pass filter''')
general_group.add_argument('-d', '--rsync_dest',
default="{}@{}:{}".format(defaults()["user"], defaults()["host"], defaults()["dest"]),
help='''destination for data transfer with rsync, format USER@HOST[:DEST].
Key authentication for the specified destination must be set up (see option -i),
otherwise data transfer will fail. Default value is parsed from setting
file {}'''.format(os.path.join(resources_dir, "defaults.ini")))
general_group.add_argument('-i', '--identity_file',
default="{}".format(defaults()["identity"]),
help='''file from which the identity (private key) for public key authentication is read.
Default value is parsed from setting file {}'''.format(os.path.join(resources_dir, "defaults.ini")))
general_group.add_argument('--bc_kws',
nargs='*',
default=['RBK', 'NBD', 'RAB', 'LWB', 'PBK', 'RPB', 'arcod'],
help='''if at least one of these key words is a substring of the run name,
porechop is used to demultiplex the fastq data''')
general_group.add_argument('-u', '--update_interval',
type=int,
default=300,
help='minimum time interval in seconds for updating the content of a report page')
general_group.add_argument('-m', '--ignore_file_modifications',
action='store_true',
help='''Ignore file modifications and only consider file creations regarding
determination of the latest log files''')
io_group = parser.add_argument_group('I/O arguments',
'Further input/output arguments. Only for special use cases')
io_group.add_argument('-o', '--output_dir',
action=rw_dir,
default="/data/dominION/",
help='Path to the base directory where experiment reports shall be saved')
arg_data_basedir = \
io_group.add_argument('--data_basedir',
action=rw_dir,
default='/data',
help='Path to the directory where basecalled data is saved')
io_group.add_argument('--minknow_log_basedir',
action=r_dir,
default='/var/log/MinKNOW',
help='''Path to the base directory of GridIONs log files''')
io_group.add_argument('--logfile',
help='''File in which logs will be saved
(default: OUTPUTDIR/logs/YYYY-MM-DD_hh:mm_HOSTNAME_LOGLVL.log)''')
sp_arguments = parser.add_argument_group('Statsparser arguments',
'Arguments passed to statsparser for formatting html reports')
sp_arguments.add_argument('--statsparser_args',
action=parse_statsparser_args,
default=[],
help='''Arguments that are passed to the statsparser script.
See a full list of available arguments with --statsparser_args " -h" ''')
help_group = parser.add_argument_group('Help')
help_group.add_argument('-h', '--help',
action='help',
default=argparse.SUPPRESS,
help='Show this help message and exit')
help_group.add_argument('--version',
action='version',
version=__version__,
help="Show program's version string and exit")
help_group.add_argument('-v', '--verbose',
action='store_true',
help='Additional debug messages are printed to stdout')
help_group.add_argument('--quiet',
action='store_true',
help='Only errors and warnings are printed to stdout')
args = parser.parse_args()
ns = argparse.Namespace()
arg_data_basedir(parser, ns, args.data_basedir, '')
if not os.path.exists(args.identity_file):
print("Identity file {} does not exists. Please check key authentication settings or specify a different key with option -i.".format(args.identity_file))
exit()
args.watchnchop_args = []
if args.no_transfer:
args.watchnchop_args.append('-n')
if args.all_fast5:
args.watchnchop_args.append('-a')
if args.pass_only:
args.watchnchop_args.append('-p')
#args.watchnchop_args.extend(['-l', str(args.min_length)])
#args.watchnchop_args.extend(['-r', str(args.min_length_rna)])
args.watchnchop_args.extend(['-q', str(args.min_quality)])
args.watchnchop_args.extend(['-d', args.rsync_dest])
args.watchnchop_args.extend(['-i', args.identity_file])
return args
def main(args):
global ALL_RUNS
global ALL_RUNS_LOCK
global UPDATE_OVERVIEW
global logger
for p in [args.output_dir,
os.path.join(args.output_dir, 'runs'),
os.path.join(args.output_dir, 'qc'),
os.path.join(args.output_dir, 'logs')]:
if not os.path.exists(p):
os.makedirs(p)
if args.verbose:
loglvl = logging.DEBUG
elif args.quiet:
loglvl = logging.WARNING
else:
loglvl = logging.INFO
if not args.logfile:
logs_filename = "{}_{}_{}.log".format(datetime.now().strftime("%Y-%m-%d_%H:%M"), hostname, loglvl)
args.logfile = os.path.join(args.output_dir, 'logs', logs_filename)
initLogger(logfile=args.logfile, level=loglvl)
logger = logging.getLogger(name='gw')
logger.info("##### starting dominION {} #####\n".format(__version__))
logger.info("setting up dominION status page environment")
if not os.path.exists(os.path.join(args.output_dir, 'res')):
os.makedirs(os.path.join(args.output_dir, 'res'))
for res_file in ['style.css', 'flowcell.png', 'no_flowcell.png']:
copyfile(os.path.join(resources_dir, res_file),
os.path.join(args.output_dir, 'res', res_file))
import_qcs(os.path.join(args.output_dir, "qc"))
import_runs(os.path.join(args.output_dir, "runs"))
logger.info("starting to observe runs directory for changes to directory names")
observed_dir = os.path.join(args.output_dir, 'runs')
event_handler = RunsDirsEventHandler(observed_dir)
observer = Observer()
observer.schedule(event_handler,
observed_dir,
recursive=True)
observer.start()
logger.info("starting channel watchers:")
watchers = []
for channel in range(5):
watchers.append(Watcher(args.minknow_log_basedir,
channel,
args.ignore_file_modifications,
args.output_dir,
args.data_basedir,
args.statsparser_args,
args.update_interval,
args.watchnchop_args,
args.min_length,
args.min_length_rna,
args.bc_kws))
logger.info("initiating dominION overview page")
update_overview(watchers, args.output_dir)
webbrowser.open('file://' + os.path.realpath(os.path.join(args.output_dir, "{}_overview.html".format(hostname))))
logger.info("entering main loop")
try:
n = 0
while True:
for watcher in watchers:
watcher.check_q()
if UPDATE_OVERVIEW:
update_overview(watchers, args.output_dir)
UPDATE_OVERVIEW = False
time.sleep(0.2)
n += 1
if n == 100:
n = 0
set_update_overview()
except KeyboardInterrupt:
for watcher in watchers:
watcher.observer.stop()
if watcher.spScheduler and watcher.spScheduler.is_alive():
watcher.stop_statsparser(0.05)
for wcScheduler in watcher.wcScheduler:
if wcScheduler and wcScheduler.is_alive():
wcScheduler.join(timeout=0.05)
for watcher in watchers:
logger.info("joining GA{}0000's observer".format(watcher.channel))
watcher.observer.join()
for wcScheduler in watcher.wcScheduler:
if wcScheduler and wcScheduler.is_alive():
logger.info("joining GA{}0000's watchnchop scheduler".format(watcher.channel))
wcScheduler.join()
for watcher in watchers:
if watcher.spScheduler and watcher.spScheduler.is_alive():
logger.info("joining GA{}0000's statsparser scheduler".format(watcher.channel))
watcher.stop_statsparser()
def set_update_overview():
global UPDATE_OVERVIEW
UPDATE_OVERVIEW_LOCK.acquire()
UPDATE_OVERVIEW = True
UPDATE_OVERVIEW_LOCK.release()
def add_database_entry(flowcell, run_data, mux_scans):
ALL_RUNS_LOCK.acquire()
#TODO: check for all mandatory entries
asic_id_eeprom = flowcell['asic_id_eeprom']
run_id = run_data['run_id']
if asic_id_eeprom in ALL_RUNS:
if run_id in ALL_RUNS[asic_id_eeprom]:
logger.warning("{} exists multiple times in database!".format(run_id))
logger.warning("conflicting runs: {}, {}".format(ALL_RUNS[asic_id_eeprom][run_id]['run_data']['relative_path'],
run_data['relative_path']))
ALL_RUNS_LOCK.release()
return False
else:
ALL_RUNS[asic_id_eeprom] = {}
ALL_RUNS[asic_id_eeprom][run_id] = {'flowcell' : flowcell,
'run_data' : run_data,
'mux_scans' : mux_scans}
logger.debug('{} - added experiment of type "{}" performed on flowcell "{}" on "{}"'.format(asic_id_eeprom,
run_data['experiment_type'],
flowcell['flowcell_id'],
run_data['protocol_start']))
ALL_RUNS_LOCK.release()
return True
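# Resulting in-memory layout of ALL_RUNS (values below are purely illustrative):
#   ALL_RUNS['<asic_id_eeprom>']['<run_id>'] = {'flowcell': {...},
#                                               'run_data': {...},
#                                               'mux_scans': [...]}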
def add_mux_scan_results(flowcell_data, mux_scans):
MUX_RESULTS_LOCK.acquire()
asic_id_eeprom = flowcell_data['asic_id_eeprom']
flowcell_id = flowcell_data['flowcell_id']
if asic_id_eeprom not in MUX_RESULTS:
MUX_RESULTS[asic_id_eeprom] = []
for mux_scan in mux_scans:
mux_scan_copy = copy.deepcopy(mux_scan)
if not 'total' in mux_scan:
if 'group * total' in mux_scan:
mux_scan_copy['total'] = mux_scan['group * total']
del mux_scan_copy['group * total']
else:
continue
mux_scan_copy['flowcell_id'] = flowcell_id
mux_scan_copy['timestamp'] = dateutil.parser.parse(mux_scan['timestamp'])
for i in range(len(MUX_RESULTS[asic_id_eeprom])):
if mux_scan_copy['timestamp'] < MUX_RESULTS[asic_id_eeprom][i]['timestamp']:
MUX_RESULTS[asic_id_eeprom].insert(i, mux_scan_copy)
break
else:
MUX_RESULTS[asic_id_eeprom].append(mux_scan_copy)
MUX_RESULTS_LOCK.release()
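# MUX_RESULTS is keyed by asic_id_eeprom; each value is a list of mux scan dicts kept
# sorted by timestamp, e.g. (hypothetical values):
#   MUX_RESULTS['<asic_id_eeprom>'] = [{'timestamp': datetime(2019, 1, 2, 8, 30),
#                                       'total': 1245, 'flowcell_id': 'FAK12345'}, ...]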
def import_qcs(qc_dir):
logger.info("importing platform qc entries from files in directory {}".format(qc_dir))
for fp in [os.path.join(qc_dir, fn) for fn in os.listdir(qc_dir) if fn.endswith('.json')]:
if os.path.isfile(fp):
with open(fp, "r") as f:
try:
flowcell, run_data, mux_scans = json.loads(f.read(), object_pairs_hook=OrderedDict)
except:
logger.warning("failed to parse {}, json format or data structure corrupt".format(fn))
continue
asic_id_eeprom = flowcell['asic_id_eeprom']
add_mux_scan_results(flowcell, mux_scans)
def import_runs(base_dir, refactor=False):
logger.info("importing sequencing run entries from files in directory {}".format(base_dir))
for experiment in [d for d in os.listdir(base_dir) if os.path.isdir(os.path.join(base_dir, d))]:
experiment_dir = os.path.join(base_dir, experiment)
for sample in [d for d in os.listdir(experiment_dir) if os.path.isdir(os.path.join(experiment_dir, d))]:
sample_dir = os.path.join(experiment_dir, sample)
for fp in [os.path.join(sample_dir, fn) for fn in os.listdir(sample_dir) if fn.endswith('.json')]:
if os.path.isfile(fp):
with open(fp, "r") as f:
try:
flowcell, run_data, mux_scans = json.loads(f.read(), object_pairs_hook=OrderedDict)
except:
logger.warning("failed to parse {}, json format or data structure corrupt".format(fn))
continue
# temporarily change attributes experiment and sample according to directory names
prev = (run_data['experiment'] if 'experiment' in run_data else None,
run_data['sample'] if 'sample' in run_data else None)
changed = prev != (experiment, sample)
run_data['experiment'] = experiment
run_data['sample'] = sample
if refactor and changed:
# make changes permanent
logging.info("writing changes to attributes 'experiment' and 'sample' to file")
data = (flowcell, run_data, mux_scans)
with open( fp, 'w') as f:
print(json.dumps(data, indent=4), file=f)
if not add_database_entry(flowcell, run_data, mux_scans):
logger.error("failed to add content from {} to the database".format(fp))
continue
# add mux scans
add_mux_scan_results(flowcell, mux_scans)
def get_runs_by_flowcell(asic_id_eeprom):
ALL_RUNS_LOCK.acquire()
runs = {}
if asic_id_eeprom:
if asic_id_eeprom in ALL_RUNS:
for run_id in ALL_RUNS[asic_id_eeprom]:
if 'qc' not in ALL_RUNS[asic_id_eeprom][run_id]['run_data']['experiment_type'].lower():
runs[run_id] = ALL_RUNS[asic_id_eeprom][run_id]
ALL_RUNS_LOCK.release()
return runs
def get_latest_mux_scan_result(asic_id_eeprom):
latest_result = None
MUX_RESULTS_LOCK.acquire()
if asic_id_eeprom in MUX_RESULTS:
# entries are kept in ascending timestamp order, so the most recent scan is the last one
latest_result = MUX_RESULTS[asic_id_eeprom][-1]
MUX_RESULTS_LOCK.release()
return latest_result
def get_latest(runs):
latest_qc = None
latest_start = None
for run_id in runs:
protocol_start = dateutil.parser.parse(runs[run_id]['run_data']['protocol_start'])
# keep the run with the most recent protocol_start
if latest_qc is None or protocol_start > latest_start:
latest_qc = run_id
latest_start = protocol_start
return latest_qc
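# For example, given runs = {'r1': {'run_data': {'protocol_start': '2019-01-01T10:00:00'}},
#                            'r2': {'run_data': {'protocol_start': '2019-02-01T09:00:00'}}}
# get_latest(runs) returns 'r2', the run with the most recent protocol_start.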
def update_overview(watchers, output_dir):
channel_to_css = {0:"one", 1:"two", 2:"three", 3:"four", 4:"five"}
render_dict = {"version" : __version__,
"dateTimeNow" : datetime.now().strftime("%Y-%m-%d_%H:%M"),
"channels" : [],
"all_exp" : []
}
for watcher in watchers:
channel = watcher.channel
render_dict["channels"].append({})
asic_id_eeprom = None
try:
asic_id_eeprom = watcher.channel_status.flowcell['asic_id_eeprom']
except:
pass
runs = get_runs_by_flowcell(asic_id_eeprom)
#qcs = get_qcs_by_flowcell(asic_id_eeprom)
render_dict["channels"][channel]['latest_qc'] = {}
latest_qc = get_latest_mux_scan_result(asic_id_eeprom)
if latest_qc:
render_dict["channels"][channel]['latest_qc']['timestamp'] = latest_qc['timestamp'].date()
render_dict["channels"][channel]['latest_qc']['total'] = latest_qc['total']
if 'in_use' in latest_qc and watcher.channel_status.sequencing:
render_dict["channels"][channel]['latest_qc']['in_use'] = latest_qc['in_use']
else:
render_dict["channels"][channel]['latest_qc']['in_use'] = 0
render_dict["channels"][channel]['runs'] = []
for run_id in runs:
experiment = runs[run_id]['run_data']['experiment']
if not experiment:
if 'user_filename_input' in runs[run_id]['run_data']:
experiment = runs[run_id]['run_data']['user_filename_input']
if not experiment:
logger.warning('not adding run with id {} to overview because no experiment name is set'.format(run_id))
continue
sample = runs[run_id]['run_data']['sample']
if not sample:
sample = experiment
link = os.path.abspath(os.path.join(output_dir,'runs',experiment,sample,'report.html'))
render_dict["channels"][channel]['runs'].append({'experiment':experiment,
'link':link})
render_dict["channels"][channel]['channel'] = channel_to_css[watcher.channel]
render_dict["channels"][channel]['asic_id_eeprom'] = asic_id_eeprom
if asic_id_eeprom:
if not latest_qc:
render_dict["channels"][channel]['flowcell_id'] = "NO RECORDS"
else:
render_dict["channels"][channel]['flowcell_id'] = latest_qc['flowcell_id']
else:
render_dict["channels"][channel]['flowcell_id'] = '-'
ALL_RUNS_LOCK.acquire()
all_runs_info = []
for asic_id_eeprom in ALL_RUNS:
for run_id in ALL_RUNS[asic_id_eeprom]:
experiment_type = ALL_RUNS[asic_id_eeprom][run_id]['run_data']['experiment_type']
if not 'qc' in experiment_type.lower():
protocol_start = dateutil.parser.parse(ALL_RUNS[asic_id_eeprom][run_id]['run_data']['protocol_start'])
duration = "N/A"
if 'protocol_end' in ALL_RUNS[asic_id_eeprom][run_id]['run_data']:
if ALL_RUNS[asic_id_eeprom][run_id]['run_data']['protocol_end']:
protocol_end = dateutil.parser.parse(ALL_RUNS[asic_id_eeprom][run_id]['run_data']['protocol_end'])
duration = "{}".format(protocol_end - protocol_start).split('.')[0]
sequencing_kit = ALL_RUNS[asic_id_eeprom][run_id]['run_data']['sequencing_kit']
experiment = ALL_RUNS[asic_id_eeprom][run_id]['run_data']['experiment']
sample = ALL_RUNS[asic_id_eeprom][run_id]['run_data']['sample']
if not sample:
sample = experiment
link = os.path.abspath(os.path.join(output_dir,'runs',experiment,sample,'report.html'))
all_runs_info.append({'link':link,
'experiment':experiment,
'sample': sample,
'sequencing_kit': sequencing_kit,
'protocol_start': protocol_start,
'duration': duration})
ALL_RUNS_LOCK.release()
if all_runs_info:
all_runs_info = sorted(all_runs_info, key=lambda k: k['protocol_start'], reverse=True)
run = 0
sample = 0
grouped = [[[all_runs_info[0]]]] if all_runs_info else [[[]]]
for run_info in all_runs_info[1:]:
if grouped[run][sample][0]['experiment'] == run_info['experiment']:
if grouped[run][sample][0]['sample'] == run_info['sample']:
grouped[run][sample].append(run_info)
else:
grouped[run].append( [run_info] )
sample += 1
else:
grouped.append( [[run_info]] )
run += 1
sample = 0
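# After grouping, 'grouped' is a three-level list indexed as grouped[experiment][sample][run],
# e.g. (hypothetical): [[[run_a1, run_a2], [run_b1]], [[run_c1]]] means experiment 1 has
# samples a (two runs) and b (one run), while experiment 2 has a single sample with one run.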
for exp in grouped:
render_dict['all_exp'].append(
{'num_samples':str(sum([len(sample) for sample in exp])),
'experiment':exp[0][0]['experiment'],
'samples':[]})
for sample in exp:
render_dict['all_exp'][-1]['samples'].append(
{'num_runs':str(len(sample)),
'link':sample[0]['link'],
'sample':sample[0]['sample'],
'runs':[]})
for run in sample:
render_dict['all_exp'][-1]['samples'][-1]['runs'].append(run)
template = jinja_env.get_template('overview.template')
with open(os.path.join(output_dir, "{}_overview.html".format(hostname)), 'w') as f:
print(template.render(render_dict), file=f)
class ChannelStatus():
empty_run_data = OrderedDict([
('run_id', None),
('minion_id', None),
('sequencing_kit', None),
('protocol_start', None),
('protocol_end', None),
('relative_path', None),
('sample', None),
('experiment', None)
])
empty_flowcell = OrderedDict([
('flowcell_id', None),
('asic_id', None),
('asic_id_eeprom', None),
('flowcell', None)
])
empty_mux = OrderedDict()
def __init__(self, minion_id, channel):
self.minion_id = minion_id
self.flowcell = copy.deepcopy(self.empty_flowcell)
self.run_data = copy.deepcopy(self.empty_run_data)
self.mux_scans = []
self.run_data['minion_id'] = minion_id
self.logger = logging.getLogger(name='gw.w{}.cs'.format(channel+1))
self.sequencing = False
def update(self, content, overwrite=False):
for key in content:
if key in self.flowcell:
if self.flowcell[key]:
if overwrite:
self.logger.info("changing the current value of {} ({}) to {}".format(key, self.flowcell[key], content[key]))
self.flowcell[key] = content[key]
else:
self.logger.debug("not changing the current value of {} ({}) to {}".format(key, self.flowcell[key], content[key]))
continue
else:
self.flowcell[key] = content[key]
self.logger.info("new flowcell value for {} : {}".format(key, content[key]))
continue
elif key in self.run_data:
if self.run_data[key]:
if overwrite:
self.logger.info("changing the current value of {} ({}) to {}".format(key, self.run_data[key], content[key]))
self.run_data[key] = content[key]
else:
self.logger.debug("not changing the current value of {} ({}) to {}".format(key, self.run_data[key], content[key]))
continue
self.run_data[key] = content[key]
self.logger.info("new run value for {} : {}".format(key, content[key]))
def add_mux_scan(self, timestamp, active_pores, in_use=None):
self.mux_scans.append(copy.deepcopy(self.empty_mux))
self.mux_scans[-1]['timestamp'] = timestamp
self.mux_scans[-1]['total'] = active_pores
if in_use:
self.mux_scans[-1]['in_use'] = in_use
add_mux_scan_results(self.flowcell, [self.mux_scans[-1]])
self.logger.debug("added new mux scan result")
def flowcell_disconnected(self):
self.logger.info("resetting flowcell and run data")
self.flowcell = copy.deepcopy(self.empty_flowcell)
self.run_data = copy.deepcopy(self.empty_run_data)
self.run_data['minion_id'] = self.minion_id
self.mux_scans = []
self.sequencing = False
def reset_channel(self):
self.logger.info("resetting run data")
self.run_data = copy.deepcopy(self.empty_run_data)
self.run_data['minion_id'] = self.minion_id
self.mux_scans = []
self.sequencing = False
class WatchnchopScheduler(threading.Thread):
def __init__(self, data_basedir, relative_path, experiment, sequencing_kit, fastq_reads_per_file,
bc_kws, stats_fp, channel, watchnchop_args, min_length, min_length_rna):
threading.Thread.__init__(self)
self.daemon = True
self.stoprequest = threading.Event() # set when joined without timeout (e.g. if terminated with ctrl-c)
self.exp_end = threading.Event() # set when joined with timeout (e.g. if experiment ended)
self.logger = logging.getLogger(name='gw.w{}.wcs'.format(channel+1))
self.observed_dir = os.path.join(data_basedir, relative_path, 'fastq_pass')
# define the command that is to be executed
self.cmd = [which('perl'),
which('watchnchop'),
'-o', stats_fp,
'-f', str(fastq_reads_per_file)]
if watchnchop_args:
self.cmd.extend(watchnchop_args)
for kw in bc_kws:
if kw.lower() in experiment.lower() or kw.lower() in sequencing_kit.lower():
self.cmd.append('-b')
break
self.cmd.append('-l')
if 'rna' in experiment.lower() or 'rna' in sequencing_kit.lower():
self.cmd.append(str(min_length_rna))
else:
self.cmd.append(str(min_length))
self.cmd.append(os.path.join(data_basedir, relative_path, ''))
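# With default settings and hypothetical paths, the assembled command looks roughly like:
#   perl watchnchop -o <stats_fp> -f <fastq_reads_per_file> -q 5 -d USER@HOST:DEST -i <identity_file> [-b] -l 1000 /data/<relative_path>/
# ('-b' is only appended when one of the barcoding keywords matches the experiment or kit name)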
self.process = None
def run(self):
self.logger.info("STARTED watchnchop scheduler")
while not (self.stoprequest.is_set() or self.exp_end.is_set()):
if self.conditions_met():
self.process = subprocess.Popen(self.cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, creationflags=subprocess.IDLE_PRIORITY_CLASS)
self.logger.info("STARTED WATCHNCHOP with arguments: {}".format(self.cmd))
break
time.sleep(1)
while not (self.stoprequest.is_set() or self.exp_end.is_set()):
time.sleep(1)
if self.process:
try:
self.process.terminate()
self.logger.info("TERMINATED watchnchop process")
except:
self.logger.error("TERMINATING watchnchop process failed")
else:
if self.stoprequest.is_set():
self.logger.error("watchnchop was NEVER STARTED: this thread was ordered to kill the watchnchop subprocess before it was started")
return
# try one last time to start watchnchop (necessary for runs with extremely low output, where all reads are buffered)
self.logger.info("starting watchnchop in one minute, then killing it after another 5 minutes")
for i in range(60):
if self.stoprequest.is_set():
self.logger.error("watchnchop was NEVER STARTED: this thread was ordered to kill the watchnchop subprocess before it was started")
return
time.sleep(1)
if self.conditions_met():
self.process = subprocess.Popen(self.cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, creationflags=subprocess.IDLE_PRIORITY_CLASS)
self.logger.info("STARTED WATCHNCHOP with arguments: {}".format(self.cmd))
else:
self.logger.error("watchnchop NOT STARTED: directory {} still does not exist or contains no fastq files".format(self.observed_dir))
return
for i in range(300):
if self.stoprequest.is_set():
break
time.sleep(1)
self.process.terminate()
self.logger.info("TERMINATED watchnchop process")
def conditions_met(self):
if os.path.exists(self.observed_dir):
if [fn for fn in os.listdir(self.observed_dir) if fn.endswith('.fastq')]:
return True
return False
def join(self, timeout=None):
if timeout:
self.exp_end.set()
else:
self.stoprequest.set()
super(WatchnchopScheduler, self).join(timeout)
class StatsparserScheduler(threading.Thread):
def __init__(self, update_interval, sample_dir, statsparser_args, channel):
threading.Thread.__init__(self)
if getattr(self, 'daemon', None) is None:
self.daemon = True
else:
self.setDaemon(True)
self.stoprequest = threading.Event() # set when joined without timeout (eg if terminated with ctr-c)
self.exp_end = threading.Event() # set when joined with timeout (eg if experiment ended)
self.logger = logging.getLogger(name='gw.w{}.sps'.format(channel+1))
self.channel = channel
self.update_interval = update_interval
self.sample_dir = sample_dir
self.statsparser_args = statsparser_args
self.page_opened = False
def run(self):
        while not (self.stoprequest.is_set() or self.exp_end.is_set()):
last_time = time.time()
if self.conditions_met():
self.update_report()
this_time = time.time()
            while (this_time - last_time < self.update_interval) and not (self.stoprequest.is_set() or self.exp_end.is_set()):
time.sleep(1)
this_time = time.time()
# start statsparser a last time if the experiment ended
if not self.stoprequest.is_set() and self.conditions_met():
self.update_report()
SP_DIRS_LOCK.acquire()
if self.sample_dir in SP_DIRS:
if SP_DIRS[self.sample_dir] == self.channel:
del SP_DIRS[self.sample_dir]
SP_DIRS_LOCK.release()
def conditions_met(self):
conditions_met = False
stats_fns = [fn for fn in os.listdir(os.path.abspath(self.sample_dir)) if fn.endswith('stats.csv')] if os.path.exists(os.path.abspath(self.sample_dir)) else []
# assure that only one statsparser instance is running on a directory at a time
SP_DIRS_LOCK.acquire()
if not self.sample_dir in SP_DIRS:
SP_DIRS[self.sample_dir] = self.channel
if stats_fns and SP_DIRS[self.sample_dir] == self.channel:
conditions_met = True
SP_DIRS_LOCK.release()
return conditions_met
def update_report(self):
self.logger.info("updating report...")
cmd = [os.path.join(get_script_dir(),'statsparser'), #TODO: change to which() ?
self.sample_dir,
'-q']
cmd.extend(self.statsparser_args)
cp = subprocess.run(cmd) # waits for process to complete
if cp.returncode == 0:
if not self.page_opened:
basedir = os.path.abspath(self.sample_dir)
fp = os.path.join(basedir, 'report.html')
self.logger.info("OPENING " + fp)
try:
webbrowser.open('file://' + os.path.realpath(fp))
except:
pass
self.page_opened = True
else:
self.logger.warning("statsparser returned with errorcode {} for directory {}".format(cp.returncode, self.sample_dir))
def join(self, timeout=None):
if timeout:
self.exp_end.set()
else:
self.stoprequest.set()
super(StatsparserScheduler, self).join(timeout)
class Watcher():
def __init__(self, minknow_log_basedir, channel, ignore_file_modifications, output_dir, data_basedir,
statsparser_args, update_interval, watchnchop_args, min_length, min_length_rna, bc_kws):
self.q = queue.PriorityQueue()
self.watchnchop_args = watchnchop_args
self.min_length = min_length
self.min_length_rna = min_length_rna
self.channel = channel
self.output_dir = output_dir
self.data_basedir = data_basedir
self.statsparser_args = statsparser_args
self.update_interval = update_interval
self.bc_kws = bc_kws
self.observed_dir = os.path.join(minknow_log_basedir, "GA{}0000".format(channel+1))
self.event_handler = LogFilesEventHandler(self.q, ignore_file_modifications, channel)
self.observer = Observer()
self.observer.schedule(self.event_handler,
self.observed_dir,
recursive=False)
self.observer.start()
self.channel_status = ChannelStatus("GA{}0000".format(channel+1), channel)
self.spScheduler = None
self.wcScheduler = []
self.logger = logging.getLogger(name='gw.w{}'.format(channel+1))
self.logger.info("...watcher for {} ready".format(self.observed_dir))
def check_q(self):
        # checking scheduler queue
if not self.q.empty():
self.logger.debug("Queue content for {}:".format(self.observed_dir))
while not self.q.empty():
timestamp, origin, line = self.q.get()
self.logger.debug("received '{}' originating from '{} log' at '{}'".format(line, origin, timestamp))
if origin == 'server':
self.parse_server_log_line(line)
elif origin == 'bream':
self.parse_bream_log_line(line)
#elif origin == 'analyser':
# self.parse_analyser_log_line(line)
def parse_server_log_line(self, line):
dict_content = {}
overwrite = False
timestamp = line[:23]
# fetch output_path, run_id, script_path, relative_path, protocol_start, flowcell_id [, experiment, sample]
if "protocol_started" in line:
for m in re.finditer('([^\s,]+) = ([^\s,]+)', line):
dict_content[m.group(1)] = m.group(2)
overwrite = True
dict_content['relative_path'] = dict_content['output_path'].split("/./")[1].strip("/")
subdirs = dict_content['relative_path'].split('/')
if len(subdirs) == 3:
# case sequencing protocol
dict_content['experiment'] = subdirs[0]
dict_content['sample'] = subdirs[1]
dict_content['flowcell_id'] = subdirs[2].split('_')[3]
elif len(subdirs) == 1:
# case qc protocol
dict_content['flowcell_id'] = subdirs[0].split('_')[3]
self.logger.info("PROTOCOL START")
set_update_overview()
self.channel_status.run_data['protocol_start'] = timestamp
# fetch protocol_end
elif "protocol_finished" in line:
self.logger.info("PROTOCOL END")
set_update_overview()
self.channel_status.run_data['protocol_end'] = timestamp
if self.channel_status.mux_scans:
self.save_logdata()
self.channel_status.reset_channel()
self.stop_statsparser()
self.stop_watchnchop()
#
elif "[engine/info]: : flowcell_discovered" in line:
for m in re.finditer('([^\s,]+) = ([^\s,]+)', line):
dict_content[m.group(1)] = m.group(2)
overwrite = True
self.logger.info("FLOWCELL DISCOVERED")
set_update_overview()
self.channel_status.flowcell_disconnected()
self.stop_statsparser()
self.stop_watchnchop()
elif "[engine/info]: : data_acquisition_started" in line:
for m in re.finditer('([^\s,]+) = ([^\s,]+)', line):
dict_content[m.group(1)] = m.group(2)
overwrite = True
elif "flowcell_disconnected" in line:
self.logger.info("FLOWCELL DISCONNECTED")
set_update_overview()
self.channel_status.flowcell_disconnected()
elif "pores available for sequencing" in line:
active_pores = None
in_use = None
for m in re.finditer("has ([0-9]+) pores available for sequencing", line):
active_pores = m.group(1)
for m in re.finditer("Starting sequencing with ([0-9]+) pores", line):
in_use = m.group(1)
self.logger.info("new mux scan result: {} active, {} in use".format(active_pores, in_use))
self.channel_status.add_mux_scan(timestamp, active_pores, in_use=in_use)
set_update_overview()
self.save_logdata()
if dict_content:
self.channel_status.update(dict_content, overwrite)
def parse_bream_log_line(self, line):
dict_content = {}
overwrite = False
timestamp = line.split(" - ")[1]
if "INFO - Attribute" in line:
for m in re.finditer("([^\s,]+) set to (.+)", line):
dict_content[m.group(1)] = m.group(2)
elif "INFO - Asked to start protocol" in line:
for m in re.finditer("'--([^\s,]+)=([^\s,]+)'", line):
dict_content[m.group(1)] = m.group(2)
overwrite = True
elif "INFO - Updating context tags in MinKNOW with" in line:
for m in re.finditer("'([^\s,]+)'[:,] u?'([^\s,]+)'", line):
dict_content[m.group(1)] = m.group(2)
if 'sequencing_kit' in dict_content:
dict_content['sequencing_kit'] = dict_content['sequencing_kit'].upper()
elif "platform_qc.report" in line:
self.logger.info("QC FINISHED")
elif "sequencing.start" in line:
dict_content["sequencing_start_time"] = timestamp
self.logger.info("SEQUENCING STARTS")
self.channel_status.sequencing = True
set_update_overview()
self.start_watchnchop()
self.start_statsparser()
if dict_content:
self.channel_status.update(dict_content, overwrite)
def check_attributes(self, attributes):
for key in attributes:
if key in self.channel_status.run_data:
if self.channel_status.run_data[key]:
continue
else:
return key
elif key in self.channel_status.flowcell:
if self.channel_status.flowcell[key]:
continue
else:
return key
else:
return key
return None
def save_logdata(self):
missing_key = self.check_attributes(['experiment_type', 'run_id', 'flowcell_id', 'asic_id_eeprom'])
if missing_key:
self.logger.warning("NOT SAVING REPORT for {} because the crucial attribute '{}' is missing".format(self.channel_status.run_data['run_id'], missing_key))
return
fn = []
if "qc" in self.channel_status.run_data['experiment_type'].lower():
missing_key = self.check_attributes(['experiment', 'sample'])
if not missing_key:
self.logger.warning("NOT SAVING REPORT for {} because it is not certain that this is a qc run".format(self.channel_status.run_data['run_id']))
return
fn.extend(["QC", self.channel_status.flowcell['flowcell_id'], self.channel_status.run_data['run_id']])
target_dir = os.path.join(self.output_dir, 'qc')
else:
missing_key = self.check_attributes(['experiment', 'sample'])
if missing_key:
self.logger.warning("NOT SAVING REPORT for {} because the crucial attribute '{}' is missing".format(self.channel_status.run_data['run_id'], missing_key))
return
fn.extend([self.channel_status.run_data['run_id'], 'logdata'])
target_dir = os.path.join(self.output_dir,
'runs',
self.channel_status.run_data['experiment'],
self.channel_status.run_data['sample'])
fn = "_".join(fn) + ".json"
self.logger.info("saving log data to file {}".format(os.path.join(target_dir, fn)))
data = (self.channel_status.flowcell, self.channel_status.run_data, self.channel_status.mux_scans)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
with open( os.path.join(target_dir, fn), 'w') as f:
print(json.dumps(data, indent=4), file=f)
ALL_RUNS_LOCK.acquire()
run_id = self.channel_status.run_data['run_id']
asic_id_eeprom = self.channel_status.flowcell['asic_id_eeprom']
if asic_id_eeprom in ALL_RUNS:
ALL_RUNS[asic_id_eeprom][run_id] = {'flowcell': data[0],
'run_data': data[1],
'mux_scans': data[2]}
else:
ALL_RUNS[asic_id_eeprom] = {}
ALL_RUNS[asic_id_eeprom][run_id] = {'flowcell': data[0],
'run_data': data[1],
'mux_scans': data[2]}
ALL_RUNS_LOCK.release()
def start_watchnchop(self):
missing_key = self.check_attributes(['experiment', 'sample', 'sequencing_kit', 'run_id', 'fastq_reads_per_file', 'relative_path'])
if missing_key:
self.logger.warning("NOT executing watchnchop because the crucial attribute '{}' is missing".format(missing_key))
return
self.stop_watchnchop()
stats_fp = os.path.join(self.output_dir,
'runs',
self.channel_status.run_data['experiment'],
self.channel_status.run_data['sample'],
"{}_stats.csv".format(self.channel_status.run_data['run_id']))
self.wcScheduler.append(WatchnchopScheduler(self.data_basedir,
self.channel_status.run_data['relative_path'],
self.channel_status.run_data['experiment'],
self.channel_status.run_data['sequencing_kit'],
self.channel_status.run_data['fastq_reads_per_file'],
self.bc_kws,
stats_fp,
self.channel,
self.watchnchop_args,
self.min_length,
self.min_length_rna))
self.wcScheduler[-1].start()
return
def stop_watchnchop(self, timeout=1.2):
        if self.wcScheduler and self.wcScheduler[-1].is_alive():
if timeout:
self.wcScheduler[-1].join(timeout)
else:
self.wcScheduler[-1].join()
def start_statsparser(self):
missing_key = self.check_attributes(['experiment', 'sample'])
if missing_key:
self.logger.warning("NOT starting statsparser scheduler because the crucial attribute '{}' is missing".format(missing_key))
return
#start creation of plots at regular time intervals
self.stop_statsparser()
sample_dir = os.path.join(self.output_dir,
'runs',
self.channel_status.run_data['experiment'],
self.channel_status.run_data['sample'])
self.logger.info('SCHEDULING update of report for sample {1} every {0:.1f} minutes'.format(self.update_interval/1000, sample_dir))
self.spScheduler = StatsparserScheduler(self.update_interval,
sample_dir,
self.statsparser_args,
self.channel)
self.spScheduler.start()
def stop_statsparser(self, timeout=1.2):
        if self.spScheduler and self.spScheduler.is_alive():
if timeout:
self.spScheduler.join(timeout)
else:
self.spScheduler.join()
class OpenedFilesHandler():
'''manages a set of opened files, reads their contents and
processes them line by line. Incomplete lines are stored until
they are "completed" by a newline character.'''
def __init__(self, channel):
self.logger = logging.getLogger(name='gw.w{}.ofh'.format(channel+1))
self.open_files = {}
def open_new_file(self, path):
self.logger.info("Opening file {}".format(path))
self.open_files[path] = [open(path, 'r'), ""]
def close_file(self, path):
self.logger.debug("Attempting to close file {}".format(path))
try:
self.open_files[path][0].close()
except:
self.logger.debug("File handle of file {} couldn't be closed".format(path))
if path in self.open_files:
del self.open_files[path]
self.logger.debug("Deleted entry in open_files for file {}".format(path))
def process_lines_until_EOF(self, process_function, path):
file = self.open_files[path][0]
while 1:
line = file.readline()
if line == "":
break
elif line.endswith("\n"):
line = (self.open_files[path][1] + line).strip()
if line:
process_function(line)
self.open_files[path][1] = ""
else:
#line potentially incomplete
self.open_files[path][1] = self.open_files[path][1] + line
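# Buffering illustration for OpenedFilesHandler (hypothetical log content): if
# readline() first returns "protocol_sta" with no trailing newline (the writer has
# not flushed the rest yet) and later returns "rted ...\n", the two fragments are
# joined and handed to process_function as one complete line.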
class LogFilesEventHandler(FileSystemEventHandler):
control_server_log, bream_log = None, None
def __init__(self, q, ignore_file_modifications, channel):
super(LogFilesEventHandler, self).__init__()
self.ignore_file_modifications = ignore_file_modifications
self.file_handler = OpenedFilesHandler(channel)
self.comm_q = q
        # while no server log file is opened, all lines read are buffered in a separate Priority Queue
self.buff_q = queue.PriorityQueue()
self.q = self.buff_q
self.logger = logging.getLogger(name='gw.w{}.lfeh'.format(channel+1))
def on_moved(self, event):
pass
def on_created(self, event):
if not event.is_directory:
activate_q = False
self.logger.debug("File {} was created".format(event.src_path))
basename = os.path.basename(event.src_path)
if basename.startswith("control_server_log"):
if self.control_server_log:
self.file_handler.close_file(event.src_path)
self.logger.info("Replacing current control_server_log file {} with {}".format(self.control_server_log, event.src_path))
else:
# read lines of server file first, then activate the real communication q
activate_q = True
self.control_server_log = event.src_path
self.logger.info("New control_server_log file {}".format(self.control_server_log))
process_function = self.enqueue_server_log_line
elif basename.startswith("bream") and basename.endswith(".log"):
if self.bream_log:
self.file_handler.close_file(event.src_path)
self.logger.info("Replacing current bream_log file {} with {}".format(self.bream_log, event.src_path))
self.bream_log = event.src_path
self.logger.info("New bream_log file {}".format(self.bream_log))
process_function = self.enqueue_bream_log_line
else:
self.logger.debug("File {} is not of concern for this tool".format(event.src_path))
return
self.file_handler.open_new_file(event.src_path)
self.file_handler.process_lines_until_EOF(process_function, event.src_path)
self.logger.info("approx. queue size: {}".format(self.q.qsize()))
if activate_q:
self.activate_q()
def on_deleted(self, event):
if not event.is_directory:
self.logger.debug("File {} was deleted".format(event.src_path))
#self.file_handler.close_file(event.src_path)
if self.control_server_log == event.src_path:
                self.control_server_log = None
self.logger.warning("Current control_server_log file {} was deleted!".format(event.src_path))
elif self.bream_log == event.src_path:
self.bream_log = None
self.logger.warning("Current bream_log file {} was deleted".format(event.src_path))
else:
self.logger.debug("File {} is not opened and is therefore not closed.".format(event.src_path))
self.file_handler.close_file(event.src_path)
def on_modified(self, event):
if not event.is_directory:
self.logger.debug("File {} was modified".format(event.src_path))
if event.src_path in self.file_handler.open_files:
if self.control_server_log == event.src_path:
self.file_handler.process_lines_until_EOF(self.enqueue_server_log_line, event.src_path)
elif self.bream_log == event.src_path:
self.file_handler.process_lines_until_EOF(self.enqueue_bream_log_line, event.src_path)
else:
self.logger.warning("case not handled")
return
else:
if not self.ignore_file_modifications:
self.on_created(event)
else:
self.logger.debug("File {} existed before this script was started".format(event.src_path))
def activate_q(self):
self.logger.info("activating communication queue")
self.q = self.comm_q
while not self.buff_q.empty():
self.q.put(self.buff_q.get())
def enqueue_server_log_line(self, line):
try:
self.q.put( (dateutil.parser.parse(line[:23]), 'server', line) )
except:
self.logger.debug("the timestamp of the following line in the server log file could not be parsed:\n{}".format(line))
def enqueue_bream_log_line(self, line):
try:
self.q.put( (dateutil.parser.parse(line.split(' - ')[1]), 'bream', line) )
except:
self.logger.debug("the timestamp of the following line in the bream log file could not be parsed:\n{}".format(line))
class RunsDirsEventHandler(FileSystemEventHandler):
def __init__(self, observed_dir):
super(RunsDirsEventHandler, self).__init__()
self.observed_dir = os.path.abspath(observed_dir)
self.logger = logging.getLogger(name='gw.reh')
def on_moved(self, event):
if event.is_directory or (self.depth(event.src_path) == 3 and event.src_path.endswith('.json')):
self.logger.debug("moved {}, depth {}, \ndest {}".format(event.src_path, self.depth(event.src_path), event.dest_path))
if self.observed_dir in event.dest_path and self.depth(event.dest_path) == self.depth(event.src_path):
self.reload_runs()
else:
self.on_deleted(event)
def on_created(self, event):
if event.is_directory:
self.logger.debug("created directory {}, depth {}".format(event.src_path, self.depth(event.src_path)))
if 1 <= self.depth(event.src_path) <= 2:
self.reload_runs()
elif self.depth(event.src_path) == 3 and event.src_path.endswith('.json'):
self.logger.debug("created file {}, depth {}".format(event.src_path, self.depth(event.src_path)))
self.reload_runs()
def on_modified(self, event):
if event.is_directory:
self.logger.debug("modified directory {}, depth {}".format(event.src_path, self.depth(event.src_path)))
def on_deleted(self, event):
if event.is_directory:
self.logger.debug("deleted directory {}, depth {}".format(event.src_path, self.depth(event.src_path)))
if 1 <= self.depth(event.src_path) <= 2:
self.reload_runs()
elif self.depth(event.src_path) == 3 and event.src_path.endswith('.json'):
self.logger.debug("deleted file {}, depth {}".format(event.src_path, self.depth(event.src_path)))
self.reload_runs()
def depth(self, src_path):
src_path = os.path.abspath(src_path)
return len(src_path.replace(self.observed_dir, '').strip('/').split('/'))
def reload_runs(self):
ALL_RUNS_LOCK.acquire()
self.logger.info('deleting and re-importing all runs due to changes in the run directory')
# delete sequencing runs
to_delete = []
for asic_id_eeprom in ALL_RUNS:
for run_id in ALL_RUNS[asic_id_eeprom]:
if 'qc' not in ALL_RUNS[asic_id_eeprom][run_id]['run_data']['experiment_type']:
to_delete.append( (asic_id_eeprom, run_id) )
for asic_id_eeprom, run_id in to_delete:
del ALL_RUNS[asic_id_eeprom][run_id]
#reload runs
import_runs(self.observed_dir)
ALL_RUNS_LOCK.release()
set_update_overview()
return
def standalone():
args = parse_args()
main(args)
if __name__ == "__main__":
standalone()
|
MarkusHaak/dominION
|
dominion/dominion.py
|
dominion.py
|
py
| 47,728 |
python
|
en
|
code
| 3 |
github-code
|
6
|
24293520683
|
def shortest_path(file_path):
path_list = file_path.split('/')[1:]
min_path = ['/']
while path_list:
name = path_list.pop(0)
if name == '..':
min_path.pop()
elif name and name != '.':
min_path.append(name+'/')
return "".join(min_path) if min_path else None
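# For example (hypothetical input): shortest_path('/a/b/../c/') walks the
# components ['a', 'b', '..', 'c', ''] and returns '/a/c/'.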
def main():
assert shortest_path('/Users/Joma/Documents/../Desktop/./../') == '/Users/Joma/'
if __name__ == '__main__':
main()
|
ckallum/Daily-Interview-Pro
|
solutions/absolute_path.py
|
absolute_path.py
|
py
| 460 |
python
|
en
|
code
| 16 |
github-code
|
6
|
42586362539
|
""" Text Preprocessing """
import logging
import re
from functools import lru_cache
from multiprocessing import Pool
from typing import Optional, List
import numpy as np
import pandas as pd
import pymorphy2
from stop_words import get_stop_words
wcoll_morph: Optional[pymorphy2.MorphAnalyzer] = None
g_chunks: Optional[List[pd.DataFrame]] = None
def ensure_morph():
global wcoll_morph
if wcoll_morph is None:
wcoll_morph = pymorphy2.MorphAnalyzer()
def release_morph():
global wcoll_morph
wcoll_morph = None
def make_chuncks(df, proc_count):
result = []
data_size = df.shape[0]
chunksize = int(np.ceil(data_size / proc_count))
left = right = 0
for i in range(proc_count - 1):
right += chunksize
result.append(df[left:right])
left += chunksize
result.append(df[left:])
return result
class Preprocessing:
""" Clean, tokenize and normalize texts """
def __init__(self):
self.stopwords = set(get_stop_words("ru"))
self.pattern = re.compile("[А-Яа-яA-z0-9]+")
def process_texts(self, df, text_col, proc_count=1):
ensure_morph()
df["tokens"] = df[text_col].str.lower().str.findall(self.pattern)
try:
if proc_count == 1:
df["tokens"] = df["tokens"].apply(
lambda txt: self.tokenize(txt)
)
else:
global g_chunks
logging.info("Reading chunks ...")
g_chunks = list(make_chuncks(df["tokens"], proc_count=proc_count))
logging.info(
"Chunk count %s %s",
len(g_chunks),
sum(ch.shape[0] for ch in g_chunks),
)
logging.info("Processing chunks ...")
with Pool(proc_count) as p:
result = p.map(self.process_series, range(len(g_chunks)))
df["tokens"] = pd.concat(result)
g_chunks = None
finally:
release_morph()
return df["tokens"]
@lru_cache(maxsize=50000)
def normalize_word(self, token):
"""
Pymorphy2 normalizer.
Args:
token: str
token to normalize
Returns:
str
"""
global wcoll_morph
return wcoll_morph.parse(token)[0].normal_form
def tokenize(self, arr):
"""
Tokenizes, normalizes input text, removes stop-words.
Args:
arr: List[str]
list of tokens
Returns:
list of integers
"""
return [
self.normalize_word(t.strip())
for t in arr
if t not in self.stopwords and len(t) > 2
]
def process_series(self, chunk_num: int) -> pd.Series:
global g_chunks
return g_chunks[chunk_num].apply(lambda txt: self.tokenize(txt))
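# Minimal usage sketch (the DataFrame and its "text" column are hypothetical, not
# part of this module):
#
#   df = pd.DataFrame({"text": ["Это пример текста для обработки"]})
#   tokens = Preprocessing().process_texts(df, "text", proc_count=1)
#   # 'tokens' is a pandas Series holding normalized, stop-word-free token lists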
|
vlade89/agrohack2023
|
lib/nlp_utils.py
|
nlp_utils.py
|
py
| 2,935 |
python
|
en
|
code
| 1 |
github-code
|
6
|
21888978054
|
import os
import combat.combat_core as com
import factory.factory_core as fty
import config.config_core as cfg
import expedition.expedition_core as exp
import fleet_switcher.fleet_switcher_core as fsw
import fleet.fleet_core as flt
import nav.nav as nav
import pvp.pvp_core as pvp
import quest.quest_core as qst
import repair.repair_core as rep
import resupply.resupply_core as res
import scheduler.scheduler_core as sch
import ship_switcher.ship_switcher_core as ssw
import stats.stats_core as sts
import util.kca as kca_u
from kca_enums.expeditions import ExpeditionEnum
from util.logger import Log
from kca_enums.maps import MapEnum
from constants import COMBAT_CONFIG
class Kcauto(object):
"""Primary kcauto class.
"""
end_loop_at_port = False
is_first_print_fleet = True
def __init__(self):
kca_u.kca.hook_chrome()
def start_kancolle(self):
kca_u.kca.start_kancolle()
def find_kancolle(self):
kca_u.kca.find_kancolle()
def find_browser(self):
kca_u.kca.find_browser()
def hook_health_check(self):
kca_u.kca.hook_health_check()
def check_config(self):
if cfg.config.config_changed:
Log.log_msg("Config change detected. Loading updated config.")
if cfg.config.initialize_config():
com.combat.update_from_config()
exp.expedition.update_from_config()
pvp.pvp.update_from_config()
qst.quest.update_from_config()
sch.scheduler.update_from_config()
def initialization_check(self):
if sts.stats.rsc.ammo is None:
Log.log_msg("kcauto is initializing.")
if not exp.expedition.receive_expedition():
nav.navigate.to('refresh_home')
sts.stats.set_print_loop_end_stats()
def check_for_expedition(self):
if not exp.expedition.receive_expedition():
if exp.expedition.expect_returned_fleets():
nav.navigate.to('refresh_home')
sts.stats.set_print_loop_end_stats()
def run_print_fleet_logic(self):
if not com.combat.enabled and self.is_first_print_fleet:
self.is_first_print_fleet = False
nav.navigate.to('refresh_home')
flt.fleets.fleets[1].get_fleet_id_and_name()
else:
return False
def run_expedition_logic(self):
if not exp.expedition.enabled:
return False
if not exp.expedition.timer.is_time_up():
return False
if exp.expedition.expect_returned_fleets() or \
(set([ExpeditionEnum.E5_33, ExpeditionEnum.E5_34,
ExpeditionEnum.EE_S1, ExpeditionEnum.EE_S2]) & set(
cfg.config.expedition.all_expeditions) and com.combat.time_to_sortie == True):
self.find_kancolle()
nav.navigate.to('refresh_home')
if exp.expedition.fleets_are_ready:
if exp.expedition.exp_for_fleet == []:
exp.expedition.get_expedition_ranking()
if cfg.config.expedition.fleet_preset == "auto":
if not fsw.fleet_switcher.assign_exp_ship():
exp.expedition.enabled = False
Log.log_error(f"Failed to assign ships for self balance expedition, disable expedition module.")
return False
if exp.expedition.is_fleetswitch_needed():
if self._run_fleetswitch_logic('expedition') == -2:
exp.expedition.timer.set(15*60)
Log.log_warn(f"Failed to switch ships for self balance expedition, disable expedition module for 15 mins.")
return False
exp.expedition.goto()
exp.expedition.send_expeditions()
self.run_quest_logic('expedition')
sts.stats.set_print_loop_end_stats()
def run_factory_logic(self):
if not fty.factory.enabled or not fty.factory.disable_time_up():
return False
self.run_quest_logic('factory', fast_check=False)
nav.navigate.to('home')
anything_is_done = False
if "F5" in qst.quest.next_check_intervals.keys():
anything_is_done = True
self._run_fleetswitch_logic('factory_develop')
fty.factory.goto()
if fty.factory.develop_logic(1) == True:
self.run_quest_logic('factory', fast_check=True, back_to_home=True, force=True)
nav.navigate.to('home')
if "F6" in qst.quest.next_check_intervals.keys():
anything_is_done = True
self._run_fleetswitch_logic('factory_build')
fty.factory.goto()
if fty.factory.build_logic(1) == True:
self.run_quest_logic('factory', fast_check=True, back_to_home=True, force=True)
nav.navigate.to('home')
else:
# disable module for 15 mins
fty.factory.set_timer()
if "F7" in qst.quest.next_check_intervals.keys():
anything_is_done = True
self._run_fleetswitch_logic('factory_develop')
fty.factory.goto()
if fty.factory.develop_logic(3) == True:
self.run_quest_logic('factory', fast_check=True, back_to_home=True, force=True)
nav.navigate.to('home')
if "F8" in qst.quest.next_check_intervals.keys():
anything_is_done = True
self._run_fleetswitch_logic('factory_build')
fty.factory.goto()
"""If F8 is already 80% done, one more build could finish the quest"""
"""Therefore, no if == True here"""
fty.factory.build_logic(3)
self.run_quest_logic('factory', fast_check=True, back_to_home=True, force=True)
nav.navigate.to('home')
#always disable module for 15 mins
fty.factory.set_timer()
if anything_is_done == False:
"""Daily factory process done, disable from now"""
fty.factory.enabled = False
def run_pvp_logic(self):
if not pvp.pvp.enabled:
return False
if pvp.pvp.time_to_pvp():
self.find_kancolle()
self.run_quest_logic('pvp')
nav.navigate.to('home')
self._run_fleetswitch_logic('pvp')
self.run_resupply_logic(back_to_home=True)
sts.stats.set_print_loop_end_stats()
else:
return False
pvp.pvp.goto()
while pvp.pvp.pvp_available():
pvp.pvp.conduct_pvp()
self.run_resupply_logic(back_to_home=True)
self.run_quest_logic('pvp', fast_check=True, back_to_home=True)
if pvp.pvp.pvp_available():
pvp.pvp.goto()
sts.stats.set_print_loop_end_stats()
return True
def run_combat_logic(self):
quest_selected = False
if not com.combat.enabled or com.combat.time_to_sortie == False:
return False
else :
#update port api, for _run_fleetswitch_logic
nav.navigate.to('refresh_home')
was_sortie_queue_empty = False
#set sortie_queue if it is empty
if len(com.combat.get_sortie_queue()) == 0:
was_sortie_queue_empty = True
Log.log_debug(f"cfg.config.combat.sortie_map_read_only:{cfg.config.combat.sortie_map_read_only}")
if cfg.config.combat.sortie_map_read_only == MapEnum.auto_map_selete:
self.run_quest_logic('auto_sortie', fast_check=False, back_to_home=False, force= True) #quest module will call set_sortie_queue
else:
Log.log_debug(f"Manual sortie mode:{cfg.config.combat.sortie_map_read_only.value}")
sortie_queue = [cfg.config.combat.sortie_map_read_only.value]
com.combat.set_sortie_queue(sortie_queue)
else:
Log.log_msg(f"Sortie queue:{com.combat.get_sortie_queue()}")
if len(com.combat.get_sortie_queue()) == 0: #If no combat map available, turn off combat module
Log.log_debug(f"Stop combat module cause no combat quest available")
com.combat.enabled = False
return False
else:
#update current sortie_map
cfg.config.combat.sortie_map = com.combat.get_sortie_queue()[0]
"""Check if multi stage map requested"""
MULTI_STAGE_MAPS = {"7-2":["G", "M"], "7-3":["E", "M"], "7-5":["K", "M", "Q", "T"]}
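            # e.g. with sortie_map "7-2", stage 1 is expanded to "7-2-G" and stage 2 to "7-2-M" below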
if cfg.config.combat.sortie_map.value in MULTI_STAGE_MAPS:
nav.navigate.to('combat')
Log.log_error(f"com.combat.sortie_map_stage: {com.combat.sortie_map_stage}")
stage = MULTI_STAGE_MAPS[cfg.config.combat.sortie_map.value][com.combat.sortie_map_stage - 1]
Log.log_error(f"stage: {stage}")
cfg.config.combat.sortie_map = cfg.config.combat.sortie_map.value + "-" + stage
#update map_data for combat module
com.combat.load_map_data(cfg.config.combat.sortie_map)
if cfg.config.combat.override == False:
#load user config
config_json = cfg.config.load_json(cfg.config.cfg_path)
cfg.config.combat.config_override(config_json)
#load default config
default_json = cfg.config.load_json(COMBAT_CONFIG + "default.json")
cfg.config.combat.config_override(default_json)
#load default config
sortie_queue = com.combat.get_sortie_queue()
if os.path.isfile(COMBAT_CONFIG + sortie_queue[0] + ".json"):
default_json = cfg.config.load_json(COMBAT_CONFIG + sortie_queue[0] + ".json")
cfg.config.combat.config_override(default_json)
else:
Log.log_warn(f"{sortie_queue[0]} combat config not found, use default combat config instead.")
#apply for combat queue, assume map_data is up-to-date
self.run_quest_logic('combat', fast_check = not was_sortie_queue_empty, force= was_sortie_queue_empty)
if self._run_fleetswitch_logic('combat') == 0:
#update port api, for should_and_able_to_sortie
nav.navigate.to('refresh_home')
if com.combat.should_and_able_to_sortie(ignore_supply=True):
self.run_resupply_logic()
com.combat.goto()
if com.combat.conduct_sortie():
#sortie success, pop the head of sortie_queue
com.combat.pop_sortie_queue()
sts.stats.set_print_loop_end_stats()
exp.expedition.receive_expedition()
else:
Log.log_error(f"Sortie failed.")
def run_resupply_logic(self, back_to_home=False):
if res.resupply.need_to_resupply:
self.find_kancolle()
res.resupply.goto()
res.resupply.resupply_fleets()
self.handle_back_to_home(back_to_home)
if not back_to_home:
self.end_loop_at_port = True
sts.stats.set_print_loop_end_stats()
def run_repair_logic(self, back_to_home=False):
if rep.repair.can_conduct_repairs:
self.find_kancolle()
rep.repair.goto()
rep.repair.repair_ships()
self.handle_back_to_home(back_to_home)
if not back_to_home:
self.end_loop_at_port = True
sts.stats.set_print_loop_end_stats()
def _run_fleetswitch_logic(self, context):
switch_needed = False
while fsw.fleet_switcher.require_fleetswitch(context):
switch_needed = True
fsw.fleet_switcher.goto()
if not fsw.fleet_switcher.switch_fleet(context):
self.handle_back_to_home(True)
return -2
self.handle_back_to_home(True)
if switch_needed:
return 0
else:
return -1
def run_shipswitch_logic(self, back_to_home=False):
switch_list = ssw.ship_switcher.get_ship_switch_list()
if switch_list:
nav.navigate.to('home')
ssw.ship_switcher.goto()
ssw.ship_switcher.switch_ships(switch_list)
self.handle_back_to_home(back_to_home)
if not back_to_home:
self.end_loop_at_port = True
def run_quest_logic(
self, context=None, fast_check=False, back_to_home=False, force=False):
if not qst.quest.enabled:
return False
if qst.quest.need_to_check(context) or force == True:
self.find_kancolle()
qst.quest.goto()
qst.quest.manage_quests(context, fast_check)
sts.stats.quest.times_checked += 1
self.handle_back_to_home(back_to_home)
sts.stats.set_print_loop_end_stats()
def handle_back_to_home(self, back_to_home):
if back_to_home:
nav.navigate.to('home')
def run_scheduler(self):
sch.scheduler.check_and_process_rules()
def check_end_loop_at_port(self):
if self.end_loop_at_port:
self.end_loop_at_port = False
self.handle_back_to_home(True)
@property
def scheduler_kca_active(self):
return sch.scheduler.kca_active
def print_stats(self):
if sts.stats.print_loop_end_stats:
sts.stats.loop_count += 1
sts.stats.print_stats()
kcauto = Kcauto()
|
XVs32/kcauto_custom
|
kcauto/startup/kcauto.py
|
kcauto.py
|
py
| 13,568 |
python
|
en
|
code
| 5 |
github-code
|
6
|
19014167036
|
import sys
h, w = map(int, input().split())
c = [list(input()) for i in range(h)]
for i in range(h):
for j in range(w):
if c[i][j] == "s":
            sx, sy = i, j # start
elif c[i][j] == "g":
            gx, gy = i, j # goal
stack = [[sx, sy]]
visited = [[0 for i in range(w)] for j in range(h)]
visited[sx][sy] = 1
# used when exploring the four neighbouring cells
dx_dy = [[1, 0], [0, 1], [-1, 0], [0, -1]]
while stack:
    x, y = stack.pop() # pop an element
for i in range(4):
        nx, ny = x + dx_dy[i][0], y+dx_dy[i][1] # position to explore
if 0 <= nx < h and 0 <= ny < w and visited[nx][ny] == 0 and c[nx][ny] != "#":
visited[nx][ny] = 1
            stack.append([nx, ny]) # push onto the stack
if visited[gx][gy] == 1:
print("Yes")
sys.exit()
print("No")
|
minheibis/atcoder
|
algorithm/DFS/ATC001A_by_stack.py
|
ATC001A_by_stack.py
|
py
| 860 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38831014944
|
from keras.callbacks import Callback
from keras import backend as K
from keras.utils import to_categorical
from sklearn.metrics import roc_auc_score
import os
import matplotlib.pyplot as plt
import shutil
import numpy as np
class PerSubjAucMetricHistory(Callback):
"""
    This callback is for testing the model on each subject separately during training.
    It writes the AUC for every subject to the history object.
"""
def __init__(self,subjects):
self.subjects = subjects
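        # 'subjects' is expected to map a subject id to an (x, y) tuple of validation
        # inputs and labels, e.g. {"subj01": (X_val, y_val), ...} (key name illustrative)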
super(PerSubjAucMetricHistory,self).__init__()
def on_epoch_end(self, epoch, logs={}):
for subj in self.subjects.keys():
x,y = self.subjects[subj]
y_pred = self.model.predict(x, verbose=0)
if isinstance(y_pred,list):
y_pred = y_pred[0]
if len(y_pred.shape) == 1:
y_pred = to_categorical(y_pred,2)
if len(y.shape) == 1:
y = to_categorical(y,2)
y_pred = to_categorical(y_pred) if (len(y_pred.shape)==1) else y_pred
logs['val_auc_%s' %(subj)] = roc_auc_score(y[:,1], y_pred[:,1])
if type(self.model.output) == list:
fake_subj_labels = np.zeros((len(y),self.model.output[1].shape._dims[1]._value))
logs['val_loss_%s' % (subj)] = self.model.evaluate(x,[y,fake_subj_labels], verbose=0)[0]
else:
logs['val_loss_%s' % (subj)] = self.model.evaluate(x,y, verbose=0)[0]
class AucMetricHistory(Callback):
def __init__(self,save_best_by_auc=False,path_to_save=None):
super(AucMetricHistory, self).__init__()
self.save_best_by_auc=save_best_by_auc
self.path_to_save = path_to_save
self.best_auc = 0
self.best_epoch = 1
if save_best_by_auc and (path_to_save is None):
raise ValueError('Specify path to save the model')
def on_epoch_end(self, epoch, logs={}):
x_val,y_val = self.validation_data[0],self.validation_data[1]
y_pred = self.model.predict(x_val,batch_size=len(y_val), verbose=0)
if isinstance(y_pred,list):
y_pred = y_pred[0]
current_auc = roc_auc_score(y_val, y_pred)
logs['val_auc'] = current_auc
if current_auc > self.best_auc:
if self.save_best_by_auc:
prev_model_path = os.path.join(self.path_to_save,'best_on_auc_%d_%.2f.hdf5' %(self.best_epoch,self.best_auc))
if os.path.isfile(prev_model_path):
os.remove(prev_model_path)
path_to_file = os.path.join(self.path_to_save, 'best_on_auc_%d_%.2f.hdf5' % (epoch,current_auc))
self.model.save(path_to_file)
self.best_auc = current_auc
self.best_epoch = epoch
class DomainActivations(Callback):
def __init__(self, x_train,y_train, subj_label_train,path_to_save):
super(DomainActivations, self).__init__()
self.path_to_save = '%s/domain_activations_grl/' % path_to_save
self.x_train = x_train
self.y_train = y_train
self.subj_label_train = subj_label_train
plt.plot(subj_label_train.argmax(axis=1))
if os.path.isdir(self.path_to_save):
shutil.rmtree(self.path_to_save)
os.makedirs(self.path_to_save)
plt.savefig(os.path.join('%s/class_distr' % self.path_to_save))
plt.close()
def _log_domain_activations(self, domain_label_pred, domain_label,pic_name):
activations = (domain_label_pred * domain_label).sum(axis=1)
plt.plot(activations)
# plt.plot(activations[self.y_train[:,1] == 1])
plt.savefig(os.path.join('%s/%s' % (self.path_to_save, pic_name)))
plt.close()
def on_epoch_end(self, epoch, logs=None):
if epoch %10 ==0:
self._log_domain_activations(self.model.predict(self.x_train, verbose=0)[1],self.subj_label_train,'%d_train' % epoch)
|
bkozyrskiy/NN_hyperopt_search
|
my_callbacks.py
|
my_callbacks.py
|
py
| 3,918 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31299469552
|
from django.test import TestCase, Client
from django.urls import reverse
from ecomapp.models import Category, Product, CartItem, Cart, Order, Brand
class TestModels(TestCase):
def setUp(self):
self.category1 = Category.objects.create(
name = 'category1',
slug = 'category-1'
)
self.brand1 = Brand.objects.create(
name = 'brand1'
)
self.product1 = Product.objects.create(
category = self.category1,
brand = self.brand1,
title = 'title1',
slug = 'product-1',
description = 'abc',
image = 'macbook-pro-15',
price = 10,
)
self.product2= Product.objects.create(
category = self.category1,
brand = self.brand1,
title = 'title2',
slug = 'product-2',
description = 'abc',
image = 'macbook-pro-15',
price = 15,
)
self.cart_item1 = CartItem.objects.create(
product = self.product1,
qty = 1,
item_total = 10.00
)
self.cart_item2 = CartItem.objects.create(
product = self.product2
)
self.cart1 = Cart.objects.create(
)
#AC
def test_add_to_cart_then_remove_from_cart_is_false(self):
self.cart1.add_to_cart(self.product1.slug)
self.cart1.remove_from_cart(self.product1.slug)
self.assertFalse(self.cart1.items.all())
#AD2
def test_add_to_cart_if_item_in_cart(self):
self.cart1.add_to_cart(self.product1.slug)
self.cart1.add_to_cart(self.product1.slug)
self.cart1.cart_total = self.cart1.count_total_price()
self.assertEquals(self.cart1.cart_total, 10)
#ACD
def test_remove_from_cart_product1_is_successful(self):
self.cart1.add_to_cart(self.product1.slug)
self.cart1.remove_from_cart(self.product1.slug)
self.cart1.cart_total = self.cart1.count_total_price()
self.assertEquals(self.cart1.cart_total, 0)
#CD
def test_remove_from_cart_if_item_is_not_in_cart(self):
self.cart1.remove_from_cart(self.product1.slug)
self.cart1.cart_total = self.cart1.count_total_price()
self.assertEquals(self.cart1.cart_total, 0)
#AD
def test_add_to_cart_count_total_price_10_is_10(self):
self.cart1.add_to_cart(self.product1.slug)
self.cart1.cart_total = self.cart1.count_total_price()
self.assertEquals(self.cart1.cart_total, 10)
def test_add_to_cart_count_total_price_for_some_changes(self):
self.cart1.add_to_cart(self.product1.slug)
self.cart1.remove_from_cart(self.product1.slug)
self.cart1.cart_total = self.cart1.count_total_price()
self.assertEquals(self.cart1.cart_total, 0)
#AB
def test_add_to_cart_change_qty_1_changes_to_3(self):
self.cart1.add_to_cart(self.product1.slug)
self.cart1.change_qty(3, self.cart_item1.product.id)
self.assertEquals(self.cart1.cart_total, 30)
#AB2
def test_add_to_cart_change_qty_3_changes_to_2(self):
self.cart1.add_to_cart(self.product1.slug)
self.cart1.change_qty(3, self.cart_item1.product.id)
self.cart1.change_qty(2, self.cart_item1.product.id)
self.assertEquals(self.cart1.cart_total, 20)
|
hussienalhaj/alhajjj
|
djangoshop/ecomapp/tests/test_models.py
|
test_models.py
|
py
| 2,992 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20366491858
|
import parser, compile_lll
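# Gas cost of memory usage; mirrors the EVM memory expansion formula
# (3 gas per 32-byte word plus a quadratic words**2 // 512 component).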
def memsize_to_gas(memsize):
return (memsize // 32) * 3 + (memsize // 32) ** 2 // 512
initial_gas = compile_lll.gas_estimate(parser.mk_initial())
function_gas = compile_lll.gas_estimate(parser.parse_func(parser.parse('def foo(): pass')[0], {}))
class Compiler():
def compile(self, code, *args, **kwargs):
lll = parser.parse_tree_to_lll(parser.parse(code))
return compile_lll.assembly_to_evm(compile_lll.compile_to_assembly(lll))
def mk_full_signature(self, code, *args, **kwargs):
o = parser.mk_full_signature(parser.parse(code))
return o
def gas_estimate(self, code, *args, **kwargs):
code = parser.parse(code)
_defs, _globals = parser.get_defs_and_globals(code)
o = {}
for i, _def in enumerate(_defs):
name, args, output_type, const, sig, method_id = parser.get_func_details(_def)
varz = {}
kode = parser.parse_func(_def, _globals, varz)
gascost = compile_lll.gas_estimate(kode) + initial_gas
o[name] = gascost + memsize_to_gas(varz.get("_next_mem", parser.RESERVED_MEMORY)) + function_gas * i
return o
|
0xc1c4da/viper
|
compiler_plugin.py
|
compiler_plugin.py
|
py
| 1,185 |
python
|
en
|
code
| null |
github-code
|
6
|
43627519034
|
class Solution:
"""
Explanation
We have 4 plans:
- kill 3 biggest elements
- kill 2 biggest elements + 1 smallest elements
- kill 1 biggest elements + 2 smallest elements
- kill 3 smallest elements
"""
def minDifference(self, nums: []) -> int:
if len(nums) <= 4:
return 0
nums.sort()
res = float('inf')
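        # plan i removes i smallest and (3 - i) largest values, leaving the window
        # nums[i : len(nums) - 3 + i]; its spread is the candidate answer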
for i in range(4):
res = min(res, nums[len(nums) - 4 + i] - nums[i])
return res
def test(self) -> None:
testCases = [
[5, 3, 2, 4], # 0
[1, 5, 0, 10, 14], # 1
[6, 6, 0, 1, 1, 4, 6], # 2
[1, 5, 6, 14, 15], # 1
]
for nums in testCases:
res = self.minDifference(nums)
print('res: %s' % res)
print('-=' * 30 + '-')
if __name__ == '__main__':
Solution().test()
|
MichaelTQ/LeetcodePythonProject
|
solutions/leetcode_1501_1550/LeetCode1509_MinimumDifferenceBetweenLargestAndSmallestValueInThreeMoves.py
|
LeetCode1509_MinimumDifferenceBetweenLargestAndSmallestValueInThreeMoves.py
|
py
| 899 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16814344104
|
#!/usr/bin/env python
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(name='overcooked_ai',
version='1.1.0',
description='Cooperative multi-agent environment based on Overcooked',
long_description=long_description,
long_description_content_type="text/markdown",
author='Nathan Miller',
author_email='[email protected]',
url='https://github.com/HumanCompatibleAI/overcooked_ai',
download_url='https://github.com/HumanCompatibleAI/overcooked_ai/archive/refs/tags/1.1.0.tar.gz',
packages=find_packages('src'),
keywords=['Overcooked', 'AI', 'Reinforcement Learning'],
package_dir={"": "src"},
package_data={
'overcooked_ai_py' : [
'data/layouts/*.layout', 'data/planners/*.py', 'data/human_data/*.pickle',
'data/graphics/*.png', 'data/graphics/*.json', 'data/fonts/*.ttf',
],
},
install_requires=[
'numpy',
'scipy',
'tqdm',
'gym',
'ipython',
'pygame',
'ipywidgets'
]
)
|
samjia2000/HSP
|
hsp/envs/overcooked_new/setup.py
|
setup.py
|
py
| 1,135 |
python
|
en
|
code
| 15 |
github-code
|
6
|
42229692815
|
#!/usr/bin/env python
import rospy
from race.msg import drive_param
from nav_msgs.msg import Odometry
from std_msgs.msg import Float64
import math
import numpy as np
from numpy import linalg as LA
from tf.transformations import euler_from_quaternion, quaternion_from_euler
import csv
import os
import copy
# Publishers for tracking error
max_error_pub = rospy.Publisher('max_tracking_error', Float64, queue_size=1)
sqd_avg_error_pub = rospy.Publisher('squared_average_tracking_error', Float64, queue_size=1)
max_error = 0
sum_sqd_error = 0
n = 0
# Computes the Euclidean distance between two 2D points p1 and p2.
def dist(p1, p2):
return np.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)
def callback(msg):
global max_error
global sum_sqd_error
global n
x = msg.pose.pose.position.x;
y = msg.pose.pose.position.y;
# First find the point closest to the vehicle
distances = []
for point in path_points:
distances.append(dist((x,y),(point[0],point[1])))
min_idx = distances.index(min(distances))
closest_pt = path_points[min_idx]
a = distances[min_idx]
# Next, get the second closest point
if min_idx == 0:
if distances[len(distances)-1] < distances[1]:
second_closest_pt = path_points[len(distances)-1]
c = distances[len(distances)-1]
else:
second_closest_pt = path_points[1]
c = distances[1]
elif min_idx == len(distances)-1:
if distances[0] < distances[len(distances)-2]:
second_closest_pt = path_points[0]
c = distances[0]
else:
second_closest_pt = path_points[len(distances)-2]
c = distances[len(distances)-2]
else:
if distances[min_idx-1] < distances[min_idx+1]:
second_closest_pt = path_points[min_idx-1]
c = distances[min_idx-1]
else:
second_closest_pt = path_points[min_idx+1]
c = distances[min_idx+1]
    # Now use Heron's formula to get the perpendicular (cross-track) error with respect
    # to the path: a and c are the vehicle's distances to the two closest waypoints and
    # b is the length of the path segment between them.
    b = dist(closest_pt,second_closest_pt)
    s = (a + b + c)/2.0                       # semi-perimeter of the triangle
    A = np.sqrt(s*(s-a)*(s-b)*(s-c))          # triangle area by Heron's formula
    error = 2*A/b                             # height over base b, i.e. the distance to the path
# Update running error values
if error > max_error:
max_error = error
sum_sqd_error = sum_sqd_error + error**2
n = n + 1
# Publish error values
max_error_pub.publish(Float64(max_error))
sqd_avg_error_pub.publish(Float64(sum_sqd_error/n))
if __name__ == '__main__':
global path_points
rospy.init_node('tracking_error_node')
rospy.Subscriber('/odom', Odometry, callback, queue_size=1)
# Import waypoints.csv into a list (path_points)
dirname = os.path.dirname(__file__)
waypoint_file = rospy.get_param('/pure_pursuit_node/waypoint_filename','levine-waypoints.csv')
filename = os.path.join(dirname, '../waypoints/' + waypoint_file)
with open(filename) as f:
path_points = [tuple(line) for line in csv.reader(f)]
# Turn path_points into a list of floats to eliminate the need for casts in the code below.
path_points = [(float(point[0]), float(point[1]), float(point[2])) for point in path_points]
rospy.spin()
|
zabrock/Need4Speed-F110-2020
|
race_ws/src/need4speed_pure_pursuit/scripts/error_tracking.py
|
error_tracking.py
|
py
| 3,195 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19105871775
|
from datetime import datetime
from blockCountries import blockCountryList
def minOfGame(isPlaying,isBreak : bool, startPeriod :datetime, startGame :datetime):
'''
    :param startPeriod: start time of the current period (half)
    :param startGame: start time of the game
    :return: minOfGame - the current minute of the game, isBreak - half-time break
'''
minGame = -999999
    dPeriodGameTime = startPeriod - startGame # difference between the start of the half (or break) and the start of the game
    minPeriod = int((datetime.now() - startPeriod).total_seconds() // 60) # minute of the current half
    if not isPlaying: # the game is over
minGame=-1
elif isBreak:
minGame =45
    elif dPeriodGameTime.total_seconds() < 1800: # still in the first half
        minGame=minPeriod
    elif (dPeriodGameTime.total_seconds() > 3300): # in the second half
minGame=(45+minPeriod)
#if isPlaying:
# print(isPlaying, isBreak, startGame, startPeriod , dPeriodGameTime.total_seconds())
#if minGame==-999999:
# print(isPlaying, isBreak, startPeriod, startGame, dPeriodGameTime.total_seconds())
# print(minGame,isBreak)
return minGame
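# Worked example (hypothetical times): for a game in play that kicked off at 18:00
# with the second half starting at 19:00, a call at 19:10 gives
# dPeriodGameTime = 3600 s (> 3300) and minPeriod = 10, so minOfGame returns 55.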
def isCorrectGame(game: dict) -> bool:
'''
    :param game: dictionary filled with the match data
    :return: True if the match satisfies the time and goal filters
'''
def getGoalFilter(game:dict) ->bool:
if (type(game['score1']) == str) and (type(game['score2']) == str):
return (game['score1'] == '0') and (game['score2'] == '0')
else:
return False
def getTimeFilter(game:dict) ->bool:
minGame = minOfGame(game['isplaying'], game['isbreak'], game['startperiod'], game['startdate'])
return (minGame > 30) and (minGame < 60)
countryFilter = not (game['country'].lower() in blockCountryList)
gameIsPlaying = game['isplaying']
timeFilter = getTimeFilter(game)
goalFilter = getGoalFilter(game)
#
if gameIsPlaying and timeFilter and goalFilter and countryFilter:
return True
else:
return False
def gameToStr(game : dict) -> str:
def minOrBreak(isPlaying,isBreak : bool, startPeriod:datetime, startGame : datetime) -> str:
minGame = minOfGame(isPlaying,isBreak,startPeriod, startGame)
if isBreak:
            minStr='Перерыв' # user-facing string kept as-is; Russian for "half-time break"
else:
minStr = '{}\' мин'.format(minGame)
return minStr
return '{} {}. {}. {} {} - {} {}'.format(game['startdate'].strftime('%H:%M'),game['startperiod'].strftime('%H:%M'),
minOrBreak(game['isplaying'],game['isbreak'], game['startperiod'],game['startdate']),
game['team1'],game['score1'], game['score2'], game['team2'])
def addNewCountry(game : dict):
outlist=[]
outlist.append('*{}*'.format(game['country']))
outlist.append('*{}*'.format(game['tourney']))
outlist.append(gameToStr(game))
return outlist
def addNewTourney(game : dict):
outlist=[]
outlist.append('*{}*'.format(game['tourney']))
outlist.append(gameToStr(game))
return outlist
def addGame(game : dict):
outlist=[]
outlist.append(gameToStr(game))
return outlist
|
AlexRechnoy/betBot
|
flashScoreFuncs.py
|
flashScoreFuncs.py
|
py
| 3,405 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36004397978
|
import collections
class VigenereMethod:
_cipher = ""
def __init__(self, cipherText):
self._cipher = cipherText
def RunMethod(self):
cipher1 = self._cipher
letterFreq = [["A", 8.2], ["B", 1.5], ["C", 2.8], ["D", 4.2], ["E", 12.7], ["F", 2.2],
["G", 2.0], ["H", 6.1], ["I", 7.0], ["J", 0.1], ["K", 0.8], ["L", 4.0], ["M", 2.4],
["N", 6.7], ["O", 7.5], ["P", 1.9], ["Q", 0.1], ["R", 6.0], ["S", 6.3], ["T", 9.0],
["U", 2.8], ["V", 1.0], ["W", 2.4], ["X", 0.1], ["Y", 2.0], ["Z", 0.1], ]
englishLetterDist = sorted(letterFreq, key=lambda x: x[1], reverse=True)
englishBigramDist = [["TH", 3.15], ["AN", 1.72], ["ER", 1.54], ["ES", 1.45], ["EA", 1.31], ["AT", 1.24],
["EN", 1.20], ["HE", 2.51], ["IN", 1.69], ["RE", 1.48], ["ON", 1.45], ["TI", 1.28],
["ST", 1.21], ["ND", 1.18]]
englishTrigramDist = [["THE", 0], ["ING", 1], ["AND", 2], ["HER", 3], ["ERE", 4], ["ENT", 5], ["THA", 6],
["NTH", 7], ["WAS", 8], ["ETH", 9], ["FOR", 10]]
# build bigram list
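        # (pairs are collected at non-overlapping positions 0-1, 2-3, ..., not as a sliding window)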
bigramList = []
i = 0
for c in cipher1:
if i == 0:
firstChar = c
i += 1
else:
bigram = firstChar + c
bigramList.append(bigram)
i = 0
# build trigram list
trigramList = []
i = 0
for c in cipher1:
if i == 0:
firstChar = c
i += 1
elif i == 1:
secondChar = c
i += 1
else:
trigram = firstChar + secondChar + c
trigramList.append(trigram)
i = 0
# ------------------------------------------------------------------------------------------------------------------
# Decrpypt cipher1
# -----------------------------------------------------------------------------------------------------------------
# Letter distribution
# 97-122 - a-z
i = 97
singleLetters = []
while i < 123:
letter = chr(i)
letterCount = cipher1.count(letter)
singleLetters.append([letter, letterCount])
i += 1
# Bigram distrubition
totalLetterCount = len(cipher1)
for c in singleLetters:
c[1] = round(c[1] / totalLetterCount * 100, 1)
# Sort lists
cipherLetterDist = sorted(singleLetters, key=lambda x: x[1], reverse=True)
bigramDist = sorted(list(collections.Counter(bigramList).items()), key=lambda x: x[1], reverse=True)
trigramDist = sorted(list(collections.Counter(trigramList).items()), key=lambda x: x[1], reverse=True)
        # decrypt message
        # NOTE: the substitution mapping (cipher letter -> plaintext letter) was never
        # defined in the original code; an empty placeholder is declared here so the
        # loop below runs without a NameError. It is assumed to be filled in manually
        # from the frequency analysis results above.
        decrpytDict = {}
        decryptMsg = []
for c in cipher1:
if c in decrpytDict:
value = decrpytDict[c]
decryptMsg.append(value)
else:
decryptMsg.append("_")
print(cipher1)
print(''.join(decryptMsg))
|
dhjaekol/cybersecurity
|
Assignment1/VigenereMethod.py
|
VigenereMethod.py
|
py
| 3,262 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21980018417
|
import base64
import hashlib
import time
import requests
import tkinter as tk
from tkinter import filedialog
import json
##################
"""
Example of calling the handwriting OCR WebAPI. Interface documentation (must read): https://doc.xfyun.cn/rest_api/%E6%89%8B%E5%86%99%E6%96%87%E5%AD%97%E8%AF%86%E5%88%AB.html
Image requirements: jpg/png/bmp, shortest side at least 15px, longest side at most 4096px, encoded size no larger than 4MB; recognized languages: Chinese and English.
Reference post for the webapi OCR service (must read): http://bbs.xfyun.cn/forum.php?mod=viewthread&tid=39111&highlight=OCR
(Very Important) After creating the webapi application and adding the service, you must set the IP whitelist: console -- my applications -- set IP whitelist. How to set it up: http://bbs.xfyun.cn/forum.php?mod=viewthread&tid=41891
Error code reference: https://www.xfyun.cn/document/error-code (check this when the response returns an error code)
@author iflytek
"""
def ocr_fuction():
    # handwriting OCR endpoint
URL = "http://webapi.xfyun.cn/v1/service/v1/ocr/handwriting"
    ### values that need to be changed
    # application APPID (must be a webapi-type application with the handwriting OCR service enabled; see this post on creating a webapi application: http://bbs.xfyun.cn/forum.php?mod=viewthread&tid=36481)
    APPID = "5f698b5c"
    # API key (after enabling handwriting OCR for the webapi application: console -- my applications -- handwriting OCR -- apikey of the corresponding service)
API_KEY = "22d07d46937e6e4f474a165729c6c56c"
def getHeader():
curTime = str(int(time.time()))
param = "{\"language\":\""+language+"\",\"location\":\""+location+"\"}"
paramBase64 = base64.b64encode(param.encode('utf-8'))
m2 = hashlib.md5()
str1 = API_KEY + curTime + str(paramBase64, 'utf-8')
m2.update(str1.encode('utf-8'))
checkSum = m2.hexdigest()
        # assemble the HTTP request headers
header = {
'X-CurTime': curTime,
'X-Param': paramBase64,
'X-Appid': APPID,
'X-CheckSum': checkSum,
'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8',
}
return header
def getBody(filepath):
with open(filepath, 'rb') as f:
imgfile = f.read()
data = {'image': str(base64.b64encode(imgfile), 'utf-8')}
return data
# 语种设置
language = "cn|en"
# 是否返回文本位置信息
location = "true"
window=tk.Tk()
window.withdraw()
file_path = filedialog.askopenfilename()
picFilePath =file_path
# headers=getHeader(language, location)
r = requests.post(URL, headers=getHeader(), data=getBody(picFilePath))
return r
def analysis_json(json_data):
json1 =json_data.content
result = json.loads(json1)
dict1 = result['data']
dict2 = dict1['block']
    # save the extracted content
for d in dict2:
dict3 = d['line']
for i in dict3:
list2 = i['word']
for words in list2:
print(words)
word = words['content']
if ':' in word:
str_write = word.split(':')
else:
str_write = word.split(':')
print(str_write)
                if str_write[0] == "设备品牌":  # "device brand"
                    s1 = str_write[1]
                if str_write[0] == "设备型号":  # "device model"
                    s2=str_write[1]
                if str_write[0] == "用期限/频次)":  # "usage period/frequency)"
                    s3 = str_write[1]
                if str_write[0] == "供货公司名称":  # "supplier company name"
                    s4 = str_write[1]
                if str_write[0] == "电话":  # "telephone"
                    s5 = str_write[1]
return [s1,s2,s3,s4,s5]
json_data=ocr_fuction()
st=analysis_json(json_data)
print(st)
|
shang-jun123/purchase_management
|
mepms/ui/ocr_fuction.py
|
ocr_fuction.py
|
py
| 3,697 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10420485791
|
"""
.. attribute:: whole_cache
Used when specifying the scope for a cache invalidation operation to
indicate that the whole cache should be cleared.
.. moduleauthor:: Martí Congost <[email protected]>
"""
from typing import Iterable, Union
from cocktail.modeling import (
OrderedSet,
ListWrapper,
SetWrapper
)
class WholeCache:
pass
whole_cache = WholeCache()
ScopeSelector = Union[str, Iterable[str]]
Scope = Union[WholeCache, Iterable[ScopeSelector]]
def normalize_scope(scope: Scope) -> Scope:
if scope is whole_cache:
return whole_cache
else:
return set(selector for selector in resolve_selector(scope))
def resolve_selector(selector: ScopeSelector) -> Iterable[ScopeSelector]:
if isinstance(selector, (str, tuple)):
yield selector
elif isinstance(selector, (
list,
set,
frozenset,
OrderedSet,
ListWrapper,
SetWrapper
)):
for selector_part in selector:
for resolved_selector in resolve_selector(selector_part):
yield resolved_selector
else:
raise TypeError(
"Scope selectors must be strings, tuples or collections, got %r "
"instead."
% selector
)
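# Example: normalize_scope(["news", ("page", 3), ["a", "b"]]) flattens the nested
# selectors into {"news", ("page", 3), "a", "b"}, while normalize_scope(whole_cache)
# passes the whole_cache marker through unchanged.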
|
marticongost/cocktail
|
cocktail/caching/scope.py
|
scope.py
|
py
| 1,282 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42136460155
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
simlen = 10000
pX = pY = np.ones(6)
pX = pY = pX/6
pZ = np.array([ 1/36, 2/36, 3/36, 4/36, 5/36, 6/36, 5/36, 4/36, 3/36, 2/36, 1/36 ])
possible = np.arange(1,7)
possible_Z = np.arange(2, 13)
X= np.random.choice(possible, simlen, p=pX)
Y= np.random.choice(possible, simlen, p=pY)
cdfY = np.cumsum(pY)
Z = X + Y
simX, simpX = np.unique(X , return_counts= True)
simZ, simpZ = np.unique(Z , return_counts= True)
simY, simpY = np.unique(Y , return_counts= True)
simpX = simpX/simlen
simpY = simpY/simlen
simpZ = simpZ/simlen
simp_cdfY = np.cumsum(simpY)
# for Question1,
p1theo = cdfY[5] - cdfY[3]
p1sim = simp_cdfY[5] - simp_cdfY[3]
print("Theoretical value of probability of getting sum > 9 given X>5 is: " + str(p1theo))
print("Actual value of probability of getting sum > 9 given X>5 is: " + str(p1sim))
print("----------")
#for Question2,
p2theo = (pX[2]*pY[4] + pX[1]*pY[5])/cdfY[3]
p2sim = (simpX[2]*simpY[4] + simpX[1]*simpY[5])/simp_cdfY[3]
print("Theoretical value of probability of getting sum = 8 given Y < 4 is: " + str(p2theo))
print("Actual value of probability of getting sum = 8 given Y < 4 is: " + str(p2sim))
#plotting
fig,axes = plt.subplots(2,figsize = (12,20))
possible1 = possible.reshape(1,6)
eq9X = np.repeat(possible1,3, axis = 1)
pos = np.array([[1,2,3]])
eq9Y = np.repeat(pos , 6 , axis = 0)
#plot-1
axes[0].set_title("Plot for verifying current problem")
axes.flat[0].set(xlabel = 'Questions' , ylabel = 'Answers')
axes[0].stem([1, 2] , [p1theo,p2theo] , label = 'Theoretical', markerfmt = 'ro' , basefmt = 'g-' ,linefmt = 'b')
axes[0].set_xticks([1,2])
axes[0].plot([1, 2] , [p1sim, p2sim], 'yo',label = 'Actual')
axes[0].legend()
validX = np.repeat(possible,6).reshape(6,6)
validY = np.repeat(possible1,6 ,axis = 0)
print(validX)
print(validY)
#plot-2
axes[1].set_title("X+Y = 8 | Y < 4")
axes.flat[1].set(xlabel = 'X' , ylabel = 'Y')
axes[1].set_xlim(-0.2 , 7)
axes[1].set_ylim(-0.2 , 7)
axes[1].add_patch(Rectangle((0.6, 0.6), 5.8,5.8 , edgecolor = '#000000',facecolor = '#00000020' ,fill =True , label = 'Valid region for die roll results'))
axes[1].add_patch(Rectangle((0.8, 0.8), 5.4,2.4 , edgecolor = 'blue',facecolor = '#0000f020' ,fill =True ,label = 'Y<4'))
axes[1].scatter(validX,validY ,c = 'black' ,label = 'Integral points (X,Y)')
axes[1].scatter(eq9X,eq9Y , label = 'Integral points where Y<4')
axes[1].plot(possible[1:],8-possible[1:],'yo-' , label ='X+Y = 8')
axes[1].legend()
plt.show()
|
gadepall/digital-communication
|
ncert/12/13/1/10/codes/die_sim.py
|
die_sim.py
|
py
| 2,541 |
python
|
en
|
code
| 7 |
github-code
|
6
|
22762248832
|
x = 10
# original version
if x >= 10:
y = 1
else:
y = 0
print("Original version {}".format(x))
# use ternary conditionals instead
y = 1 if x >= 10 else 0
print("Pythonic version {}".format(x))
|
ssciwr/Python-best-practices-course
|
Material_Part5_BetterCoding/examples/ternary.py
|
ternary.py
|
py
| 206 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14522883806
|
#!/usr/bin/env python3
from flask import Flask, Response, request
from flask_cors import CORS
from PIL import Image
from rembg import new_session, remove
import io
app = Flask(__name__)
CORS(app)
@app.post('/api/rem-bg')
def remBg():
auth = request.headers.get('Authorization', None)
if not auth or not auth.startswith('Bearer '):
return Response(status=401)
auth = auth.removeprefix('Bearer ')
with open('keys', 'r') as f:
if not auth.strip() in [l.strip() for l in f.readlines()]:
return Response(status=401)
try:
image = request.get_data(True, False, False)
content_type = request.headers.get('Content-Type', '')
image_kind = [content_type.split('/', 2)[1]] if '/' in content_type else None
image = Image.open(io.BytesIO(image), formats=image_kind)
image = remove(image, post_process_mask=True)
body = io.BytesIO()
image.save(body, format='PNG')
return Response(body.getvalue(), status=200, content_type='image/png')
except BaseException as e:
return Response(repr(e), status=400, content_type='text/plain')
if __name__ == "__main__":
app.run(debug=True)
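# Hedged client sketch (not part of the service; file names and the key value are
# hypothetical placeholders). A caller might hit the endpoint above like this:
#
#   import requests
#   with open("photo.jpg", "rb") as f:
#       resp = requests.post(
#           "http://localhost:5000/api/rem-bg",
#           data=f.read(),
#           headers={"Authorization": "Bearer <key from the 'keys' file>",
#                    "Content-Type": "image/jpeg"},
#       )
#   with open("photo_cutout.png", "wb") as out:
#       out.write(resp.content)  # PNG with the background removed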
|
EasySnacks/remove-background-api
|
main.py
|
main.py
|
py
| 1,193 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14418643616
|
from django.db.models import fields
from django.shortcuts import render
from django.views.generic import ListView,DetailView,CreateView,UpdateView,DeleteView,RedirectView
from .models import Post
from .forms import PostForm,EditForm
from django.urls import reverse_lazy
# Create your views here.
#def home(request):
#return render(request,'home.html',{})
class HomeView(ListView):
model=Post
template_name='home.html'
ordering= ['-post_date']
#ordering= ['-id'] # '-' here represents ascending order
class ArticleDetailView(DetailView):
model= Post
template_name='article_details.html'
class AddPostView(CreateView):
model=Post
form_class=PostForm
template_name='add_post.html'
#fields= '__all__'
#fields=('title', 'body')
success_url= reverse_lazy('home')
class UpdatePostView(UpdateView):
model=Post
form_class= EditForm
template_name= 'update_post.html'
#fields= ['title', 'title_tag','body']
class DeletePostView(DeleteView):
model=Post
template_name='delete_post.html'
success_url= reverse_lazy('home')
class RedirectView(DeleteView):
model=Post
template_name='redirect.html'
success_url= reverse_lazy('redirect')
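# Hedged wiring sketch (hypothetical, not from this repo): one way these class-based
# views could be routed in the app's urls.py. The route names and URL patterns below
# are assumptions, except 'home', which the success_url values above rely on.
#
#   from django.urls import path
#   from .views import (HomeView, ArticleDetailView, AddPostView,
#                       UpdatePostView, DeletePostView)
#
#   urlpatterns = [
#       path('', HomeView.as_view(), name='home'),
#       path('article/<int:pk>/', ArticleDetailView.as_view(), name='article-detail'),
#       path('add_post/', AddPostView.as_view(), name='add_post'),
#       path('article/edit/<int:pk>/', UpdatePostView.as_view(), name='update_post'),
#       path('article/<int:pk>/delete/', DeletePostView.as_view(), name='delete_post'),
#   ]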
|
AniketShukla14/Interlink_platform
|
ablog/Theblog/views.py
|
views.py
|
py
| 1,272 |
python
|
en
|
code
| 2 |
github-code
|
6
|
26424799614
|
import random
import time
import sys
def easy_mode():
score = 100
answerEasy = random.randrange(1,11)
while True:
guess = input('1-10 or q to quit\n')
if guess.isdigit():
if int(guess) == answerEasy:
print('Correct! Your score was', score)
play_again()
elif int(guess) < answerEasy:
print('larger')
if score > 0:
score = score - 10
else:
score = 0
elif int(guess) > answerEasy:
print('smaller')
if score > 0:
score = score - 10
else:
score = 0
elif guess.lower() == 'q':
return end()
else:
            print('Invalid input')
easy_mode()
def medium_mode():
score = 100
answerMedium = random.randrange(1,101)
while True:
guess = input('1-100 or q to quit\n')
if guess.isdigit():
if int(guess) == answerMedium:
print('Correct! Your score was', score)
play_again()
elif int(guess) < answerMedium:
print('larger')
if score > 0:
score = score - 5
else:
score = 0
elif int(guess) > answerMedium:
print('smaller')
if score > 0:
score = score - 5
else:
score = 0
elif guess.lower() == 'q':
return end()
else:
print('Invalid input')
medium_mode()
def hard_mode():
score = 100
answerHard = random.randrange(1,1001)
while True:
guess = input('1-1000 or q to quit\n')
if guess.isdigit():
if int(guess) == answerHard:
print('Correct! Your score was', score)
play_again()
elif int(guess) < answerHard:
print('larger')
if score > 0:
score = score - 1
else:
score = 0
elif int(guess) > answerHard:
print('smaller')
if score > 0:
score = score - 1
else:
score = 0
elif guess.lower() == 'q':
return end()
else:
print('Invalid input')
hard_mode()
def end():
print('Thanks for playing!')
time.sleep(2)
sys.exit()
def play_again():
playAgain = input('Would you like to play again?')
    if playAgain == 'y' or playAgain.lower() == 'yes':
main()
    elif playAgain == 'n' or playAgain.lower() == 'no':
end()
else:
print('Please choose y or n')
play_again()
def main():
chooseMode = int(input("Choose a difficulty: 1,2,3\n"))
if chooseMode == 1:
easy_mode()
elif chooseMode == 2:
medium_mode()
elif chooseMode == 3:
hard_mode()
else:
print("enter valid mode")
while True:
main()
|
tbnd88/Guess-the-number
|
updated_guess_number.py
|
updated_guess_number.py
|
py
| 2,317 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72496080189
|
"""TestProject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/4.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.i18n import i18n_patterns
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
from rest_framework.permissions import AllowAny
from rest_framework_simplejwt.views import (
TokenObtainPairView,
TokenRefreshView,
)
from drf_yasg import openapi
from drf_yasg.views import get_schema_view as swagger_get_schema_view
schema_view = swagger_get_schema_view(
openapi.Info(
title="YagonaBilling",
default_version='1.0.0',
description="API documentation of App",
),
public=True,
permission_classes=(AllowAny,)
)
urlpatterns = [
path('admin/', admin.site.urls),
path('api/oauth/', include("oneid.urls")),
path('accounts/', include("accounts.urls")),
path('contracts/', include("contracts.urls")),
path('services/', include("services.urls")),
path('one-c/', include("one_c.urls")),
path('billing/', include("billing.urls")),
path('main/', include('main.urls')),
path('expertise/', include("expertiseService.urls")),
path('vps/', include("vpsService.urls")),
path('auth/token/', TokenObtainPairView.as_view(), name='token_obtain_pair'),
path('auth/token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),
path('swagger/', schema_view.with_ui('swagger', cache_timeout=0), name="swagger-schema"),
path('redoc/', schema_view.with_ui("redoc", cache_timeout=0), name="schema-redoc-ui"),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
Rahmet97/TestProjectBackend
|
TestProject/urls.py
|
urls.py
|
py
| 2,302 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74926945466
|
"""
Does the work to translate colour/effect names to ANSI codes
"""
# std libs
import numbers
import functools as ftl
# third-party libs
import numpy as np
from matplotlib.colors import to_rgb
# local libs
from recipes.dicts import ManyToOneMap
# relative libs
from .ansi import parse
# source: https://en.wikipedia.org/wiki/ANSI_escape_code
# http://ascii-table.com/ansi-escape-sequences.php
# Escape sequence
ESC = '\033' # All sequences start with this character # equivalent to \x1b
CSI = ESC + '[' # Control Sequence Initiator
END = CSI + '0m'
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ANSI Codes for Text effects and colours FG_CODES BG_CODE
FG_CODES = {
'bold': 1,
'dim': 2, # faint
'italic': 3,
'underline': 4,
'blink': 5, # blink slow
# 'blink' : 6, # blink fast
'invert': 7,
'hidden': 8, # conceal
'strike': 9,
# ------------------
# 10 Primary(default) font
# 11–19 {\displaystyle n} n-th alternate font Select the {\displaystyle n}
# n-th alternate font (14 being the fourth alternate font, up to 19 being
# the 9th alternate font).
# 20 Fraktur hardly ever supported
# 21 Bold: off or Underline: Double Bold off not widely supported;
# double underline hardly ever supported.
# 22 Normal color or intensity Neither bold nor faint
# 23 Not italic, not Fraktur
# 24 Underline: None Not singly or doubly underlined
# 25 Blink: off
# 26 Reserved
# 27 Image: Positive
# 28 Reveal conceal off
# 29 Not crossed out
# ------------------
'black': 30,
'red': 31,
'green': 32,
'yellow': 33,
'blue': 34,
'magenta': 35,
'cyan': 36,
'light gray': 37,
# 38 Reserved for extended set foreground color typical supported next
# arguments are 5;n where {\displaystyle n} is color index (0..255) or
# 2;r;g;b where {\displaystyle r,g,b} are red, green and blue color
# channels (out of 255)
'default': 39, # Default text color (foreground)
# ------------------
'frame': 51,
'circle': 52,
'overline': 53,
# 54 Not framed or encircled
# 55 Not overlined
# ------------------
'dark gray': 90,
'gray': 90,
'light red': 91,
'light green': 92,
'light yellow': 93,
'light blue': 94,
'light magenta': 95,
'light cyan': 96,
'white': 97,
}
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Background Colours
BG_CODES = {
'black': 40,
'red': 41,
'green': 42,
'yellow': 43,
'blue': 44,
'magenta': 45,
'cyan': 46,
'light gray': 47,
# ------------------
# 48 Reserved for extended set background color typical supported next
# arguments are 5;n where {\displaystyle n} is color index (0..255) or
# 2;r;g;b where {\displaystyle r,g,b} are red, green and blue color
# channels (out of 255)
'default': 49,
# 49 Default background color implementation defined (according to
# standard)
# 50 Reserved
# ------------------
# 56–59 Reserved
# 60 ideogram underline or right side line hardly ever supported
# 61 ideogram double underline or double line on the right side hardly
# ever supported
# 62 ideogram overline or left side line hardly ever supported
# 63 ideogram double overline or double line on the left side
# hardly ever supported
# 64 ideogram stress marking hardly ever supported
# 65 ideogram attributes off hardly ever supported, reset the effects of
# all of 60–64
# ------------------
'dark gray': 100,
'light red': 101,
'light green': 102,
'light yellow': 103,
'light blue': 104,
'light magenta': 105,
'light cyan': 106,
'white': 107,
}
# TODO: alternate colour names here: https://en.wikipedia.org/wiki/ANSI_escape_code
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Convenient short colour descriptions a la matplotlib
mplShortsMap = {
'b': 'blue',
'g': 'green',
'r': 'red',
'c': 'cyan',
'm': 'magenta',
'y': 'yellow',
'k': 'black',
'w': 'white',
}
#
effectShortsMap = {'B': 'bold',
'I': 'italic',
'U': 'underline',
'S': 'strike',
'unbold': 'dim',
'strikethrough': 'strike',
'blink': 'blink_slow',
'hide': 'hidden',
'faint': 'dim'
}
# vocab is translated before keyword mappings in Many2One, so the uppercase
# here works
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# alias map for allowed keywords for functions
kwAliasMap = {
# text
'text': 'fg',
'txt': 'fg',
'colour': 'fg',
'color': 'fg',
'c': 'fg',
'fg': 'fg',
'foreground': 'fg',
'rgb': 'fg',
# background
'highlight': 'bg',
'background': 'bg',
'bg': 'bg',
'bc': 'bg',
'bgc': 'bg'
}
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Movement = {} # TODO
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# def _aliasFactory(codes, aliases):
# """Create the code translation dict"""
# Codes = ManyToOneMap(codes)
# Codes.add_vocab(aliases)
# Codes.add_map(str.lower)
# return Codes
class KeyMap(ManyToOneMap):
"""
Resolve all the various ways in which colours or effects can be specified.
"""
def __init__(self, dic=None, **kws):
super().__init__(dic, **kws)
self.add_vocab(kwAliasMap) # translation
def __missing__(self, key):
try:
return super().__missing__(key)
except KeyError:
pass
raise KeyError(
f'{key} is not a valid description for text or background effect.')
class CodeMap(ManyToOneMap): # CodeMap
"""
Resolve all the various names for colours or effects into codes
"""
# def __init__(self, dic=None, **kws):
# super().__init__(dic, **kws)
# add mappings for matplotlib color names eg: 'r' --> 'red' etc..
# self.add_vocab(mplShortsMap)
# add a layer that maps to lower case: 'REd' --> 'red'
# self.add_mapping(str.lower)
def __getitem__(self, key):
# make sure we always return a str
return str(super().__getitem__(key))
def __missing__(self, key):
try:
return super().__missing__(key)
except KeyError:
raise KeyError(f'Unknown colour or effect {key!r}') from None
# additional shorthands for bold / italic text
fg_codes = CodeMap(FG_CODES)
fg_codes.add_vocab(effectShortsMap)
codes = KeyMap(fg=fg_codes,
bg=CodeMap(BG_CODES))
FORMAT_8BIT = KeyMap(fg='38;5;{:d}',
bg='48;5;{:d}')
FORMAT_24BIT = KeyMap(fg='38;2;{:d};{:d};{:d}',
bg='48;2;{:d};{:d};{:d}')
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Dispatch functions for translating user input to ANSI codes
@ftl.singledispatch
def resolve(obj, fg_or_bg='fg'):
"""default dispatch func for resolving ANSI codes from user input"""
raise TypeError('Could not interpret %r (type %r) as a colour / effect '
'for %r' % (obj, type(obj), fg_or_bg))
# TODO: might want to give the functions below names for more readable traceback
@resolve.register(type(None))
def _(obj, fg_or_bg='fg'):
    # bare `return` before `yield` makes this an empty generator: None contributes no codes
    return
    yield
@resolve.register(str)
def _(obj, fg_or_bg='fg'):
if obj == '':
return
try:
yield from resolve(to_24bit(obj), fg_or_bg)
except ValueError:
yield codes[fg_or_bg][obj]
@resolve.register(numbers.Integral)
def _(obj, fg_or_bg='fg'):
# integers are interpreted as 8-bit colour codes
if 0 <= obj < 256:
yield FORMAT_8BIT[fg_or_bg].format(obj)
else:
raise ValueError('Could not interpret key %r as a 8 bit colour' % obj)
@resolve.register(np.ndarray)
@resolve.register(list)
@resolve.register(tuple)
def _(obj, fg_or_bg='fg'):
# tuple, lists are interpreted as 24-bit rgb true colour codes
if is_24bit(obj):
if all(0 <= _ < 256 for _ in obj):
yield FORMAT_24BIT[fg_or_bg].format(*obj)
else:
raise ValueError(
f'Could not interpret key {obj!r} as a 24 bit colour'
)
else:
for p in obj:
yield from resolve(p, fg_or_bg)
@resolve.register(dict)
def _(obj, _=''):
for key, val in obj.items():
# `val` may have tuple of effects: eg: ((55, 55, 55), 'bold', 'italic')
# but may also be a rgb tuple eg: (55, 55, 55)
yield from resolve(val, key)
def is_24bit(obj):
if len(obj) != 3:
return False
return all(isinstance(o, numbers.Integral) for o in obj)
def to_24bit(name):
return tuple((np.multiply(to_rgb(name), 255).astype(int)))
def _gen_codes(*properties, **kws):
"""
Parameters
----------
properties
kws
Yields
-------
code: int
"""
yield from resolve(properties)
yield from resolve(kws)
def _get_params(*properties, **kws):
# get the nrs '34;48;5;22' part of the code
return ';'.join(_gen_codes(*properties, **kws))
def get(*properties, **kws):
"""
Get the ANSI code for `properties` and `kws`
Parameters
----------
properties:
kws:
Returns
-------
"""
return ''.join((CSI, _get_params(*properties, **kws), 'm'))
def from_list(fg=None, bg=None):
if fg is not None:
return list(map(get, fg))
if bg is not None:
return [get(bg=_) for _ in bg]
def apply(s, *properties, **kws):
"""
Apply the ANSI codes mapped to by `properties` and `kws` to the string `s`
Parameters
----------
s
properties
kws
Returns
-------
"""
# first convert to str
# string = str(s)
# get code bits eg: '34;48;5;22'
new_codes = _get_params(*properties, **kws)
if not len(new_codes):
return s
# In order to get the correct representation of the string,
# we strip and ANSI codes that are in place and stack the new codes
# This means previous colours are replaced, but effects like 'bold' or
# 'italic' will stack for recursive invocations. This also means we get
# the shortest representation of the string given the parameters which is
# nice and efficient. If we were to apply blindly our string would be
# longer than needed by a few (non-display) characters. This might seem
# innocuous but becomes deadly once you start doing more complicated
# effects on longer strings
# note: final byte 'm' only valid for SGR (Select Graphic Rendition) and
# not other codes, but this is all we support for now
return ''.join(''.join((CSI, params, ';', new_codes, 'm', w, END))
for _, params, _, w, _ in parse(s)
) # .replace('\n', f'{END}\n{CSI}{new_codes}m'
#
# Finally, terminate and restart all codes at a newline boundary so that
# we can more easily stack text blocks horizontally
def apply_naive(s, *properties, **kws):
"""
Initial naive implementation of `apply` that blindly wraps the string with
the ANSI codes. Use `apply` instead of this function.
"""
# get code string eg: '34;48;5;22'
code = get(*properties, **kws)
if not len(code):
return s
return ''.join((code, s, END))
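# Hedged usage sketch (appended illustration; the expected values assume the
# ManyToOneMap lookups behave as the comments above describe).
if __name__ == "__main__":
    # effect names resolve through FG_CODES ('bold' -> 1, 'underline' -> 4)
    print(repr(get('bold', 'underline')))          # '\033[1;4m'
    # integers are 8-bit palette indices, rgb triples are 24-bit colours
    print(repr(get(22)))                           # '\033[38;5;22m'
    print(repr(get((255, 0, 0), bg=(0, 0, 255))))  # '\033[38;2;255;0;0;48;2;0;0;255m'
    # apply_naive simply brackets the text between the code and the reset sequence
    print(repr(apply_naive('hi', 'bold')))         # '\033[1mhi\033[0m'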
|
astromancer/motley
|
src/motley/codes.py
|
codes.py
|
py
| 11,577 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5308751490
|
m, n = map(int, input().split())
key = [list(map(int, input().split())) for _ in range(m)]
lock = [list(map(int, input().split())) for _ in range(n)]
def solution(key, lock):
    # rotate the key 90 degrees clockwise
    def rotate_90(key):
        n = len(key)
        rot = [[0]*n for _ in range(n)]
        for r in range(n):
            for c in range(n):
                rot[c][n-1-r] = key[r][c]
        return rot
    # check whether the centre of the padded lock is fully filled
def check(new_lock):
lock_length = len(new_lock) // 3
for i in range(lock_length, lock_length*2):
for j in range(lock_length, lock_length*2):
if new_lock[i][j] != 1:
return False
return True
    n = len(lock)  # size of the lock
    m = len(key)   # size of the key
new_lock = [[0]*(n*3) for _ in range(n*3)]
    # place the lock in the middle of the 3n x 3n board
for i in range(n):
for j in range(n):
new_lock[i+n][j+n] = lock[i][j]
    # try all four rotations of the key
for rotation in range(4):
key = rotate_90(key)
# 0 ~ 2n - lock
for x in range(n*2):
# 0 ~ 2n
for y in range(n*2):
# 0 ~ m - key
                # insert the key into the padded lock
for i in range(m):
for j in range(m):
new_lock[x+i][y+j] += key[i][j]
if check(new_lock):
return True
for i in range(m):
for j in range(m):
new_lock[x+i][y+j] -= key[i][j]
return False
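# Hedged illustration (appended; the sample grids are the widely used example for
# this puzzle, not data from the original file, and the input() calls at the top
# still expect stdin when the script is run):
#
#   solution([[0, 0, 0], [1, 0, 0], [0, 1, 1]],
#            [[1, 1, 1], [1, 1, 0], [1, 0, 1]])   # -> True: some rotation and shift
#   of the key fills every 0 in the centre of the padded lock.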
|
louisuss/Algorithms-Code-Upload
|
Python/DongbinBook/simulation/lock_key.py
|
lock_key.py
|
py
| 1,541 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18101466174
|
from functools import cache
from typing import List, Tuple
from unittest import TestCase, main
class Solution:
def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:
len_x, len_y = len(matrix[0]), len(matrix)
@cache
def dfs(x: int, y: int):
if x >= len_x or y >= len_y:
return False
if matrix[y][x] == target:
return True
if matrix[y][x] > target:
return False
return dfs(x + 1, y) or dfs(x, y + 1)
return dfs(0, 0)
class Test(TestCase):
s = Solution()
data: List[Tuple[List[List[int]], int, bool]] = [
(
[
[1, 4, 7, 11, 15],
[2, 5, 8, 12, 19],
[3, 6, 9, 16, 22],
[10, 13, 14, 17, 24],
[18, 21, 23, 26, 30],
],
5,
True,
),
(
[
[1, 4, 7, 11, 15],
[2, 5, 8, 12, 19],
[3, 6, 9, 16, 22],
[10, 13, 14, 17, 24],
[18, 21, 23, 26, 30],
],
5,
True,
),
(
[
[1, 4, 7, 11, 15],
[2, 5, 8, 12, 19],
[3, 6, 9, 16, 22],
[10, 13, 14, 17, 24],
[18, 21, 23, 26, 30],
],
20,
False,
),
]
def test_solution(self):
for matrix, target, expected in self.data:
self.assertEqual(self.s.searchMatrix(matrix, target), expected)
if __name__ == "__main__":
main()
|
hirotake111/leetcode_diary
|
leetcode/search_2d_matrix_II/solution.py
|
solution.py
|
py
| 1,686 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15107122264
|
import unittest
import time
from BeautifulReport import BeautifulReport
from base.web_driver import browser
from config.setting import logging
from util.send_email import inser_img,get_time
now = time.strftime("%Y-%m-%d %H_%M_%S")
class StartEnd(unittest.TestCase):
name=''
@classmethod
def setUpClass(cls):
cls.driver = browser()
logging.info("打开浏览器")
def setUp(self):
self.driver.implicitly_wait(10)
self.driver.maximize_window()
def tearDown(self):
logging.info("检测异常处理")
self._testMethodName=self._testname
        for method_name, error in self._outcome.errors:  # if the case failed, the error is stored in _outcome.errors
if error:
                case_name = self._testname  # case name, i.e. the defined test method name
report_error_name =get_time()+ case_name + '.png'
# logging.error("report_error:", report_error_name)
inser_img(self.driver,report_error_name)
@classmethod
def tearDownClass(cls):
# if "end" not in cls.name:
logging.info("用例正在结束:"+cls.name)
logging.info("关闭浏览器")
cls.driver.quit()
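# Hedged usage sketch (hypothetical subclass and URL, not from this repo): concrete
# test classes are expected to inherit StartEnd and set self._testname so that
# tearDown can name the failure screenshot.
#
#   class TestLogin(StartEnd):
#       def test_login(self):
#           self._testname = 'test_login'
#           self.driver.get('https://example.com/login')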
|
newcaolaing/web_auto
|
test_case/model/myunit.py
|
myunit.py
|
py
| 1,237 |
python
|
en
|
code
| 1 |
github-code
|
6
|
31651316167
|
#! /usr/bin/python
from getRoomKitData import send_command, data_to_dict, dict_to_json, start_connect
from dotenv import load_dotenv
import os
load_dotenv()
def get_xconfig(session):
command = "xConfiguration\r"
commandTrimmed = "xConfiguration"
data = send_command(session, command)
# with open('./callHistory.txt', 'r') as ch:
# data = ch.read() callsArray.pop(0)
mydict = data_to_dict(data, pattern="*c ")
dict_to_json(mydict, commandTrimmed)
def main():
session = start_connect()
get_xconfig(session)
if __name__ == "__main__":
main()
|
ingenium21/getRoomKitData
|
getRoomKitConfiguration.py
|
getRoomKitConfiguration.py
|
py
| 590 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17649871547
|
# IMPORT RANDRANGE FUNCTION FOR COMPUTER MOVEMENTS
from random import randrange
# CREATE THE STARTING BOARD
board = []
n = 1
for row in range(3):
column = [n, n+1, n+2]
n += 3
board.append(column)
board[1][1] = 'X'
# FUNCTIONS
def displayBoard():
# DISPLAY THE CURRENT STATUS OF THE BOARD
print(
f"""
+-------+-------+-------+
| | | |
| {board[0][0]} | {board[0][1]} | {board[0][2]} |
| | | |
+-------+-------+-------+
| | | |
| {board[1][0]} | {board[1][1]} | {board[1][2]} |
| | | |
+-------+-------+-------+
| | | |
| {board[2][0]} | {board[2][1]} | {board[2][2]} |
| | | |
+-------+-------+-------+
"""
)
def enterMove():
# ASKS THE USER ABOUT THEIR MOVE, CHECKS THE INPUT AND UPDATES THE BOARD
isFree = False
try:
move = int(input("Enter your move: "))
if move < 1 or move > 9:
print("ERROR! You can't enter a value less than 1 or greater than 9.")
enterMove()
else:
for r, c in makeAListOfFreeFields():
if board[r][c] == move:
isFree = True
move = (r, c)
if isFree:
board[move[0]][move[1]] = 'O'
else:
print("Invalid move.")
enterMove()
except:
print("Invalid input!")
enterMove()
def makeAListOfFreeFields():
# BUILDS A LIST OF ALL THE FREE SQUARES, CONSISTING OF A TUPLE LIST
freeFields = []
for r in range(3):
for c in range(3):
if board[r][c] == 'X' or board[r][c] == 'O': continue
freeFields.append((r, c))
return freeFields
def victoryFor(sign):
    # THE FUNCTION VERIFIES IF SOMEONE HAS WON THE GAME
global gameStatus
global winner
gameStatus = True
winner = ''
if \
(board[2][0] == board[1][1] and board[1][1] == board[0][2]) or \
(board[0][0] == board[1][1] and board[1][1] == board[2][2]) or \
(board[0][0] == board[1][0] and board[1][0] == board[2][0]) or \
(board[0][1] == board[1][1] and board[1][1] == board[2][1]) or \
(board[0][2] == board[1][2] and board[1][2] == board[2][2]) or \
(board[0][0] == board[0][1] and board[0][1] == board[0][2]) or \
(board[1][0] == board[1][1] and board[1][1] == board[1][2]) or \
(board[2][0] == board[2][1] and board[2][1] == board[2][2]):
gameStatus = False
winner = sign
elif len(makeAListOfFreeFields()) == 0:
gameStatus = False
winner = 'none'
def drawMove():
# The function draws the computer's move and updates the board.
cpuMove = randrange(1, 10)
isFree = False
for r, c in makeAListOfFreeFields():
if board[r][c] == cpuMove:
isFree = True
cpuMove = (r, c)
if isFree:
board[cpuMove[0]][cpuMove[1]] = 'X'
else:
drawMove()
# EXE
displayBoard()
while True:
enterMove()
displayBoard()
victoryFor('O')
if not gameStatus: break
drawMove()
displayBoard()
victoryFor('X')
if not gameStatus: break
if winner == 'O': print("You won!")
elif winner == 'X': print("You lost!")
elif winner == 'none' : print("Tie.")
|
antopuli/python-projects
|
tic-tac-toe.py
|
tic-tac-toe.py
|
py
| 3,657 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24987056441
|
import csv
import datetime
import time
from datetime import date, timedelta
import netsvc
logger = netsvc.Logger()
if __name__ != '__main__':
from tools import config
else:
config={'addons_path':'/home/quentin/tinydev/cci/code/server/bin/addons'}
partner_dict = {}
partner_dict[''] = ''
dict_partner = {}
def _get_partner_id(char):
return char
def convert2utf(row):
if row:
retRow = {}
for k,v in row.items():
retRow[k] = v.decode('latin1').encode('utf8').strip()
return retRow
return row
def get_first_day(dt, d_years=0, d_months=0):
# d_years, d_months are "deltas" to apply to dt
y, m = dt.year + d_years, dt.month + d_months
a, m = divmod(m-1, 12)
return date(y+a, m+1, 1)
def get_last_day(dt):
return get_first_day(dt, 0, 1) + timedelta(-1)
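# Worked example (illustrative dates, not from the source data):
#   get_first_day(date(2011, 3, 15))        -> date(2011, 3, 1)
#   get_first_day(date(2011, 3, 15), 0, 1)  -> date(2011, 4, 1)   first day of the next month
#   get_last_day(date(2011, 3, 15))         -> date(2011, 3, 31)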
def mkDateTime(dateString,strFormat="%Y-%m-%d"):
# Expects "YYYY-MM-DD" string
# returns a datetime object
eSeconds = time.mktime(time.strptime(dateString,strFormat))
return datetime.datetime.fromtimestamp(eSeconds)
def _get_tax_code_id(char):
if char == '':
return ''
tmp = []
for c in char.split(';'):
if c != '':
if c[0] == '+':
tmp.append(('+','l10n_be.vat_code_a'+c[2:4]))
else:
tmp.append(('-','l10n_be.vat_code_a'+c[2:4]))
return tmp
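# Worked example (the selector string is a hypothetical sample):
#   _get_tax_code_id('+A54;-A49')
#   -> [('+', 'l10n_be.vat_code_a54'), ('-', 'l10n_be.vat_code_a49')]
# i.e. position 0 gives the sign and characters 2:4 pick the VAT case number.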
def construct_vat_dict(reader_vat_code, reader_vat, vat_dict):
count = 0
for row in reader_vat_code:
#fill the first line with False
if count != "0":
if row['VSTORED,A,15']:
vat_dict[row['VSTORED,A,15']] = {
'inv':_get_tax_code_id(row['VLBAINV,A,30']),
'vat':_get_tax_code_id(row['VLTAINV,A,30']),
'ref_inv':_get_tax_code_id(row['VLBACRE,A,30']),
'ref_vat':_get_tax_code_id(row['VLTACRE,A,30']),
}
else:
vat_dict[''] = False
count += 1
count = 0
for row in reader_vat:
#fill the first line with False
if count != "0":
if row['VSTORED,A,15'] and vat_dict.has_key(row['VSTORED,A,15']):
vat_dict[row['VSTORED,A,15']]['inv_account'] = row['VIMPINV,A,10']
vat_dict[row['VSTORED,A,15']]['ref_account'] = row['VIMPCRE,A,10']
else:
vat_dict[row['VSTORED,A,15']]={
'inv':'',
'vat':'',
'ref_inv':'',
'ref_vat':'',
'inv_account':'',
'ref_account':'',
}
count += 1
return vat_dict
# -=====================================-
# -= 1. Defining Structure and Mapping =-
# -=====================================-
# -= A. Chart of Accounts =-
def _check_code_4_usertype(x):
if x['ABALANCE,A,10'] == 'LIABILIT':
return 'account_type_liability'
if x['ABALANCE,A,10'] == 'ASSETS':
return 'account_type_asset'
if x['ABALANCE,A,10'] == 'FXASSETS':
return 'account_type_asset'
if x['ABALANCE,A,10'] == 'INCOME':
return 'account_type_income'
if x['ABALANCE,A,10'] == 'DISCINC':
return 'account_type_income'
if x['ABALANCE,A,10'] == 'EXPENSE':
return 'account_type_expense'
if x['ABALANCE,A,10'] == 'DISCEXP':
return 'account_type_expense'
if x['ABALANCE,A,10'] == 'UNDEF':
return 'account_type_root'
# if x['AID,A,10'].startswith('6'):
# return 'account_type_expense'
# if x['AID,A,10'].startswith('7'):
# return 'income'
return 'account_type_root'
def _check_code_4_type(x):
if x['AID,A,10'].startswith('40'):
if x['AID,A,10'].startswith('406'):
return 'payable'
return 'receivable'
if x['AID,A,10'].startswith('44'):
return 'payable'
if len(x['AID,A,10']) <= 4:
return 'view'
return 'other'
account_map = {
'id': lambda x: 'account_'+x['AID,A,10'],
'code': lambda x: x['AID,A,10'],
'name': lambda x: x['HEADING1,A,40'],
'note': lambda x: x['AMEMO,M,11'],
'type': lambda x: _check_code_4_type(x),
'user_type:id': lambda x: _check_code_4_usertype(x),
'parent_id:id': lambda a: ''#'account_bob_0'
}
def import_account(reader, writer, mapping):
record = {}
for key, column_name in mapping.items():
record[key] = key
writer.writerow(record)
temp_dict = {}
list_ids = []
list_rows = []
for row in reader:
record = {}
for key,fnct in mapping.items():
record[key] = fnct(convert2utf(row))
temp_dict[record['code']]=record
list_ids.append(record['code'])
temp_keys = map(lambda x: int(x),temp_dict.keys())
temp_keys.sort()
temp_str_keys = map(lambda x: str(x),temp_keys)
for t in temp_str_keys:
if len(t)>1:
l = len(temp_dict[t]['code'])
aa = range(l+1)
aa.reverse()
aa.pop()
for i in aa:
if temp_dict[t]['code'][0:i-1] in list_ids:
temp_dict[t]['parent_id:id'] = 'account_' + str(temp_dict[t]['code'][0:i-1])
break
else:
temp_dict[t]['parent_id:id'] = 'account_bob_import.account_bob_0'
list_rows.append(temp_dict[t])
writer.writerows(list_rows)
return True
# -= B. Financial Journals =-
journals_map = {
'id' : lambda x: 'journal_'+x['DBID,A,4'],
'code': lambda x: x['DBID,A,4'],
'name': lambda x: x['HEADING1,A,30'],
'view_id:id': lambda x: 'account.account_journal_view', # journal view for all except the ones that are of type cash => cash journal view
'currency:id': lambda x: x['DBCURRENCY,A,3'],#to be check
'sequence_id:id': lambda x: 'account.sequence_journal', #entry journal for all
'type': lambda x: {
'PUR': 'purchase',
'PUC': 'purchase',
'SAL': 'sale',
'SAC': 'sale',
'CAS': 'cash',
'ISB': 'general',#default
'PRI': 'general',#default
'ISD': 'general',#default
'ICO': 'general',#default
'ISO': 'general',#default
'PRO': 'general',#default
'COP': 'general',#default
'ISI': 'general',#default
'ISM': 'general',#default
'IDN': 'general',#default
'ICE': 'general',#default
'':'general'
#else should be of 'general' type
}[x['DBTYPE,A,3']],
'default_debit_account_id:id':lambda x: x['DBACCOUNT,A,10'], #filled with the id of the account_account with code = x['DBACCOUNT,A,10'],
'default_credit_account_id:id':lambda x: x['DBACCOUNT,A,10'] ,#filled with the id of the account_account with code =
}
def import_journal(reader_journal, writer_journal, journals_map):
record = {}
for key, column_name in journals_map.items():
record[key] = key
writer_journal.writerow(record)
for row in reader_journal:
record = {}
for key,fnct in journals_map.items():
record[key] = fnct(convert2utf(row))
if record['default_debit_account_id:id']:
record['default_debit_account_id:id'] = 'account_' + str(record['default_debit_account_id:id'])
if record['default_credit_account_id:id']:
record['default_credit_account_id:id'] = 'account_' + str(record['default_credit_account_id:id'])
if record['type']=='cash':
record['view_id:id']='account.account_journal_bank_view'
cur = ''
if record['currency:id']:
cur = 'base.' + record['currency:id'].upper()
record['currency:id'] = cur
writer_journal.writerow(record)
return True
# -= C. Partners Data =-
#Beware: If 2 partners have the same name, we have to create only one partner with several adresses.
#We also have to record all their old names because they can be referenced in another files (e.g. the account_move_line one).
#That's the reason why we keep a dictionary to match the IDS.
def _get_cat(record):
#have to put the partner into category suppliers if CSUPTYPE,A,1 == 'S'
#have to put the partner into category customers if CCUSTYPE,A,1 == 'C'
res=[]
if 'CSUPTYPE,A,1' in record and record['CSUPTYPE,A,1'].upper() in ['S'] :
res.append('base.res_partner_category_8')
if 'CCUSTYPE,A,1' in record and record['CCUSTYPE,A,1'].upper() in ['C']:
res.append('base.res_partner_category_0')
return ','.join(res)
partners_map = {
'id':lambda x: x['CID,A,10'],
'ref': lambda x: x['CID,A,10'],
'name': lambda x: x['CNAME1,A,40'],
'lang': lambda x: {
#/!\ if a lang isn't installed, the value should be filled with ''
'E': 'en_US', #'E' for English
'D': 'de_DE',#'de_DE',#?? #'D' for German....de_DE
'F': 'fr_FR',#'fr_FR',#??#'F' for French..fr_FR
'N': 'nl_NL',#'nl_NL',#??#'N' for Dutch....nl_NL
'A': '',#no lang
'' : ''
}[x['CLANGUAGE,A,2']],
'vat': lambda x: x['CVATNO,A,12'],
'website': lambda x: x['HTTPADDRESS,A,60'],
'comment': lambda x: x['CMEMO,M,11'],
'domiciliation_bool': lambda x : x['CBANKORDERPAY,L,1'],
'domiciliation': lambda x : x['CBANKORDERPAYNO,A,15'],
'category_id:id':lambda x:_get_cat(x),
}
#have to create one res.partner.adress for this partner with this
partner_add_map = {
'id' : lambda x: '',
'city' : lambda x: x['CLOCALITY,A,40'],
'fax': lambda x: x['CFAXNO,A,25'],
'zip' : lambda x: x['CZIPCODE,A,10'],
'country_id:id':lambda x: x['CCOUNTRY,A,6'], #filled with id of res.country that have code == x['CCOUNTRY,A,6']
'phone' : lambda x: x['CTELNO,A,25'],
'street' : lambda x: x['CADDRESS1,A,40'],
'type' : lambda x: 'default',
'partner_id:id':lambda x: ''
}
#have to create res.partner.bank if x['CBANKNO,A,20'] <> False
partner_bank_map = {
'state': lambda x:'bank',#should be filled with id of res.Partner.bank.type that have name == 'Bank Account'
'acc_number': lambda x: x['CBANKNO,A,20'],
'partner_id:id':lambda x:''
}
def import_partner(reader_partner, writer_partner, partners_map, writer_address, partner_add_map, writer_bank, partner_bank_map):
record = {}
record_address = {}
record_bank = {}
list_partners = {}
for key, column_name in partners_map.items():
record[key] = key
for key, column_name in partner_add_map.items():
record_address[key] = key
for key, column_name in partner_bank_map.items():
record_bank[key] = key
writer_partner.writerow(record)
writer_address.writerow(record_address)
writer_bank.writerow(record_bank)
count_address = 0
for row in reader_partner:
record = {}
record_address = {}
record_bank = {}
for key,fnct in partners_map.items():
record[key] = fnct(convert2utf(row))
for key,fnct in partner_add_map.items():
record_address[key] = fnct(convert2utf(row))
partner_name = record['name']
if partner_name != "":
if record['lang'] not in langs:
langs.append(record['lang'])
#partner already exists
count_address = count_address + 1
record_address['id'] = 'add' + str(count_address)
if list_partners.has_key(record['name']):
record_address['type'] = 'other'
partner_dict[row['CID,A,10']] = list_partners[record['name']]
else:
#record it
list_partners[record['name']] = row['CID,A,10']
partner_dict[row['CID,A,10']] = record['id']
dict_partner[record['ref']] = record_address['id']
if not record['domiciliation_bool'] == '1':
record['domiciliation_bool'] = ''
writer_partner.writerow(record)
#create bank account if necessary
if row.has_key('CBANKNO,A,20') and row['CBANKNO,A,20']:
for key,fnct in partner_bank_map.items():
record_bank[key] = fnct(convert2utf(row))
record_bank['partner_id:id'] = _get_partner_id(partner_dict[row['CID,A,10']])
writer_bank.writerow(record_bank)
#create address in all cases ('default' address if partner didn't exist before, 'other' otherwise)
address = ''
if record_address['country_id:id']:
address = 'base.'+record_address['country_id:id'].lower()
record_address['partner_id:id'] = _get_partner_id(partner_dict[row['CID,A,10']])
record_address['country_id:id'] = address
writer_address.writerow(record_address)
return True
# -= D. Contacts Data =-
contacts_map = {
'id': lambda x:'' ,
'first_name': lambda x: x['PFIRSTNAME,A,30'],
'name': lambda x: x['PNAME,A,30'],
'title': lambda x: {
'0':'', #keep empty
'1':'Mss', #should be the id of res.partner.title where name == 'Miss'
'2':'Ms.', #should be the id of res.partner.title where name == 'Madam'
'3':'M.', #should be the id of res.partner.title where name == 'Sir'
'':'', #keep empty
#~ #/!\ if an id cannot be found, the value should be ''
}[x['PMF,A,1']],
'mobile': lambda x: x['PGSM,A,25'],
# 'lang_id': lambda x: {
# 'E': 'English',#should be the id of res.lang where name == 'English'
# 'D': 'German',#should be the id of res.lang where name == 'German'
# 'F': 'French',#should be the id of res.lang where name == 'French'
# 'N': 'Dutch',#should be the id of res.lang where name == 'Dutch'
# '': ''#for empty data.....
#
# #~ #/!\ if an id cannot be found, the value should be ''
# }[x['PLANGUAGE,A,2']],
#~ #have to be linked to the default adress of the partner with code == x['PCID,A,10']
}
job_map = {
#'id' : lambda x : '',
'address_id:id' : lambda x:'',
'contact_id:id' : lambda x:'',
'function_id:id': lambda x:'account_bob_import.res_partner_function_bob',
#'function_label' : lambda x:'' ...should be check...for cci users
}
def import_contact(reader_contact, writer_contact, contacts_map, writer_job, job_map):
record = {}
record_job = {}
for key, column_name in contacts_map.items():
record[key] = key
for key, column_name in job_map.items():
record_job[key] = key
writer_contact.writerow(record)
writer_job.writerow(record_job)
count_contact = 0
for row in reader_contact:
record = {}
record_job = {}
for key,fnct in contacts_map.items():
record[key] = fnct(convert2utf(row))
for key,fnct in job_map.items():
record_job[key] = fnct(convert2utf(row))
count_contact = count_contact + 1
record['id'] = "cont" + str(count_contact)
record_job['contact_id:id'] = record['id']
if dict_partner.has_key(row['PCID,A,10']):
record_job['address_id:id'] = dict_partner[row['PCID,A,10']]
else:
record_job['address_id:id'] = 'account_bob_import.res_partner_address_bob'
writer_contact.writerow(record)
writer_job.writerow(record_job)
return True
# -= E. Periods and FY =-
fyear_map = {
'id': lambda x: 'FY'+x['YEAR,I,4'],
'date_stop': lambda x: x['YEAR,I,4']+'-12-31', #last day of x['YEAR,I,4']
'date_start': lambda x: x['YEAR,I,4']+'-01-01',#first day of x['YEAR,I,4']
'code': lambda x: 'FY'+x['YEAR,I,4'],
'name': lambda x: 'Fiscal Year '+x['YEAR,I,4'],
'state': lambda x: 'draft',
}
def import_fyear(reader_fyear, writer_fyear, fyear_map):
record = {}
for key, column_name in fyear_map.items():
record[key] = key
writer_fyear.writerow(record)
fyear_rows = []
fyear_rows_ref = []
#parse the period csv file to know what are the fiscal years that need to be created
for row in reader_fyear:
if row['YEAR,I,4'] not in fyear_rows_ref:
fyear_rows_ref.append(row['YEAR,I,4'])
fyear_rows.append(row)
#create the fiscal years
for fyear in fyear_rows:
record = {}
for key,fnct in fyear_map.items():
record[key] = fnct(convert2utf(fyear))
writer_fyear.writerow(record)
return True
periods_map = {
'id': lambda x: 'period_'+x['YEAR,I,4']+"/"+x['MONTH,I,4'],
'date_stop': lambda x: get_last_day(mkDateTime(x['YEAR,I,4']+"-"+x['MONTH,I,4']+"-01")).strftime("%Y-%m-%d"),#last day of x['MONTH,I,4']
'date_start': lambda x:get_first_day(mkDateTime(x['YEAR,I,4']+"-"+x['MONTH,I,4']+"-01")).strftime("%Y-%m-%d"), #first day of x['MONTH,I,4']
'name': lambda x: x['LABEL,A,8'],
'state': lambda x: 'draft',
'fiscalyear_id:id': lambda x: 'FY'+x['YEAR,I,4'],
}
def import_period(reader_period, writer_period, period_map):
record = {}
for key, column_name in period_map.items():
record[key] = key
writer_period.writerow(record)
period_rows = []
for row in reader_period:
#only create periods if x['MONTH,I,4'] != 0
if row['MONTH,I,4'] != "0":
record = {}
for key,fnct in period_map.items():
record[key] = fnct(convert2utf(row))
writer_period.writerow(record)
return True
# -= F. Reconcile =-
arecon_map = {
'id' : lambda x: 'a'+x['HID,A,10'].strip()+'_'+x['HMATCHNO,I,4'],
'type': lambda x: 'bob imported',
'name': lambda x: 'a'+x['HID,A,10'].strip()+'_'+x['HMATCHNO,I,4'],
}
crecon_map = {
'id' : lambda x: 'c'+x['HID,A,10'].strip()+'_'+x['HMATCHNO,I,4'],
'type': lambda x: 'bob imported',
'name': lambda x: 'c'+x['HID,A,10'].strip()+'_'+x['HMATCHNO,I,4'],
}
def import_areconcile(reader, writer, map):
#write the header of creconcile
record = {}
for key, column_name in map.items():
record[key] = key
writer.writerow(record)
dict = {}
last_id = ""
for row in reader:
dict[row['HID,A,10']+row['HDBK,A,4']+row['HFYEAR,A,5']+row['HMONTH,I,4']+'/'+row['HDOCNO,I,4']+'/'+row['HORDERNO,I,4']] = row['HMTACHNO_ID:ID']
record = {}
for key,fnct in map.items():
record[key] = fnct(convert2utf(row))
if last_id != record['id']:
writer.writerow(record)
last_id = record['id']
return dict
def import_creconcile(reader, writer, map):
#write the header of creconcile
record = {}
for key, column_name in map.items():
record[key] = key
writer.writerow(record)
dict = {}
last_id = ""
for row in reader:
dict[row['HTYPE,A,1']+row['HID,A,10']+row['HFYEAR,A,5']+row['HDBK,A,4']+'/'+row['HDOCNO,I,4']+'/'+row['HORDERNO,I,4']] = row['HMATCHNO_ID:ID']
record = {}
for key,fnct in map.items():
record[key] = fnct(convert2utf(row))
if last_id != record['id']:
writer.writerow(record)
last_id = record['id']
return dict
# -= G. Move and Move_line =-
move_map = {
'id': lambda x: 'move_'+x['HDBK,A,4']+'/'+x['HFYEAR,A,5']+'/'+x['HDOCNO,I,4'],
'journal_id:id': lambda x: 'journal_'+x['HDBK,A,4'],
'state': lambda x: 'draft',
'period_id:id': lambda x: 'period_'+x['HFYEAR,A,5']+"/"+x['HMONTH,I,4'],
'ref': lambda x: '',
}
def _check_debit(x):
if (float(x) > 0):
return float(x)
return 0
def _check_credit(x):
if (float(x) < 0):
return -(float(x))
return 0
def _get_ammount_currency(x):
if x['HORDERNO,I,4'] != '1':
return _check_debit(x['HAMOUNT,$,8']) + _check_credit(x['HAMOUNT,$,8'])
return 0
def _check_debit_vat(x, ref):
if ref.startswith('-'):
return 0
if (float(x) < 0):
return -(float(x))
return float(x)
def _check_credit_vat(x, ref):
if ref.startswith('-'):
if (float(x) < 0):
return -(float(x))
return float(x)
return 0
#def _get_ammount_currency_vat(x):
# if x['HORDERNO,I,4'] != '1':
# return _check_debit_vat(x['HTAX,$,8'],x['HAMOUNT,$,8']) - _check_credit_vat(x['HTAX,$,8'],x['HAMOUNT,$,8'])
# return 0
def _pick_vat_code(x, vat_dict, is_vat=False):
if is_vat:
if x['HDBTYPE,A,3'][2]=='C':
#the move is a refund
return vat_dict[x['HVATCODE,A,10']]['ref_vat']
return vat_dict[x['HVATCODE,A,10']]['vat']
if x['HDBTYPE,A,3'][2]=='C':
#the move is a refund
return vat_dict[x['HVATCODE,A,10']]['ref_inv']
return vat_dict[x['HVATCODE,A,10']]['inv']
def _pick_vat_account(x, vat_dict):
if x['HDBTYPE,A,3'][2]=='C':
#the move is a refund
return vat_dict[x['HVATCODE,A,10']]['ref_account'] and 'account_'+vat_dict[x['HVATCODE,A,10']]['ref_account'] or 'account_'+x['HID,A,10']
return vat_dict[x['HVATCODE,A,10']]['inv_account'] and 'account_'+vat_dict[x['HVATCODE,A,10']]['inv_account'] or 'account_'+x['HID,A,10']
def _create_vat_move(x, vat_dict, count):
res = []
count = 0
for vat_code in _pick_vat_code(x,vat_dict,True):
count += 1
if count == 1:
res.append(_create_vat_move_core(x, vat_code, vat_dict, count))
else:
res.append(_create_vat_move_vat(x, vat_code, vat_dict, count, 'HTAX,$,8'))
return res
def _create_vat_move_vat(x, vat_code, vat_dict, count,base_or_vat):
return {
'id': 'move_line_'+x['HDBK,A,4']+'/'+x['HFYEAR,A,5']+'/'+x['HDOCNO,I,4']+'/'+x['HORDERNO,I,4']+'/'+str(count),
'currency_id': x['HCURRENCY,A,3'],
'date_maturity': x['HDUEDATE,D,4'],
'partner_id:id': _get_partner_id(partner_dict[x['HCUSSUP,A,10']]),
'journal_id:id': 'journal_'+x['HDBK,A,4'],
'tax_code_id:id': vat_code[1],
'tax_amount': str(abs(float(x[base_or_vat])) * _get_float(vat_code[0])),
'state': 'draft',
'debit': '0',
'credit': '0',
'ref': x['HDOCNO,I,4'],
'account_id:id': _pick_vat_account(x, vat_dict),
'period_id:id': 'period_'+x['HFYEAR,A,5']+"/"+x['HMONTH,I,4'],
'date': x['HDOCDATE,D,4'],
'move_id:id': 'move_'+x['HDBK,A,4']+'/'+x['HFYEAR,A,5']+'/'+x['HDOCNO,I,4'],
'name': x['HREM,A,40'] or '/',
# 'amount_currency': str(_get_ammount_currency_vat(x)),
}
def _create_vat_move_core(x, vat_code, vat_dict, count):
return {
'id': 'move_line_'+x['HDBK,A,4']+'/'+x['HFYEAR,A,5']+'/'+x['HDOCNO,I,4']+'/'+x['HORDERNO,I,4']+'/'+str(count),
'currency_id': x['HCURRENCY,A,3'],
'date_maturity': x['HDUEDATE,D,4'],
'partner_id:id': _get_partner_id(partner_dict[x['HCUSSUP,A,10']]),
'journal_id:id': 'journal_'+x['HDBK,A,4'],
'tax_code_id:id': vat_code[1],
'tax_amount': str(abs(float(x['HTAX,$,8'])) * _get_float(vat_code[0])),
'state': 'draft',
'debit': str(_check_debit_vat(x['HTAX,$,8'],x['HAMOUNT,$,8'])),
'credit': str(_check_credit_vat(x['HTAX,$,8'],x['HAMOUNT,$,8'])),
'ref': x['HDOCNO,I,4'],
'account_id:id': _pick_vat_account(x, vat_dict),
'period_id:id': 'period_'+x['HFYEAR,A,5']+"/"+x['HMONTH,I,4'],
'date': x['HDOCDATE,D,4'],
'move_id:id': 'move_'+x['HDBK,A,4']+'/'+x['HFYEAR,A,5']+'/'+x['HDOCNO,I,4'],
'name': x['HREM,A,40'] or '/',
# 'amount_currency': str(_get_ammount_currency_vat(x)),
}
#check if the movement is a VAT movement: return TRUE if the account code begins with '450' or '451'
def _is_vat_movement(x):
return x['HID,A,10'].startswith(('450','451','411'))
def _get_float(char):
if char == '-':
return -1
return 1
move_line_map = {
#TODO check currency import
#TODO (bugfix): create one currency BEF with value: 1 EUR = 40.3399 BEF
'id': lambda x: 'move_line_'+x['HDBK,A,4']+'/'+x['HFYEAR,A,5']+'/'+x['HDOCNO,I,4']+'/'+x['HORDERNO,I,4'],
'currency_id': lambda x: x['HCURRENCY,A,3'],
'date_maturity': lambda x: x['HDUEDATE,D,4'],
'partner_id:id': lambda x: _get_partner_id(partner_dict[x['HCUSSUP,A,10']]),
'journal_id:id': lambda x: 'journal_'+x['HDBK,A,4'],
'tax_code_id:id': lambda x:'',
'tax_amount': lambda x:'',
'state': lambda x: 'draft',
    # for sales: <0 is credit and >0 is debit
    # for purchases: <0 is credit and >0 is debit
'debit': lambda x: str(_check_debit(x['HAMOUNT,$,8'])),
'credit': lambda x: str(_check_credit(x['HAMOUNT,$,8'])),
'ref': lambda x: x['HDOCNO,I,4'],
'account_id:id': lambda x: 'account_'+x['HID,A,10'],
'period_id:id': lambda x: 'period_'+x['HFYEAR,A,5']+"/"+x['HMONTH,I,4'],
'date': lambda x: x['HDOCDATE,D,4'],
'move_id:id': lambda x: 'move_'+x['HDBK,A,4']+'/'+x['HFYEAR,A,5']+'/'+x['HDOCNO,I,4'],
'reconcile_id:id': lambda x: '',
'name': lambda x: x['HREM,A,40'] or '/',
# 'amount_currency': lambda x: str(_get_ammount_currency(x)),
}
def import_moves_and_lines(reader_move, writer_move, writer, move_map, map, dict_ahisto, dict_chisto, vat_dict):
#write the header of account.move
record = {}
for key, column_name in move_map.items():
record[key] = key
writer_move.writerow(record)
#write the header of account.move.line
record = {}
for key, column_name in map.items():
record[key] = key
writer.writerow(record)
move_rows_ref = {}
count =0
period_rows = []
tvacount = 0
#parse the move.csv file to know what are the account_move that need to be created
for row in reader_move:
count += 1
if (count%1000) == 0:
            logger.notifyChannel('bob_import', netsvc.LOG_INFO, count)
if row['HCURRENCY,A,3'] not in currencies:
currencies.append(row['HCURRENCY,A,3'])
#only create move and move_line if x['HMONTH,I,4'] != 0
#and if row['HAMOUNT,$,8']!="" is different from 0 (or False)
if row['HMONTH,I,4'] != "0" and row['HAMOUNT,$,8']!="" and not float(row['HAMOUNT,$,8']) == 0.0:
temp = 'move_line_'+row['HDBK,A,4']+'/'+row['HFYEAR,A,5']+'/'+row['HDOCNO,I,4']+'/'+row['HORDERNO,I,4']
if not move_rows_ref.has_key(temp):
#writing of the account.move
move_rows_ref[temp] = 'ok'
record = {}
for key,fnct in move_map.items():
record[key] = fnct(convert2utf(row))
writer_move.writerow(record)
#writing of the account.move.line
if _is_vat_movement(row):
#vat movement cannot be imported and have to be generated from the move line
continue
record = {}
for key,fnct in map.items():
record[key] = fnct(convert2utf(row))
if dict_ahisto.has_key(row['HID,A,10']+row['HDBK,A,4']+row['HFYEAR,A,5']+row['HMONTH,I,4']+'/'+row['HDOCNO,I,4']+'/'+row['HORDERNO,I,4']):
record['reconcile_id:id'] = dict_ahisto[row['HID,A,10']+row['HDBK,A,4']+row['HFYEAR,A,5']+row['HMONTH,I,4']+'/'+row['HDOCNO,I,4']+'/'+row['HORDERNO,I,4']]
#for the case of sales or sales refund
elif row['HDBTYPE,A,3'] == 'SAL' or row['HDBTYPE,A,3'] == 'SAC':
if dict_chisto.has_key('C'+row['HCUSSUP,A,10']+row['HFYEAR,A,5']+row['HDBK,A,4']+'/'+row['HDOCNO,I,4']+'/'+row['HORDERNO,I,4']):
record['reconcile_id:id'] = dict_chisto['C'+row['HCUSSUP,A,10']+row['HFYEAR,A,5']+row['HDBK,A,4']+'/'+row['HDOCNO,I,4']+'/'+row['HORDERNO,I,4']]
#for the case of purchases or purchases refund
elif row['HDBTYPE,A,3'] == 'PUR' or row['HDBTYPE,A,3'] == 'PUC':
if dict_chisto.has_key('S'+row['HCUSSUP,A,10']+row['HFYEAR,A,5']+row['HDBK,A,4']+'/'+row['HDOCNO,I,4']+'/'+row['HORDERNO,I,4']):
record['reconcile_id:id'] = dict_chisto['S'+row['HCUSSUP,A,10']+row['HFYEAR,A,5']+row['HDBK,A,4']+'/'+row['HDOCNO,I,4']+'/'+row['HORDERNO,I,4']]
else:
#for the case of other operations. We have to search for the reconciliation with a customer and a supplier and we have to add 1 to the orderno
tmp = str(int(row['HORDERNO,I,4'])+1)
if dict_chisto.has_key('C'+row['HCUSSUP,A,10']+row['HFYEAR,A,5']+row['HDBK,A,4']+'/'+row['HDOCNO,I,4']+'/'+tmp):
record['reconcile_id:id'] = dict_chisto['C'+row['HCUSSUP,A,10']+row['HFYEAR,A,5']+row['HDBK,A,4']+'/'+row['HDOCNO,I,4']+'/'+tmp]
elif row['HDBTYPE,A,3'] == 'PUR' or row['HDBTYPE,A,3'] == 'PUC':
if dict_chisto.has_key('S'+row['HCUSSUP,A,10']+row['HFYEAR,A,5']+row['HDBK,A,4']+'/'+row['HDOCNO,I,4']+'/'+tmp):
record['reconcile_id:id'] = dict_chisto['S'+row['HCUSSUP,A,10']+row['HFYEAR,A,5']+row['HDBK,A,4']+'/'+row['HDOCNO,I,4']+'/'+tmp]
#if this move line is taxed
if row['HVATCODE,A,10']:
#create the base movement
tvacount += 1
tmp_cnt = 0
for vat_code in _pick_vat_code(row,vat_dict,False):
tmp_cnt += 1
if tmp_cnt == 1:
record['tax_amount']= str(abs(float(row['HBASE,$,8'])) * _get_float(vat_code[0]))
record['tax_code_id:id'] = vat_code[1]
else:
writer.writerow(convert2utf(_create_vat_move_vat(row, vat_code, vat_dict, count,'HBASE,$,8')))
#generate the vat movement
vat_move_list = _create_vat_move(row, vat_dict, tvacount)
for vat_move in vat_move_list:
writer.writerow(convert2utf(vat_move))
writer.writerow(record)
return True
# -=====================-
# -= 2. Importing DATA =-
# -=====================-
#specific part for CCI
reader_partner_matching = csv.DictReader(file('_conv_bob_id.csv','rb'))
bob_conv_matching = {}
bob_conv_matching[''] = ''
for row in reader_partner_matching:
bob_conv_matching[row['bob']] = row['partner']
def _get_partner_id(char):
if bob_conv_matching.has_key(char):
return bob_conv_matching[char]
return 'res_partner_destroyed'
partner_dict['GRAMME'] = ''
#end of specific part
langs = []
currencies = []
def run():
reader_account = csv.DictReader(file(config['addons_path']+'/account_bob_import/original_csv/accoun.csv','rb'))
writer_account = csv.DictWriter(file(config['addons_path']+'/account_bob_import/account.account.csv', 'wb'), account_map.keys())
import_account(reader_account, writer_account, account_map)
reader_journal = csv.DictReader(file(config['addons_path']+'/account_bob_import/original_csv/dbk.csv','rb'))
writer_journal = csv.DictWriter(file(config['addons_path']+'/account_bob_import/account.journal.csv', 'wb'), journals_map.keys())
import_journal(reader_journal, writer_journal, journals_map)
reader_partner = csv.DictReader(file(config['addons_path']+'/account_bob_import/original_csv/compan.csv','rb'))
writer_partner = csv.DictWriter(file(config['addons_path']+'/account_bob_import/res.partner.csv', 'wb'), partners_map.keys())
writer_address = csv.DictWriter(file(config['addons_path']+'/account_bob_import/res.partner.address.csv','wb'), partner_add_map.keys())
writer_bank = csv.DictWriter(file(config['addons_path']+'/account_bob_import/res.partner.bank.csv','wb'), partner_bank_map.keys())
import_partner(reader_partner, writer_partner, partners_map, writer_address, partner_add_map, writer_bank, partner_bank_map)
reader_contact = csv.DictReader(file(config['addons_path']+'/account_bob_import/original_csv/contacts.csv','rb'))
writer_contact = csv.DictWriter(file(config['addons_path']+'/account_bob_import/res.partner.contact.csv','wb'),contacts_map.keys())
writer_job = csv.DictWriter(file(config['addons_path']+'/account_bob_import/res.partner.job.csv','wb'),job_map.keys())
import_contact(reader_contact, writer_contact, contacts_map, writer_job, job_map)
reader_fyear = csv.DictReader(file(config['addons_path']+'/account_bob_import/original_csv/period.csv','rb'))
writer_fyear = csv.DictWriter(file(config['addons_path']+'/account_bob_import/account.fiscalyear.csv', 'wb'), fyear_map.keys())
import_fyear(reader_fyear, writer_fyear, fyear_map)
reader_period = csv.DictReader(file(config['addons_path']+'/account_bob_import/original_csv/period.csv','rb'))
writer_period = csv.DictWriter(file(config['addons_path']+'/account_bob_import/account.period.csv', 'wb'), periods_map.keys())
import_period(reader_period, writer_period, periods_map)
#import the account_tax from vat.csv
# constructing table account_tax => account_tax_code (for move and move_line)
reader_vat_code = csv.DictReader(file(config['addons_path']+'/account_bob_import/original_csv/vatcas.csv','rb'))
reader_vat = csv.DictReader(file(config['addons_path']+'/account_bob_import/original_csv/vat.csv','rb'))
vat_dict = construct_vat_dict(reader_vat_code, reader_vat, {})
reader_ahisto = csv.DictReader(file(config['addons_path']+'/account_bob_import/original_csv/ahisto_matchings.csv','rb'))
writer_reconcile = csv.DictWriter(file(config['addons_path']+'/account_bob_import/account.move.reconcile-1.csv', 'wb'), arecon_map.keys())
dict_ahisto = import_areconcile(reader_ahisto, writer_reconcile, arecon_map)
reader_chisto = csv.DictReader(file(config['addons_path']+'/account_bob_import/original_csv/chisto_matchings.csv','rb'))
writer_reconcile2 = csv.DictWriter(file(config['addons_path']+'/account_bob_import/account.move.reconcile-2.csv', 'wb'), crecon_map.keys())
dict_chisto = import_creconcile(reader_chisto, writer_reconcile2, crecon_map)
reader_move = csv.DictReader(file(config['addons_path']+'/account_bob_import/original_csv/ahisto.csv','rb'))
writer_move = csv.DictWriter(file(config['addons_path']+'/account_bob_import/account.move.csv', 'wb'), move_map.keys())
writer_move_line = csv.DictWriter(file(config['addons_path']+'/account_bob_import/account.move.line.csv', 'wb'), move_line_map.keys())
import_moves_and_lines(reader_move, writer_move, writer_move_line, move_map, move_line_map, dict_ahisto, dict_chisto, vat_dict)
if __name__ == '__main__':
run()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
factorlibre/openerp-extra-6.1
|
account_bob_import/bob_import_step_2.py
|
bob_import_step_2.py
|
py
| 34,361 |
python
|
en
|
code
| 9 |
github-code
|
6
|
43975077800
|
import mock
import uuid
from contextlib import contextmanager
import webtest
from pyramid.config import Configurator
from cliquet.events import (ResourceChanged, AfterResourceChanged,
ResourceRead, AfterResourceRead, ACTIONS)
from cliquet.storage.exceptions import BackendError
from cliquet.tests.testapp import main as testapp
from cliquet.tests.support import unittest, BaseWebTest, get_request_class
from cliquet import statsd
@contextmanager
def notif_broken(app, event_cls):
old = app.registry.notify
def buggy(event):
if not isinstance(event, event_cls):
return old(event)
raise Exception("boom")
app.registry.notify = buggy
yield
app.registry.notify = old
class BaseEventTest(BaseWebTest):
subscribed = tuple()
def setUp(self):
super(BaseEventTest, self).setUp()
self.events = []
self.body = {'data': {'name': 'de Paris'}}
def tearDown(self):
self.events = []
super(BaseEventTest, self).tearDown()
def listener(self, event):
self.events.append(event)
def make_app(self, settings=None):
settings = self.get_app_settings(settings)
self.config = Configurator(settings=settings)
for event_cls in self.subscribed:
self.config.add_subscriber(self.listener, event_cls)
self.config.commit()
app = testapp(config=self.config)
app = webtest.TestApp(app)
app.RequestClass = get_request_class(self.api_prefix)
return app
class ResourceReadTest(BaseEventTest, unittest.TestCase):
subscribed = (ResourceRead,)
def test_get_sends_read_event(self):
resp = self.app.post_json(self.collection_url, self.body,
headers=self.headers, status=201)
record_id = resp.json['data']['id']
record_url = self.get_item_url(record_id)
self.app.get(record_url, headers=self.headers)
self.assertEqual(len(self.events), 1)
self.assertEqual(self.events[0].payload['action'], ACTIONS.READ.value)
self.assertEqual(len(self.events[0].read_records), 1)
def test_collection_get_sends_read_event(self):
self.app.get(self.collection_url, headers=self.headers)
self.assertEqual(len(self.events), 1)
self.assertEqual(self.events[0].payload['action'], ACTIONS.READ.value)
self.assertEqual(len(self.events[0].read_records), 0)
def test_post_sends_read_if_id_already_exists(self):
resp = self.app.post_json(self.collection_url, self.body,
headers=self.headers, status=201)
record = resp.json['data']
body = dict(self.body)
body['data']['id'] = record['id']
# a second post with the same record id
self.app.post_json(self.collection_url, body, headers=self.headers,
status=200)
self.assertEqual(len(self.events), 1)
self.assertEqual(self.events[0].payload['action'], ACTIONS.READ.value)
class ResourceChangedTest(BaseEventTest, unittest.TestCase):
subscribed = (ResourceChanged,)
def test_post_sends_create_action(self):
self.app.post_json(self.collection_url, self.body,
headers=self.headers, status=201)
self.assertEqual(len(self.events), 1)
self.assertEqual(self.events[0].payload['action'],
ACTIONS.CREATE.value)
def test_put_sends_create_action(self):
body = dict(self.body)
body['data']['id'] = record_id = str(uuid.uuid4())
record_url = self.get_item_url(record_id)
self.app.put_json(record_url, body,
headers=self.headers, status=201)
self.assertEqual(len(self.events), 1)
self.assertEqual(self.events[0].payload['action'],
ACTIONS.CREATE.value)
def test_not_triggered_on_failed_put(self):
record_id = str(uuid.uuid4())
record_url = self.get_item_url(record_id)
self.app.put_json(record_url, self.body, headers=self.headers)
headers = self.headers.copy()
headers['If-Match'] = '"12345"'
self.app.put_json(record_url, self.body, headers=headers, status=412)
self.assertEqual(len(self.events), 1)
self.assertEqual(self.events[0].payload['action'],
ACTIONS.CREATE.value)
def test_patch_sends_update_action(self):
resp = self.app.post_json(self.collection_url, self.body,
headers=self.headers, status=201)
record = resp.json['data']
record_url = self.get_item_url(record['id'])
self.app.patch_json(record_url, self.body, headers=self.headers,
status=200)
self.assertEqual(len(self.events), 2)
self.assertEqual(self.events[0].payload['action'],
ACTIONS.CREATE.value)
self.assertEqual(self.events[1].payload['action'],
ACTIONS.UPDATE.value)
def test_put_sends_update_action_if_record_exists(self):
body = dict(self.body)
body['data']['id'] = record_id = str(uuid.uuid4())
record_url = self.get_item_url(record_id)
self.app.put_json(record_url, body,
headers=self.headers, status=201)
body['data']['more'] = 'stuff'
self.app.put_json(record_url, body,
headers=self.headers, status=200)
self.assertEqual(len(self.events), 2)
self.assertEqual(self.events[0].payload['action'],
ACTIONS.CREATE.value)
self.assertEqual(self.events[1].payload['action'],
ACTIONS.UPDATE.value)
def test_delete_sends_delete_action(self):
resp = self.app.post_json(self.collection_url, self.body,
headers=self.headers, status=201)
record = resp.json['data']
record_url = self.get_item_url(record['id'])
self.app.delete(record_url, headers=self.headers, status=200)
self.assertEqual(len(self.events), 2)
self.assertEqual(self.events[0].payload['action'],
ACTIONS.CREATE.value)
self.assertEqual(self.events[1].payload['action'],
ACTIONS.DELETE.value)
def test_collection_delete_sends_delete_action(self):
self.app.post_json(self.collection_url, self.body,
headers=self.headers, status=201)
self.app.post_json(self.collection_url, self.body,
headers=self.headers, status=201)
self.app.delete(self.collection_url, headers=self.headers, status=200)
self.assertEqual(len(self.events), 3)
self.assertEqual(self.events[0].payload['action'],
ACTIONS.CREATE.value)
self.assertEqual(self.events[1].payload['action'],
ACTIONS.CREATE.value)
self.assertEqual(self.events[2].payload['action'],
ACTIONS.DELETE.value)
def test_request_fails_if_notify_fails(self):
with notif_broken(self.app.app, ResourceChanged):
self.app.post_json(self.collection_url, self.body,
headers=self.headers, status=500)
self.assertEqual(len(self.events), 0)
def test_triggered_on_protected_resource(self):
app = self.make_app(settings={
'psilo_write_principals': 'system.Authenticated'
})
app.post_json('/psilos', self.body,
headers=self.headers, status=201)
self.assertEqual(len(self.events), 1)
self.assertEqual(self.events[0].payload['action'],
ACTIONS.CREATE.value)
def test_permissions_are_stripped_from_event_on_protected_resource(self):
app = self.make_app(settings={
'psilo_write_principals': 'system.Authenticated'
})
resp = app.post_json('/psilos', self.body,
headers=self.headers, status=201)
record = resp.json['data']
record_url = '/psilos/' + record['id']
app.patch_json(record_url, {"data": {"name": "De barcelona"}},
headers=self.headers)
impacted_records = self.events[-1].impacted_records
self.assertNotIn('__permissions__', impacted_records[0]['new'])
self.assertNotIn('__permissions__', impacted_records[0]['old'])
class AfterResourceChangedTest(BaseEventTest, unittest.TestCase):
subscribed = (AfterResourceChanged,)
def test_request_succeeds_if_notify_fails(self):
with notif_broken(self.app.app, AfterResourceChanged):
self.app.post_json(self.collection_url, self.body,
headers=self.headers)
self.assertEqual(len(self.events), 0)
class AfterResourceReadTest(BaseEventTest, unittest.TestCase):
subscribed = (AfterResourceRead,)
def test_request_succeeds_if_notify_fails(self):
with notif_broken(self.app.app, AfterResourceChanged):
self.app.post_json(self.collection_url, self.body,
headers=self.headers)
self.assertEqual(len(self.events), 0)
class ImpactedRecordsTest(BaseEventTest, unittest.TestCase):
subscribed = (ResourceChanged,)
def test_create_has_new_record_and_no_old_in_payload(self):
resp = self.app.post_json(self.collection_url, self.body,
headers=self.headers)
record = resp.json['data']
impacted_records = self.events[-1].impacted_records
self.assertEqual(len(impacted_records), 1)
self.assertNotIn('old', impacted_records[0])
self.assertEqual(impacted_records[0]['new'], record)
def test_collection_delete_has_old_record_and_no_new_in_payload(self):
resp = self.app.post_json(self.collection_url, self.body,
headers=self.headers)
record1 = resp.json['data']
resp = self.app.post_json(self.collection_url, self.body,
headers=self.headers)
record2 = resp.json['data']
self.app.delete(self.collection_url, headers=self.headers, status=200)
impacted_records = self.events[-1].impacted_records
self.assertEqual(len(impacted_records), 2)
self.assertNotIn('new', impacted_records[0])
self.assertNotIn('new', impacted_records[1])
self.assertEqual(impacted_records[0]['old']['deleted'], True)
self.assertEqual(impacted_records[1]['old']['deleted'], True)
deleted_ids = {impacted_records[0]['old']['id'],
impacted_records[1]['old']['id']}
self.assertEqual(deleted_ids, {record1['id'], record2['id']})
def test_update_has_old_and_new_record(self):
resp = self.app.post_json(self.collection_url, self.body,
headers=self.headers, status=201)
record = resp.json['data']
record_url = self.get_item_url(record['id'])
self.app.patch_json(record_url, {'data': {'name': 'en boite'}},
headers=self.headers)
impacted_records = self.events[-1].impacted_records
self.assertEqual(len(impacted_records), 1)
self.assertEqual(impacted_records[0]['new']['id'], record['id'])
self.assertEqual(impacted_records[0]['new']['id'],
impacted_records[0]['old']['id'])
self.assertEqual(impacted_records[0]['old']['name'], 'de Paris')
self.assertEqual(impacted_records[0]['new']['name'], 'en boite')
def test_delete_has_old_record_and_no_new_in_payload(self):
resp = self.app.post_json(self.collection_url, self.body,
headers=self.headers, status=201)
record = resp.json['data']
record_url = self.get_item_url(record['id'])
self.app.delete(record_url, headers=self.headers, status=200)
impacted_records = self.events[-1].impacted_records
self.assertEqual(len(impacted_records), 1)
self.assertNotIn('new', impacted_records[0])
self.assertEqual(impacted_records[0]['old']['id'], record['id'])
self.assertEqual(impacted_records[0]['old']['deleted'], True)
class BatchEventsTest(BaseEventTest, unittest.TestCase):
subscribed = (ResourceChanged, ResourceRead)
def test_impacted_records_are_merged(self):
record_id = str(uuid.uuid4())
record_url = self.get_item_url(record_id)
body = {
"defaults": {
"method": "PUT",
"path": record_url
},
"requests": [
{"body": {'data': {'name': 'foo'}}},
{"body": {'data': {'name': 'bar'}}},
{"body": {'data': {'name': 'baz'}}},
{"method": "DELETE"}
]
}
self.app.post_json("/batch", body, headers=self.headers)
self.assertEqual(len(self.events), 3)
create_event = self.events[0]
self.assertEqual(create_event.payload['action'], 'create')
self.assertEqual(len(create_event.impacted_records), 1)
self.assertNotIn('old', create_event.impacted_records[0])
update_event = self.events[1]
self.assertEqual(update_event.payload['action'], 'update')
impacted = update_event.impacted_records
self.assertEqual(len(impacted), 2)
self.assertEqual(impacted[0]['old']['name'], 'foo')
self.assertEqual(impacted[0]['new']['name'], 'bar')
self.assertEqual(impacted[1]['old']['name'], 'bar')
self.assertEqual(impacted[1]['new']['name'], 'baz')
delete_event = self.events[2]
self.assertEqual(delete_event.payload['action'], 'delete')
self.assertEqual(len(delete_event.impacted_records), 1)
self.assertNotIn('new', delete_event.impacted_records[0])
def test_one_event_is_sent_per_resource(self):
body = {
"defaults": {
"method": "POST",
"body": self.body,
},
"requests": [
{"path": '/mushrooms'},
{"path": '/mushrooms'},
{"path": '/psilos'},
]
}
self.app.post_json("/batch", body, headers=self.headers)
self.assertEqual(len(self.events), 2)
def test_one_event_is_sent_per_action(self):
body = {
"defaults": {
"path": '/mushrooms',
},
"requests": [
{"method": "POST", "body": self.body},
{"method": "DELETE"},
{"method": "GET"},
]
}
self.app.post_json("/batch", body, headers=self.headers)
self.assertEqual(len(self.events), 3)
def test_events_are_not_sent_if_subrequest_fails(self):
patch = mock.patch.object(self.storage,
'delete_all',
side_effect=BackendError('boom'))
patch.start()
self.addCleanup(patch.stop)
request_create = {
"method": "POST",
"body": self.body,
}
request_delete_all = {
"method": "DELETE",
"body": self.body,
}
body = {
"defaults": {
"path": self.collection_url
},
"requests": [request_create, request_delete_all]
}
self.app.post_json("/batch", body, headers=self.headers,
status=503)
self.assertEqual(len(self.events), 0)
def load_from_config(config, prefix):
class ClassListener(object):
def __call__(self, event):
pass
return ClassListener()
@unittest.skipIf(not statsd.statsd_module, "statsd is not installed.")
class StatsDTest(BaseWebTest, unittest.TestCase):
def get_app_settings(self, *args, **kwargs):
settings = super(StatsDTest, self).get_app_settings(*args, **kwargs)
if not statsd.statsd_module:
return settings
settings['statsd_url'] = 'udp://localhost:8125'
this_module = 'cliquet.tests.resource.test_events'
settings['event_listeners'] = 'test'
settings['event_listeners.test.use'] = this_module
return settings
def test_statds_tracks_listeners_execution_duration(self):
statsd_client = self.app.app.registry.statsd._client
with mock.patch.object(statsd_client, 'timing') as mocked:
self.app.post_json(self.collection_url,
{"data": {"name": "pouet"}},
headers=self.headers)
timers = set(c[0][0] for c in mocked.call_args_list)
self.assertIn('listeners.test', timers)
|
mozilla-services/cliquet
|
cliquet/tests/resource/test_events.py
|
test_events.py
|
py
| 16,885 |
python
|
en
|
code
| 65 |
github-code
|
6
|
26224854603
|
#!/usr/local/bin/python3.7
import cv2
import numpy as np
img = cv2.imread("../test_pic.jpg")
kernel = np.ones((2, 2), np.uint8)
imgCanny = cv2.Canny(img, 100, 200)
imgErosion = cv2.erode(imgCanny, kernel, iterations=1)
cv2.imshow("Edge detection", imgCanny)
cv2.imshow("Erosion Fix", imgErosion)
cv2.waitKey(0)
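# Eroding the Canny edge map with the 2x2 kernel shrinks the white edge pixels and suppresses
# isolated specks; the two windows let you compare the raw and eroded results.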
|
smoonmare/object_50071
|
open_cv/chapter-2/chapter_2_5.py
|
chapter_2_5.py
|
py
| 313 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27894174453
|
import sys
import collections
#sys.setrecursionlimit(100001)
# BFS-based solution: correct, but exceeded the time limit
def bfs_find_root():
while queue:
root = queue.popleft()
for child in tree[root]:
if not visited[child]:
output[child] = root
queue.append(child)
visited[child] = True
'''
# DFS-based solution
def dfs_find_root(root):
for child in tree[root]:
if output[child] == 0:
output[child] = root
dfs_find_root(child)
'''
n = int(sys.stdin.readline())
tree = [[] for _ in range(n+1)]
output = [0 for _ in range(n+1)]
visited = [False for i in range(n+1)]
queue = collections.deque()
queue.append(1)
output[1] = 1
for _ in range(n-1):
a, b = map(int, sys.stdin.readline().split())
tree[a].append(b)
tree[b].append(a)
bfs_find_root()
#dfs_find_root(1)
for i in range(2, n+1):
print(output[i])
'''
1
4 6
2 7 1x 3
4x 4x x6 5
parent nodes in order, starting from node 2
4 6 1 3 1 4
import sys
sys.setrecursionlimit(100000)
n = int(input())
tree = [[] for _ in range(n+1)]
for _ in range(n-1):
a, b = map(int, input().split())
tree[a].append(b)
tree[b].append(a)
parents = [0 for _ in range(n+1)]
parents[1] =1
def dfs(curr, tree, parents):
for node in tree[curr]:
if parents[node] == 0:
parents[node] = curr
dfs(node, tree, parents)
dfs(1, tree, parents)
for i in range(2, n+1):
print(parents[i])
'''
|
SheepEatLion/Algorithms
|
tree_baekjoon_11725.py
|
tree_baekjoon_11725.py
|
py
| 1,547 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72318397308
|
import sys
sys.path.append("../SimpleNN")
"""SingleNN potential."""
from fp_calculator import set_sym, calculate_fp
from NN import MultiLayerNet
import torch
from torch.autograd import grad
from Batch import batch_pad
import time
import numpy as np
import pickle
from ase.data import chemical_symbols, atomic_numbers
from ase.calculators.calculator import (Calculator, all_changes,
PropertyNotImplementedError)
import os
class SingleNN(Calculator):
implemented_properties = ['energy', 'energies', 'forces']
def __init__(self, model_path,cal_list = None,**kwargs):
Calculator.__init__(self, **kwargs)
self.model_path = model_path
self.cal_list = cal_list
def initialize(self, atoms):
self.numbers = atoms.get_atomic_numbers()
self.energies = np.empty(len(atoms))
self.forces = np.empty((len(atoms), 3))
def calculate(self, atoms=None, properties=['energy'],system_changes=all_changes):
cal_list = self.cal_list
if os.path.exists(self.model_path+'/best_model'):
model = torch.load(self.model_path+'/best_model')
ensemble_training = False
else:
ensemble_training = True
models = []
ensemble = 0
end = False
while end is False:
if os.path.exists(self.model_path+f'/best_model-{ensemble}'):
models += [torch.load(self.model_path+f'/best_model-{ensemble}')]
ensemble += 1
else:
end = True
sym_params = pickle.load(open(self.model_path+"/sym_params.sav", "rb" ))
[Gs, cutoff, g2_etas, g2_Rses, g4_etas, g4_zetas, g4_lambdas, elements, weights, element_energy]=sym_params
params_set = set_sym(elements, Gs, cutoff,
g2_etas=g2_etas, g2_Rses=g2_Rses,
g4_etas=g4_etas, g4_zetas = g4_zetas,
g4_lambdas= g4_lambdas, weights=weights)
if cal_list is None:
N_atoms = len(atoms)
else:
N_atoms = len(cal_list)
nelem = len(elements)
N_sym = params_set[elements[0]]['num']
data = calculate_fp(atoms, elements, params_set, cal_list = cal_list)
fps = data['x']
dfpdXs = data['dx']
fp = torch.zeros((N_atoms,N_sym))
dfpdX = torch.zeros((N_atoms, N_sym, N_atoms, 3))
elements_num = torch.tensor([atomic_numbers[ele] for ele in elements])
atom_idx = data['atom_idx'] - 1
a_num = elements_num[atom_idx]
atom_numbers = a_num.repeat_interleave(nelem).view(len(a_num),nelem)
# change to float for pytorch to be able to run without error
if cal_list is not None:
e_mask = (atom_numbers == elements_num).float()[cal_list]
atom_idx = atom_idx[cal_list]
else:
e_mask = (atom_numbers == elements_num).float()
fp_track = [0]*nelem
if element_energy is not None:
element_energy = torch.sum(element_energy * e_mask)
for i,idx in enumerate(atom_idx):
ele = elements[idx]
fp[i,:] = torch.tensor(fps[ele][fp_track[idx],:]).float()
if cal_list is None:
dfpdX[i,:,:,:] = torch.tensor(dfpdXs[ele][fp_track[idx],:,:,:]).float()
fp_track[idx] += 1
fp.requires_grad = True
if ensemble_training:
scaling = models[0].scaling
else:
scaling = model.scaling
gmin = scaling['gmin']
gmax = scaling['gmax']
emin = scaling['emin']
emax = scaling['emax']
eps = 1e-5
sfp = (fp - gmin) / (gmax - gmin+eps)
if ensemble_training:
all_energy = []
all_forces = []
for model in models:
Atomic_Es = model(sfp)
E_predict = torch.sum(torch.sum(Atomic_Es * e_mask,
dim = 1)*(emax-emin)+emin,dim=0)
dEdfp = grad(E_predict,
fp,
grad_outputs=torch.ones_like(E_predict),
create_graph = True,
retain_graph = True)[0].view(1,fp.shape[0]*fp.shape[1])
dfpdX = dfpdX.view(fp.shape[0]*fp.shape[1],fp.shape[0]*3)
F_predict = -torch.mm(dEdfp,dfpdX).view(fp.shape[0],3)
forces = F_predict.data.numpy()
if element_energy is not None:
energy = (E_predict + element_energy).data.numpy()
else:
energy = E_predict.data.numpy()
all_energy += [energy]
all_forces += [forces]
all_energy = np.array(all_energy)
all_forces = np.array(all_forces)
ensemble_energy = np.mean(all_energy)
energy_std = np.std(all_energy)
ensemble_forces = np.mean(all_forces, axis=0)
forces_std = np.std(all_forces, axis=0)
self.energy = ensemble_energy
self.forces = ensemble_forces
self.results['energy'] = self.energy
self.results['free_energy'] = self.energy
self.results['forces'] = self.forces
self.results['energy_std'] = energy_std
self.results['forces_std'] = forces_std
else:
Atomic_Es = model(sfp)
E_predict = torch.sum(torch.sum(Atomic_Es * e_mask,
dim = 1)*(emax-emin)+emin,dim=0)
dEdfp = grad(E_predict,
fp,
grad_outputs=torch.ones_like(E_predict),
create_graph = True,
retain_graph = True)[0].view(1,fp.shape[0]*fp.shape[1])
dfpdX = dfpdX.view(fp.shape[0]*fp.shape[1],fp.shape[0]*3)
F_predict = -torch.mm(dEdfp,dfpdX).view(fp.shape[0],3)
self.forces = F_predict.data.numpy()
if element_energy is not None:
self.energy = (E_predict + element_energy).data.numpy()
else:
self.energy = E_predict.data.numpy()
self.results['energy'] = self.energy
self.results['free_energy'] = self.energy
self.results['forces'] = self.forces
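# Minimal usage sketch of the calculator above; the model directory name and the Pd bulk cell
# are illustrative assumptions, not artifacts shipped with this repository.
def _example_singlenn_usage(model_dir="trained_model"):
    from ase.build import bulk
    atoms = bulk("Pd", cubic=True)
    atoms.calc = SingleNN(model_dir)
    return atoms.get_potential_energy(), atoms.get_forces()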
class SingleNNTrainer(object):
def __init__(self, model_path, scaling, N_sym, n_nodes, activations, nelem, optim_params, device=torch.device('cpu')):
self.model = MultiLayerNet(N_sym, n_nodes, activations, nelem, scaling=scaling)
self.model_path = model_path
self.opt_method = optim_params['opt_method']
if self.opt_method == 'lbfgs':
self.history_size = optim_params['history_size']
self.lr = optim_params['lr']
self.max_iter = optim_params['max_iter']
self.line_search_fn = optim_params['line_search_fn']
self.optimizer = torch.optim.LBFGS(self.model.parameters(), lr=self.lr,
max_iter=self.max_iter, history_size=self.history_size,
line_search_fn=self.line_search_fn)
else:
print('Optimization method not implemented!')
raise
self.device = device
# self.convergence = hyperparams['convergence']
# self.n_nodes = n_nodes
# self.activations = hyperparams['activations']
def train(self, train_dict, val_dict, E_coeff, F_coeff, epoch, val_interval, n_val_stop, convergence, is_force, logfile):
device = self.device
opt_method = self.opt_method
optimizer = self.optimizer
model = self.model.to(device)
model_path = self.model_path
t0 = time.time()
SSE = torch.nn.MSELoss(reduction='sum')
SAE = torch.nn.L1Loss(reduction='sum')
scaling = self.model.scaling
gmin = scaling['gmin'].to(device)
gmax = scaling['gmax'].to(device)
emin = scaling['emin'].to(device)
emax = scaling['emax'].to(device)
n_val = 0
E_cov = convergence['E_cov']
F_cov = convergence['F_cov']
t_ids = np.array(list(train_dict.keys()))
batch_info = batch_pad(train_dict,t_ids)
b_fp = batch_info['b_fp'].to(device)
if is_force:
b_dfpdX = batch_info['b_dfpdX'].view(b_fp.shape[0],
b_fp.shape[1]*b_fp.shape[2],
b_fp.shape[1]*3,
).to(device)
b_e_mask = batch_info['b_e_mask'].to(device)
b_fp.requires_grad = True
eps = 1e-5
sb_fp = ((b_fp - gmin) / (gmax - gmin + eps))
N_atoms = batch_info['N_atoms'].view(-1).to(device)
b_e = batch_info['b_e'].view(-1).to(device)
b_f = batch_info['b_f'].to(device)
sb_e = ((b_e - emin) / (emax - emin))
sb_f = (b_f / (emax - emin))
t1 = time.time()
logfile.write(f'Batching takes {t1-t0}.\n')
v_ids = np.array(list(val_dict.keys()))
v_batch_info = batch_pad(val_dict,v_ids)
v_b_fp = v_batch_info['b_fp'].to(device)
if is_force:
v_b_dfpdX = v_batch_info['b_dfpdX'].view(v_b_fp.shape[0],
v_b_fp.shape[1]*v_b_fp.shape[2],
v_b_fp.shape[1]*3,
).to(device)
v_b_e_mask = v_batch_info['b_e_mask'].to(device)
v_b_fp.requires_grad = True
v_sb_fp = ((v_b_fp - gmin) / (gmax - gmin + eps))
v_N_atoms = v_batch_info['N_atoms'].view(-1).to(device)
v_b_e = v_batch_info['b_e'].view(-1).to(device)
v_b_f = v_batch_info['b_f'].to(device)
v_sb_e = ((v_b_e - emin) / (emax - emin))
v_sb_f = (v_b_f / (emax - emin))
if opt_method == 'lbfgs':
for i in range(epoch):
def closure():
global E_MAE, F_MAE
optimizer.zero_grad()
Atomic_Es = model(sb_fp)
E_predict = torch.sum(Atomic_Es * b_e_mask, dim = [1,2])
if is_force:
F_predict = self.get_forces(E_predict, b_fp, b_dfpdX)
metrics = self.get_metrics(sb_e, sb_f, N_atoms, t_ids,
E_predict, F_predict, SSE, SAE, scaling, b_e_mask)
[E_loss, F_loss, E_MAE, F_MAE, E_RMSE, F_RMSE] = metrics
loss = E_coeff * E_loss + F_coeff * F_loss
else:
metrics = self.get_metrics(sb_e, None, N_atoms, t_ids,
E_predict, None, SSE, SAE, scaling, b_e_mask)
[E_loss, F_loss, E_MAE, F_MAE, E_RMSE, F_RMSE] = metrics
loss = E_coeff * E_loss
loss.backward(retain_graph=True)
return loss
optimizer.step(closure)
if i % val_interval == 0:
n_val += 1
Atomic_Es = model(sb_fp)
E_predict = torch.sum(Atomic_Es * b_e_mask, dim = [1,2])
if is_force:
F_predict = self.get_forces(E_predict, b_fp, b_dfpdX)
metrics = self.get_metrics(sb_e, sb_f, N_atoms, t_ids,
E_predict, F_predict, SSE, SAE, scaling, b_e_mask)
[E_loss, F_loss, E_MAE, F_MAE, E_RMSE, F_RMSE] = metrics
loss = E_coeff * E_loss + F_coeff * F_loss
else:
metrics = self.get_metrics(sb_e, None, N_atoms, t_ids,
E_predict, None, SSE, SAE, scaling, b_e_mask)
[E_loss, F_loss, E_MAE, F_MAE, E_RMSE, F_RMSE] = metrics
loss = E_coeff * E_loss
logfile.write(f'{i}, E_RMSE/atom = {E_RMSE}, F_RMSE = {F_RMSE}, loss={loss}\n')
logfile.write(f'{i}, E_MAE/atom = {E_MAE}, F_MAE = {F_MAE}\n')
v_Atomic_Es = model(v_sb_fp)
v_E_predict = torch.sum(v_Atomic_Es * v_b_e_mask, dim = [1,2])
if is_force:
v_F_predict = self.get_forces(v_E_predict, v_b_fp, v_b_dfpdX)
v_metrics = self.get_metrics(v_sb_e, v_sb_f, v_N_atoms, v_ids,
v_E_predict, v_F_predict, SSE, SAE, scaling, v_b_e_mask)
[v_E_loss, v_F_loss, v_E_MAE, v_F_MAE, v_E_RMSE, v_F_RMSE] = v_metrics
v_loss = E_coeff * v_E_loss + F_coeff * v_F_loss
else:
v_metrics = self.get_metrics(v_sb_e, None, v_N_atoms, v_ids,
v_E_predict, None, SSE, SAE, scaling, v_b_e_mask)
[v_E_loss, v_F_loss, v_E_MAE, v_F_MAE, v_E_RMSE, v_F_RMSE] = v_metrics
v_loss = E_coeff * v_E_loss
try:
if v_loss < best_v_loss:
best_loss = loss
best_E_MAE = E_MAE
best_F_MAE = F_MAE
best_v_loss = v_loss
best_v_E_MAE = v_E_MAE
best_v_F_MAE = v_F_MAE
torch.save(model,model_path)
n_val = 1
except NameError:
best_loss = loss
best_E_MAE = E_MAE
best_F_MAE = F_MAE
best_v_loss = v_loss
best_v_E_MAE = v_E_MAE
best_v_F_MAE = v_F_MAE
torch.save(model,model_path)
n_val = 1
logfile.write(f'val, E_RMSE/atom = {v_E_RMSE}, F_RMSE = {v_F_RMSE}\n')
logfile.write(f'val, E_MAE/atom = {v_E_MAE}, F_MAE = {v_F_MAE}\n')
logfile.flush()
if n_val > n_val_stop:
break
t2 = time.time()
logfile.write(f'Training takes {t2-t0}\n')
logfile.close()
return [best_loss, best_E_MAE, best_F_MAE, best_v_loss, best_v_E_MAE, best_v_F_MAE]
def get_forces(self, E_predict, b_fp, b_dfpdX):
b_dEdfp = grad(E_predict,
b_fp,
grad_outputs=torch.ones_like(E_predict),
create_graph = True,
retain_graph = True)[0].view(b_fp.shape[0],1,b_fp.shape[1]*b_fp.shape[2])
F_predict = - torch.bmm(b_dEdfp,b_dfpdX).view(b_fp.shape[0],b_fp.shape[1],3)
return F_predict
def get_metrics(self, sb_e, sb_f, N_atoms, ids, E_predict, F_predict, SSE, SAE, scaling, b_e_mask):
gmin = scaling['gmin']
gmax = scaling['gmax']
emin = scaling['emin']
emax = scaling['emax']
E_loss = SSE(sb_e, E_predict / N_atoms) / len(ids)
E_MAE = SAE(sb_e, E_predict / N_atoms) / len(ids) * (emax - emin)
E_RMSE = torch.sqrt(E_loss) * (emax - emin)
if sb_f is None:
F_loss = 0
F_MAE = 0
F_RMSE = 0
else:
F_loss = SSE(sb_f, F_predict) / (3 * torch.sum(N_atoms))
F_MAE = SAE(sb_f, F_predict) / (3 * torch.sum(N_atoms)) * (emax - emin)
F_RMSE = torch.sqrt(F_loss) * (emax - emin)
F_max = torch.max(torch.abs(sb_f-F_predict))*(emax-emin)
print('F_max = ',F_max.data.numpy(), 'eV/A')
return [E_loss, F_loss, E_MAE, F_MAE, E_RMSE, F_RMSE]
def evaluate(self, data_dict, E_coeff, F_coeff, is_force):
device = self.device
for key in data_dict.keys():
data_dict[key] = data_dict[key].to(device)
model_path = self.model_path
SSE = torch.nn.MSELoss(reduction='sum')
SAE = torch.nn.L1Loss(reduction='sum')
model = torch.load(model_path)
scaling = model.scaling
gmin = scaling['gmin']
gmax = scaling['gmax']
emin = scaling['emin']
emax = scaling['emax']
ids = np.array(list(data_dict.keys()))
batch_info = batch_pad(data_dict,ids)
b_fp = batch_info['b_fp']
if is_force:
b_dfpdX = batch_info['b_dfpdX'].view(b_fp.shape[0],
b_fp.shape[1]*b_fp.shape[2],
b_fp.shape[1]*3)
b_e_mask = batch_info['b_e_mask']
b_fp.requires_grad = True
sb_fp = (b_fp - gmin) / (gmax - gmin)
N_atoms = batch_info['N_atoms'].view(-1)
b_e = batch_info['b_e'].view(-1)
b_f = batch_info['b_f']
sb_e = (b_e - emin) / (emax - emin)
sb_f = b_f / (emax - emin)
Atomic_Es = model(sb_fp)
E_predict = torch.sum(Atomic_Es * b_e_mask, dim = [1,2])
if is_force:
            F_predict = self.get_forces(E_predict, b_fp, b_dfpdX)
            metrics = self.get_metrics(sb_e, sb_f, N_atoms, ids,
E_predict, F_predict, SSE, SAE, scaling, b_e_mask)
[E_loss, F_loss, E_MAE, F_MAE, E_RMSE, F_RMSE] = metrics
loss = E_coeff * E_loss + F_coeff * F_loss
else:
            metrics = self.get_metrics(sb_e, None, N_atoms, ids,
E_predict, None, SSE, SAE, scaling, b_e_mask)
[E_loss, F_loss, E_MAE, F_MAE, E_RMSE, F_RMSE] = metrics
loss = E_coeff * E_loss
return [loss, E_MAE, F_MAE]
|
lmj1029123/SGCMC_Acrolein_AgPd
|
ML_Models/SNN.py
|
SNN.py
|
py
| 17,775 |
python
|
en
|
code
| 2 |
github-code
|
6
|
27427166246
|
"""
This script has the information for the initialization of the model.
-------------------------------------------------------------------------------
created on:
Thu 3 Mar 2022
-------------------------------------------------------------------------------
last change:
Wed 18 May 2022
-------------------------------------------------------------------------------
notes:
-------------------------------------------------------------------------------
contributors:
Jose:
name: Jose Betancourt
email: [email protected]
-------------------------------------------------------------------------------
"""
import Params
import Agents
import Groups
import Network
import TimeIteration
import numpy as np
#------------------------------------------------------------------------------
# AUXILIARY FUNCTIONS
#------------------------------------------------------------------------------
def generate_attributes():
'''
This function creates agent attributes according to the specified
distributions.
'''
opinion = np.random.beta(Params.sigma, Params.sigma)
return opinion
#------------------------------------------------------------------------------
# GENERATION
#------------------------------------------------------------------------------
class Population(object):
def __init__(self, agents=None, network=None):
'''
This function initializes the population object.
'''
# Create agent list
agents = []
Na = Params.N_agents
for i in range(Na):
opinion = generate_attributes()
agent = Agents.Agent(ident=i, opinion=opinion)
agents.append(agent)
self.agents = agents
# Generate group information tensor
groups = []
Nl = Params.N_layers
for l in range(Nl):
Ng = int(Na/Params.avg_members[l])
layer = []
for g in range(Ng):
group = Groups.Group(layer=l, ident=g)
layer.append(group)
groups.append(layer)
self.groups = groups
# Assign agents to groups
for agent in agents:
agent.groups = []
for l in range(Nl):
Ng = int(Na/Params.avg_members[l])
g = np.random.choice(np.arange(Ng))
agent.groups.append(groups[l][g])
groups[l][g].members.append(agent)
networks = []
for l in range(Nl):
layer_net = np.zeros([Na, Na], dtype=float)
for group in groups[l]:
for agent_i in group.members:
for agent_j in group.members:
# Family ties
if l == 0:
layer_net[agent_i.ident, agent_j.ident] = Params.init_weight[l]
# Work ties
elif l == 1:
layer_net[agent_i.ident, agent_j.ident] = \
np.random.choice([0.,Params.init_weight[l]],p=[1-Params.p_link[l], Params.p_link[l]])
# Friend ties
else:
if np.random.rand() < Params.p_main_gang:
layer_net[agent_i.ident, agent_j.ident] = \
np.random.choice([0.,Params.init_weight[l]],p=[1-Params.p_link[l], Params.p_link[l]])
elif agent_i.ident < agent_j.ident:
others = [k for k in agents if k.groups[l]!=group]
new_friend = np.random.choice(others)
layer_net[agent_i.ident, new_friend.ident] = Params.init_weight[l]
networks.append(layer_net)
# Generate network object
network = Network.Network(np.array(networks))
self.network = network
#------------------------------------------------------------------------------
# SIMULATION
#------------------------------------------------------------------------------
def run_simulation(ind, agents, network, record):
'''
This function runs the simulation with the initialized agents.
'''
my_simulation = TimeIteration.Simulation()
while my_simulation.time < Params.T:
my_simulation.iterate(ind, agents, network, record)
|
frantisek901/PublicOpinion
|
PythonModel/Generator.py
|
Generator.py
|
py
| 4,453 |
python
|
en
|
code
| 1 |
github-code
|
6
|
34829704572
|
# -*- coding: utf-8 -*-
"""
animation of global earthquake locations from 2000-2019
plotted annually
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
#===================================================
# files and parameters
#===================================================
file_eq = 'globalEqs.txt'
#===================================================
# load data
#===================================================
#
aYr = np.genfromtxt( file_eq, skip_header = 1,
usecols = (0), delimiter = '-', dtype = int)
print (np.unique(aYr))
mLoc = np.genfromtxt( file_eq, skip_header = 1,
usecols = (2,1), delimiter = '-', dtype = float).T
#===================================================
# plot eq map using basemap
#===================================================
for it in np.unique(aYr):
sel_eq = it == aYr
print( ' no of eq in %i: %i' %(it, sel_eq.sum()))
plt.figure(1)
plt.title(str(it))
m = Basemap()
m.drawcoastlines()
    a_x, a_y = m(mLoc[0][sel_eq], mLoc[1][sel_eq])
plt.plot (a_x, a_y, 'ro', ms = 5, mew = 1.5, mfc = 'none')
plt.pause(.5)
#plt.clf()
|
patrickward110/Astro-199
|
Astro119/In Class/Wk 3/global earthquakes.py
|
global earthquakes.py
|
py
| 1,406 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33957213827
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 3 17:53:18 2022
@author: yiann
"""
import pandas as pd
import pytz
# load the data file into the script
data_file='Solar_1min_2021.txt'
df=pd.read_csv(data_file, index_col=[0], usecols=[0,6], sep=',', header=None,
parse_dates=True, na_values='"NAN"')
df.columns=['GH']
df = df.dropna() # Drop missing values (dropna is not in-place, so reassign)
df = df * 1000 / 8.63 # Converts mV to W/m^2
# trying out timezones from pytz
grc = pytz.timezone('Europe/Athens') # includes DST, not what I want
grc_w = pytz.timezone('Etc/GMT-2') # is this the one I'm looking for???
utc=pytz.timezone('UCT')
df.index = df.index.tz_localize(grc_w).tz_convert(utc)
''' ------------------------ DID I FINALLY MAKE IT? ---------------------------'''
|
ikaitsas/Irradiance-QC-UP
|
oldstuff/prospathw_na_allaksw_timezones.py
|
prospathw_na_allaksw_timezones.py
|
py
| 730 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43394887837
|
#!/usr/bin/env python3
"""
zbdump.py
"""
import logging
import subprocess
import sys
import time
import asyncio
from typing import Any, Optional, Union
from scapy.all import Dot15d4FCS # type: ignore
import scapy.all as sp
import datetime as dt
_LOGGER = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
def install(package):
subprocess.check_call([sys.executable, "-m", "pip", "install", package])
try:
from killerbee import KillerBee
except ImportError:
try:
from ..killerbee import KillerBee
except ImportError:
install("git+https://github.com/antonio-boiano/killerbee.git#egg=killerbee")
from killerbee import KillerBee
try:
from killerbee.scapy_extensions import *
except ImportError:
try:
from ..killerbee.scapy_extensions import *
except ImportError:
install("git+https://github.com/antonio-boiano/killerbee.git#egg=killerbee")
from killerbee.scapy_extensions import *
try:
from .zbsocket import *
from .zbconst import DEFAULT_ZIGBEE_CHANNEL,DEFAULT_QUEUE_BUFFER_SIZE
from .zb_dissector import ZbDissector,ZbFiltering,header_154
except ImportError:
from zbsocket import *
from zbconst import DEFAULT_ZIGBEE_CHANNEL,DEFAULT_QUEUE_BUFFER_SIZE
from zb_dissector import ZbDissector,ZbFiltering,header_154
class AsyncDump:
KillerBee = KillerBee
def __init__(self,channel=11,subghz_page=0,dev_path:Optional[str]=None,hardware:Optional[str]=None,kb:Optional[KillerBee]=None) -> None:
self.kb: Optional[KillerBee] = kb
self.channel = 11 if channel is None else channel
self.subghz_page=subghz_page
self._queue_list = []
self._filter_dict = {}
self.dev_path=dev_path
self.hardware=hardware
self.zbdissect = ZbDissector()
self.zbfilter = ZbFiltering()
if(self.kb is None):
try:
self.kb = KillerBee(device=self.dev_path,hardware=self.hardware)
except Exception as e:
_LOGGER.debug("KillerBee cannot find device"+str(e))
raise
def subscribe (self,queue:asyncio.Queue=None,filter:str=None)->asyncio.Queue: # type: ignore
"""
Subscribe to the queue to receive packets from the sniffer.
Args:
queue (asyncio.Queue, optional): Queue to receive packets. Defaults to None, in this case a new queue is created.
Returns:
asyncio.Queue: Queue to receive packets.
"""
if not queue:
queue = asyncio.Queue(DEFAULT_QUEUE_BUFFER_SIZE)
self._queue_list.append(queue)
self._filter_dict[queue]=filter
return queue
#To Do add filtering capabilities from class zb_filter. In case of filtering return in the queue also the disssected pcket
#def set_filter(self,queue:asyncio.Queue,filter:str):
def unsubscribe (self,queue:asyncio.Queue):
"""
Unsubscribe from the queue to stop receiving packets from the sniffer.
Args:
queue (asyncio.Queue): Queue to unsubscribe from.
"""
try:
self._queue_list.remove(queue)
except ValueError:
pass
self._filter_dict.pop(queue,None)
async def start_dump(self,channel:Optional[int]=None,subghz_page:Optional[int]=None,queue=None,async_handler=None):
if subghz_page is not None:
self.subghz_page=subghz_page
if channel is not None:
self.channel=channel
if queue is not None:
self.subscribe(queue)
if self.kb is not None:
if not self.kb.is_valid_channel(self.channel, self.subghz_page):
_LOGGER.debug("Channel and sub_ghz not valid. Using Default channel %d and sub_ghz 0 instead" % DEFAULT_ZIGBEE_CHANNEL)
self.channel=DEFAULT_ZIGBEE_CHANNEL
self.subghz_page=0
self.kb.sniffer_on(channel=self.channel,page=self.subghz_page)
if async_handler is None: async_handler = asyncio.create_task
self.dump_task=async_handler(self.dump_packets())
async def dump_packets(self):
async def handle_queue (packet,queue):
if self._filter_dict[queue] is not None:
if not self.zbfilter.verify(self._filter_dict[queue],header=packet["header"]):
return None
else:
try:
queue.put_nowait(packet)
except asyncio.QueueFull:
self.unsubscribe(queue)
else:
try:
queue.put_nowait(packet)
except asyncio.QueueFull:
self.unsubscribe(queue)
async def handle_dissecting(packet:dict[Union[int, str], Any]):
if packet['bytes'] is None: return None
packet_header = self.zbdissect.packet_dissecting(packet['bytes'])
packet['header']=packet_header
await asyncio.gather(*[handle_queue(packet,queue) for queue in self._queue_list])
if self.kb :
while True:
await asyncio.sleep(0)
packet: Optional[dict[Union[int, str], Any]] = self.kb.pnext()
if packet is None or not self._queue_list:
continue
else:
await asyncio.create_task(handle_dissecting(packet))
async def read_pcap(self,file_path,queue:asyncio.Queue=None): # type: ignore
if not os.path.exists(file_path): return None, None
if queue is None: queue = asyncio.Queue(DEFAULT_QUEUE_BUFFER_SIZE)
async def actual_pcap_read(self,file_path,queue:asyncio.Queue):
pcap = sp.PcapReader(file_path)
for p in pcap:
packet = {"bytes":None,"datetime":None,"dbm":None}
packet["bytes"]= sp.raw(p) # type: ignore
packet["datetime"]=dt.datetime(1970, 1, 1) + dt.timedelta(seconds=float(p.time)) # type: ignore
await queue.put(packet)
return queue , asyncio.create_task(actual_pcap_read(self,file_path,queue))
async def get_dev_info(self):
if self.kb:
return self.kb.get_dev_info()
def get_frequency(self):
if self.kb is not None:
freq = self.kb.frequency(self.channel, self.subghz_page) / 1000.0
return freq
else:
return None
def shutdown(self):
for k in self._queue_list: self.unsubscribe(k)
if self.kb is not None:
self.kb.sniffer_off()
self.kb.close()
if self.dump_task:
if not self.dump_task.cancelled(): self.dump_task.cancel()
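# Minimal usage sketch for AsyncDump; this coroutine is illustrative only, is never called
# in this module, and assumes a supported sniffer device is attached.
async def _example_async_dump():
    dump = AsyncDump(channel=DEFAULT_ZIGBEE_CHANNEL)
    queue = dump.subscribe()            # asyncio.Queue receiving dissected packet dicts
    await dump.start_dump()
    packet = await queue.get()          # raw 'bytes' plus the dissected 'header'
    print(len(packet["bytes"]), packet["header"])
    dump.shutdown()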
class SockZbDump:
def __init__(
self,
channel,
pcapfile = None,
dev_path = None,
dev_name = None,
ppi=0,
subghz_page=0,
pan_id_hex=None,
count=-1,
timeout=-1
) -> None:
self.packetcount: int = 0
self.kb: Optional[KillerBee] = None
self.pcap_dumper: Optional[PcapDumper] = None
self.usok: Optional[Usocket] = None
self.unbuffered: Optional[Any] = None
self.channel = channel
self.pcapfile = pcapfile
self.devstring = dev_path
self.device = dev_name
self.ppi = ppi
self.subghz_page = subghz_page
self.pan_id_hex = pan_id_hex
self.count = count
self.timeout=timeout
def close(self) -> None:
if self.kb is not None:
self.kb.sniffer_off()
self.kb.close()
if self.pcap_dumper is not None:
self.pcap_dumper.close()
def dump_packets(self):
pan = None
if self.pan_id_hex:
panid: Optional[int] = int(self.pan_id_hex, 16)
else:
panid = None
if self.kb is not None:
rf_freq_mhz = self.kb.frequency(self.channel, self.subghz_page) / 1000.0
else:
rf_freq_mhz = 0.0
_LOGGER.debug(
"zbdump: listening on '{}', channel {}, page {} ({} MHz), link-type DLT_IEEE802_15_4, capture size 127 bytes".format(
self.devstring, self.channel, self.subghz_page, rf_freq_mhz
)
)
timeout_start = time.time()
while (time.time() < timeout_start + self.timeout) or (self.count != self.packetcount):
if self.kb is not None:
packet: Optional[dict[Union[int, str], Any]] = self.kb.pnext()
else:
packet = None
if packet is None:
continue
if panid is not None:
pan, layer = kbgetpanid(Dot15d4FCS(packet["bytes"]))
if panid is None or panid == pan:
self.packetcount += 1
if self.pcap_dumper is not None:
self.pcap_dumper.pcap_dump(
packet["bytes"], ant_dbm=packet["dbm"], freq_mhz=rf_freq_mhz
)
if self.usok is not None:
self.usok.send_data_dstream(pickle.dumps(packet))
def capture(self):
        if self.pcapfile is not None:
self.pcap_dumper = PcapDumper(DLT_IEEE802_15_4, self.pcapfile, ppi=self.ppi) # type: ignore
if self.devstring is None:
_LOGGER.debug(
"Autodetection features will be deprecated - please include interface string (e.g. -i /dev/ttyUSB0)"
)
if self.device is None:
_LOGGER.debug(
"Autodetection features will be deprecated - please include device string (e.g. -d apimote)"
)
self.kb = KillerBee(device=self.devstring, hardware=self.device)
if not self.kb.is_valid_channel(self.channel, self.subghz_page):
_LOGGER.error(
"ERROR: Must specify a valid IEEE 802.15.4 channel for the selected device."
)
self.kb.close()
self.kb.set_channel(self.channel, self.subghz_page)
self.kb.sniffer_on()
self.usok = Usocket()
try:
self.usok.ustream_start()
except:
self.usok.close()
raise
self.dump_packets()
self.kb.sniffer_off()
self.kb.close()
self.usok.close()
if self.pcap_dumper is not None:
self.pcap_dumper.close()
_LOGGER.debug(f"{self.packetcount} packets captured")
|
antonio-boiano/IoTScent
|
core/zbdump.py
|
zbdump.py
|
py
| 10,906 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8631487904
|
import codecs
import yaml
from typing import Optional
from alert_autoconf.models import Alerts
CLUSTER_NAME_PLACEHOLDER = "{cluster}"
def read_from_file(filename: str, cluster_name: Optional[str]) -> Alerts:
"""
    Reads data from the config file
    :param filename: file name
    :return: parsed Alerts configuration
"""
with codecs.open(filename, "r", encoding="UTF-8") as stream:
data = Alerts(**yaml.load(stream, Loader=yaml.FullLoader))
if data.version < 1.1:
return data
prefix = data.prefix
    # apply the prefix
if len(prefix):
skip = ("ERROR", "WARN", "OK", "NODATA", "MONAD")
for trigger in data.triggers:
trigger.name = prefix + trigger.name
trigger.tags = [
prefix + tag for tag in trigger.tags if tag not in skip
] + [tag for tag in trigger.tags if tag in skip]
for alerting in data.alerting:
alerting.tags = [
prefix + tag for tag in alerting.tags if tag not in skip
] + [tag for tag in alerting.tags if tag in skip]
    # apply the cluster name
for trigger in data.triggers:
_apply_cluster_name(trigger.tags, cluster_name)
_apply_cluster_name(trigger.targets, cluster_name)
if trigger.parents:
for parent in trigger.parents:
_apply_cluster_name(parent.tags, cluster_name)
for alerting in data.alerting:
_apply_cluster_name(alerting.tags, cluster_name)
return data
def _apply_cluster_name(strings, cluster_name):
for i in range(len(strings)):
if CLUSTER_NAME_PLACEHOLDER in strings[i]:
if not cluster_name:
raise ValueError(
"Config file uses {} but cluster name is not set".format(CLUSTER_NAME_PLACEHOLDER),
)
strings[i] = strings[i].replace(CLUSTER_NAME_PLACEHOLDER, cluster_name)
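# Minimal usage sketch; "alerts.yaml" and the cluster name below are placeholders,
# not files or values shipped with this package.
if __name__ == "__main__":
    example = read_from_file("alerts.yaml", cluster_name="production")
    for trigger in example.triggers:
        print(trigger.name, trigger.tags)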
|
avito-tech/alert-autoconf
|
alert_autoconf/config.py
|
config.py
|
py
| 2,092 |
python
|
en
|
code
| 1 |
github-code
|
6
|
35971477802
|
from Tkinter import *
import tkMessageBox
import tkFont
import ttk
import RPi.GPIO as GPIO
import time
import serial
import threading
import Queue
#Start Serial Communication with Arduino Mega in another thread
ser = serial.Serial ("/dev/ttyS0",9600)
GPIO.setmode(GPIO.BOARD)
GPIO.setup(38, GPIO.OUT)
GPIO.setup(37, GPIO.OUT)
GPIO.output(38, GPIO.LOW) #Produkt A
GPIO.output(37, GPIO.LOW) #Produkt B
lastState = 100
win = Tk()
myFont = tkFont.Font(family = 'Helvetica', size = 36, weight = 'bold')
smallFont = tkFont.Font(family = 'Helvetica', size = 18, weight = 'bold')
def productA():
if lastState == 100:
print("Product A selected and GPIO37 set to HIGH")
GPIO.output(37,GPIO.HIGH)
GPIO.output(38,GPIO.LOW)
productAbutton["text"] = "Produkt A"
def productB():
if lastState == 100:
print("Product B selected and GPIO38 set to HIGH")
GPIO.output(38,GPIO.HIGH)
GPIO.output(37,GPIO.LOW)
productBbutton["text"] = "Produkt B"
def exitProgram():
print("Exit Button pressed")
GPIO.cleanup()
win.quit()
win.title("Produktwahl")
win.geometry('800x480')
#product buttons
productAbutton = Button(win, text = "Produkt A", font = myFont, command = productA, height = 2, width =8, bg='#fcba03')
productAbutton.pack(pady=10)
productBbutton = Button(win, text = "Produkt B", font = myFont, command = productB, height = 2, width =8, bg='#1363a1' )
productBbutton.pack(pady=10)
#progressbar
progressbar = ttk.Progressbar(orient=HORIZONTAL, length=200, mode='determinate')
progressbar.pack(side=TOP,pady=10)
#progressbar.start()
#read data from serial port in a new thread
def read_from_port(ser):
    # runs in a background thread; updates the module-level serialData read by the GUI loop
    global serialData
    while True:
        if ser.inWaiting() > 0:
            print("received data via serial: ")
            serialData = int(ser.readline().decode().strip())
            print(serialData)
#start a new thread for serial readings
thread = threading.Thread(target=read_from_port, args=(ser,))
thread.start()
serialData = 0
try:
while 1:
win.update_idletasks()
win.update()
#process finished - reset everything
if serialData == 100 and lastState != 100:
#reset progressbar for next process
progressbar.stop()
print("Finished")
#reset product choices
            GPIO.output(38,GPIO.LOW)
GPIO.output(37,GPIO.LOW)
tkMessageBox.showinfo('Produktwahl', 'Produkt fertig')
lastState = 100
if serialData == 25 and lastState !=25:
progressbar.stop()
progressbar.step(25)
print("25")
lastState = 25
if serialData == 50 and lastState !=50:
progressbar.stop()
progressbar.step(50)
lastState = 50
print("50")
if serialData == 75 and lastState !=75:
progressbar.stop()
progressbar.step(75)
print("75")
lastState = 75
if serialData == 99 and lastState !=99:
progressbar.stop()
progressbar.step(99)
print("99")
lastState = 99
except KeyboardInterrupt:
GPIO.cleanup()
|
mschweig/mechatronicPlayground
|
gui.py
|
gui.py
|
py
| 3,256 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1898256439
|
import re
from copy import copy
from timeit import default_timer as timer
import numpy as np
DEBUG = True
"""This is the sorts timer. It's a class with all the sorts and a timer function to time how long it takes for the sorting algorithms to sort the list
the user inputs!"""
# This function is used to time the algorithms
def timed(func):
def wrapper_function(*args, **kwargs):
start = timer()
func(*args, **kwargs)
print("Timed:", (timer() - start) * 1000, "ms")
return wrapper_function
# If the list that is being input is too long for display, this method shrinks the list in the following way
# [1, 2, 3, 4, ... , 97, 98, 99, 100]
def list_for_printing(arr):
if len(arr) > 20:
left = arr[0:4]
right = arr[-4:]
ret = "["
for i in left:
ret += "%s, " % i
ret += "... "
for i in right:
ret += ", %s" % i
ret += "]"
return ret
return arr
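# Minimal illustration of the two helpers above; _demo_builtin_sort is not part of the
# original module and is never called by it.
@timed
def _demo_builtin_sort(arr):
    # exercises the @timed decorator with Python's built-in sort
    return sorted(arr)
# _demo_builtin_sort(list(range(1000)))             # prints e.g. "Timed: 0.1 ms"
# print(list_for_printing(list(range(1, 101))))     # "[1, 2, 3, 4, ... , 97, 98, 99, 100]"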
class Sorts(object):
def __init__(self, arr) -> None:
self.arr = copy(arr)
self.orig_arr: list = copy(arr)
self.qs_lower: int = 0
self.qs_upper: int = len(self.arr) - 1
def __repr__(self) -> str:
return "Sorts[\n original: %s,\n sorted: %s\n]" % (
list_for_printing(self.orig_arr), list_for_printing(self.arr))
# resets the input list to its original input
def reset(self) -> None:
self.arr = copy(self.orig_arr)
def __quick_sort(self, lower: int, upper: int) -> None:
def partition(p_lower: int, p_upper: int) -> int:
pivot = self.arr[p_upper]
n = p_lower - 1
for k in range(p_lower, p_upper):
if self.arr[k] <= pivot:
n += 1
self.arr[n], self.arr[k] = self.arr[k], self.arr[n]
self.arr[n + 1], self.arr[p_upper] = self.arr[p_upper], self.arr[n + 1]
return n + 1
if lower < upper:
pivot_num = partition(lower, upper)
self.__quick_sort(lower, pivot_num - 1)
self.__quick_sort(pivot_num + 1, upper)
def __merge_sort(self, arr: list) -> list:
def merge(left: list, right: list) -> list:
sorted_arr = []
left_idx, right_idx = 0, 0
length_left, length_right = len(left), len(right)
while left_idx < length_left and right_idx < length_right:
if left[left_idx] <= right[right_idx]:
sorted_arr.append(left[left_idx])
left_idx += 1
else:
sorted_arr.append(right[right_idx])
right_idx += 1
while left_idx < length_left:
sorted_arr.append(left[left_idx])
left_idx += 1
while right_idx < length_right:
sorted_arr.append(right[right_idx])
right_idx += 1
return sorted_arr
if len(arr) <= 1:
return arr
mid = len(arr) // 2
left_arr = arr[:mid]
right_arr = arr[mid:]
left_arr = self.__merge_sort(left_arr)
right_arr = self.__merge_sort(right_arr)
return merge(left_arr, right_arr)
@timed
def heap_sort(self) -> None:
def heapify(arr: list, h_quantity: int, h_ini_parent: int) -> None:
parent_idx = h_ini_parent
left_idx = 2 * h_ini_parent + 1
right_idx = 2 * h_ini_parent + 2
if left_idx < h_quantity and arr[h_ini_parent] < arr[left_idx]:
parent_idx = left_idx
if right_idx < h_quantity and arr[parent_idx] < arr[right_idx]:
parent_idx = right_idx
if parent_idx != h_ini_parent:
arr[h_ini_parent], arr[parent_idx] = arr[parent_idx], arr[h_ini_parent]
heapify(arr, h_quantity, parent_idx)
quantity = len(self.arr)
for ini_parent in range(quantity, -1, -1):
heapify(self.arr, quantity, ini_parent)
for idx in range(quantity - 1, 0, -1):
self.arr[idx], self.arr[0] = self.arr[0], self.arr[idx]
heapify(self.arr, idx, 0)
@timed
def quick_sort(self) -> None:
self.__quick_sort(self.qs_lower, self.qs_upper)
@timed
def merge_sort(self) -> None:
self.arr = self.__merge_sort(self.arr)
@timed
def insertion_sort(self) -> None:
quantity = len(self.arr)
for idx in range(1, quantity):
predecessor_idx = idx - 1
key = self.arr[idx]
while predecessor_idx >= 0 and key < self.arr[predecessor_idx]:
self.arr[predecessor_idx + 1] = self.arr[predecessor_idx]
predecessor_idx -= 1
self.arr[predecessor_idx + 1] = key
def validate_input(value: str) -> list:
items = re.split(r"[,\s]+", value)
ret = []
for item in items:
try:
ret.append(int(item))
except ValueError:
if DEBUG:
print("Invalid value [{}] ignoring...".format(item))
return ret
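# e.g. validate_input("1, 2 3 foo") -> [1, 2, 3]; non-integer tokens are skipped
# (and reported while DEBUG is True).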
def main() -> None:
while True:
        input_arr = input("Input your list (elements separated by spaces or commas, or type 'stop' to quit): ")
if input_arr == 'stop':
break
arr = validate_input(input_arr)
sorts_std_list = Sorts(arr)
sorts_np_list = Sorts(np.array(arr))
print("Heap sort")
sorts_std_list.heap_sort()
print(sorts_std_list)
print("Heap sort - np")
sorts_np_list.heap_sort()
print(sorts_np_list)
print("Quick sort")
sorts_std_list.reset()
sorts_std_list.quick_sort()
print(sorts_std_list)
print("Quick sort - np")
sorts_np_list.reset()
sorts_np_list.quick_sort()
print(sorts_np_list)
print("Merge sort")
sorts_std_list.reset()
sorts_std_list.merge_sort()
print(sorts_std_list)
print("Merge sort - np")
sorts_np_list.reset()
sorts_np_list.merge_sort()
print(sorts_np_list)
print("Insertion sort")
sorts_std_list.reset()
sorts_std_list.insertion_sort()
print(sorts_std_list)
print("Insertion sort - np")
sorts_np_list.reset()
sorts_np_list.insertion_sort()
print(sorts_np_list)
if __name__ == '__main__':
main()
|
stevevandijk/sorts
|
Sortstimer.py
|
Sortstimer.py
|
py
| 6,452 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70452853947
|
def build(topics):
import html
print('Welcome to trivia night!\n How many questions would you like?')
amount = input('enter a number: ')
url = f'https://opentdb.com/api.php?amount={amount}'
print('Which topic would you like?')
for topic in topics:
name = html.unescape(topic['name'])
print(topic['parameter'], name)
topic = input('enter the topic number or any: ')
url += f'&category={topic}'
print('Easy, Medium, or Hard questions?')
difficulty = input('enter easy, medium, hard or any: ').lower()
url += f'&difficulty={difficulty}'
print('Boolean (true/false) or multiple choice questions?')
qtype = input('enter boolean, multiple, or any: ')
url += f'&type={qtype}'
return url
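# For illustration (values are arbitrary): entering 10, 9, easy and multiple in the prompts
# above builds: https://opentdb.com/api.php?amount=10&category=9&difficulty=easy&type=multiple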
|
austenc-id/Guild
|
1 - Python/14/functions/url.py
|
url.py
|
py
| 723 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24793906303
|
#!/usr/bin/env python3
import argparse, re
from sys import exit
from os import path
def getCombLayerObject(cell):
"""Return CombLayer object name based on its cell number"""
fname = "ObjectRegister.txt"
if not path.isfile(fname):
fname = path.join("case001", fname);
if not path.isfile(fname):
print("ObjectRegister not found")
return None
with open(fname) as f:
for line in f.readlines():
words = line.strip().split()
pos = words.index('::')
cells = words[pos+1:]
if words[0] != 'World':
if len(cells) == 1:
if int(cells[0]) == cell:
return words[0]
elif len(cells) == 2: # min/max range is given
cmin,cmax = map(int,(cells[0][1:], cells[1][:-1]))
if cell >= cmin and cell <= cmax:
return words[0]
print("Object not found")
return None
def main():
"""
Finds the CombLayer object name for the given MCNP(X) cell number
based on the CombLayer-generated object register
argument: cell - MCNP(X) cell number
"""
parser = argparse.ArgumentParser(description=main.__doc__, epilog="")
parser.add_argument('cell', type=int, help=__doc__)
args = parser.parse_args()
obj = getCombLayerObject(args.cell)
if obj:
print(obj+" "*100+".") # for MCNPX to add space if the previous object name was longer
if obj is None:
exit(1)
if __name__ == "__main__":
exit(main())
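# Example invocation (the cell number is illustrative): ./getcell.py 74123 prints the
# CombLayer object owning that cell, padded with spaces and a trailing "." for MCNPX.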
|
kbat/mc-tools
|
mctools/common/CombLayer/getcell.py
|
getcell.py
|
py
| 1,577 |
python
|
en
|
code
| 38 |
github-code
|
6
|
4840987385
|
#!usr/local/bin/python
#coding: utf-8
'''
Created on 2016-03-14
@author: CasparWang
'''
"""
################################################################################
provide type-specific option sets for application
################################################################################
"""
from learning.GUI.chapter4.shellgui import *
from learning.GUI.chapter4.packdlg import runPackDialog # dialogs for data entry
from learning.GUI.chapter4.unpkdlg import runUnpackDialog # they both run app classes
class TextPak1(ListMenuGui):
def __init__(self):
self.myMenu = [('Pack ', runPackDialog), # simple functions
('Unpack', runUnpackDialog), # use same width here
('Mtool ', self.notdone)] # method from guimixin
ListMenuGui.__init__(self)
def forToolBar(self, label):
return label in {'Pack ', 'Unpack'}
class TextPak2(DictMenuGui):
def __init__(self):
self.myMenu = {'Pack ': runPackDialog, # or use input here...
'Unpack': runUnpackDialog, # instead of in dialogs
'Mtool ': self.notdone}
DictMenuGui.__init__(self)
if __name__ == '__main__':
from sys import argv
if len(argv) > 1 and argv[1] == 'list':
print('list test')
TextPak1().mainloop()
else:
print('dict test')
TextPak2().mainloop()
|
Calvin-WangA/learning
|
learning/GUI/chapter4/mytools.py
|
mytools.py
|
py
| 1,481 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36187285700
|
from dataclasses import dataclass
import pytest
from pydantic import ValidationError, Json, BaseModel
from typing import Optional
from qwery import Model, Query, JSONB
class ExampleModel(Model):
class Meta:
table_name = "test"
a: int
b: Optional[str]
c: bool
def test_compile_select_query():
base = Query(ExampleModel).select().where("a = {.a}")
assert callable(base.fetch_one())
assert base.fetch_one().sql == "SELECT a, b, c FROM test WHERE a = $1"
assert (
base.limit(1).fetch_one().sql == "SELECT a, b, c FROM test WHERE a = $1 LIMIT 1"
)
test_model_query_complex = (
Query(ExampleModel).select(raw="COUNT(*)").where("a = {.a}").fetch_one()
)
assert test_model_query_complex.sql == "SELECT COUNT(*) FROM test WHERE a = $1"
assert callable(test_model_query_complex)
def test_compile_insert_query():
test_model_query = Query(ExampleModel).insert().execute()
assert test_model_query.sql == "INSERT INTO test (a, b, c) VALUES ($1, $2, $3)"
assert callable(test_model_query)
test_model_query_complex = (
Query(ExampleModel).insert().on_conflict("a").returning().fetch_one()
)
assert (
test_model_query_complex.sql
== "INSERT INTO test (a, b, c) VALUES ($1, $2, $3) ON CONFLICT (a) DO NOTHING RETURNING *"
)
assert callable(test_model_query_complex)
def test_compile_delete_query():
test_model_query = Query(ExampleModel).delete().where("a = {.a}").execute()
assert test_model_query.sql == "DELETE FROM test WHERE a = $1"
assert callable(test_model_query)
def test_compile_update_query():
test_model_query = (
Query(ExampleModel).update("b", "a", "c").where("a = {.a}").execute()
)
assert test_model_query.sql == "UPDATE test SET b = $1, a = $2, c = $3 WHERE a = $2"
assert callable(test_model_query)
def test_compile_dynamic_update_query():
test_model_query = Query(ExampleModel).dynamic_update().where("a = {.a}").execute()
assert test_model_query.sql == "UPDATE test SET {dynamic} WHERE a = $1"
assert callable(test_model_query)
@pytest.mark.asyncio
async def test_validate_select_query():
test_model_query = Query(ExampleModel).select().where("a = {.a}").fetch_one()
with pytest.raises(ValidationError):
await test_model_query(None, a="fuck")
@pytest.mark.asyncio
async def test_validate_insert_query():
test_model_query = Query(ExampleModel).insert(ignore={"b"}).execute()
with pytest.raises(ValidationError) as excinfo:
await test_model_query(None, a=1, c="yeet")
assert excinfo.value.errors() == [
{
"loc": ("c",),
"msg": "value could not be parsed to a boolean",
"type": "type_error.bool",
}
]
test_model_query = Query(ExampleModel).insert(body=True).execute()
with pytest.raises(ValidationError) as excinfo:
await test_model_query(None, a=1, c=1)
assert excinfo.value.errors() == [
{
"loc": ("example_model",),
"msg": "field required",
"type": "value_error.missing",
}
]
class ExampleEmbeddedData(BaseModel):
a: int
b: str
c: bool
class ExampleJSONModel(Model):
class Meta:
table_name = "test"
data: Optional[JSONB[ExampleEmbeddedData]]
@pytest.mark.asyncio
async def test_embedded_json():
example = ExampleEmbeddedData(a=1, b="test", c=True)
insert = Query(ExampleJSONModel).insert().execute()
_, args = insert._query.build(data=example)
assert args[0] == example.json()
update = Query(ExampleJSONModel).dynamic_update().execute()
_, args = update._query.build(data=example)
assert args[0] == example.json()
assert ExampleJSONModel(data=example).dict() == {
"data": {"a": 1, "b": "test", "c": True}
}
|
uplol/qwery
|
qwery/test_qwery.py
|
test_qwery.py
|
py
| 3,862 |
python
|
en
|
code
| 18 |
github-code
|
6
|
30394819861
|
from strings.games.deckofcards import Deckofcards
from strings.games.player import Player
# from strings.games.card import Card
class Cardgame:
# מאתחל את המשחק קלפים
def __init__(self, player1, player2, number_of_cards_for_all_players = 10):
if type(number_of_cards_for_all_players) == int:
if 0 < number_of_cards_for_all_players <= 26:
self.number_of_cards_for_all_players = number_of_cards_for_all_players
else:
self.number_of_cards_for_all_players = 10
else:
self.number_of_cards_for_all_players = 10
print("next time enter a number")
if type(player1) == str:
pass
else:
raise ValueError("please enter an str")
if type(player2) == str:
pass
else:
raise ValueError("please enter an str")
self.deck_of_cards = Deckofcards()
self.player1 = Player(player1, self.number_of_cards_for_all_players)
self.player2 = Player(player2, self.number_of_cards_for_all_players)
self.start_game = False
self.new_game()
self.start_game = True
    # Sets up the players' hands and shuffles the deck
def new_game(self):
if self.start_game == False:
self.deck_of_cards.shuffle()
self.player1.set_hand(self.deck_of_cards)
self.player2.set_hand(self.deck_of_cards)
else:
raise ValueError("you cant call new_game again")
    # Method that returns the winner
def get_winner(self):
if self.player1.number_of_cards > self.player2.number_of_cards:
return self.player2.name
elif self.player2.number_of_cards > self.player1.number_of_cards:
return self.player1.name
else:
return None
# gamenumber1=Cardgame("ariel","dolfin","dgrdgrd")
# gamenumber1.player1.show_hand()
# gamenumber1.player2.show_hand()
# gamenumber1.player1.cardsofplayer.pop()
# gamenumber1.player1.numberofcards-=1
#
# gamenumber1.player1.show_hand()
# print("the winner is",gamenumber1.get_winner())
|
arielvaks/games.cards
|
strings/games/cardgame.py
|
cardgame.py
|
py
| 2,151 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9727275592
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 28 11:39:34 2019
@author: Administrator
"""
#s1 =["ram","ravi","rahul","gopal"]
#s2 ="chandraprakash"
#
#for item in s1:
# print(item)
#
#for item in s2:
# print(item)
#
#index = 0
#for item in s1:
# print(index, item)
# index += 1
"""
lunch_menu = ["pizza", "sandwich", "sushi", "soup", "salad"]
Since you're super hungry and super excited about lunch,
enumerate over the array and append an "!" ("exclamation mark")
to each menu item.
"""
lunch_menu = ["pizza", "sandwich", "sushi", "soup", "salad"]
obj1 = enumerate(lunch_menu)
print (list(enumerate(lunch_menu)))
# Hands On 2
"""
nums = [1, 2, 3, 4]
Enumerate over the array and return a new array of the
squares of those numbers.
"""
nums = [1, 2, 3, 4]
for index,ele in enumerate(nums,1):
print (index," ", ele**2)
# Hands On 3
"""
odds_and_evens = [1, 3, 2, 18, 5, 10, 24]
iterate over the array and return any even numbers.
iterate over the array and return only the first array element that is odd
"""
odds_and_evens = [1, 3, 2, 18, 5, 10, 24]
index = 0
for i in odds_and_evens:
if i%2==0:
print(i,"even")
else:
print(i,"odd")
# Hands On 4
"""
cats_and_dogs = ["cat", "cat", "dog", "cat", "dog", "dog"]
We all know that cats and dogs don't get along.
Iterate over the array and delete the elements that represent dogs.
"""
cats_and_dogs = ["cat", "cat", "dog", "cat", "dog", "dog"]
while "dog" in (cats_and_dogs):
cats_and_dogs.remove("dog")
print(cats_and_dogs)
# Hands On 5
"""
famous_cats = ["Maru", "Lil Bub", "Grumpy Cat"]
check and see if the array includes the string "Maru"
"""
famous_cats = ["Maru", "Lil Bub", "Grumpy Cat"]
for i in famous_cats:
if i=="Maru":
print("true")
# Hands On 6
"""
quiet_and_loud = ["hi", "HI", "shhh", "WHAT?!"]
Iterate over the array to determine if any of the words contained there are loud (upcased).
"""
quiet_and_loud = ["hi", "HI", "shhh", "WHAT?!"]
for word in quiet_and_loud:
    if word.isupper():
        print(word, "is loud")
|
chandraprakashh/Data_Handling
|
code_prectics.py
|
code_prectics.py
|
py
| 2,291 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42335391035
|
# This program for monthly income and expenses
print("This Program will helps someone create a budget")
# input from user for ask their monthly income and expenses
monthly_income=float(input("How much is your total monthly Income?"))
housing_expenses = float(input("How much do you spend on your housing each month?"))
food_expenses=float(input("How much do you spend on your food each month?"))
transportation_expenses=float(input("how much do you spend on your transportation on each month?"))
phone_bill_expenses=float(input("how much do you spend on your phone bills each month?"))
utilities_expenses=float(input("how much do you spend on your utilities bills each month?"))
clothing_expenses=float(input("how much do you spend on your clothing each month?"))
# calculate and display the income percentage of each budget item.
housing_expenses_per = (housing_expenses / monthly_income)
food_expenses_per = (food_expenses / monthly_income)
transportation_expenses_per = (transportation_expenses / monthly_income)
phone_bill_expenses_per = (phone_bill_expenses / monthly_income)
utilities_expenses_per = (utilities_expenses / monthly_income)
clothing_expenses_per = (clothing_expenses / monthly_income)
# display results
print(f"\nHousing expenses takes up {housing_expenses_per:.2%} of your monthly income.")
print(f"Food expenses takes up {food_expenses_per:.2%} of your monthly income.")
print(f"Transportation takes up {transportation_expenses_per:.2%} of your monthly income.")
print(f"Phone bills takes up {phone_bill_expenses_per:.2%} of your monthly income.")
print(f"Utilities takes up {utilities_expenses_per:.2%} of your monthly income.")
print(f"Clothing takes up {clothing_expenses_per:.2%} of your monthly income.\n")
total_expenses = housing_expenses + food_expenses + transportation_expenses + phone_bill_expenses + utilities_expenses \
+ clothing_expenses
savings = monthly_income - total_expenses
print(f"You have ${savings} left from your income after paying these monthly expenses.")
|
jyotsnaagrawal/prg105
|
budget.py
|
budget.py
|
py
| 2,066 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19400375604
|
from time import sleep
import nmap
import nvdlib
from model.host import Host
from model.port import Port
from model.cve import *
class Scanner:
def __init__(self, network=None):
self._network = network
self._list_content_host = []
self._nmap = nmap.PortScanner()
def info_hosts_network(self):
        print('scan started')
dataset = self._nmap.scan(hosts=self._network, arguments='-T5 -sS -sV')['scan']
        print('scan finished')
for host in dataset:
if 'tcp' in dataset[host]:
self._list_content_host.append(self._popular_objeto(dataset[host]))
return self._list_content_host
def find_cve_by_cpe(self, info_cpe, product):
try:
dataset = nvdlib.searchCPE(cpeMatchString=info_cpe, keyword=product, cves=True, key='2dac879e-d189-4424-9378-ea4e4e06d283')
list_cve = []
for result in dataset:
for vulnerabilite in result.vulnerabilities:
list_cve.append(CVE(id=vulnerabilite))
sleep(1)
return list_cve[len(list_cve)-2:]
except Exception as error:
return []
def find_cve_by_product(self, product):
sleep(6)
dataset = nvdlib.searchCVE(keyword=product, limit=2)
list_cve = []
if dataset:
for result in dataset:
list_cve.append(CVE(id=result.cve.CVE_data_meta.ID))
return list_cve
def find_info_cve(self, cve):
try:
if str(cve).upper().find('CVE') != -1:
return nvdlib.getCVE(cve, key='2dac879e-d189-4424-9378-ea4e4e06d283')
except Exception as error:
return False
def get_cve_from_hosts(self):
for host in self._list_content_host:
for port in host.get('port'):
if port.get('cpe'):
port['list_cve'] = self.find_cve_by_cpe(port['cpe'], port['product'])
#if not port['list_cve']:
#port['list_cve'] = self.find_cve_by_product(port['name'])
if port.get('list_cve'):
list_cve = []
for cve in port.get('list_cve'):
dataset = self.find_info_cve(cve.id)
if dataset:
exploit = None
if 'v3exploitability' in dir(dataset):
exploit = dataset.v3exploitability
elif 'v2exploitability' in dir(dataset):
exploit = dataset.v2exploitability
score = Score(dataset.score[2], exploit, dataset.score[1]).dict()
list_references = [conteudo.url for conteudo in dataset.cve.references.reference_data]
cve_completo = CVE(id=cve.id, resume=dataset.cve.description.description_data[0].value
, reference=list_references, score=score).dict()
list_cve.append(cve_completo)
sleep(1)
port['list_cve'] = list_cve
def vulnerabilitie_network(self):
        # Scan the network
self.info_hosts_network()
        # Call the method that searches for CVEs based on the CPE or product + version
self.get_cve_from_hosts()
return self._list_content_host
def _popular_objeto(self, dataset):
list_port = []
for porta in dataset['tcp']:
port = Port(porta, dataset['tcp'][porta]['state'],
dataset['tcp'][porta]['name'],
dataset['tcp'][porta]['product'],
dataset['tcp'][porta]['version'],
dataset['tcp'][porta]['cpe']
)
list_port.append(port.dict())
host = Host(dataset['hostnames'][0]['name'],
dataset['addresses']['ipv4'],
list_port)
return host.dict()
#scan = Scanner('192.168.1.0/24')
#hosts = scan.vulnerabilitie_network()
#print(hosts)
#for host in hosts:
# for port in host.port:
# port = port.dict()
# print(host.dict())
|
jonassantos1000/tcc
|
model/scanner.py
|
scanner.py
|
py
| 4,245 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27513951023
|
import ctypes
from timsconvert.constants import *
# modified from alphatims
def init_bruker_dll(bruker_dll_file_name: str=BRUKER_DLL_FILE_NAME):
bruker_dll = ctypes.cdll.LoadLibrary(os.path.realpath(bruker_dll_file_name))
# Functions for .tsf files
# .tsf Open
bruker_dll.tsf_open.argtypes = [ctypes.c_char_p, ctypes.c_uint32]
bruker_dll.tsf_open.restype = ctypes.c_uint64
# .tsf Close
bruker_dll.tsf_close.argtypes = [ctypes.c_uint64]
bruker_dll.tsf_close.restype = None
# Read in profile or line spectra
bruker_dll.tsf_read_line_spectrum.argtypes = [ctypes.c_uint64,
ctypes.c_int64,
ctypes.POINTER(ctypes.c_double),
ctypes.POINTER(ctypes.c_float),
ctypes.c_uint32]
bruker_dll.tsf_read_line_spectrum.restype = ctypes.c_uint32
bruker_dll.tsf_read_profile_spectrum.argtypes = [ctypes.c_uint64,
ctypes.c_int64,
ctypes.POINTER(ctypes.c_uint32),
ctypes.c_uint32]
bruker_dll.tsf_read_profile_spectrum.restype = ctypes.c_uint32
# Get m/z values from indices.
bruker_dll.tsf_index_to_mz.argtypes = [ctypes.c_uint64,
ctypes.c_int64,
ctypes.POINTER(ctypes.c_double),
ctypes.POINTER(ctypes.c_double),
ctypes.c_uint32]
bruker_dll.tsf_index_to_mz.restype = ctypes.c_uint32
# Functions for .tdf files
# .tdf Open
bruker_dll.tims_open.argtypes = [ctypes.c_char_p, ctypes.c_uint32]
bruker_dll.tims_open.restype = ctypes.c_uint64
# .tdf Close
bruker_dll.tims_close.argtypes = [ctypes.c_uint64]
bruker_dll.tims_close.restype = None
# Read scans from .tdf
bruker_dll.tims_read_scans_v2.argtypes = [ctypes.c_uint64,
ctypes.c_int64,
ctypes.c_uint32,
ctypes.c_uint32,
ctypes.c_void_p,
ctypes.c_uint32]
bruker_dll.tims_read_scans_v2.restype = ctypes.c_uint32
# Read PASEF MSMS
MSMS_SPECTRUM_FUNCTOR = ctypes.CFUNCTYPE(None,
ctypes.c_int64,
ctypes.c_uint32,
ctypes.POINTER(ctypes.c_double),
ctypes.POINTER(ctypes.c_float))
bruker_dll.tims_read_pasef_msms.argtypes = [ctypes.c_uint64,
ctypes.POINTER(ctypes.c_int64),
ctypes.c_uint32,
MSMS_SPECTRUM_FUNCTOR]
bruker_dll.tims_read_pasef_msms.restype = ctypes.c_uint32
bruker_dll.tims_read_pasef_msms_for_frame.argtypes = [ctypes.c_uint64,
ctypes.c_int64,
MSMS_SPECTRUM_FUNCTOR]
bruker_dll.tims_read_pasef_msms_for_frame.restype = ctypes.c_uint32
MSMS_PROFILE_SPECTRUM_FUNCTOR = ctypes.CFUNCTYPE(None,
ctypes.c_int64,
ctypes.c_uint32,
ctypes.POINTER(ctypes.c_int32))
bruker_dll.tims_read_pasef_profile_msms.argtypes = [ctypes.c_uint64,
ctypes.POINTER(ctypes.c_int64),
ctypes.c_uint32,
MSMS_PROFILE_SPECTRUM_FUNCTOR]
bruker_dll.tims_read_pasef_profile_msms.restype = ctypes.c_uint32
bruker_dll.tims_read_pasef_profile_msms_for_frame.argtypes = [ctypes.c_uint64,
ctypes.c_int64,
MSMS_PROFILE_SPECTRUM_FUNCTOR]
bruker_dll.tims_read_pasef_profile_msms_for_frame.restype = ctypes.c_uint32
# Extract spectra from frames
bruker_dll.tims_extract_centroided_spectrum_for_frame.argtypes = [ctypes.c_uint64,
ctypes.c_int64,
ctypes.c_uint32,
ctypes.c_uint32,
MSMS_SPECTRUM_FUNCTOR,
ctypes.c_void_p]
bruker_dll.tims_extract_centroided_spectrum_for_frame.restype = ctypes.c_uint32
bruker_dll.tims_extract_profile_for_frame.argtypes = [ctypes.c_uint64,
ctypes.c_int64,
ctypes.c_uint32,
ctypes.c_uint32,
MSMS_PROFILE_SPECTRUM_FUNCTOR,
ctypes.c_void_p]
bruker_dll.tims_extract_profile_for_frame.restype = ctypes.c_uint32
# Get m/z values from indices
convfunc_argtypes = [ctypes.c_uint64,
ctypes.c_int64,
ctypes.POINTER(ctypes.c_double),
ctypes.POINTER(ctypes.c_double),
ctypes.c_uint32]
bruker_dll.tims_index_to_mz.argtypes = convfunc_argtypes
bruker_dll.tims_index_to_mz.restype = ctypes.c_uint32
# Get 1/k0 values from scan number
bruker_dll.tims_scannum_to_oneoverk0.argtypes = convfunc_argtypes
bruker_dll.tims_scannum_to_oneoverk0.restype = ctypes.c_uint32
# Convert 1/k0 to CCS
bruker_dll.tims_oneoverk0_to_ccs_for_mz.argtypes = [ctypes.c_double,
ctypes.c_int32,
ctypes.c_double]
bruker_dll.tims_oneoverk0_to_ccs_for_mz.restype = ctypes.c_double
return bruker_dll
# from tsfdata.py
def throw_last_tsf_error(bruker_dll):
err_len = bruker_dll.tsf_get_last_error_string(None, 0)
buf = ctypes.create_string_buffer(err_len)
bruker_dll.tsf_get_last_error_string(buf, err_len)
raise RuntimeError(buf.value)
# from tsfdata.py
def decode_array_of_strings(blob):
if blob is None:
return None
if len(blob) == 0:
return []
blob = bytearray(blob)
if blob[-1] != 0:
raise ValueError('Illegal BLOB contents.')
return str(blob, 'utf-8').split('\0')[:-1]
# from timsdata.py
# Convert 1/k0 to CCS for a given charge and mz
def one_over_k0_to_ccs(ook0, charge, mz):
bruker_dll = init_bruker_dll()
return bruker_dll.tims_oneoverk0_to_ccs_for_mz(ook0, charge, mz)
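

# Hedged usage sketch (the numeric values below are arbitrary examples; only
# init_bruker_dll and one_over_k0_to_ccs come from this module):
#     dll = init_bruker_dll()
#     ccs = one_over_k0_to_ccs(ook0=0.85, charge=2, mz=622.0)
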
if __name__ == '__main__':
logger = logging.getLogger(__name__)
|
orsburn/timsconvert
|
timsconvert/init_bruker_dll.py
|
init_bruker_dll.py
|
py
| 7,438 |
python
|
en
|
code
| null |
github-code
|
6
|
69997094268
|
# Class to store parameters while estimating GPFA model
# Usage:
#
# current_params = Param_GPFA_Class(param_cov_type, param_gamma, param_eps,
# param_d, param_C, param_R,
# param_notes_learnKernelParams, param_notes_learnGPNoise,param_notes_RforceDiagonal)
# current_params.param_cov_type = 'rbf'
class Param_GPFA_Class():
def __init__(self, param_cov_type, param_gamma,
param_eps, param_d, param_C, param_R,
param_notes_learnKernelParams, param_notes_learnGPNoise,
param_notes_RforceDiagonal):
self.param_cov_type = param_cov_type
self.param_gamma = param_gamma
self.param_eps = param_eps
self.param_d = param_d
self.param_C = param_C
self.param_R = param_R
self.param_notes_learnKernelParams = param_notes_learnKernelParams
self.param_notes_learnGPNoise = param_notes_learnGPNoise
self.param_notes_RforceDiagonal = param_notes_RforceDiagonal
|
harvineet/py-gpfa
|
core_gpfa/Param_GPFA_Class.py
|
Param_GPFA_Class.py
|
py
| 1,018 |
python
|
en
|
code
| 5 |
github-code
|
6
|
74432954107
|
from . import views
from django.conf.urls import url, include
urlpatterns = [
url(r'^new$', views.add_new_user),
url(r'^new/process$', views.add_new_user_p),
url(r'^edit$', views.edit_user),
url(r'^edit/process', views.edit_user_p),
url(r'^edit/(?P<u_id>\d+)$', views.edit_user_by_id),
url(r'^edit/(?P<u_id>\d+)/process$', views.edit_user_by_id_p),
url(r'^show/(?P<u_id>\d+)$', views.show_user),
url(r'^show/(?P<u_id>\d+)/post/', include('apps.msg_app.urls')),
]
|
EmilChoparinov/Message-Posting-Website
|
apps/user_app/urls.py
|
urls.py
|
py
| 495 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42649106860
|
"""
Crawls through the configured 'source' and looks for labels starting with 'rancon'.
If such a label is found on a service, then it will register the service in the
'backend'. If the backend supports tag all services will be tagged 'rancon'.
depending on the backend the registration behavior can be influenced by tags
set on the source (e.g. rancon.name, ...).
Every rancon.* tag will be available as variable "%NAME%" in the backend.
Please look through the documentation to make more sense of this, it is easy
but just a little bit complex because of the flexibility.
"""
import asyncio
import sys
import time
from rancon import settings
from rancon import tools
from .version import __version__
import uvloop
import prometheus_client.exposition
import prometheus_client.core
from prometheus_client import Counter, Gauge, Histogram, Summary
import sanic
from sanic import Sanic
from sanic.response import HTTPResponse, text
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
app = Sanic(__name__)
@app.route("/")
async def index(request):
""" returns a hello string """
return text("rancon v{} :)".format(__version__))
@app.route("/metrics")
async def metrics(request):
""" returns the latest metrics """
names = request.args.get("name", None)
registry = prometheus_client.core.REGISTRY
if names:
registry = registry.restricted_registry(names)
tmp = prometheus_client.exposition.generate_latest(registry)
return HTTPResponse(body_bytes=tmp)
@app.route("/health")
async def health(request):
""" returns the current health state of the system """
duration = time.time() - LAST_CALL_ROUTE_SERVICES
if duration > settings.args.hangup_detection:
msg = "system hang-up detected, it's been {} seconds since start of "\
"route_services".format(int(duration))
raise sanic.exceptions.ServerError(msg)
return text("OK")
LAST_CALL_ROUTE_SERVICES = time.time()
# NB: documentation does not fit the implementation here. the parameters here should be
# name, labelnames, labelvalues but documented are name, description, so I'm not sure
# what to use here. Also, it's not clear what labelnames and labelvalues should be if they
# were used and how they could be retrieved.
ROUTE_TIME = Summary('rancon_route_services_seconds',
'Number of seconds route_services takes')
METRIC_SERVICES_FOUND = Gauge('rancon_discovered_services',
'Number of discovered services')
METRIC_SUCCESSFUL_REGS = Counter('rancon_successful_registrations',
'Number of services registered')
METRIC_FAILED_REGS = Counter('rancon_failed_registrations',
'Number of failed service registrations')
METRIC_RAISED_REGS = Counter('rancon_registration_exceptions',
'Number of exception during service registrations')
METRIC_SUCCESSFUL_DEREGS = Counter('rancon_successful_deregistrations',
'Number of services deregistered, UNUSED')
METRIC_FAILED_DEREGS = Counter('rancon_failed_deregistrations',
'Number of failed service deregistrations, '
'UNUSED')
METRIC_VERSION = Gauge('rancon_version',
'Rancon version number',
('version',)).labels(__version__).set(1)
@ROUTE_TIME.time()
def route_services(schedule_next=5, loop=None):
""" checks for services to register and then register them with consul """
if loop is not None:
loop.call_later(schedule_next, route_services, schedule_next, loop)
global LAST_CALL_ROUTE_SERVICES
LAST_CALL_ROUTE_SERVICES = time.time()
log = tools.getLogger(__name__)
backend = settings.backend
source = settings.source
services_to_route = source.get_services()
registered_services = []
METRIC_SERVICES_FOUND.set(len(services_to_route))
for service in services_to_route:
try:
            # I think that will re-raise or pass the exception through,
# so we also need to catch it manually
with METRIC_RAISED_REGS.count_exceptions():
success, routed_service = backend.register(service)
registered_services.append(routed_service)
except Exception as e:
success = False
log.error("EXCEPTION CAUGHT WHILE REGSITERING '{}': {}"
.format(str(service), str(e)))
if success:
METRIC_SUCCESSFUL_REGS.inc()
else:
METRIC_FAILED_REGS.inc()
if len(registered_services) == 0:
log.debug("No services registered (of {} services found)"
.format(len(services_to_route)))
backend.cleanup(registered_services)
log.debug("Run completed @ {}".format(time.ctime()))
def start(sys_argv):
# prepare
settings.parse_params(sys_argv)
# not before here :)
log = tools.getLogger(__name__)
# run
log.error("Start @ {}".format(time.ctime()))
if not settings.args.continuous:
route_services()
sys.exit(0)
loop = asyncio.get_event_loop()
loop.call_soon(route_services, settings.args.wait, loop)
app.run(host="0.0.0.0", port=8000, loop=loop)
log.info("Exiting.")
def console_entrypoint():
start(sys.argv[1:])
|
flypenguin/python-rancon
|
rancon/__init__.py
|
__init__.py
|
py
| 5,424 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27625720407
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# In[2]:
import cv2
import numpy as np
import easyocr
import matplotlib.pyplot as plt
# In[3]:
im_1_path = '/Users/abrahamkom/Groupe 3IL 📖/Test/folder/permis4.jpg'
im_2_path = '/Users/abrahamkom/Groupe 3IL 📖/Test/folder/PERMIS.jpg'
print(im_1_path)
# <h1 class="alert alert-success">Fonction de reconnaissance </h>
# In[4]:
def recognize_text(img_path):
'''loads an image and recognizes text.'''
reader = easyocr.Reader(['fr'], gpu = True)
return reader.readtext(img_path,detail=0)
# In[ ]:
# In[5]:
result = recognize_text(im_1_path)
# In[6]:
print(result)
# In[46]:
import json
# In[53]:
# In[54]:
#print(data)
# In[60]:
personne = {"Contry" : result[1],
"firstName" : result[3],
"laastName" : result[4],
"birthDate" : result[5],
"birthPlace" : result[5],
"issueDate" : result[6],
"expiryDate" : result[8],
"deliveredAutority" : "",
"licenceNumlber" : result[9],
"licenceCategory" : result[10],
"mrzBand" : ""
}
#personne
# In[ ]:
|
viannprems99/projet_ocr
|
old_test.py
|
old_test.py
|
py
| 1,182 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14566034034
|
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.views import APIView
from skill_quest_app.models import Course, CourseQuizResult, CourseQuiz, InterestQuiz, CourseEnrollment
from skill_quest_app.serializers import CourseSerializer, QuizResultSerializer, CourseQuizSerializer, ProfileSerializer, \
InterestQuizSerializer, CourseQuizResultSerializer, CourseEnrollmentSerializer
# Create your views here.
class ListCoursesView(APIView):
def get(self, request, id=None):
queryset = Course.objects.filter(id=id) if id else Course.objects.all()
read_serializer = CourseSerializer(queryset, many=True, allow_empty=True)
return Response(read_serializer.data)
class PostCoursesView(APIView):
def post(self, request):
create_serializer = CourseSerializer(data=request.data)
if create_serializer.is_valid():
course_object = create_serializer.save()
read_serializer = CourseSerializer(course_object)
return Response(read_serializer.data, status=status.HTTP_201_CREATED)
return Response(create_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['PUT'])
def update_course(request, pk):
data = request.data
course = Course.objects.get(id=pk)
serializer = CourseSerializer(course, data=data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
@api_view(['DELETE'])
def delete_course(request, pk):
course = Course.objects.get(id=pk)
course.delete()
return Response("The course has been deleted...")
@api_view(['GET'])
def get_quiz_results(request):
    quiz_result = CourseQuizResult.objects.all()
    read_serializer = QuizResultSerializer(quiz_result, many=True)
    return Response(read_serializer.data)
@api_view(['GET'])
def get_course_quiz(request, course_id):
    queryset = CourseQuiz.objects.filter(id=course_id) if course_id else CourseQuiz.objects.all()
serializer = CourseQuizSerializer(queryset, many=True)
return Response(serializer.data)
class ProfileView(APIView):
def get(self, request, id=None):
queryset = Course.objects.filter(id=id) if id else Course.objects.all()
read_serializer = ProfileSerializer(queryset)
return Response(read_serializer.data)
def post(self, request):
create_serializer = ProfileSerializer(data=request.data)
if create_serializer.is_valid():
course_object = create_serializer.save()
read_serializer = CourseSerializer(course_object)
return Response(read_serializer.data, status=status.HTTP_201_CREATED)
return Response(create_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET'])
def get_interest_quiz(request, id):
    queryset = InterestQuiz.objects.filter(id=id) if id else InterestQuiz.objects.all()
serializer = InterestQuizSerializer(queryset, many=True)
return Response(serializer.data)
@api_view(['POST'])
def post_interest_quiz(request):
create_serializer = InterestQuizSerializer(data=request.data)
if create_serializer.is_valid():
course_object = create_serializer.save()
read_serializer = CourseSerializer(course_object)
return Response(read_serializer.data, status=status.HTTP_201_CREATED)
return Response(create_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class CourseQuizResultCreateView(APIView):
def post(self, request):
serializer = CourseQuizResultSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET'])
def get_enrolled_course(request):
courses = CourseEnrollment.objects.all()
serializer = CourseEnrollmentSerializer(courses, many=True)
return Response(serializer.data)
@api_view(['POST'])
def create_enrolled_course(request):
data = request.data
course = CourseEnrollment.objects.create(description=data['description'])
serializer = CourseEnrollmentSerializer(course, many=False)
return Response(serializer.data)
|
HemitPatel/Skill_Quest_Backend
|
skill_quest_app/views.py
|
views.py
|
py
| 4,263 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8022084480
|
import jieba
from flask import Flask,render_template,request
from peewee import fn
from wordcloud import WordCloud
from src.database import User,Weibo
from src.save import save_user,save_weibo
from src.spider import getuser, getresponse,getweibo
from flask_paginate import get_page_parameter, Pagination
app = Flask(__name__)
@app.route('/',methods=['POST','GET'])
def user():
    # Get the total number of users
y_sum = User.select().count()
    # Group and aggregate posts by location: {location: count}
locas = Weibo.select(Weibo.location, fn.Count(Weibo.location).alias('count')).group_by(Weibo.location)
loca_dicts = {}
for temp in locas:
loca_dicts[f'{temp.location}'] = temp.count
loca_y = loca_dicts.keys()
loca_x = loca_dicts.values()
l_y = [item for item in loca_y]
l_x = [item for item in loca_x]
    # Follower-count ranking
followers = User.select()
follower_dict = {}
for f in followers:
follower_dict[f.name] = f.number_of_followers
f_name_datas = [f for f in dict(sorted(follower_dict.items(), key=lambda x: x[1], reverse=True)).keys()]
f_follow_datas = [f for f in dict(sorted(follower_dict.items(), key=lambda x: x[1], reverse=True)).values()]
    # Weibo post-count ranking
weibocounts = Weibo.select(Weibo.user_id, fn.Count(Weibo.user_id).alias('count')).group_by(Weibo.user_id)
weiborank_dict = {}
for w in weibocounts:
id = w.user_id
user = User.select().where(User.id == id).first().name
weiborank_dict[user] = w.count
weiborank = [
[w for w in dict(sorted(weiborank_dict.items(), key=lambda x:x[1], reverse=True)).keys()],
[w for w in dict(sorted(weiborank_dict.items(), key=lambda x:x[1], reverse=True)).values()]
]
    # Word cloud
    # Read the post contents
pd_data = Weibo.select()
datas = []
for pd in pd_data:
datas.append(pd.content)
    # Tokenize the text with jieba
    result = ' '.join(jieba.lcut_for_search(''.join(datas)))
    # Define stop words
stop_words = ['@', '#', ',', '“', ':', '/', '_', '我', '微博', '的', '视频', '了', '是', '你', '们', '在']
ciyun_words = ''
for word in result:
if word not in stop_words:
ciyun_words += word
    # Set parameters and create the WordCloud object
    wc = WordCloud(
        font_path='msyh.ttc',  # a font that supports Chinese glyphs
        background_color='white',  # white background
        stopwords=stop_words,  # words in this set will not appear in the generated cloud
height=500,
width=700,
)
    # Generate the word cloud from the text
    wc.generate(ciyun_words)
    # Save the word cloud image
    wc.to_file('./static/img/word.png')
    # Data passed to the template
datas = {
'count': Weibo.select().count(),
'follow': User.select().where(User.number_of_followers),
'loca_': [l_x, list(l_y)],
'followrank': [f_name_datas,f_follow_datas],
'weiborank': weiborank,
'y_sum': y_sum,
}
if request.method == 'POST':
        # Get the user ID entered on the page and process it
uid = request.form.get('uid')
# if uid != '':
if User.select().where(User.id == uid):
            print('This user is already being monitored!')
else:
response = getresponse(uid)
try:
user_data = getuser(response, uid)
save_user(user_data)
except Exception as e:
                print('The uid is probably invalid!', e)
return render_template('dashboard.html',datas=datas)
@app.route('/tables',methods=['POST','GET'])
def tables():
user_count = User.select()
    # Update operation
if request.method == 'POST':
flag = request.form['button']
if flag == 'true':
try:
for u in user_count:
response = getresponse(u.id)
try:
datalist = getweibo(response, u.id)
save_weibo(datalist, u.id)
except Exception as e:
print('my error', e)
except Exception as e:
                print('Update failed', e)
else:
print('no')
    # Pagination
page = request.args.get(get_page_parameter(), type=int, default=1)
per_page = 5
pagination = Pagination(page=page, per_page=per_page, total=len(user_count), css_framework='bootstrap4')
start = (page - 1) * per_page
end = start + per_page
userdata = user_count[start:end]
return render_template('tables.html',userdata=userdata, pagination=pagination)
if __name__ == '__main__':
app.run(debug=True)
|
zuoqian26/weibo_monitor-2.0
|
app.py
|
app.py
|
py
| 4,736 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37374019551
|
#!/usr/bin/env python
"""
ONS Address Index -
===========================================
A simple script to test the data linking.
This is a prototype code aimed for experimentation and testing. There are not unit tests.
The code has been written for speed rather than accuracy, it therefore uses fairly aggressive
blocking. As the final solution will likely use ElasticSearch, the aim of this prototype is
not the highest accuracy but to quickly test different ideas, which can inform the final
ElasticSearch solution.
Running
-------
After all requirements are satisfied, the script can be invoked using CPython interpreter::
python PostalAddressesMatching.py
Requirements
------------
:requires: pandas (0.19.1)
:requires: addressLinkingNLPindex (and all the requirements within it)
Author
------
:author: Sami Niemi ([email protected])
Version
-------
:version: 0.1
:date: 19-Jan-2017
"""
from Analytics.linking import addressLinkingNLPindex
import pandas as pd
class PostalAddressLinker(addressLinkingNLPindex.AddressLinkerNLPindex):
"""
Address Linker for Postal Addresses test data.
Inherits the AddressLinkerNLPindex and overwrites the load_data method.
"""
def load_data(self):
"""
Read in the test data. Overwrites the method in the AddressLinker.
"""
self.toLinkAddressData = pd.read_csv(self.settings['inputPath'] + self.settings['inputFilename'],
low_memory=False)
self.toLinkAddressData['ID'] = self.toLinkAddressData['UPRN'].copy()
self.toLinkAddressData.rename(columns={'UPRN': 'UPRN_old'}, inplace=True)
def run_postal_addresses_linker(**kwargs):
"""
A simple wrapper that allows running Postal Addresses linker.
:return: None
"""
settings = dict(inputFilename='delivery_point_addresses.csv', outname='DeliveryPointAddresses')
settings.update(kwargs)
linker = PostalAddressLinker(**settings)
linker.run_all()
del linker
if __name__ == "__main__":
run_postal_addresses_linker()
|
ONSdigital/address-index-data
|
DataScience/Analytics/prototype/PostalAddressesMatching.py
|
PostalAddressesMatching.py
|
py
| 2,086 |
python
|
en
|
code
| 18 |
github-code
|
6
|
72255301308
|
from __future__ import annotations
import asyncio
import datetime
import re
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import discord
from dateutil.parser import parse
from discord.ext import commands, menus
from discord.ext.commands import Paginator as CommandPaginator
from ..helpers import GoogleImageData, human_join
from ..vars import CHECK, GO_TO_PAGE, NEXT, PREVIOUS, TRASH
if TYPE_CHECKING:
from cogs.context import Context
blurple = discord.ButtonStyle.blurple
red = discord.ButtonStyle.red
class Pager(discord.ui.View):
def __init__(
self,
source: menus.PageSource,
*,
ctx: commands.Context,
check_embeds: bool = True,
compact: bool = False,
):
super().__init__()
self.source: menus.PageSource = source
self.check_embeds: bool = check_embeds
self.ctx: commands.Context = ctx
self.message: Optional[discord.Message] = None
self.current_page: int = 0
self.compact: bool = compact
self.input_lock = asyncio.Lock()
async def on_timeout(self) -> None:
for item in self.children:
item.disabled = True # type: ignore
if self.message:
await self.message.edit(view=self)
async def show_checked_page(
self, interaction: discord.Interaction, page_number: int
) -> None:
max_pages = self.source.get_max_pages()
try:
if max_pages is None:
await self.show_page(interaction, page_number)
elif max_pages > page_number >= 0:
await self.show_page(interaction, page_number)
except IndexError:
pass
def _update_labels(self, page_number: int) -> None:
self.go_to_next_page.disabled = False
self.go_to_previous_page.disabled = False
self.go_to_number_page.disabled = False
max_pages = self.source.get_max_pages()
if max_pages is not None:
if (page_number + 1) >= max_pages:
self.go_to_next_page.disabled = True
if page_number == 0:
self.go_to_previous_page.disabled = True
if page_number == 0 and (page_number + 1) >= max_pages:
self.go_to_number_page.disabled = True
async def start(self, ctx: Context, e=False):
await self.source._prepare_once()
page = await self.source.get_page(0)
kwargs = await self._get_kwargs_from_page(page)
self._update_labels(0)
self.message = await self.ctx.send(**kwargs, view=self)
async def _get_kwargs_from_page(self, page: int) -> Dict[str, Any]:
value = await discord.utils.maybe_coroutine(self.source.format_page, self, page)
if isinstance(value, dict):
return value
elif isinstance(value, str):
return {"content": value, "embed": None}
elif isinstance(value, discord.Embed):
return {"embed": value, "content": None}
else:
return {}
async def show_page(
self, interaction: discord.Interaction, page_number: int
) -> None:
page = await self.source.get_page(page_number)
self.current_page = page_number
kwargs = await self._get_kwargs_from_page(page)
self._update_labels(page_number)
if kwargs:
if interaction.response.is_done():
if self.message:
await self.message.edit(**kwargs, view=self)
else:
await interaction.response.edit_message(**kwargs, view=self)
async def interaction_check(self, interaction: discord.Interaction):
if interaction.user and interaction.user == self.ctx.author:
return True
await interaction.response.send_message(
f'You can\'t use this, sorry. \nIf you\'d like to use this then run the command `{self.ctx.command}{self.ctx.invoked_subcommand or ""}`',
ephemeral=True,
)
return False
@discord.ui.button(emoji=PREVIOUS, style=blurple)
async def go_to_previous_page(self, interaction: discord.Interaction, __):
"""go to the previous page"""
await self.show_checked_page(interaction, self.current_page - 1)
@discord.ui.button(emoji=NEXT, style=blurple)
async def go_to_next_page(self, interaction: discord.Interaction, __):
"""go to the next page"""
await self.show_checked_page(interaction, self.current_page + 1)
@discord.ui.button(emoji=GO_TO_PAGE, style=blurple)
async def go_to_number_page(self, interaction: discord.Interaction, __):
max_pages = self.source.get_max_pages()
menu = self
class GoPage(discord.ui.Modal, title="Go to page"):
stuff = discord.ui.TextInput(
label=f"Enter a number (1/{max_pages})",
min_length=0,
required=True,
style=discord.TextStyle.short,
)
async def on_submit(self, interaction: discord.Interaction) -> None:
if self.stuff.value and self.stuff.value.isdigit():
page = int(self.stuff.value)
else:
await interaction.response.send_message(
"Please enter a valid number", ephemeral=True
)
return
if max_pages and page > max_pages or page < 1:
await interaction.response.send_message(
f"Page **{page}** does not exist", ephemeral=True
)
return
await menu.show_page(interaction, page - 1)
await interaction.response.send_modal(GoPage())
@discord.ui.button(emoji=TRASH, style=red)
async def stop_pages(self, interaction: discord.Interaction, __):
"""stops the pagination session."""
await interaction.response.defer()
await interaction.delete_original_response()
await self.ctx.message.add_reaction(CHECK)
self.stop()
class FieldPageSource(menus.ListPageSource):
"""A page source that requires (field_name, field_value) tuple items."""
def __init__(
self, entries: List[Tuple[str, str]], *, per_page=12, footer: bool = False
):
super().__init__(entries, per_page=per_page)
self.footer = footer
self.embed = discord.Embed(colour=0x2F3136)
async def format_page(self, menu, entries: Tuple[str, str]):
self.embed.clear_fields()
for key, value in entries:
self.embed.add_field(name=key, value=value, inline=False)
maximum = self.get_max_pages()
if maximum > 1 and not self.footer:
text = (
f"Page {menu.current_page + 1}/{maximum} ({len(self.entries)} entries)"
)
self.embed.set_footer(text=text)
return self.embed
class UrbanPageSource(menus.ListPageSource):
"""A page source that requires Dict[Any, Any] tuple items."""
BRACKETED = re.compile(r"(\[(.+?)\])")
def __init__(
self, entries: List[Dict[Any, Any]], *, per_page=12, footer: bool = False
):
super().__init__(entries, per_page=per_page)
self.footer = footer
self.embed = discord.Embed(colour=0x2F3136)
# credit to Danny for this https://github.com/Rapptz/RoboDanny/blob/rewrite/cogs/buttons.py#L50-L58
def cleanup_definition(self, definition: str, *, regex=BRACKETED) -> str:
def repl(m):
word = m.group(2)
return f'[{word}](http://{word.replace(" ", "-")}.urbanup.com)'
ret = regex.sub(repl, definition)
if len(ret) >= 2048:
return ret[0:2000] + " [...]"
return ret
async def format_page(self, menu, entries: Dict[Any, Any]):
data = entries[0]
embed = self.embed
embed.clear_fields()
maximum = self.get_max_pages()
embed.title = data["word"]
embed.description = self.cleanup_definition(data["definition"])
embed.timestamp = parse(data["written_on"])
embed.set_footer(
text=f"Page {menu.current_page + 1}/{maximum} \nUploaded by {data['author']}"
)
return embed
class AvatarsPageSource(menus.ListPageSource):
"""A page source that requires (avatar, created_at) tuple items."""
def __init__(
self, entries: List[Tuple[str, datetime.datetime, int]], *, per_page=1
):
super().__init__(entries, per_page=per_page)
self.embed = discord.Embed(colour=0x2F3136)
async def format_page(self, menu, entries: Tuple[str, datetime.datetime, int]):
maximum = self.get_max_pages()
self.embed.set_footer(
text=f"Page {menu.current_page + 1}/{maximum} (ID: {entries[2]}) \nChanged"
)
self.embed.timestamp = entries[1]
self.embed.set_image(url=entries[0])
return self.embed
class GoogleImagePageSource(menus.ListPageSource):
def __init__(self, entries: List[GoogleImageData], *, per_page=1):
super().__init__(entries, per_page=per_page)
self.embed = discord.Embed(colour=0x2F3136)
async def format_page(self, menu, entry: GoogleImageData):
self.embed.clear_fields()
self.embed.set_image(url=entry.image_url)
self.embed.set_author(
name=str(entry.author), icon_url=entry.author.display_avatar.url
)
self.embed.title = entry.snippet
self.embed.url = entry.url
self.embed.set_footer(
text=f"Page {menu.current_page + 1}/{self.get_max_pages()} of Google Image search - {entry.query}",
icon_url="https://cdn.discordapp.com/attachments/1055712784458989598/1061514627093110795/google-go.png",
)
return self.embed
class FrontHelpPageSource(menus.ListPageSource):
def __init__(
self,
entries: List[commands.Cog],
*,
per_page=12,
help_command: commands.HelpCommand,
):
super().__init__(entries, per_page=per_page)
self.help_command = help_command
self.embed = discord.Embed(colour=0x2F3136)
async def format_page(self, menu, entries: List[commands.Cog]):
self.embed.clear_fields()
for cog in entries:
cmds = await self.help_command.filter_commands(cog.get_commands())
if len(cmds) == 0:
continue
if cog is None:
continue
self.embed.add_field(
name=cog.qualified_name.capitalize(),
value=human_join(
[f"**`{command.qualified_name}`**" for command in cmds],
final="and",
)
or "No commands found here.",
inline=False,
)
maximum = self.get_max_pages()
if maximum > 1:
text = (
f"Page {menu.current_page + 1}/{maximum} ({len(self.entries)} entries)"
)
self.embed.set_footer(text=text)
return self.embed
class ImagePageSource(menus.ListPageSource):
def __init__(self, entries, *, per_page=1):
super().__init__(entries, per_page=per_page)
self.embed = discord.Embed(colour=0x2F3136)
async def format_page(self, menu, entries):
self.embed.clear_fields()
self.embed.set_image(url=entries)
maximum = self.get_max_pages()
if maximum > 1:
text = (
f"Page {menu.current_page + 1}/{maximum} ({len(self.entries)} entries)"
)
self.embed.set_footer(text=text)
return self.embed
class TextPageSource(menus.ListPageSource):
def __init__(self, text, *, prefix="```", suffix="```", max_size=2000):
pages = CommandPaginator(prefix=prefix, suffix=suffix, max_size=max_size - 200)
for line in text.split("\n"):
pages.add_line(line)
super().__init__(entries=pages.pages, per_page=1)
async def format_page(self, menu, content):
maximum = self.get_max_pages()
if maximum > 1:
return f"{content}\nPage {menu.current_page + 1}/{maximum}"
return content
class SimplePageSource(menus.ListPageSource):
async def format_page(self, menu, entries):
pages = []
for index, entry in enumerate(entries, start=menu.current_page * self.per_page):
pages.append(f"{index + 1}. {entry}")
maximum = self.get_max_pages()
if maximum > 1:
footer = (
f"Page {menu.current_page + 1}/{maximum} ({len(self.entries)} entries)"
)
menu.embed.set_footer(text=footer)
menu.embed.description = "\n".join(pages)
return menu.embed
class SimplePages(Pager):
"""A simple pagination session reminiscent of the old Pages interface.
Basically an embed with some normal formatting.
"""
def __init__(self, entries, *, ctx: commands.Context, per_page: int = 12):
super().__init__(SimplePageSource(entries, per_page=per_page), ctx=ctx)
self.embed = discord.Embed(colour=discord.Colour.blurple())
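

# Hedged usage sketch (the command name and entries are hypothetical; only
# SimplePages and its start() method come from this module):
#
#     @commands.command()
#     async def todo(self, ctx: commands.Context):
#         entries = ["buy milk", "walk the dog", "write docs"]
#         pages = SimplePages(entries, ctx=ctx, per_page=10)
#         await pages.start(ctx)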
|
LeoCx1000/fish
|
src/utils/discord_/paginator.py
|
paginator.py
|
py
| 13,193 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8320607344
|
import urllib.request
import http.cookiejar
import urllib.parse
from bs4 import BeautifulSoup
import os
from People import People
import queue
import io
import gzip
import time
def config_init():
"""initial configuration"""
cookie = http.cookiejar.CookieJar()
cookie_support = urllib.request.HTTPCookieProcessor(cookie)
proxy_handle = urllib.request.ProxyHandler({'http':'http://10.56.192.29:8080'})
opener = urllib.request.build_opener(proxy_handle,cookie_support) # add proxy and cookie
urllib.request.install_opener(opener)
def login(email,psw):
""" login to zhihu.com and return the main page bytes"""
postdata = urllib.parse.urlencode({
'email':email,
'password':psw
})
postdata = postdata.encode('UTF-8')
headers = {
'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:26.0) Gecko/20100101 Firefox/26.0',
'Referer':'http://www.zhihu.com'
}
req = urllib.request.Request(
headers = headers,
url = 'http://www.zhihu.com/login',
data = postdata
)
main_page = urllib.request.urlopen(req)
return main_page
def FetchPage(url,data):
""" Fetch page, data type is dict"""
postdata = None
if not (data==None) :
postdata = urllib.parse.urlencode(data)
postdata = postdata.encode('UTF-8')
headers = {
'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:26.0) Gecko/20100101 Firefox/26.0',
'Referer':'http://www.zhihu.com'
}
if not (postdata==None):
req = urllib.request.Request(
headers = headers,
url = url,
data = postdata
)
else:
req = urllib.request.Request(
headers = headers,
url = url,
)
try:
page = urllib.request.urlopen(req)
except:
print('fetch page error')
## using gzip to fetch page
if page.code == 200:
predata = page.read().decode('utf-8')
pdata = io.StringIO(predata)
gzipper = gzip.GzipFile(fileobj = pdata)
try:
pagedata = gzipper.read()
print('gzip')
except:
# if the server don't support gzip download directly
pagedata = predata
else:
return None
page.close()
return pagedata
if __name__ == '__main__':
start = time.time()
email='[email protected]'
psw = '********'
domain = 'http://www.zhihu.com'
config_init() #initial configuratin
main_page = login(email,psw) #login zhihu.com
main_soup = BeautifulSoup(main_page)
peopleQueue = queue.Queue()
urlQueue = queue.Queue()
## profile page
profile_tag = main_soup.find('div',class_='top-nav-profile')
profile_link = domain+profile_tag.a['href']
profile_name = profile_tag.a.span.getText()
me = People(profile_name,profile_link)
peopleQueue.put(me)
count = 0
## followees page
while peopleQueue.qsize() > 0:
item = peopleQueue.get()
print('link -> '+item.getUrl())
print('name -> '+item.getName())
followees_pages = FetchPage(item.getUrl()+r'/followees',None)
followees_soup = BeautifulSoup(followees_pages)
followees_links = followees_soup.find_all('div',
class_='zm-profile-card zm-profile-section-item zg-clear no-hovercard')
for followee in followees_links:
link = domain+followee.a['href']
name = followee.a['title']
person = People(name,link)
peopleQueue.put(person)
count = count + 1
if count%100==0:
print(count)
if count >100:
break
elapsed = time.time() - start
print ("Elapsed Time: %s" % (elapsed))
print('threadNum:'+str(1))
f = open('zhihu.txt','a')
s = 'count:'+str(count)+','+'Thread:'+str(1)+','+'elapsed:'+str(elapsed)
f.write(s+'\n')
f.close()
|
zhibzeng/PythonCode
|
zhihuCrawler.py
|
zhihuCrawler.py
|
py
| 4,001 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10251169117
|
import argparse
import os
import pandas as pd
import json
from pandas.core.frame import DataFrame
from fol.foq_v2 import (concate_n_chains, copy_query,
negation_sink,
binary_formula_iterator,
concate_iu_chains,
decompose_D,
parse_formula,
projection_sink, to_D,
union_bubble,
DeMorgan_replacement,
to_d,
transformation)
parser = argparse.ArgumentParser()
parser.add_argument("--benchmark_name", type=str, default="benchmark")
parser.add_argument("--input_formula_file", type=str, default="outputs/generated_formula_anchor_node=3.csv")
parser.add_argument("--knowledge_graph", action="append")
def convert_query(df,
old_form_name='DNF+MultiIUD',
new_form_name='DNF+MultiIUd',
convert_functional=decompose_D):
def convertor(f):
query_instance = parse_formula(f)
query_instance = convert_functional(query_instance)
return query_instance.formula
df[new_form_name] = df[old_form_name].map(convertor)
return df
def convert_grounded_query(df,
old_form_name='DNF+MultiIUD',
new_form_name='DNF+MultiIUd',
old_form_formula=None,
convert_functional=None):
assert old_form_formula is not None
assert convert_functional is not None
def grounded_convertor(f):
query_instance = parse_formula(old_form_formula)
query_instance.additive_ground(json.loads(f))
query_instance = convert_functional(query_instance)
return query_instance.dumps
df[new_form_name] = df[old_form_name].map(grounded_convertor)
return df
if __name__ == "__main__":
args = parser.parse_args()
target_folder = f"data/{args.benchmark_name}"
formula_file = args.input_formula_file
df = pd.read_csv(formula_file)
df = convert_query(df)
df.to_csv(formula_file, index=False)
for kg in args.knowledge_graph:
folder = os.path.join(target_folder, kg)
for i, row in df.iterrows():
print(row.formula_id)
data_file = f"data-{row.formula_id}.csv"
data_df = pd.read_csv(os.path.join(folder, data_file))
converted_data_df = convert_grounded_query(
data_df,
old_form_formula=row['DNF+MultiIUD'],
convert_functional=decompose_D)
converted_data_df.to_csv(os.path.join(folder, data_file), index=False)
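

# Hedged invocation sketch (the knowledge-graph names are assumptions; the flags
# come from the argparse definition above, and each --knowledge_graph value must
# match a folder under data/<benchmark_name>/):
#     python append_new_normal_form.py \
#         --benchmark_name benchmark \
#         --input_formula_file outputs/generated_formula_anchor_node=3.csv \
#         --knowledge_graph FB15k-237 --knowledge_graph NELL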
|
HKUST-KnowComp/EFO-1-QA-benchmark
|
append_new_normal_form.py
|
append_new_normal_form.py
|
py
| 2,720 |
python
|
en
|
code
| 17 |
github-code
|
6
|
13703412898
|
# pybutton
# created at 2015/5/28
# author: qianqians
from tools import argv_instance, tuple_rbg
from pyelement import pyelement
class pybutton(pyelement):
def __init__(self, text, cname, layout, praframe):
# when normal
self.normaltext = text
self.type = "button"
super(pybutton, self).__init__(cname, layout, praframe)
def sub(self, id = None):
js = " var table_" + self.id + " = document.createElement(\"input\");\n"
js += " table_" + self.id + ".type = \"button\";\n"
js += " table_" + self.id + ".value=\"" + self.normaltext + "\";\n"
js += super(pybutton, self).sub()
if id:
js += " " + id + ".appendChild(table_" + self.id + ");\n"
else:
js += " table_pop.appendChild(table_" + self.id + ");\n"
return js
def flush(self):
# if img is not none, use img for button,
# if img is none, use text for button,
# handle onclick in js and send a requst to service
# codegen css in page
shtml = ""
if self.html is not None:
shtml = self.html
else:
shtml += "<div id=\"" + self.id + "_1\"><button " + "id=\"" + self.id + "\"" + " type=\"button\" "
for event, onevent in self.uievent.iteritems():
shtml += event + "=\"" + self.id + event + "(this)\" "
shtml += ">" + self.normaltext + "</button></div>"
js = ""
if self.js is not None:
js = self.js
else:
for event, onevent in self.uievent.iteritems():
js += "function " + self.id + event + "(id){"
js += onevent
js += "}\n"
return shtml, js
def client_set(self, text, textcolor, backcolor):
js = "id.value=\"" + text + "\";"
js += "id.style.color=" + tuple_rbg(textcolor) + ";"
js += "id.style.backgroundColor=" + tuple_rbg(backcolor) + ";"
return js
def server_set(self, text, textcolor, backcolor):
js = "document.getElementById(\"" + self.id + "\").value=\"" + text + "\";\n"
js += "document.getElementById(\"" + self.id + "\").style.color=" + tuple_rbg(textcolor) + ";\n"
js += "document.getElementById(\"" + self.id + "\").style.backgroundColor=" + tuple_rbg(backcolor) + ";\n"
return js
|
theDarkForce/plask
|
plask/pybutton.py
|
pybutton.py
|
py
| 2,060 |
python
|
en
|
code
| 2 |
github-code
|
6
|
26804076161
|
pos = [4, 4]
count = 0
side_len = 1
dist = 1
spiral = [["" for col in range(10)] for row in range(10)]
for num in range(int(input()), int(input()) + 1):
spiral[pos[1]][pos[0]] = str(num)
count += 1
if count <= side_len: pos[1] += dist
elif count > side_len: pos[0] += dist
if count == side_len * 2:
count = 0
side_len += 1
dist = -dist
print("\n".join(" ".join(row).strip() for row in spiral
if not all(num == "" for num in row)))
|
Stevan-Zhuang/DMOJ
|
CCC/CCC '01 S2 - Spirals.py
|
CCC '01 S2 - Spirals.py
|
py
| 471 |
python
|
en
|
code
| 1 |
github-code
|
6
|
13914688192
|
import oneflow as flow
from .recurrent import rnn
def _FullyConnected(input_blob, weight_blob, bias_blob):
output_blob = flow.matmul(input_blob, weight_blob)
if bias_blob:
output_blob = flow.nn.bias_add(output_blob, bias_blob)
return output_blob
class SimpleRNNCell:
def __init__(self, units,
activation=flow.math.tanh,
use_bias=True,
kernel_initializer=flow.glorot_uniform_initializer(),
recurrent_initializer=flow.glorot_normal_initializer(), # should be orthogonal_initializer
bias_initializer=flow.zeros_initializer(),
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
dropout=0.,
recurrent_dropout=0.,
dtype=flow.float32,
trainable=True,
**kwargs):
self.units = units
self.activation = activation
self.use_bias = use_bias
self.kernel_initializer = kernel_initializer
self.recurrent_initializer = recurrent_initializer
self.bias_initializer = bias_initializer
self.kernel_regularizer = kernel_regularizer
self.recurrent_regularizer = recurrent_regularizer
self.bias_regularizer = bias_regularizer
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
self.trainable = trainable
self.dtype = dtype
self.layer_index = kwargs['layer_index'] if 'layer_index' in kwargs else ''
        self.direction = kwargs['direction'] if 'direction' in kwargs else 'forward'
def _build(self, inputs):
input_size = inputs.shape[-1]
units = self.units
dtype = self.dtype
trainable = self.trainable
with flow.scope.namespace('layer' + str(self.layer_index)):
with flow.scope.namespace(self.direction):
self.kernel_blob = flow.get_variable(
name='input' + '-kernel',
shape=[input_size, units],
dtype=dtype,
trainable=trainable,
regularizer=self.kernel_regularizer,
initializer=self.kernel_initializer
)
self.recurrent_kernel_blob = flow.get_variable(
name='input' + '-recurrent-kernel',
shape=[units, units],
dtype=dtype,
trainable=trainable,
regularizer=self.recurrent_regularizer,
initializer=self.recurrent_initializer
)
self.bias_blob = flow.get_variable(
name='input' + '-bias',
shape=[units],
dtype=dtype,
trainable=trainable,
regularizer=self.bias_regularizer,
initializer=self.bias_initializer
) if self.use_bias else None
def __call__(self, inputs, states):
self._build(inputs)
hx = states[0]
if 0 < self.dropout < 1.:
inputs = flow.nn.dropout(inputs, rate=self.dropout)
if 0 < self.recurrent_dropout < 1.:
hx = flow.nn.dropout(hx, rate=self.recurrent_dropout)
hy = _FullyConnected(inputs, self.kernel_blob, self.bias_blob)
output = hy + _FullyConnected(hx, self.recurrent_kernel_blob, None)
output = self.activation(output)
return output, [output]
def simple_rnn(inputs,
units,
activation=flow.math.tanh,
use_bias=True,
kernel_initializer=flow.glorot_uniform_initializer(),
recurrent_initializer=flow.glorot_normal_initializer(), # should be orthogonal_initializer
bias_initializer=flow.zeros_initializer(),
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
dropout=0.,
recurrent_dropout=0.,
return_sequences=False,
initial_state=None,
**kwargs):
return rnn(inputs,
SimpleRNNCell(units,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
unit_forget_bias=unit_forget_bias,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
),
return_sequences=return_sequences, initial_state=initial_state, kwargs=kwargs)
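

# Hedged usage sketch (tensor names and shapes are assumptions, not from this repo):
#     outputs = simple_rnn(embeddings, units=128, return_sequences=True)
# would run SimpleRNNCell over the time axis of `embeddings`
# (shape [batch, time, features]) and return the hidden state at every step;
# with return_sequences=False only the final step's state would be returned.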
|
Oneflow-Inc/oneflow_nlp_model
|
nlp_ops/rnn/simple_rnn.py
|
simple_rnn.py
|
py
| 5,227 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73401806269
|
import torch
from colorama import Fore as F
def train_step(model, loss_fn, acc_fn, optimizer, dataloader, epochs):
"""
Trains a model for a binary classification task, calculating both loss and accuracy
Args:
model: the model that will be trained
loss_fn: loss function, should be BCEWithLogitsLoss
acc_fn: accuracy function (ideally from torchmetrics)
optimizer: optimizer from torch.optim
dataloader: dataloader for the data the model will be trained on
epochs: the number of times the model will run through the entire dataloader
Returns:
Each epoch the print out of the current epoch number, loss value, accuracy value (all coloured)
"""
model.train()
    for epoch in range(epochs):
        # Reset the running metrics each epoch so the printed averages are per-epoch
        train_loss, train_acc = 0, 0
        for X, y in dataloader:
y = y.unsqueeze(dim=1)
logits = model(X)
            pred = (torch.sigmoid(logits) > 0.5)  # Convert logits to probabilities with sigmoid, then to labels with a 0.5 threshold
loss = loss_fn(logits.type(torch.float32), y.type(torch.float32))
acc = acc_fn(pred, y) * 100
train_loss += loss.item()
train_acc += acc
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_loss /= len(dataloader)
train_acc /= len(dataloader)
print(f'Epoch: {F.BLUE}{epoch}{F.RESET} | Loss: {F.RED}{train_loss:.2f}{F.RESET} | Accuracy: {F.GREEN}{train_acc:.2f}{F.RESET}')
def test_step(model, loss_fn, acc_fn, dataloader):
"""
Tests a model on a binary classification task, calculating both loss and accuracy
Args:
model: the model that will be tested
loss_fn: loss function, should be BCEWithLogitsLoss
acc_fn: accuracy function (ideally from torchmetrics)
dataloader: dataloader for the data the model will be tested on
Returns:
At the end of testing; the measured loss and accuracy value (all coloured)
"""
model.eval()
test_loss, test_acc = 0, 0
with torch.inference_mode():
for X, y in dataloader:
y = y.unsqueeze(dim=1)
logits = model(X)
            pred = (torch.sigmoid(logits) > 0.5).float()  # Convert logits to probabilities with sigmoid, then to labels with a 0.5 threshold
loss = loss_fn(logits.type(torch.float32), y.type(torch.float32))
acc = acc_fn(pred, y) * 100
test_loss += loss.item()
test_acc += acc
test_loss /= len(dataloader)
test_acc /= len(dataloader)
print(f'{F.CYAN}MÝR TESTING{F.RESET}\nLoss: {F.RED}{test_loss:.2f}{F.RESET} | Accuracy: {F.GREEN}{test_acc:.2f}{F.RESET}')
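

# Hedged usage sketch (the model, dataloaders, and torchmetrics accuracy below
# are assumptions; only train_step and test_step come from this module):
#
#     import torch.nn as nn
#     from torchmetrics.classification import BinaryAccuracy
#
#     loss_fn = nn.BCEWithLogitsLoss()
#     acc_fn = BinaryAccuracy()
#     optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
#     train_step(model, loss_fn, acc_fn, optimizer, train_dataloader, epochs=5)
#     test_step(model, loss_fn, acc_fn, test_dataloader)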
|
PopeCorn/myr
|
code/functions.py
|
functions.py
|
py
| 2,782 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24547935454
|
#!/usr/bin/env python3
import sqlalchemy as sa
import sqlalchemy.exc
import sys
from contextlib import contextmanager
from pathlib import Path
import click
import csv
sys.path.append('tm_navigator')
from tm_navigator.models import *
engine = sa.create_engine('postgresql+psycopg2://postgres@localhost/tm_navigator')
sa.orm.configure_mappers()
Session = sa.orm.sessionmaker(bind=engine)
@contextmanager
def session_scope():
"""Provide a transactional scope around a series of operations."""
session = Session()
try:
yield session
session.commit()
except:
session.rollback()
raise
finally:
session.close()
from sqlalchemy_utils import aggregates
aggregates.local_condition = lambda prop, objects: sa.literal(True)
class ListSession(list):
def execute(self, query):
return self._session.execute(query)
def update_aggregates(session, *classes_modified):
ls = ListSession([c() for c in classes_modified])
ls._session = session
aggregates.manager.construct_aggregate_queries(ls, None)
def copy_from_csv(session, model, csv_file):
with session.connection().connection.cursor() as cursor, csv_file.open() as csv_f:
fieldnames = csv_f.readline()
cursor.copy_expert('copy {0} ({1}) from stdin with csv'.format(model.__tablename__, fieldnames), csv_f)
update_aggregates(session, model)
def check_files(directory, expected_names, extension='.csv', cli=True):
file_names = {p.name for p in directory.iterdir() if p.is_file()}
expected_files = {t + extension for t in expected_names}
if expected_files & file_names:
s = 'Found files {}.'.format(
', '.join('"{}"'.format(f) for f in sorted(expected_files & file_names)))
if cli:
click.secho(s, fg='green')
else:
print(s)
if expected_files - file_names:
s = 'Not found files {}.'.format(
', '.join('"{}"'.format(f) for f in sorted(expected_files - file_names)))
if cli:
click.secho(s, fg='red')
click.echo('Will try to continue with the files present.')
else:
print(s)
print('Will try to continue with the files present.')
def delete_data_for(session, models, cli=True):
def delete_data(iter_func=lambda: None):
for table in reversed(Base.metadata.sorted_tables):
matching_models = [m for m in models if m.__table__ == table]
if not matching_models:
continue
model = matching_models[0]
session.query(model).delete()
iter_func()
if cli:
with click.progressbar(label='Deleting data', length=len(models)) as pbar:
delete_data(lambda: pbar.update(1))
else:
delete_data()
def load_data_for(session, models, directory, cli=True):
def load_data(iter_func=lambda: None):
for table in Base.metadata.sorted_tables:
matching_models = [m for m in models if m.__table__ == table]
if not matching_models:
continue
model = matching_models[0]
file = directory / '{}.csv'.format(model.__tablename__)
copy_from_csv(session, model, file)
iter_func()
if cli:
with click.progressbar(label='Loading data', length=len(models)) as pbar:
load_data(lambda: pbar.update(1))
else:
load_data()
@click.group()
def cli():
pass
dir_type = click.Path(exists=True, file_okay=False, resolve_path=True)
@cli.command()
def describe():
with session_scope() as session:
SchemaMixin.activate_public_schema(session)
dses = session.query(DatasetMeta).order_by(DatasetMeta.id).all()
for ds in dses:
ds.activate_schemas()
click.secho('- Dataset #{id}: {title}, {ntm} models'.format(id=ds.id,
title=ds.title or 'untitled',
ntm=len(ds.topic_models)),
fg='blue')
try:
click.echo(' Documents: {cnt}'.format(cnt=session.query(Document).count()))
click.echo(' Terms: ' +
', '.join('{m.count} {m.name} with {nocc} occurrences'.format(m=m, nocc=nocc)
for m, nocc in session.query(Modality, sa.func.count())
.join(Modality.terms).join(Term.documents)
.group_by(Modality).order_by(Modality.name)))
except sa.exc.ProgrammingError:
click.echo(' Error - can\'t find data')
session.rollback()
click.echo()
for tm in ds.topic_models:
tm.activate_schemas()
click.secho(' - Topic Model #{id}: {title}'.format(id=tm.id, title=tm.title or 'untitled'),
fg='blue')
try:
click.echo(' Topics: ' +
', '.join('{cnt} {t} at lvl {lvl}'.format(lvl=lvl, t='background' if bck else '', cnt=cnt)
for lvl, bck, cnt in session.query(Topic.level, Topic.is_background, sa.func.count())
.group_by(Topic.level, Topic.is_background).order_by(Topic.level, Topic.is_background)))
except sa.exc.ProgrammingError:
click.echo(' Error - can\'t find data')
session.rollback()
click.echo()
def add_dataset_():
with session_scope() as session:
SchemaMixin.activate_public_schema(session)
ds = DatasetMeta()
session.add(ds)
session.flush()
ds.activate_schemas()
Base.metadata.create_all(engine,
tables=map(lambda c: c.__table__, models_dataset))
return ds.id
@cli.command()
def add_dataset():
dataset_id = add_dataset_()
click.echo('Added Dataset #{id}'.format(id=dataset_id))
def load_dataset_(dataset_id, title, directory, cli=False):
directory = Path(directory)
target_models = models_dataset
check_files(directory, [m.__tablename__ for m in target_models], cli=cli)
if cli:
click.confirm('Proceeding will overwrite the corresponding data in the database. Continue?',
abort=True, default=True)
models = [m
for m in target_models
if (directory / '{}.csv'.format(m.__tablename__)).is_file()]
with session_scope() as session:
SchemaMixin.activate_public_schema(session)
ds = session.query(DatasetMeta).filter_by(id=dataset_id).one()
ds.activate_schemas()
if title is not None:
ds.title = title
delete_data_for(session, models, cli)
load_data_for(session, models, directory, cli)
@cli.command()
@click.option('-d', '--dataset-id', type=int, required=True)
@click.option('-t', '--title', type=str)
@click.option('-dir', '--directory', type=dir_type, required=True)
def load_dataset(dataset_id, title, directory):
load_dataset_(dataset_id, title, directory, cli=True)
def add_topicmodel_(dataset_id):
with session_scope() as session:
SchemaMixin.activate_public_schema(session)
ds = session.query(DatasetMeta).filter_by(id=dataset_id).one()
tm = TopicModelMeta(dataset=ds)
session.add(tm)
session.flush()
tm.domains.append(TopicModelDomain(domain='{}.'.format(tm.id)))
tm.activate_schemas()
Base.metadata.create_all(engine,
tables=map(lambda c: c.__table__, models_topic + models_assessment))
return tm.id
@cli.command()
@click.option('-d', '--dataset-id', type=int, required=True)
def add_topicmodel(dataset_id):
topicmodel_id = add_topicmodel_(dataset_id)
click.echo('Added Topic Model #{id} for Dataset #{did}'.format(id=topicmodel_id, did=dataset_id))
def load_topicmodel_(topicmodel_id, title, directory, cli=False):
directory = Path(directory)
target_models = models_topic
check_files(directory, [m.__tablename__ for m in target_models], cli=cli)
if cli:
click.confirm('Proceeding will overwrite the corresponding data in the database. Continue?',
abort=True, default=True)
models = [m
for m in target_models
if (directory / '{}.csv'.format(m.__tablename__)).is_file()]
with session_scope() as session:
SchemaMixin.activate_public_schema(session)
tm = session.query(TopicModelMeta).filter_by(id=topicmodel_id).one()
tm.activate_schemas()
if title is not None:
tm.title = title
delete_data_for(session, models, cli)
load_data_for(session, models, directory, cli)
@cli.command()
@click.option('-m', '--topicmodel-id', type=int, required=True)
@click.option('-t', '--title', type=str)
@click.option('-dir', '--directory', type=dir_type, required=True)
def load_topicmodel(topicmodel_id, title, directory):
load_topicmodel_(topicmodel_id, title, directory, cli=True)
def dump_assessments_(topicmodel_id, directory):
directory = Path(directory)
with session_scope() as session:
SchemaMixin.activate_public_schema(session)
tm = session.query(TopicModelMeta).filter_by(id=topicmodel_id).one()
tm.activate_schemas()
assessments = session.query(ATopic).all()
topic_count = session.query(Topic).count() - 1
grades = {}
for assessment in assessments:
grades[assessment.topic_id] = assessment.value
with open(directory.joinpath('topic_assessments.csv'), 'w', newline='') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(['topic_id', 'value'])
for idx, grade in grades.items():
writer.writerow([idx, grade])
@cli.command()
@click.option('-m', '--topicmodel-id', type=int, required=True)
@click.option('-dir', '--directory', type=dir_type, required=True)
def dump_assessments(topicmodel_id, directory):
dump_assessments_(topicmodel_id, directory)
if __name__ == '__main__':
cli()
|
aplavin/tm_navigator
|
db_manage.py
|
db_manage.py
|
py
| 10,341 |
python
|
en
|
code
| 1 |
github-code
|
6
|
34867843324
|
import unittest
from hamcrest import assert_that, instance_of, equal_to, raises, calling
from app.exiftool.s3.object import Object
from app.exiftool.s3.object_iterator import ObjectIterator
from tests.config import Config as TestConfig
class TestS3ObjectIterator(unittest.TestCase):
def setUp(self) -> None:
self.config = TestConfig()
self.config.patcher.environment()
        self.addCleanup(self.config.patcher.stop)  # register the callable itself; calling stop() here would undo the patch immediately
def test_next_success(self) -> None:
with self.config.use_cassette('s3_object_iterator_succeeded_pdf.yml'):
object_iterator = ObjectIterator(
Object('test-bucket', 'test/test.pdf')
)
assert_that(
object_iterator.__next__(),
instance_of(bytes)
)
assert_that(object_iterator.first_byte_pos, equal_to(0))
assert_that(object_iterator.last_byte_pos, equal_to(5300))
def test_next_failure(self) -> None:
with self.config.use_cassette('s3_object_iterator_succeeded_pdf.yml'):
object_iterator = ObjectIterator(
Object('test-bucket', 'test/test.pdf')
)
object_iterator.__next__()
assert_that(
calling(object_iterator.__next__),
raises(StopIteration)
) # type: ignore
|
zpieslak/exiftool-aws-lamdba
|
tests/s3/test_object_iterator.py
|
test_object_iterator.py
|
py
| 1,358 |
python
|
en
|
code
| 2 |
github-code
|
6
|
34852034193
|
from pyspark.context import SparkContext
import itertools
import collections
from collections import Counter
import time
from operator import add
import os
import sys
def gencomb(freqitem,lenfreqitem):
cand= []
for x in range(0,lenfreqitem-1):
first = freqitem[x]
firstelem = sorted(first)
for y in range(x+1,lenfreqitem):
second = freqitem[y]
secelem= sorted(second)
newItem = []
if firstelem[0:len(firstelem)-1] == secelem[0:len(secelem)-1] :
newItem=list(firstelem)
newItem.append(secelem[len(secelem)-1])
newItem.sort()
cand.append((newItem))
#print("candidate ",cand)
return cand
def checksub(checksub, prev, len, initbasket, ktuple):
store = Counter()
candidates = []
for k in checksub:
item = set(k)
count = 0
for x in prev:
if set(x).issubset(item):
count += 1
if count == ktuple:
candidates.append(tuple(k))
break
candidates = list(dict.fromkeys(candidates))
for basket in initbasket:
bas = set(basket)
for candidate in candidates:
temp = set(candidate)
if temp.issubset(bas):
store[tuple(candidate)] += 1
return store
def prune(checksubs, threshold):
freq = []
for x in checksubs:
if checksubs[x] >= threshold:
freq.append(tuple(x))
return sorted(freq)
def apriori(basket, basket_count, sup):
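    # SON phase 1: run Apriori over a single partition, scaling the global support
    # threshold by this partition's share of all baskets so that locally frequent
    # itemsets are emitted as global candidates.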
initbasket = list(basket)
support = float(sup)
partition_baskets = len(initbasket)
#print("basket ", partition_baskets)
total_baskets = basket_count
threshold = support * (float(partition_baskets) / float(total_baskets))
#print("THRESHOLD ", threshold)
final_itemsets = []
single_count = {}
freqsingle = []
single_print = []
# SINGLTON
for bas in initbasket:
for item in bas:
if item in single_count:
single_count[item] += 1
else:
single_count[item] = 1
for x in single_count:
if single_count[x] >= threshold:
freqsingle.append(x)
single_print.append((x,))
single = sorted(tuple(freqsingle))
single_print = sorted(single_print)
final_itemsets = single
#print("single ",len(single))
# PAIRS
res = list(itertools.combinations(single, 2))
#print("res ",len(res))
paircount = collections.defaultdict(int)
freqpair = []
cand = []
for basket in initbasket:
bas = set(basket)
for candidate in res:
temp= set(candidate)
if temp.issubset(bas):
if paircount.get(candidate):
paircount[candidate] += 1
else:
paircount[candidate] = 1
for item,count in paircount.items():
if count >= threshold:
freqpair.append(tuple(sorted(item)))
pair = sorted(freqpair)
pair = list(dict.fromkeys(pair))
#print("pair ",len(pair))
final_itemsets = final_itemsets + pair
single_print = single_print + pair
# print("pair ",single_print)
frequent = pair
k = 3
while frequent != []:
gencombination = gencomb(frequent, len(frequent))
# print(k," itemsets",len(gencombination))
checksubsets = checksub(gencombination, frequent, len(gencombination), initbasket, k)
# print("candidate ", len(checksubsets))
freqitemsets = prune(checksubsets, threshold)
#print(k, " -tuple freqitem ", len(freqitemsets))
frequent = list(freqitemsets)
if frequent != []:
final_itemsets = final_itemsets + frequent
single_print = single_print + frequent
k += 1
# print("Final itemsets ", single_print)
yield (single_print)
def countmap2(bt):
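    # SON phase 2: count how often each phase-1 candidate (the global `val` list)
    # occurs in this partition; the driver sums the partial counts and keeps the
    # itemsets that meet the true support threshold.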
initbasket=list(bt)
count = Counter()
for basket in initbasket:
bas=set(basket)
for item in val:
if item in bas:
count[item] += 1
else:
temp = set(item)
if temp.issubset(bas):
count[tuple(item)] += 1
#print("countint ",list(count.items()))
yield (list(count.items()))
if __name__ == "__main__":
filterthreshold = sys.argv[1]
support = sys.argv[2]
input_path = sys.argv[3]
output_path = sys.argv[4]
sc= SparkContext("local[*]")
sc.setLogLevel("ERROR")
start = time.time()
lines = sc.textFile(input_path)
header = lines.first()
RDD= lines.filter(lambda x: x != header).map(lambda x: tuple(x.split(",")))
basket1= RDD.map(lambda x: (x[0],[x[1]])).reduceByKey(lambda x,y: x + y)
bas=basket1.filter(lambda x: len(x[1]) > int(filterthreshold)).map(lambda x: (x[0],x[1]))
bt=bas.values()
basket_count = bt.count()
mapbasket1 = bt.mapPartitions(lambda x: apriori(x,basket_count,support)).flatMap(lambda x: x)
phase1_map= mapbasket1.map(lambda itemsets: (itemsets,1))
phase1_reduce= phase1_map.reduceByKey(lambda x,y : 1).sortBy(keyfunc= lambda x: (len(x[0]),x[0]))
val= phase1_reduce.keys().collect()
#print("phase1reduce ", len(val))
with open(output_path, 'w') as fileop:
fileop.write("Candidates: \n")
old = 1
storestr = ""
for index in range(len(val)):
element = val[index]
if index == len(val) - 1:
fileop.write(storestr.replace(")(", "),("))
if len(element) > old:
fileop.write("\n\n")
fileop.write(str(element))
break
else:
fileop.write(","+str(element))
break
if len(element) > old:
fileop.write(storestr.replace(")(", "),("))
fileop.write("\n\n")
old += 1
storestr = ""
fileop.write(str(element))
else:
if len(element) == 1:
storestr += str(element).replace(",)", ")")
else:
storestr += "," + str(element)
fileop.close()
phase2_map = bt.mapPartitions(countmap2).flatMap(lambda x: x).reduceByKey(add).sortBy(keyfunc= lambda x: (len(x[0]),x[0]))
phase2_reduce = phase2_map.filter(lambda x: x[1] >= float(support)).map(lambda x: x[0])
res = phase2_reduce.collect()
#print("res ", len(res))
with open(output_path, 'a') as fileop:
fileop.write("\n\nFrequent Itemsets: \n")
old = 1
storestr = ""
for index in range(len(res)):
element = res[index]
if index == len(res) - 1:
fileop.write(storestr.replace(")(", "),("))
if len(element) > old:
fileop.write("\n\n")
fileop.write(str(element))
break
else:
fileop.write(","+str(element))
break
if len(element) > old:
fileop.write(storestr.replace(")(", "),("))
fileop.write("\n\n")
old += 1
storestr = ""
fileop.write(str(element))
else:
if len(element) == 1:
storestr += str(element).replace(",)", ")")
else:
storestr += "," + str(element)
fileop.close()
end = time.time()
print("Duration:", end - start)
|
malika-seth/Spark-Data-Mining
|
HW2_SON_Frequent_Itemsets/malika_seth_task2.py
|
malika_seth_task2.py
|
py
| 7,640 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21073364674
|
import random
import socket
buf = 1024
timeout = 40
class Packet:
def __init__(self, seq_n, is_ack, data, checksum=None):
self.seq_n = seq_n
self.is_ack = is_ack
self.data = data
self.checksum = checksum
if is_ack == "True":
is_ack = True
if is_ack == "False":
is_ack = False
if checksum is None:
self.checksum = self.real_checksum()
def reading_size(self):
_checksum = self.real_checksum()
packet_return = (str(self.seq_n) + "|" + str(_checksum) + "|" + str(self.is_ack) + "|")
return len(packet_return.encode('utf-8')) + 16
def make_packet(self):
_checksum = self.real_checksum()
packet_return = (str(self.seq_n) + "|" + str(_checksum) + "|" + str(self.is_ack) + "|" + str(self.data))
return packet_return
def real_checksum(self):
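        # CRC-16/CCITT (polynomial 0x1021, initial value 0xFFFF) computed over the
        # concatenation of seq_n, is_ack and data.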
data = str(self.seq_n) + str(self.is_ack) + str(self.data)
data = data.encode()
polynomial = 0x1021
crc = 0xFFFF
for byte in data:
crc ^= (byte << 8)
for _ in range(8):
if (crc & 0x8000):
crc = (crc << 1) ^ polynomial
else:
crc = (crc << 1)
return crc & 0xFFFF
def is_corrupt(self):
return self.real_checksum() != self.checksum
def extract_packet(string_packet):
seq_num, checksum_, is_ack, data = string_packet.decode().split("|", 3)
return Packet(int(seq_num), is_ack, data, checksum=int(checksum_))
def send_packet(sock, packet, addr):
#print(f"Enviando: {packet.seq_n}")
#print(" " + str(len(packet.make_packet().encode('utf-8'))) + " bytes")
sock.sendto(packet.make_packet().encode(), addr)
def send_ack(sock, seq_num, addr):
packet = Packet(seq_num, True, 0, 0)
#print(f"Enviando ACK: {seq_num}")
sock.sendto(packet.make_packet().encode(), addr)
def wait_for_ack(sock, expected_ack):
try:
ack = Packet(0, True, "")
data, _ = sock.recvfrom(buf - ack.reading_size())
ack = extract_packet(data)
if ack.is_ack and expected_ack == ack.seq_n:
#print(f"ACK recebido: {ack.seq_n}")
return True
else:
#print(f"[ERRO] ACK: {ack.seq_n}, esperado: {expected_ack}")
return False
except socket.timeout:
#print(f"Tempo limite excedido: {timeout} segundos.")
return False
def packet_loss(probability):
return random.random() < probability
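
# Illustrative sketch (not from the original module): a local round-trip through
# make_packet()/extract_packet() showing that the CRC survives serialisation.
if __name__ == "__main__":
    original = Packet(7, False, "hello")
    received = extract_packet(original.make_packet().encode())
    print(received.seq_n, received.is_ack, received.data, "corrupt:", received.is_corrupt())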
|
Faranha300/Infracom-CINtofome
|
rdt.py
|
rdt.py
|
py
| 2,547 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24512155771
|
import asyncio
from django.core.cache import cache
from apps.integration.models import KotakNeoApi as KotakNeoApiModel
from apps.integration.utils import divide_and_list, get_option_ltp
from apps.integration.utils.broker.dummy import DummyApi
from apps.integration.utils.broker.kotak_neo import KotakNeoApi
from apps.trade.models import DeployedOptionStrategy as DeployedOptionStrategyModel
class StrategyOrder(object):
def __init__(self, opt_strategy: DeployedOptionStrategyModel):
self.opt_strategy = opt_strategy
self.symbol = opt_strategy.instrument.symbol
self.websocket_ids = self.opt_strategy.websocket_ids.split(",")
self.strategy_id = self.opt_strategy.pk
self.str_strategy_id = str(self.opt_strategy.pk)
self.options = self.opt_strategy.options.copy()
self.parameters = [
p.parameters
for p in self.opt_strategy.parameters.filter(is_active=True).order_by(
"name"
)
]
self.parameters_len = len(self.parameters) or 1
self.entry_type = self.opt_strategy.strategy_type
self.exit_type = "BUY" if self.entry_type == "SELL" else "SELL"
self.strategy_tradingsymbol_cache = f"TRADINGSYMBOLS_{self.opt_strategy.pk}"
self.broker = self.opt_strategy.broker
self.slippage = float(self.opt_strategy.slippage)
user_params = (
cache.get("DEPLOYED_STRATEGIES", {})
.get(self.str_strategy_id, {})
.get("user_params", [])
)
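        # If the strategy is already deployed (cached user params and cached tradingsymbols),
        # resume it as entered; otherwise rebuild the per-user lot allocation from the
        # strategy's active users.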
if user_params and cache.get(self.strategy_tradingsymbol_cache):
self.user_params = user_params
self.entered = True
else:
self.entered = False
self.user_params = []
for user in self.opt_strategy.users.filter(is_active=True).order_by(
"order_seq"
):
self.user_params.append(
{
"broker_api": user.broker_api,
"user": user.broker_api.user,
"broker": user.broker_api.broker,
"quantity_multiple": [
item * self.opt_strategy.lot_size
for item in divide_and_list(self.parameters_len, user.lots)
],
}
)
async def get_kotak_neo_parameters(self, user):
row = KotakNeoApiModel.objects.get(broker_api__user=user)
return {
"username": user.username,
"headers": {
"Authorization": f"Bearer {row.access_token}",
"Sid": row.sid,
"Auth": row.auth,
"neo-fin-key": row.neo_fin_key,
"Content-Type": "application/x-www-form-urlencoded",
},
"query_params": {"sId": row.hs_server_id},
}
async def quantity_wise_strategy_map(self, user_params):
quantity_map_list = []
for idx in range(self.parameters_len):
quantity_map_list.append(
{
user_param["user"].username: user_param["quantity_multiple"][idx]
for user_param in user_params
}
)
return quantity_map_list
async def place_dummy_order(self, buy_pending, sell_pending, user_params):
quantity_map_list = await self.quantity_wise_strategy_map(user_params)
users = [user_param["user"] for user_param in user_params]
if not user_params:
return
dummyapi = DummyApi(users=users)
if buy_pending:
await asyncio.gather(
*[
dummyapi.option_place_and_chase_order(
symbol=self.symbol,
tradingsymbol=row["tradingsymbol"],
quantity_map=quantity_map_list[row.get("idx", 0)],
transaction_type="BUY",
price=get_option_ltp(
symbol=self.symbol,
tradingsymbol=row["tradingsymbol"],
websocket_id=row["websocket_id"],
),
tag_prefix=self.str_strategy_id,
websocket_id=row["websocket_id"],
)
for row in buy_pending
]
)
if sell_pending:
await asyncio.gather(
*[
dummyapi.option_place_and_chase_order(
symbol=self.symbol,
tradingsymbol=row["tradingsymbol"],
quantity_map=quantity_map_list[row.get("idx", 0)],
transaction_type="SELL",
price=get_option_ltp(
symbol=self.symbol,
tradingsymbol=row["tradingsymbol"],
websocket_id=row["websocket_id"],
),
tag_prefix=self.str_strategy_id,
websocket_id=row["websocket_id"],
)
for row in sell_pending
]
)
async def place_kotak_neo_order(self, buy_pending, sell_pending, user_params):
quantity_map_list = await self.quantity_wise_strategy_map(user_params)
users = [
await self.get_kotak_neo_parameters(user_param["user"])
for user_param in user_params
]
if not users:
return
knapi = KotakNeoApi(users=users)
if buy_pending:
await asyncio.gather(
*[
knapi.option_place_and_chase_order(
symbol=self.symbol,
tradingsymbol=row["tradingsymbol"],
quantity_map=quantity_map_list[row.get("idx", 0)],
transaction_type="BUY",
expected_price=get_option_ltp(
symbol=self.symbol,
tradingsymbol=row["tradingsymbol"],
websocket_id=row["websocket_id"],
)
+ self.slippage,
tag_prefix=self.str_strategy_id,
websocket_id=row["websocket_id"],
)
for row in buy_pending
]
)
if sell_pending:
await asyncio.gather(
*[
knapi.option_place_and_chase_order(
symbol=self.symbol,
tradingsymbol=row["tradingsymbol"],
quantity_map=quantity_map_list[row.get("idx", 0)],
transaction_type="SELL",
expected_price=get_option_ltp(
symbol=self.symbol,
tradingsymbol=row["tradingsymbol"],
websocket_id=row["websocket_id"],
)
- self.slippage,
tag_prefix=self.str_strategy_id,
websocket_id=row["websocket_id"],
)
for row in sell_pending
]
)
async def place_order(
self,
sell_pending: list,
buy_pending: list,
user_params: list | None = None,
):
if buy_pending or sell_pending:
if not user_params:
user_params = self.user_params
if self.broker == "kotak_neo":
kotak_neo_users_params = [
user_param
for user_param in user_params
if user_param["broker"] == "kotak_neo"
]
dummy_users_params = [
user_param
for user_param in user_params
if user_param["broker"] == "dummy"
]
await asyncio.gather(
self.place_dummy_order(
buy_pending,
sell_pending,
user_params=dummy_users_params,
),
self.place_kotak_neo_order(
buy_pending,
sell_pending,
user_params=kotak_neo_users_params,
),
)
|
finbyz/trading_child
|
apps/trade/strategies/__init__.py
|
__init__.py
|
py
| 8,566 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29929603258
|
from threading import Thread, Lock
from math import ceil
lst = []
NO_THREADS = 8
total_sum = 0
lock = Lock()
class MyThread(Thread):
def __init__(self, index):
Thread.__init__(self)
self.index = index
def run(self):
global total_sum
start = self.index * ceil(len(lst) / NO_THREADS)
end = min(len(lst), ((self.index + 1) * ceil(len(lst) / NO_THREADS)))
for i in range(start, end):
lock.acquire()
total_sum += lst[i]
lock.release()
'''
# alternativ
with lock:
total_sum += lst[i]
'''
def main():
global lst
lst = [x for x in range(1, 101)]
threads = []
for i in range(0, NO_THREADS):
threads.append(MyThread(i))
threads[-1].start()
for t in threads:
t.join()
print(total_sum)
if __name__ == '__main__':
main()
|
florinrm/ASC-Lab-Tutorial
|
Lab2/sum_list_lock.py
|
sum_list_lock.py
|
py
| 975 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32005535437
|
import yaml
import os
import logging
from weight import Weight
from schema import Schema, SchemaError, Optional
from typing import Union
_LOGGER = logging.getLogger(__name__)
_LOGGER.setLevel(logging.DEBUG)
class KB_Chaos:
def __init__(self, chaos_path):
self.chaos_path = chaos_path
self.last_chaos = None
def is_instance_related(self):
"""Check whether chaos is instance related"""
if len(os.listdir(self.chaos_path)) == 0:
_LOGGER.error("No chaos found in {}".format(self.chaos_path))
return True, None
for chaos in os.listdir(self.chaos_path):
if chaos.endswith(".yaml"):
f = open(self.chaos_path + "/" + chaos)
data = f.read()
f.close()
data = yaml.safe_load(data)
if self.last_chaos is None:
self.last_chaos = data
else:
if data["anomalies"] == self.last_chaos["anomalies"]:
continue
else:
return True, data
if len(os.listdir(self.chaos_path)) <= 2:
return True, data
return False, data
class KB:
def __init__(self) -> None:
self.kb = None
self.hierarchy = {0: "chaos_type", 1: "chaos"}
self.metrics = []
self.traces = []
self.logs = []
self.cmds = []
self.type_metrics = []
self.type_traces = []
self.type_logs = []
self.type_cmds = []
self.metrics_score = None
self.traces_score = None
self.logs_score = None
self.cmds_score = None
self.type_metrics_score = None
self.type_traces_score = None
self.type_logs_score = None
self.type_cmds_score = None
def load(self, kb_path: str) -> Union[dict, None]:
"""Load knowledge base
Args:
kb_path (str): Knowledge base path
Raises:
Exception: Knowledge base check
Returns:
dict: Knowledge base
"""
if type(kb_path) is str:
f = open(kb_path)
data = f.read()
f.close()
self.kb = yaml.safe_load(data)
elif type(kb_path) is dict:
self.kb = kb_path
is_checked = self.check_kb()
if is_checked:
self.score_fingerprint()
return self.kb
else:
raise Exception("Knowledge Base check failed")
def check_kb(self) -> bool:
"""Check knowledge base config
Raises:
se: Schema error
Returns:
bool: check result
"""
if self.kb is None:
_LOGGER.error("Knowledge Base is not loaded")
return False
anomaly_schema = [{"index": int, "action": str, Optional("order"): int}]
custom_metrics_schema = {
Optional("network"): anomaly_schema,
Optional("cpu"): anomaly_schema,
Optional("memory"): anomaly_schema,
Optional("io"): anomaly_schema,
Optional("container"): anomaly_schema,
Optional("mongo"): anomaly_schema,
Optional("mysql"): anomaly_schema,
Optional("icmp"): anomaly_schema,
Optional("time"): anomaly_schema,
Optional("jvm"): anomaly_schema,
Optional("http"): anomaly_schema,
}
custom_traces_schema = {
Optional("onehop"): anomaly_schema,
}
custom_logs_schema = {
Optional("pod"): anomaly_schema,
}
custom_cmds_schema = {
Optional("config"): anomaly_schema,
Optional("exec"): anomaly_schema,
}
custom_schema = [
{
"index": int,
"experiment": str,
"instance_related": bool,
Optional("order"): bool,
"anomalies": {
Optional("metrics"): custom_metrics_schema,
Optional("traces"): custom_traces_schema,
Optional("logs"): custom_logs_schema,
Optional("cmds"): custom_cmds_schema,
},
}
]
config_schema = Schema(
{
Optional("network"): custom_schema,
Optional("pod"): custom_schema,
Optional("stress"): custom_schema,
Optional("time"): custom_schema,
Optional("jvm"): custom_schema,
Optional("dns"): custom_schema,
Optional("http"): custom_schema,
Optional("io"): custom_schema,
Optional("config"): custom_schema,
}
)
try:
config_schema.validate(self.kb)
_LOGGER.info("Configuration is valid.")
except SchemaError as se:
raise se
return True
def score_fingerprint(self):
"""Score fingerprint"""
# Two hierarchies for our experiment
chaos_types = self.kb.keys()
for chaos_type in chaos_types:
type_metrics = []
type_traces = []
type_logs = []
type_cmds = []
for chaos in self.kb[chaos_type]:
anomalies = chaos["anomalies"]
metrics = (
anomalies["metrics"] if "metrics" in anomalies else None
)
traces = anomalies["traces"] if "traces" in anomalies else None
logs = anomalies["logs"] if "logs" in anomalies else None
cmds = anomalies["cmds"] if "cmds" in anomalies else None
(
metrics_instance,
traces_instance,
logs_instance,
cmds_instance,
) = self.analyse(metrics, traces, logs, cmds)
type_metrics += metrics_instance
type_traces += traces_instance
type_logs += logs_instance
type_cmds += cmds_instance
self.type_metrics.append(type_metrics) if type_metrics else None
self.type_traces.append(type_traces) if type_traces else None
self.type_logs.append(type_logs) if type_logs else None
self.type_cmds.append(type_cmds) if type_cmds else None
for (data, score) in zip(
[
self.metrics,
self.traces,
self.logs,
self.cmds,
self.type_metrics,
self.type_traces,
self.type_logs,
self.type_cmds,
],
[
"metrics_score",
"traces_score",
"logs_score",
"cmds_score",
"type_metrics_score",
"type_traces_score",
"type_logs_score",
"type_cmds_score",
],
):
weight = Weight(data)
weighted_score = weight()
max_score = max(weighted_score.values())
for key in weighted_score:
weighted_score[key] = weighted_score[key] / max_score
setattr(self, score, weighted_score)
def analyse(
self, metrics: list, traces: list, logs: list, cmds: list
) -> tuple:
"""Analyse metrics, traces, logs, cmds
Args:
metrics (list): metrics
traces (list): traces
logs (list): logs
cmds (list): commands
Returns:
tuple: metrics, traces, logs, cmds
"""
metrics_instance = self.analyse_fingerprint(metrics, "metrics")
traces_instance = self.analyse_fingerprint(traces, "traces")
logs_instance = self.analyse_fingerprint(logs, "logs")
cmds_instance = self.analyse_fingerprint(cmds, "cmds")
return metrics_instance, traces_instance, logs_instance, cmds_instance
def analyse_fingerprint(
self, fingerprint: list, target_type: str = ""
) -> list:
"""Analyse fingerprint individually
Args:
fingerprint (list): Fingerprint
target_type (str, optional): Fingerprint type. Defaults to "".
Returns:
list: Rename instances
"""
if fingerprint is None or target_type == "":
_LOGGER.info("No {} found in Knowledge Base".format(target_type))
return []
types = fingerprint.keys()
new_instance = []
for one_type in types:
for clue in fingerprint[one_type]:
idx = clue["index"]
action = clue["action"]
clue_name = one_type + "-" + str(idx) + "-" + action
new_instance.append(clue_name)
if new_instance:
if target_type == "metrics":
self.metrics.append(new_instance)
elif target_type == "traces":
self.traces.append(new_instance)
elif target_type == "logs":
self.logs.append(new_instance)
elif target_type == "cmds":
self.cmds.append(new_instance)
self.logs.append(new_instance)
return new_instance
|
Fengrui-Liu/MicroCBR
|
microCBR/kb.py
|
kb.py
|
py
| 9,333 |
python
|
en
|
code
| 6 |
github-code
|
6
|
14448481028
|
from transformers.tokenization_utils import PreTrainedTokenizer
from transformers.utils.dummy_pt_objects import PreTrainedModel
from transformers.data.processors.utils import InputExample, InputFeatures
from .prompts import TemplateGenerator, VerbalizerGenerator
from openprompt import PromptDataLoader, PromptForClassification
from openprompt.prompts import ManualVerbalizer, LMBFFTemplate, ManualTemplate
from typing import List, Optional, Dict, Union
from . import Template, Verbalizer, PromptDataLoader
import copy
import warnings
from .trainer import ClassificationRunner
from yacs.config import CfgNode
from openprompt.utils.logging import logger
from openprompt.utils.cuda import model_to_device
class LMBFFClassificationRunner:
def __init__(self,
train_dataset: List[InputExample],
valid_dataset: List[InputExample],
test_dataset: List[InputExample],
model: PreTrainedModel,
tokenizer: PreTrainedTokenizer,
template_generator_tokenizer: PreTrainedTokenizer,
initial_template: Union[LMBFFTemplate, ManualTemplate],
initial_verbalizer: ManualVerbalizer = None,
template_generator: TemplateGenerator = None,
verbalizer_generator: VerbalizerGenerator = None,
config: CfgNode = None):
r"""
This class implements the LM-BFF in paper (https://arxiv.org/pdf/2012.15723.pdf)
"""
self.model = model
self.tokenizer = tokenizer
self.template_generator_tokenizer = template_generator_tokenizer
self.max_epoch = config.classification.generation_epoch
self.train_dataset = train_dataset
self.valid_dataset = valid_dataset
self.test_dataset = test_dataset
self.template_generator = template_generator
self.verbalizer_generator = verbalizer_generator
self.config = config
self.metric = self.config.classification.metric[0]
self.initial_template = initial_template
self.initial_verbalizer = initial_verbalizer
self.auto_t = config.classification.auto_t
self.auto_v = config.classification.auto_v
if self.auto_t:
if not (initial_template and initial_verbalizer):
raise ValueError("initial template and verbalizer must not be None when both auto_t and auto_v are performed")
if not isinstance(initial_template, LMBFFTemplate):
raise ValueError("To perform template search, initial_template must be LMBFFTemplate, however {} is provided".format(type(initial_template.__class__.__name__)))
elif self.auto_v:
if initial_verbalizer is not None:
warnings.warn("only auto_v is True, the given initial_verbalizer is ignored")
if isinstance(initial_template, LMBFFTemplate):
raise ValueError("To perform template search, initial_template must be ManualTemplate, however LMBFFTemplate is provided")
else:
warnings.warn("auto_t and auto_v are both False, the trainer will degenerate to a simple classification trainer")
def _auto_t(self, dataset, template, verbalizer):
logger.info("performing auto-t...")
if self.template_generator is None:
raise ValueError("no template_generator available")
dataloader = PromptDataLoader(dataset, template, self.template_generator_tokenizer, batch_size=len(dataset)) # register all data at once
for data in dataloader:
data = data.to("cuda:{}".format(self.config.environment.local_rank))
self.template_generator.register_buffer(data.input_ids, data.attention_mask, data.label)
template_texts = self.template_generator.generate() # [['text_a', '<mask>', ...]]
best_template_text = self._get_best_template_text(template_texts, verbalizer)
return best_template_text
def _auto_v(self, dataset, template, verbalizer, batch_size=32):
logger.info("performing auto-v...")
if self.verbalizer_generator is None:
raise ValueError("no verbalizer_generator available")
dataloader = PromptDataLoader(dataset, template, self.tokenizer, batch_size=batch_size)
for data in dataloader:
data = template.process_batch(data)
data = data.to("cuda:{}".format(self.config.environment.local_rank))
self.verbalizer_generator.register_buffer(data)
label_words_list = self.verbalizer_generator.generate() # List[List[str]]
best_label_words = self._get_best_label_words(label_words_list, template, verbalizer)
return best_label_words
def run(self):
'''
if both auto_v and auto_t are set to True, perform auto_t first and then auto_v
'''
best_template = self.initial_template
best_verbalizer = self.initial_verbalizer
if self.auto_t:
best_template_text = self._auto_t(self.train_dataset, self.initial_template, best_verbalizer)
best_template = ManualTemplate(self.tokenizer, best_template_text)
if self.auto_v:
best_label_words = self._auto_v(self.train_dataset, best_template, best_verbalizer)
best_verbalizer.label_words = best_label_words
train_dataloader = PromptDataLoader(self.train_dataset, best_template, self.tokenizer)
valid_dataloader = PromptDataLoader(self.valid_dataset, best_template, self.tokenizer)
test_dataloader = PromptDataLoader(self.test_dataset, best_template, self.tokenizer)
model = PromptForClassification(copy.deepcopy(self.model), best_template, best_verbalizer)
model = model_to_device(model, self.config.environment)
runner = ClassificationRunner(model, train_dataloader, valid_dataloader, test_dataloader, config=self.config)
runner.run()
def _get_best_template_text(self, template_texts_candidates, verbalizer):
best_metrics = 0.0
best_template_text = None
for template_text in template_texts_candidates:
print(template_text)
template = ManualTemplate(self.tokenizer, template_text)
train_dataloader = PromptDataLoader(self.train_dataset, template, self.tokenizer)
valid_dataloader = PromptDataLoader(self.valid_dataset, template, self.tokenizer)
score = self._train_eval(template, verbalizer, train_dataloader, valid_dataloader)
            if score > best_metrics:
                best_metrics = score
                best_template_text = template_text
return best_template_text
def _get_best_label_words(self, verbalizer_labelwords_candidates, template, verbalizer):
current_verbalizer = copy.deepcopy(verbalizer)
best_metrics = 0.0
best_label_words = None
for label_words in verbalizer_labelwords_candidates:
current_verbalizer.label_words = label_words
train_dataloader = PromptDataLoader(self.train_dataset, template, self.tokenizer)
valid_dataloader = PromptDataLoader(self.valid_dataset, template, self.tokenizer)
score = self._train_eval(template, current_verbalizer, train_dataloader, valid_dataloader)
            if score > best_metrics:
                best_metrics = score
                best_label_words = label_words
return best_label_words
def _train_eval(self, template, verbalizer, train_dataloader, valid_dataloader):
model = PromptForClassification(copy.deepcopy(self.model), template, verbalizer)
model = model_to_device(model, self.config.environment)
runner = ClassificationRunner(model, train_dataloader, valid_dataloader, config=self.config)
best_score = 0.0
for epoch in range(self.max_epoch):
runner.train_epoch(epoch)
scores = runner.evaluate(valid_dataloader, 'Valid')
score = scores[self.metric]
if score > best_score:
best_score = score
return best_score
|
jiachenwestlake/PDA
|
openprompt/lm_bff_trainer.py
|
lm_bff_trainer.py
|
py
| 8,021 |
python
|
en
|
code
| 3 |
github-code
|
6
|
41858644978
|
# Write a program that computes a payroll slip, knowing that the deductions are
# the income tax (IR), which depends on the gross salary (see the table below),
# plus 3% for the union, and that the FGTS corresponds to 11% of the gross salary
# but is not deducted (the employer deposits it). The net salary is the gross
# salary minus the deductions.
# The program must ask the user for their hourly rate and the number of hours worked in the month.
# IR deduction:
# * Gross salary up to 900 (inclusive) - exempt
# * Gross salary up to 1500 (inclusive) - 5% deduction
# * Gross salary up to 2500 (inclusive) - 10% deduction
# * Gross salary above 2500 - 20% deduction. Print the information on screen,
# laid out as in the example below. In the example the hourly rate is 5 and the hours worked are 220.
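# Worked example (figures taken from the statement above): at R$5.00 per hour for 220 hours
# the gross salary is 5 * 220 = R$1100.00, which falls in the "up to 1500" bracket,
# so the IR deduction is 5% (R$55.00).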
print('-' * 42)
valorHora = float(input('Qual o valor da sua hora de trabalho? R$'))
print('-' * 42)
horasTrabalhadas = int(input('Quantas horas você trabalhou este mês? '))
print('-' * 42)
salárioBruto = valorHora * horasTrabalhadas
print(f'{"Salário Bruto":30}: R${salárioBruto:>8.2f}')
if 900 < salárioBruto <= 1500:
iRPercentual = 0.05
elif 1500 < salárioBruto <= 2500:
iRPercentual = 0.1
elif salárioBruto > 2500:
iRPercentual = 0.2
else:
iRPercentual = 0
iR = salárioBruto * iRPercentual
if iRPercentual != 0:
print(f'{f"(-) IR ({int(iRPercentual * 100)}%)":30}: R${iR:>8.2f}')
else:
print(f'{"(-) IR (Isento)":30}: R${iR:8.2f}')
sindicato = salárioBruto * 0.03
print(f'{"(-) Sindicato (3%)":30}: R${sindicato:>8.2f}')
iNSS = salárioBruto * 0.1
print(f'{"(-) INSS (10%)":30}: R${iNSS:>8.2f}')
fGTS = salárioBruto * 0.11
print(f'{"FGTS (11%)":30}: R${fGTS:>8.2f}')
totalDosDescontos = iR + iNSS +sindicato
print(f'{"Total de descontos":30}: R${totalDosDescontos:>8.2f}')
salárioLiquido = salárioBruto - totalDosDescontos
print(f'{"Salário Liquido":30}: R${salárioLiquido:>8.2f}')
print('-' * 42)
|
nralex/Python
|
2-EstruturaDeDecisao/exercício12.py
|
exercício12.py
|
py
| 2,019 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
40110954603
|
import os
import random
from urllib.parse import quote
from typing import List, Optional
from dataclasses import dataclass, field
from enum import Enum
from jinja2 import Template
SEAT_NUMBER_MAX_LENGTH = 3
# ascii characters are prohibited :D
ru_alphabet_lower = 'абвгдеёжзиклмопрстуфхцчшщэюя'
names = ['Сергій', 'Іван', 'Микола', 'Софія', 'Дмитро', 'Ганна', 'Надія', 'Віктор', 'Василь']
surnames = ['Іванов', 'Морозов', 'Третяк', 'Проценко', 'Сорокін', 'Дубілет', 'Зеленський']
class WagonType(Enum):
K = 'К' # Купе
P = 'П' # Плацкарт
L = 'Л' # Люкс
def __str__(self):
return self.value
@dataclass
class Seat:
number: str
first_name: str
last_name: str
def __str__(self) -> str:
return f'Seat <number: {self.number}>'
def __post_init__(self):
        if len(self.number) < SEAT_NUMBER_MAX_LENGTH:  # pad short seat numbers, e.g. '7' -> '007'
self._normalize_seat_number()
def _normalize_seat_number(self):
self.number = self.number.zfill(SEAT_NUMBER_MAX_LENGTH)
@dataclass
class TrainOrder:
from_station_id: int
to_station_id: int
train_code: str
date: str
wagon_number: int
wagon_class: str
wagon_type: WagonType
wagon_railway: int
seats: List[Seat] = field(default_factory=list)
def __str__(self) -> str:
return f'Ticker order <From: {self.from_station_id}, To: {self.to_station_id}, ' \
f'Wagon: {self.wagon_number}, ' \
f'Date: {self.date}, Seats: {self.seats}>'
@staticmethod
def roll_random_name(name_list: List[str]) -> str:
return random.choice(name_list)
def add_seat(self, seat_number: str,
first_name: Optional[str] = None,
last_name: Optional[str] = None):
if not first_name:
first_name = self.roll_random_name(names)
if not last_name:
last_name = self.roll_random_name(surnames)
seat = Seat(seat_number, first_name, last_name)
self.seats.append(seat)
return self
def serialize(self) -> str:
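        # Render the order through the Jinja2 template, strip newlines, then URL-encode it
        # while restoring '=' and '&' (and dropping encoded spaces) so the result remains a
        # usable query string.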
with open(os.path.join(os.path.abspath(os.path.dirname(__file__)),
'order_template.jinja2')) as fd:
tpl = Template(fd.read())
tpl_str = tpl.render(**vars(self))
tpl_str = (
quote(tpl_str.replace('\n', ''))
.replace('%3D', '=')
.replace('%26', '&')
.replace('%20', '')
)
return tpl_str
|
Tehtehteh/loner-bot
|
src/bot/models/train_order.py
|
train_order.py
|
py
| 2,648 |
python
|
en
|
code
| 1 |
github-code
|
6
|
73477701627
|
import functools
import pyopencl as cl
import numpy as np
from .sparsetensor import SparseFunction, SparseTensor
from .densetensor import GPUBuffer, DenseTensor
class GradData:
def __init__(self, data, xidx, yidx, shape):
self.data = data
self.xidx = xidx
self.yidx = yidx
self.shape = shape
def buffer_new(ctx, shape, zero=False, dtype=np.float32):
return GPUBuffer(shape, hostbuf=None if not zero else np.zeros(shape, dtype=dtype))
def buffer_np(ctx, x):
return cl.Buffer(ctx.cl_ctx, cl.mem_flags.READ_WRITE | cl.mem_flags.COPY_HOST_PTR, hostbuf=x)
@functools.lru_cache
def clbuild(cl_ctx, name, prg):
return cl.Program(cl_ctx, prg).build().__getattr__(name)
def uint2(x, y):
return np.array((x,y), dtype=cl.cltypes.uint2)
i32 = np.int32
# ************* unary ops *************
def unary_op(ctx, code, x):
ret = buffer_new(ctx, x.shape)
unop = clbuild(ctx.cl_ctx, "unop", """
__kernel void unop(__global const float *a_g, __global float *res_g) {
int gid = get_global_id(0);
float a = a_g[gid];
res_g[gid] = """+code+""";
}""")
unop(ctx.cl_queue, [np.prod(ret.shape)], None, x.cl, ret.cl)
return ret
class ReLU(SparseFunction):
def forward(ctx, input):
ctx.save_for_backward(input)
return unary_op(ctx, 'max(a, (float)0.)', input)
def backward(ctx, grad_output):
input, = ctx.saved_tensors
return binary_op(ctx, 'a * (b >= 0)', grad_output, input)
class Log(SparseFunction):
def forward(ctx, input):
ctx.save_for_backward(input)
return unary_op(ctx, 'log(a)', input)
def backward(ctx, grad_output):
input, = ctx.saved_tensors
return binary_op(ctx, 'a / b', grad_output, input)
class Exp(SparseFunction):
def forward(ctx, input):
ret = unary_op(ctx, 'exp(a)', input)
ctx.save_for_backward(ret)
return ret
def backward(ctx, grad_output):
ret, = ctx.saved_tensors
return binary_op(ctx, 'a * b', grad_output, ret)
# ************* reduce ops *************
def reduce_op(ctx, code, code2, inp, axis=None, start="0.0"):
if axis is None:
# full reduce
osize = [1]*len(inp.shape)
else:
osize = np.array(inp.shape)
osize[list(axis)] = 1
ret = buffer_new(ctx, osize)
if axis is None:
ret.shape = (1,)
# TODO: this is insanely slow
reduce = clbuild(ctx.cl_ctx, "reduce", """
__kernel void reduce(__global const float *a_g, int sz, __global float *res_g, int prod, int n_dims,
__global const int *shape_x, __global const int *shape_ret) {
int gid = get_global_id(0);
float out = """+start+""";
for (int x = 0; x < sz; x++) {
int idx = 0; // compute index into a_g
int tprod = prod;
int tsz = sz;
for (int dim = 0; dim < n_dims; dim++) {
idx *= shape_x[dim];
if (shape_x[dim] == shape_ret[dim]) { // dim from gid, don't reduce
tprod /= shape_x[dim];
idx += (gid / tprod) % shape_x[dim];
} else { // dim from x
tsz /= shape_x[dim];
idx += (x / tsz) % shape_x[dim];
}
}
float a = a_g[idx];
"""+code+""";
}
res_g[gid] = """+code2+""";
}""")
reduce(ctx.cl_queue, [np.prod(osize)], None, inp.cl,
i32(np.prod(inp.shape)//np.prod(osize)), ret.cl,
i32(np.prod(osize)), i32(len(osize)),
buffer_np(ctx, np.array(inp.shape, dtype=np.int32)),
buffer_np(ctx, np.array(osize, dtype=np.int32)))
return ret
class Sum(SparseFunction):
def forward(ctx, input, axis=None):
if isinstance(axis, int): axis = [axis]
ctx.save_for_backward(input, axis)
ret = reduce_op(ctx, "out += a", "out", input, axis=axis)
if axis is not None:
ret.shape = tuple([input.shape[i] for i in range(len(input.shape)) if i not in axis])
return ret
def backward(ctx, grad_output):
input, axis = ctx.saved_tensors
shape = [1 if axis is None or i in axis else input.shape[i] for i in range(len(input.shape))]
output = GPUBuffer(shape, hostbuf=grad_output)
return binary_op(ctx, 'a+b', output, buffer_new(ctx, input.shape, zero=True))
class Max(SparseFunction):
def forward(ctx, input, axis=None):
if isinstance(axis, int): axis = [axis]
ret = reduce_op(ctx, "out = max(a,out)", "out", input, axis=axis, start="-INFINITY")
ctx.save_for_backward(input, axis, ret)
if axis is not None:
ret.shape = tuple([input.shape[i] for i in range(len(input.shape)) if i not in axis])
return ret
def backward(ctx, grad_output):
input, axis, ret = ctx.saved_tensors
shape = [1 if axis is None or i in axis else input.shape[i] for i in range(len(input.shape))]
ret2 = binary_op(ctx, "1.0*(a==b)", input, GPUBuffer(shape, ret))
div = reduce_op(ctx, "out += a", "out+1e-10", ret2, axis=axis)
ret3 = binary_op(ctx, "a/b", ret2, GPUBuffer(shape, div))
return binary_op(ctx, 'a*b', ret3, GPUBuffer(shape, grad_output))
# ************* binary ops *************
@functools.lru_cache
def get_binop_prg(cl_ctx, code, complist):
ndims = len(complist)
args = "".join([f", int d{i}" for i in range(ndims)] + [f", int p{i}" for i in range(ndims-1)])
compute_idx_rets = "".join([f"\n int idx_ret{i} = (gid0 / {f'p{i}' if i < ndims-1 else '1'}) % d{i};" for i in range(ndims)])
idx_exprs = ["0", "0"] # [idx_x, idx_y]
for i in range(ndims):
for j in range(2):
if complist[i][j]:
idx_exprs[j] = "idx_ret%d + d%d*(%s)" % (i, i, idx_exprs[j])
return cl.Program(cl_ctx, """__kernel void binop(__global const float *x_g, __global const float *y_g, __global float *res_g"""+args+""") {
int gid0 = get_global_id(0);"""+compute_idx_rets+"""
float a = x_g["""+idx_exprs[0]+"""];
float b = y_g["""+idx_exprs[1]+"""];
res_g[gid0] = """+code+""";\n}""").build()
def binary_op(ctx, code, x, y):
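  # Broadcast x and y NumPy-style: pad both shapes to a common rank, check compatibility,
  # then merge adjacent dimensions that share the same broadcast pattern so the kernel
  # only has to deal with a handful of dims.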
n_dims = max(len(x.shape), len(y.shape))
shape_x, shape_y = np.ones(n_dims, dtype=np.int32), np.ones(n_dims, dtype=np.int32)
shape_x[:len(x.shape)] = np.array(x.shape, dtype=np.int32)
shape_y[:len(y.shape)] = np.array(y.shape, dtype=np.int32)
if not np.all((shape_x == 1) | (shape_y == 1) | (shape_x == shape_y)):
raise Exception(f"binary op unbroadcastable shape mismatch: {x.shape} vs {y.shape}")
shape_ret = np.maximum(shape_x, shape_y)
dimlist, complist = [], [] # note: len(dimlist) may be less than n_dims
def push(dim, comp):
if len(complist) > 0 and complist[-1] == comp:
dimlist[-1] *= dim
elif comp != (False, False):
dimlist.append(dim); complist.append(comp)
for i in range(n_dims): # group together any adjacent dimensions that we can to simplify broadcasting
push(i32(max(shape_x[i], shape_y[i])), (shape_x[i] > 1, shape_y[i] > 1))
prg = get_binop_prg(ctx.cl_ctx, code, tuple(complist))
ret = buffer_new(ctx, shape_ret, zero=True)
prod_list = np.array(dimlist, dtype=i32)[-1::-1].cumprod(dtype=i32)[-1::-1] # take cumprod from back to front
prg.binop(ctx.cl_queue, [prod_list[0]] if len(dimlist) > 0 else [1], None, x.cl, y.cl, ret.cl, *dimlist, *(prod_list[1:]))
return ret
def unbroadcast(ctx, out, in_sh):
sum_axis = [i for i in range(len(in_sh)) if in_sh[i]==1 and out.shape[i]>1] if in_sh != (1,) else None
return reduce_op(ctx, "out += a", "out", out, sum_axis)
class Add(SparseFunction):
def forward(ctx, x, y):
ctx.save_for_backward(x.shape, y.shape)
return binary_op(ctx, 'a+b', x, y)
def backward(ctx, grad_output):
grad_x, grad_y = grad_output, grad_output
shape_x, shape_y = ctx.saved_tensors
return unbroadcast(ctx, grad_x, shape_x), unbroadcast(ctx, grad_y, shape_y),
class Sub(SparseFunction):
def forward(ctx, x, y):
ctx.save_for_backward(x.shape, y.shape)
return binary_op(ctx, 'a-b', x, y)
def backward(ctx, grad_output):
grad_x, grad_y = grad_output, unary_op(ctx, '-a', grad_output)
shape_x, shape_y = ctx.saved_tensors
return unbroadcast(ctx, grad_x, shape_x), unbroadcast(ctx, grad_y, shape_y),
class Mul(SparseFunction):
def forward(ctx, x, y):
ctx.save_for_backward(x, y)
return binary_op(ctx, 'a*b', x, y)
def backward(ctx, grad_output):
x,y = ctx.saved_tensors
grad_x = binary_op(ctx, 'a*b', y, grad_output)
grad_y = binary_op(ctx, 'a*b', x, grad_output)
return unbroadcast(ctx, grad_x, x.shape), unbroadcast(ctx, grad_y, y.shape),
class Pow(SparseFunction):
def forward(ctx, x, y):
ctx.save_for_backward(x, y)
return binary_op(ctx, 'pow(a,b)', x, y)
def backward(ctx, grad_output):
x,y = ctx.saved_tensors
grad_x = binary_op(ctx, 'a*b', grad_output,
binary_op(ctx, 'b * (pow((float)a, (float)(b-1.0)))', x, y))
grad_y = binary_op(ctx, 'a*b', grad_output,
binary_op(ctx, 'pow(a, (float)b) * log(a);', x, y))
return unbroadcast(ctx, grad_x, x.shape), unbroadcast(ctx, grad_y, y.shape),
# ************* movement ops *************
class Reshape(SparseFunction):
def forward(ctx, x, shape):
x.data.shape = tuple(shape)
ctx.save_for_backward(x)
return x
def backward(ctx, grad_output):
in_shape = ctx.saved_tensors
return in_shape
def perm_axis(ctx, inp, order):
# print("PERM:", inp, order)
osize = np.array(inp.shape)[list(order)]
ret = buffer_new(ctx, osize)
perm = clbuild(ctx.cl_ctx, "perm", """
__kernel void perm(__global const float *a_g, __global float *res_g, int n_axis,
__global const int *shape, __global const int *order) {
int gid = get_global_id(0);
int gi = gid;
int idx = 0;
for(int i = n_axis-1; i>-1; i--) {
int stride = 1;
for(int j=order[i]+1; j<n_axis; j++) stride *= shape[j];
idx += (gi % shape[order[i]])*stride;
gi /= shape[order[i]];
}
res_g[gid] = a_g[idx];
}""")
perm(ctx.cl_queue, [np.prod(osize)], None, inp.cl, ret.cl, i32(len(osize)),
buffer_np(ctx, np.array(inp.shape, dtype=np.int32)),
buffer_np(ctx, np.array(order, dtype=np.int32)))
# print("RAN")
return ret
class Transpose(SparseFunction):
def forward(ctx, x):
# print("T FWD:", x)
newdata = {
'data': x.datat,
'idxs': x.idxst,
'nnzs': x.nnzst,
'ellw': x.ellwt,
'datat': x.data,
'idxst': x.idxs,
'nnzst': x.nnzs,
'ellwt': x.ellw,
}
newshape = tuple(np.array(x.shape).T)
ret = SparseTensor(from_datas=newdata, shape=newshape)
return ret
def backward(ctx, grad_output):
return perm_axis(ctx, grad_output, np.argsort((1,0)))
# TODO: merge this with perm axis
def inner_slice(ctx, x, arg):
shift = [y[0] for y in arg]
oshape = [y[1]-y[0] for y in arg]
ret = buffer_new(ctx, oshape)
gslice = clbuild(ctx.cl_ctx, "gslice", """
__kernel void gslice(__global const float *input, __global float *output, int prod, int n_dims,
__global const int *shape_x, __global const int *shape_ret,
__global const int *shift) {
int gid = get_global_id(0);
int iptr = 0;
int zero = 1;
for (int dim = 0; dim < n_dims; dim++) {
prod /= shape_ret[dim];
int sidx = (gid / prod) % shape_ret[dim] + shift[dim];
zero &= (sidx >= 0 && sidx < shape_x[dim]);
iptr = (iptr * shape_x[dim]) + sidx;
}
output[gid] = zero ? input[iptr] : 0.0;
}""")
gslice(ctx.cl_queue, [np.prod(ret.shape)], None,
x.cl, ret.cl, i32(np.prod(ret.shape)), i32(len(ret.shape)),
buffer_np(ctx, np.array(x.shape, dtype=np.int32)),
buffer_np(ctx, np.array(ret.shape, dtype=np.int32)),
buffer_np(ctx, np.array(shift, dtype=np.int32)))
return ret
class Slice(SparseFunction):
def forward(ctx, x, arg=None):
ctx.save_for_backward(x.shape)
return inner_slice(ctx, x, arg)
def backward(ctx, grad_output):
shape, = ctx.saved_tensors
narg = [(0-p[0], grad_output.shape[i]+(shape[i]-p[1])) for i,p in enumerate(ctx.arg)]
return inner_slice(ctx, grad_output, narg)
# ************* processing ops *************
class Matmul(SparseFunction): # input and weights are swapped, legacy..
def forward(ctx, weight, input):
# print("WEIGHT/input:", weight.shape, input.shape)
# print(input.shape, weight.shape)
# assert weight.shape[-2] == input.shape[-1]
# if not weight.m:
# weight.m = DenseTensor(np.zeros((input.shape[0], weight.shape[1])))
isize, msize, osize = i32(input.shape[-2]), i32(input.shape[-1]), i32(weight.shape[-1])
outshape = np.array([input.shape[-2], weight.shape[-1]])
# print("OUT:", outshape, isize, msize, osize)
outdata = np.zeros(outshape)
ret = DenseTensor(outdata)
# ret = buffer_new(ctx.cl_ctx, outshape, zero=True)
# print("RET:", ret)
# print("RET:", input)
matmul = clbuild(ctx.cl_ctx, "matmul", """
// DENSE x SPARSE
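    // The sparse operand uses an ELL-style layout: matData/colIdx hold up to `ellwidth`
    // padded entries per row and rowNnz stores how many of those entries are real.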
__kernel void matmul(__global float* matData, // INPUT MATRIX DATA
__global uint* colIdx,
__global uint* rowNnz,
uint ellwidth,
uint mwidth,
uint ncols,
__global float* vector_x, // INPUT
__global float* vector_y // OUTPUT
) { // LOCAL SHARED BUFFER
uint gid = get_global_id(0);
uint nrows = get_global_size(0);
for (uint gid2 = 0; gid2 < ncols; gid2++) {
uint nnz = rowNnz[gid2];
float sum = 0;
for (uint i = 0; i < nnz; i++) {
uint index = (gid2 * ellwidth) + i;
uint col = colIdx[index];
float aval = matData[index];
float xval = vector_x[gid*mwidth+col];
sum += aval * xval;
//if (gid==0 && gid2==0)
// printf("aval, xval: %.2f,%.2f - %.2f: (%i,%i) \\n", aval, xval, sum, col, index);
}
//printf("SUM/NNZ: %.2f %i \\n", sum, nnz);
vector_y[gid*ncols+gid2] = sum;
}
}""")
ctx.save_for_backward(input, weight)
# (isize,msize) x (msize,osize) = (isize,osize)
matmul(ctx.cl_queue, [outshape.T[0]], None,
weight.datat.cl, weight.idxst.cl, weight.nnzst.cl, np.uint32(weight.ellwt), np.uint32(msize), np.uint32(outshape.T[1]), input.cl, ret.data.cl)
# resa = np.zeros(isize,osize).astype(np.float32)
# cl.enqueue_copy(ctx.cl_queue, resa, ret.cl)
# return ret.data
# return trans_axis(ctx, ret.data, (1,0)) # print("RES:", resa)
return ret.data
def backward(ctx, grad_output):
input, weight = ctx.saved_tensors
topkx, topky = weight.topkx, weight.topky
# print('BACK:', weight.shape, topkx, topky)
isize, msize, osize = i32(input.shape[-2]), i32(input.shape[-1]), i32(weight.shape[-1])
grad_input = DenseTensor(np.zeros(input.shape), dtype=np.float32)
grad_weight = DenseTensor(np.zeros(weight.shape), dtype=np.float32)
# print('GO:', input.shape, grad_output.shape)
# print("OUTSHAPE:", weight.shape, input.shape[0], isize, msize, weight.ellwt)
# grad_output = grad_output + weight.m
matmul2 = clbuild(ctx.cl_ctx, "matmul2", """
// DENSE x SPARSE-T
__kernel void matmul2(__global float* matData, // INPUT MATRIX DATA
__global uint* colIdx,
__global uint* rowNnz,
uint ellwidth,
uint mwidth,
uint ncols0,
__global float* vector_x, // INPUT
__global float* vector_y // OUTPUT
) { // LOCAL SHARED BUFFER
uint gid = get_global_id(0);
uint nrows = get_global_size(0);
uint nnz = rowNnz[gid];
uint gid2 = get_global_id(1);
uint ncols = get_global_size(1);
float sum = 0;
for (uint i = 0; i < nnz; i++) {
uint index = (gid2 * ellwidth) + i;
uint col = colIdx[index];
float aval = matData[index];
float xval = vector_x[gid*mwidth+col];
sum += aval * xval;
//if (gid==1 && gid2==0) {
// printf("aval, xval: %.2f,%.2f - %.2f: (%i,%i) \\n", aval, xval, sum, col, index);
//}
}
//printf("SUM/NNZ: %.2f %i \\n", sum, nnz);
vector_y[gid*ncols+gid2] = sum;
}""")
# (isize,osize) x (msize,osize) = (isize,msize)
# print('msize:', grad_output.shape, input.shape)
matmul2(ctx.cl_queue, input.shape, None,
weight.data.cl, weight.idxs.cl, weight.nnzs.cl, np.uint32(weight.ellw), np.uint32(grad_output.shape[1]), np.uint32(input.shape[0]), grad_output.cl, grad_input.data.cl)
# resa = np.zeros((input.shape[1], input.shape[0])).astype(np.float32)
# cl.enqueue_copy(ctx.cl_queue, resa, grad_input.data.cl)
# print('INPUT', DenseTensor(input).cpu().data, weight.shape[0], weight.shape[1])
# print('OUT:', grad_input.cpu().data)
gettopkx = clbuild(ctx.cl_ctx, "gettopkx", """
// multilplies x TRANSPOSED by y (dense-dense)
__kernel void gettopkx(__global float* x, // INPUT MATRIX DATA
__global float* xsum, // INPUT
__global uint* youtidx, // OUT
uint topky,
uint msize
) { // LOCAL SHARED BUFFER
uint isize = get_global_size(0);
int gidx = get_global_id(0); // row
// get topk
xsum[gidx] = 0;
for (uint i=0; i<msize; i++) {
float val = x[i*isize+gidx];
//if (gid == 0) {
// printf("\\nADD VALx: %.2f - %i", val, i*msize+gid);
//}
xsum[gidx] += val;
}
float valx = xsum[gidx];
uint posx = 0;
for (uint i = 0; i < isize; i++) {
float tempval = fabs(xsum[i]);
bool larger = (tempval > fabs(valx)) || (fabs(tempval) == fabs(valx) && i < gidx);
posx += (larger)?1:0;
}
if (posx < topky) {
youtidx[posx] = gidx;
}
}""")
gettopky = clbuild(ctx.cl_ctx, "gettopky", """
// multilplies x TRANSPOSED by y (dense-dense)
__kernel void gettopky(__global float* y, // INPUT
__global float* ysum, // INPUT
__global uint* xoutidx, // OUT
uint topkx,
uint msize
) { // LOCAL SHARED BUFFER
uint osize = get_global_size(0);
int gidy = get_global_id(0); // row
ysum[gidy] = 0;
for (uint i=0; i<msize; i++) {
float val = y[i*osize+gidy];
ysum[gidy] += val;
}
//barrier(CLK_GLOBAL_MEM_FENCE);
float valy = ysum[gidy];
uint posy = 0;
for (uint i = 0; i < osize; i++) {
float tempval = fabs(ysum[i]);
bool larger = (tempval > fabs(valy)) || (fabs(tempval) == fabs(valy) && i < gidy);
posy += (larger)?1:0;
}
if (posy < topkx) {
xoutidx[posy] = gidy;
}
}""")
sortuints = clbuild(ctx.cl_ctx, "sortuints", """
        // sorts the selected indices into ascending order
__kernel void sortuints(__global uint* x, // INPUT MATRIX DATA
__global uint* xs // INPUT
) { // LOCAL SHARED BUFFER
uint isize = get_global_size(0);
int gidx = get_global_id(0); // row
uint val = x[gidx];
uint posx = 0;
for (uint i = 0; i < isize; i++) {
uint tempval = x[i];
bool smaller = tempval < val;
posx += (smaller)?1:0;
}
xs[posx] = x[gidx];
}""")
matmul0 = clbuild(ctx.cl_ctx, "matmul0", """
        // multiplies x transposed by y (dense-dense) over the selected top-k rows/columns, writing an ELL-format sparse result
__kernel void matmul0(__global float* x, // INPUT MATRIX DATA
__global float* y, // INPUT
__global uint* xidx, // INPUT YIDX
__global uint* yidx, // INPUT YIDX
__global float* resdata,// OUT
__global uint* rescols,
__global uint* resnnzs,
uint topkx,
uint ellw,
uint isize,
uint msize,
uint osize
) { // LOCAL SHARED BUFFER
uint topky = get_global_size(0);
uint gidx = yidx[get_global_id(0)]; // row
for (uint gidy0 = 0; gidy0 < topkx; gidy0++) {
uint gidy = xidx[gidy0];
float ret = 0.0;
uint i;
for (i = 0; i < msize; i++) {
uint xidx = i*isize+gidx;
float xval = x[xidx];
uint yidx = osize*i+gidy;
float yval = y[yidx];
ret += xval*yval;
//if (gidx==0 && gidy==0)
// printf("\\nmult: %.2f x %.2f - %.2f -- %i/%i", xval, yval, ret, xidx, yidx);
}
//if (gidx==0&&gidy==0)
// printf("\\nsum:%.2f", ret);
// add for
uint nnz = resnnzs[gidx];
for (i = 0; i < nnz; i++) {
if (rescols[i] >= gidy) {
break;
}
for (uint j = nnz; j >= i; j--) {
//resdata[j+1] = resdata[j];
}
}
resdata[gidx * ellw + gidy0] = ret;
rescols[gidx * ellw + gidy0] = gidy;
resnnzs[gidx] += 1;
}
}""")
matmul0t = clbuild(ctx.cl_ctx, "matmul0t", """
        // multiplies x transposed by y (dense-dense) over the selected top-k rows/columns, writing the transposed ELL-format sparse result
__kernel void matmul0t(__global float* x, // INPUT MATRIX DATA
__global float* y, // INPUT
__global uint* xidx, // INPUT YIDX
__global uint* yidx, // INPUT YIDX
__global float* resdata,// OUT
__global uint* rescols,
__global uint* resnnzs,
uint topky,
uint ellw,
uint isize,
uint msize,
uint osize
) { // LOCAL SHARED BUFFER
uint topkx = get_global_size(0);
uint gidy = xidx[get_global_id(0)]; // row
for (uint gidx0 = 0; gidx0 < topky; gidx0++) {
uint gidx = yidx[gidx0];
float ret = 0.0;
uint i;
for (i = 0; i < msize; i++) {
uint xidx = i*isize+gidx;
float xval = x[xidx];
uint yidx = osize*i+gidy;
float yval = y[yidx];
ret += xval*yval;
//if (gidx==0 && gidy==0)
// printf("\\nmult: %.2f x %.2f - %.2f -- %i/%i", xval, yval, ret, gidx, gidy,i);
}
//if (gidx==0&&gidy==0)
// printf("\\nsum:%.2f", ret);
// add for
uint nnz = resnnzs[gidx];
for (i = 0; i < nnz; i++) {
if (rescols[i] >= gidy) {
break;
}
for (uint j = nnz; j >= i; j--) {
//resdata[j+1] = resdata[j];
}
}
resdata[gidy * ellw + gidx0] = ret;
rescols[gidy * ellw + gidx0] = gidx;
resnnzs[gidy] += 1;
}
}""")
# Weight update
isize = weight.shape[0]
msize = grad_output.shape[0]
osize = weight.shape[1]
dim1 = weight.shape[1]#min(weight.shape[1], topkx)
dim2 = weight.shape[0]#min(weight.shape[0], topky)
x_sum_buf = DenseTensor(np.zeros(weight.shape[0]))
y_sum_buf = DenseTensor(np.zeros(weight.shape[1]))
x_idx_buf = DenseTensor(np.zeros(topkx), dtype=np.uint32)
y_idx_buf = DenseTensor(np.zeros(topky), dtype=np.uint32)
xs_idx_buf = DenseTensor(np.zeros(topkx), dtype=np.uint32)
ys_idx_buf = DenseTensor(np.zeros(topky), dtype=np.uint32)
sdata_buf = DenseTensor(np.zeros(weight.shape[0]*topkx))
sidxs_buf = DenseTensor(np.zeros(weight.shape[0]*topkx), dtype=np.uint32)
snnzs_buf = DenseTensor(np.zeros(weight.shape[0]), dtype=np.uint32)
sdatat_buf = DenseTensor(np.zeros(weight.shape[1]*topky))
sidxst_buf = DenseTensor(np.zeros(weight.shape[1]*topky), dtype=np.uint32)
snnzst_buf = DenseTensor(np.zeros(weight.shape[1]), dtype=np.uint32)
# print('IN', DenseTensor(input).cpu().data, weight.shape, input.shape[0])
# print('INPUT', grad_output.cpu().data)
# print('OUT', grad_input.cpu().data.sum())
# print('asdf:', isize, msize, osize)
gettopkx(ctx.cl_queue, [isize], None, input.cl, x_sum_buf.data.cl,
y_idx_buf.data.cl, np.uint32(topky), np.uint32(msize))
gettopky(ctx.cl_queue, [osize], None, grad_output.cl,
y_sum_buf.data.cl, x_idx_buf.data.cl, np.uint32(topkx), np.uint32(msize))
sortuints(ctx.cl_queue, [topkx], None, x_idx_buf.data.cl, xs_idx_buf.data.cl)
sortuints(ctx.cl_queue, [topky], None, y_idx_buf.data.cl, ys_idx_buf.data.cl)
matmul0(ctx.cl_queue, [topky], None, input.cl, grad_output.cl, xs_idx_buf.data.cl,
ys_idx_buf.data.cl, sdata_buf.data.cl, sidxs_buf.data.cl, snnzs_buf.data.cl,
np.uint32(topkx), np.uint32(topkx), np.uint32(isize), np.uint32(msize), np.uint32(osize))
matmul0t(ctx.cl_queue, [topkx], None, input.cl, grad_output.cl, xs_idx_buf.data.cl,
ys_idx_buf.data.cl, sdatat_buf.data.cl, sidxst_buf.data.cl, snnzst_buf.data.cl,
np.uint32(topky), np.uint32(topky), np.uint32(isize), np.uint32(msize), np.uint32(osize))
# x_sum_buf.data.cl.release()
# y_sum_buf.data.cl.release()
# sdata_buf.data.cl.release()
# sidxs_buf.data.cl.release()
# snnzs_buf.data.cl.release()
# sdatat_buf.data.cl.release()
# sidxst_buf.data.cl.release()
# snnzst_buf.data.cl.release()
# x_idx_buf.data.cl.release()
# y_idx_buf.data.cl.release()
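    # Editor's note (added comment): the weight gradient is assembled in ELLPACK (ELL)
    # sparse form restricted to the top-k rows/columns selected above -- `data` holds up
    # to `ellw` (= topkx) nonzeros per row, `idxs` the matching column indices, `nnzs`
    # the per-row counts, while `datat`/`idxst`/`nnzst` hold the same values laid out
    # for the transposed matrix with width topky.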
newdata = {
'data': sdata_buf.data,
'idxs': sidxs_buf.data,
'nnzs': snnzs_buf.data,
'ellw': topkx,
'datat': sdatat_buf.data,
'idxst': sidxst_buf.data,
'nnzst': snnzst_buf.data,
'ellwt': topky,
}
w_grad = SparseTensor(from_datas=newdata, shape=weight.shape)
# gradpy = w_grad.to_numpy()
# print('grad_max:', w_grad.shape, gradpy.sum())
# gradpy = w_grad.to_numpy(dual=True)
# print('grad_max:', w_grad.shape, gradpy.sum())
# asdf
# updatem = clbuild(ctx.cl_ctx, "updatem", """
# // sorts x and y in ascending order and returns sorted indices
# __kernel void updatem(__global float* m, // INPUT MATRIX DATA
# __global float* grad, // INPUT MATRIX DATA
# uint msize,
# uint osize,
# uint topk,
# float scale,
# __global uint* xoutidx,
# __global uint* youtidx,
# __global float* matData, // OUTPUT MATRIX DATA
# __global uint* colIdx,
# __global uint* rowNnz
# ) {
# uint gid = get_global_id(0);
# uint nnz = rowNnz[gid];
# for (uint i=0; i<nnz; i++) {
# uint col = colIdx[gid*topk+i];
# float val = matData[gid*topk+i];
# m[osize*gid+col] = 0;
# }
# for (uint i=0; i<osize; i++) {
# m[osize*gid+i] = scale * grad[osize*gid+i];
# }
# }""")
# scale = 0.9
# updatem(ctx.cl_queue, [grad_output.shape[0],], None,
# weight.m.data.cl, grad_output.data.cl, np.uint32(grad_input.shape[-1]), np.uint32(grad_output.shape[1]), np.uint32(topky), np.float32(scale), xs_idx_buf.data.cl, ys_idx_buf.data.cl,
# sdata_buf.data.cl, sidxs_buf.data.cl, snnzs_buf.data.cl)
return w_grad, grad_input
def trans_axis(ctx, inp, order=(1,0)):
osize = np.array(inp.shape)[list(order)]
ret = buffer_new(ctx, osize)
trans = clbuild(ctx.cl_ctx, "trans", """
__kernel void trans(__global float *a_g,
__global float *res_g,
uint width) {
int row = get_global_id(0);
for(uint i=0; i<width; i++) {
//printf("\\nSET:%i-%i", row, i);
res_g[row*width+i] = 0;
}
}""")
trans(ctx.cl_queue, [osize[1]], None, inp.cl, ret.cl, np.uint32(osize[0]))
print("PERM RET:", ret)
return ret
class Conv2D(SparseFunction):
def forward(ctx, x, w, stride=1, groups=1):
if isinstance(ctx.stride, int): ctx.stride = (ctx.stride, ctx.stride)
cout,cin,H,W = w.shape
ys,xs = ctx.stride
bs,cin_,iy,ix = x.shape
oy,ox = (iy-(H-ys))//ys, (ix-(W-xs))//xs
if cin*ctx.groups != cin_: raise Exception(f"Input Tensor shape {x.shape} does not match the shape of the weights {w.shape}. ({cin*ctx.groups} vs. {cin_})")
assert cout % ctx.groups == 0
rcout = cout//ctx.groups
ctx.save_for_backward(x,w)
# output buffer
ret = buffer_new(ctx, (bs, cout, oy, ox))
# input = (bs, groups, cin, iy, ix)
# weight = (groups, rcout, cin, H, W)
# output = (bs, groups, rcout, oy, ox)
conv = clbuild(ctx.cl_ctx, "conv", """
__kernel void conv(__global const float *input, __global const float *weight, __global float *output,
int H, int W, int groups, int rcout, int cin, int oy, int ox, int iy, int ix, int ys, int xs) {
int B = get_global_id(0)/(groups*rcout); // range 0-bs
int g = (get_global_id(0)/rcout)%groups;
int c = get_global_id(0) % rcout;
int Y = get_global_id(1); // range 0-oy
int X = get_global_id(2); // range 0-ox
int IY = Y*ys;
int IX = X*xs;
float acc = 0.0;
for (int ci = 0; ci < cin; ci++) {
for (int y = IY; y < IY+H; y++) {
for (int x = IX; x < IX+W; x++) {
acc += input[B*groups*cin*iy*ix + g*cin*iy*ix + ci*iy*ix + y*ix + x] * \
weight[g*rcout*cin*H*W + c*cin*H*W + ci*H*W + (y-IY)*W + (x-IX)];
}
}
}
output[B*groups*rcout*oy*ox + g*rcout*oy*ox + c*oy*ox + Y*ox + X] = acc;
}""")
conv(ctx.cl_queue, [bs*groups*rcout, oy, ox], None,
x.cl, w.cl, ret.cl,
i32(H), i32(W), i32(groups), i32(rcout), i32(cin),
i32(oy), i32(ox), i32(iy), i32(ix), i32(ys), i32(xs)
)
return ret
def backward(ctx, grad_output):
bs,_,oy,ox = grad_output.shape
x, w = ctx.saved_tensors
cout,cin,H,W = w.shape
ys,xs = ctx.stride
bs,cin_,iy,ix = x.shape
oy,ox = (iy-(H-ys))//ys, (ix-(W-xs))//xs
assert cin*ctx.groups == cin_
assert cout % ctx.groups == 0
rcout = cout//ctx.groups
dx = buffer_new(ctx, (bs, cin_, iy, ix), zero=True)
dw = buffer_new(ctx, (cout, cin, H, W))
# tensx = (bs, groups*cin, iy, ix)
# tensw = (groups*rcout, cin, H, W)
# ggg = (bs, groups*rout, oy, ox)
convw = clbuild(ctx.cl_ctx, "convw", """
__kernel void convw(__global const float *tensx, __global const float *ggg, __global float *dw,
int H, int W, int groups, int rcout, int cin, int oy, int ox, int iy, int ix, int ys, int xs, int bs) {
int g = get_global_id(0)/(rcout*cin) ; // range 0-groups
int c = (get_global_id(0)/(cin)) %rcout; // range 0-rcout
int ci = get_global_id(0) % cin; // range 0-cin
int y = get_global_id(1); // range 0-H
int x = get_global_id(2); // range 0-W
float acc = 0.0;
for (int Y = 0; Y < oy; Y++) {
for (int X = 0; X < ox; X++) {
for (int B = 0; B < bs; B++) {
acc += ggg[B*groups*rcout*oy*ox + +g*rcout*oy*ox + c*oy*ox + Y*ox + X] * \
tensx[B*groups*cin*iy*ix + g*cin*iy*ix + ci*iy*ix + (Y*ys+y)*ix + X*xs+x];
}
}
}
dw[get_global_id(0)*H*W + y*W + x] = acc;
}""")
convx = clbuild(ctx.cl_ctx, "convx", """
__kernel void convx(__global const float *tensw, __global const float *ggg, __global float *dx,
int H, int W, int groups, int rcout, int cin, int oy, int ox, int iy, int ix, int ys, int xs, int bs) {
int B = get_global_id(0);
int g = get_global_id(1);
int ci = get_global_id(2);
for (int Y = 0; Y < oy; Y++) {
for (int X = 0; X < ox; X++) {
for (int y = 0; y < H; y++) {
for (int x = 0; x < W; x++) {
float acc = 0.0;
for (int c = 0; c < rcout; c++) {
acc += ggg[B*groups*rcout*oy*ox + g*rcout*oy*ox + c*oy*ox + Y*ox + X] * \
tensw[g*rcout*cin*H*W + c*cin*H*W + ci*H*W + y*W + x];
}
dx[B*groups*cin*iy*ix + g*cin*iy*ix + ci*iy*ix + (Y*ys+y)*ix + X*xs+x] += acc;
}
}
}
}
}
""")
conv_args = i32(H), i32(W), i32(ctx.groups), i32(rcout), i32(cin), i32(oy), i32(ox), i32(iy), i32(ix), i32(ys), i32(xs), i32(bs)
convw(ctx.cl_queue, [ctx.groups*rcout*cin, H, W], None, x.cl, grad_output.cl, dw.cl, *conv_args)
convx(ctx.cl_queue, [bs, ctx.groups, cin], None, w.cl, grad_output.cl, dx.cl, *conv_args)
return dx, dw
|
fpaboim/tinysparse
|
tinygrad/ops_gpusparse.py
|
ops_gpusparse.py
|
py
| 32,916 |
python
|
en
|
code
| 9 |
github-code
|
6
|
11149338597
|
def bfs(graph, vis, node, q):
while q:
sz = len(q)
for i in range(sz):
node = q.pop(0)
            if vis[node]:
                continue
            vis[node] = True
            print(node, end=" ")
for adj in graph[node]:
q.append(adj)
print()
n = int(input("Enter number of nodes: "))
m = int(input("Enter number of edges: "))
graph = [[] for i in range(n)]
print("Enter nodes in form of u, v")
for count in range(m):
u, v = map(int, input().strip().split())
graph[u].append(v)
q = []
q.append(0)
vis = [False for i in range(n)]
node = 0
print()
bfs(graph, vis, node, q)
print()
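# Editor's note (illustration, not part of the original file): for 4 nodes, 3 edges
# and input edges "0 1", "0 2", "1 3", the traversal above prints one BFS level per
# line starting from node 0:
#   0
#   1 2
#   3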
|
aditya-sar/Sem-6
|
AI/bfs.py
|
bfs.py
|
py
| 649 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22323641641
|
r'''
Python library for standard functions required for numerical methods.
- Method f() - returns value of polynomial at given value.
- Method dof() - returns value of derivative of polynomial at given value.
- Method value() - returns value of expression at given value of x.
'''
import lambdaFunction
def f(x: int | float, equ_list: list, deg: int) -> int | float :
'''
Function for obtaining value of any polynomial at given value of x.
Args :
x : value of x
        equ_list : list of coeffs of x in decreasing order.
deg : degree of polynomial.
Returns:
returns float or int.
'''
power = deg
sum = 0
for coeff in equ_list:
sum = sum + (coeff * (x**power))
power = power - 1
return sum
def dof(x: int | float, equ_list: list, deg: int) -> int | float:
'''
Function for obtaining value of derivative of any polynomial at given value of x.
Args :
x : value of x
        equ_list : list of coeffs of x in decreasing order.
deg : degree of polynomial.
Returns:
returns float or int.
'''
power = deg
diff = [0]*power
i = 0
j = power
while(i<power):
new_coeff = equ_list[i] * j
diff[i] = new_coeff
j = j - 1
i = i + 1
return f(x, diff, (len(diff)-1))
def value(exp : str, x : int | float) -> int | float:
'''
Function for obtaining value of given expression at given value of x.
Args :
exp : Expression
x : value of x
Returns :
returns float or int.
'''
return lambdaFunction.lambdafunction("def f(x):", f"\n\treturn {exp}", caller=f"f({x})")
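# Editor's note: a minimal usage sketch (doctest-style, not part of the original
# module) for f() and dof(), using p(x) = 2x^2 + 3x + 1:
#
#   >>> f(2, [2, 3, 1], 2)      # 2*2**2 + 3*2 + 1
#   15
#   >>> dof(2, [2, 3, 1], 2)    # p'(x) = 4x + 3 evaluated at x = 2
#   11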
|
NotShrirang/Numerical-Methods
|
StandardFunctions.py
|
StandardFunctions.py
|
py
| 1,714 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30418545772
|
from ete3 import Tree , TreeStyle , NodeStyle , faces , AttrFace
import csv
#t = Tree("(A:1,(B:1,(E:1,D:1):0.5):0.5);" )
#t.render("mytree.png", w=183, units="mm")
t = Tree("(0);" )
all_orgs = []
with open('testlineage.csv') as csvfile:
reader = csv.DictReader(csvfile)
for org in reader:
all_orgs.append((org['id'],org['ancestor_id'],org['recorded_at'],org['on_lod']))
with open('testunpruned.csv') as csvfile:
reader = csv.DictReader(csvfile)
for org in reader:
all_orgs.append((org['id'],org['ancestor_id'],'101','0'))
all_orgs.sort(key=lambda t: int(t[0]))
#print (all_orgs)
style = NodeStyle()
style["fgcolor"] = "#00f000"
#style["bgcolor"] = "lightred"
#style["size"] = 10
style["shape"] = "circle"
style["vt_line_color"] = "#0000aa"
style["hz_line_color"] = "#0000aa"
style["vt_line_width"] = 50
style["hz_line_width"] = 50
style["vt_line_type"] = 2 # 0 solid, 1 dashed, 2 dotted
style["hz_line_type"] = 2
for org in all_orgs:
#print (org)
new_node = Tree('(' + org[0] + ');') #+ ':' + str(int(org[2])^1)
if (org[3] == '1'):
#print (org)
new_node.set_style(style)
anc = t.search_nodes(name=org[1])[0]
anc.add_child(new_node)
#for n in t.traverse():
#print (n.name)
#if all_orgs[int(n.name)][3] == '1':
#n.img_style = style2
style2 = NodeStyle()
style2["fgcolor"] = "#000000"
style2["shape"] = "circle"
style2["vt_line_color"] = "#0000aa"
style2["hz_line_color"] = "#0000aa"
style2["vt_line_width"] = 5
style2["hz_line_width"] = 5
style2["vt_line_type"] = 1 # 0 solid, 1 dashed, 2 dotted
style2["hz_line_type"] = 1
for l in t.iter_leaves():
l.img_style = style2
ts = TreeStyle()
ts.show_leaf_name = True
#ts.rotation = 90
ts.mode = 'c'
ts.arc_start = 180
ts.arc_span = 350
#t.show(tree_style=ts)
#t.show()
t = Tree()
t.populate(8)
style2 = NodeStyle()
style2["fgcolor"] = "darkred"
style2["shape"] = "circle"
style2["vt_line_color"] = "green"
style2["hz_line_color"] = "red"
style2["vt_line_width"] = 5
style2["hz_line_width"] = 5
style2["vt_line_type"] = 1 # 0 solid, 1 dashed, 2 dotted
style2["hz_line_type"] = 1
for l in t.iter_leaves():
l.img_style = style2
ts = TreeStyle()
ts.show_leaf_name = False
#
t.show(tree_style=ts)
|
cgnitash/ded
|
dedli/pylin.py
|
pylin.py
|
py
| 2,243 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34239416344
|
"""
@author: nabin
This script runs pdb2seq.pl to get sequence from pdb file
"""
import os
import sys
def run_pdb2sec(pdb, density_name, input_path, perl_script_dir):
try:
density_map_dir = os.path.join(input_path, density_name)
pdb_fi = os.path.join(density_map_dir, pdb)
os.system("perl " + perl_script_dir + " " + pdb_fi + ">>" + density_map_dir + "/" + "atomic.fasta")
print(density_name, "Done")
except FileNotFoundError:
print("Error for file:", density_name)
if __name__ == "__main__":
input_path = sys.argv[1]
perl_script_dir = "pdb2seq.pl"
density_maps = [den for den in os.listdir(input_path) if os.path.isdir(os.path.join(input_path, den))]
for den in density_maps:
rm1 = f'{input_path}/{den}/atomic.fasta'
if os.path.exists(rm1):
os.remove(rm1)
pdb_file = [p for p in os.listdir(os.path.join(input_path, den)) if
p.endswith(".pdb") or p.endswith(".ent")]
pdb_file.sort()
pdb_file_n = pdb_file[0].split(".")[0]
pdb_file_na = pdb_file_n.split("_")[0]
pdb_file_name = pdb_file_na + ".pdb"
# print(den, "->", pdb_file_name)
run_pdb2sec(pdb_file_name, den, input_path, perl_script_dir)
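# Editor's note (illustration, assumed directory layout): each subdirectory of the
# input path is expected to hold one density map's PDB/ENT model, e.g.
#   python get_pdb_seq.py /data/cryo_maps
# appends the sequence extracted by pdb2seq.pl to <input_path>/<map_dir>/atomic.fasta.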
|
BioinfoMachineLearning/cryo2struct
|
preprocessing/get_pdb_seq.py
|
get_pdb_seq.py
|
py
| 1,268 |
python
|
en
|
code
| 11 |
github-code
|
6
|
18200541516
|
'''A simple user-defined class used to demonstrate attribute and method access.'''
class A:
param1= "ras"
param2="rashu"
def fun(self):
print(f'i am {self.param1}')
print(f'i am {self.param2}')
obj= A()
print(obj.param1)
obj.fun()
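# Editor's note (illustration): the two calls above print
#   ras
#   i am ras
#   i am rashu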
|
rashmi-fit/100-daysOf-Python_challenge
|
season2/userdefinedclass.py
|
userdefinedclass.py
|
py
| 248 |
python
|
en
|
code
| 2 |
github-code
|
6
|
43066420004
|
import tflearn as tl
import numpy as np
import os,glob,cv2
import sys,argparse
from read_image import read_valid_image
import tensorflow as tf
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data , dropout, fully_connected
# from tflearn.layers.estimator import regression
image_data,image_label,num_class,label_name = read_valid_image()
# First, pass the path of the image
dir_path = os.path.dirname(os.path.realpath(__file__))
image_path=sys.argv[1]
filename = dir_path +'/' +image_path
image_size=32
num_channels=1
images = []
# Reading the image using OpenCV
image = cv2.imread(filename,0)
# Resizing the image to our desired size and preprocessing will be done exactly as done during training
image = cv2.resize(image, (image_size, image_size), cv2.INTER_LINEAR)
image = image[:, :, np.newaxis]
images.append(image)
images = np.array(images, dtype=np.uint8)
images = images.astype('float32')
x = np.multiply(images, 1.0/255.0)
# y = np.zeros([1,2])
#The input to the network is of shape [None image_size image_size num_channels]. Hence we reshape.
# x_batch = images.reshape(1, image_size,image_size,num_channels)
convnet = input_data(shape=[None,32,32,1],name='input')
convnet = conv_2d(convnet,32,2,activation='relu')
convnet = max_pool_2d(convnet,2)
convnet = conv_2d(convnet,64,2,activation='relu')
convnet = max_pool_2d(convnet,2)
convnet = conv_2d(convnet,32,2,activation='relu')
convnet = max_pool_2d(convnet,2)
convnet = conv_2d(convnet,64,2,activation='relu')
convnet = max_pool_2d(convnet,2)
convnet = conv_2d(convnet,32,2,activation='relu')
convnet = max_pool_2d(convnet,2)
convnet = conv_2d(convnet,64,2,activation='relu')
convnet = max_pool_2d(convnet,2)
convnet = fully_connected(convnet, 1024,activation='relu')
convnet = fully_connected(convnet,2,activation='softmax',name="output")
model = tl.DNN(convnet)
model.load('tflearn')
final = model.predict(x)
for i in range(0,len(label_name)):
print(label_name[i],final[0][i])
# print(final)
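# Editor's note (illustration, assumed image path): run as
#   python predict1.py test_images/cat_01.jpg
# which loads the 'tflearn' checkpoint from the working directory and prints one
# probability per label returned by read_valid_image().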
|
Eniyanilavan/DogCatIdentification-tensorflow-python
|
predict1.py
|
predict1.py
|
py
| 2,021 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22624757619
|
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from builtins import * # NOQA
from future import standard_library
standard_library.install_aliases()
import chainer
from chainer import functions as F
import numpy as np
import chainerrl
def _wrap_by_variable(x):
if isinstance(x, chainer.Variable):
return x
else:
return chainer.Variable(x)
def _unwrap_variable(x):
if isinstance(x, chainer.Variable):
return x.data
else:
return x
def elementwise_gaussian_log_pdf(x, mean, var, ln_var):
# log N(x|mean,var)
# = -0.5log(2pi) - 0.5log(var) - (x - mean)**2 / (2*var)
return -0.5 * np.log(2 * np.pi) - \
0.5 * ln_var - \
((x - mean) ** 2) / (2 * var)
NPY_SQRT1_2 = 1 / (2 ** 0.5)
def _ndtr(a):
"""CDF of the standard normal distribution.
See https://github.com/scipy/scipy/blob/master/scipy/special/cephes/ndtr.c
"""
if not isinstance(a, chainer.Variable):
a = chainer.Variable(a)
x = a * NPY_SQRT1_2
z = abs(x)
half_erfc_z = 0.5 * F.erfc(z)
return F.where(
z.data < NPY_SQRT1_2,
0.5 + 0.5 * F.erf(x),
F.where(
x.data > 0,
1.0 - half_erfc_z,
half_erfc_z))
def _safe_log(x):
"""Logarithm function that won't backprop inf to input."""
return F.log(F.where(x.data > 0, x, x.data))
def _log_ndtr(x):
"""Log CDF of the standard normal distribution.
See https://github.com/scipy/scipy/blob/master/scipy/special/cephes/ndtr.c
"""
if not isinstance(x, chainer.Variable):
x = chainer.Variable(x)
return F.where(
x.data > 6,
-_ndtr(-x),
F.where(
x.data > -14,
_safe_log(_ndtr(x)),
-0.5 * x * x - _safe_log(-x) - 0.5 * np.log(2 * np.pi)))
def _gaussian_log_cdf(x, mu, sigma):
"""Log CDF of a normal distribution."""
if not isinstance(x, chainer.Variable):
x = chainer.Variable(x)
return _log_ndtr((x - mu) / sigma)
def _gaussian_log_sf(x, mu, sigma):
"""Log SF of a normal distribution."""
if not isinstance(x, chainer.Variable):
x = chainer.Variable(x)
return _log_ndtr(-(x - mu) / sigma)
class ClippedGaussian(chainerrl.distribution.GaussianDistribution):
"""Clipped Gaussian distribution."""
def __init__(self, mean, var, low, high):
super().__init__(mean, var)
self.low = F.broadcast_to(low, mean.shape)
self.high = F.broadcast_to(high, mean.shape)
assert isinstance(self.low, chainer.Variable)
assert isinstance(self.high, chainer.Variable)
def sample(self):
unclipped = F.gaussian(self.mean, self.ln_var)
return F.minimum(F.maximum(unclipped, self.low), self.high)
def log_prob(self, x):
unclipped_elementwise_log_prob = elementwise_gaussian_log_pdf(
x, self.mean, self.var, self.ln_var)
std = self.var ** 0.5
low_log_prob = _gaussian_log_cdf(self.low, self.mean, std)
high_log_prob = _gaussian_log_sf(self.high, self.mean, std)
x_data = _unwrap_variable(x)
elementwise_log_prob = F.where(
(x_data <= self.low.data),
low_log_prob,
F.where(
x_data >= self.high.data,
high_log_prob,
unclipped_elementwise_log_prob))
return F.sum(elementwise_log_prob, axis=1)
def prob(self, x):
return F.exp(self.log_prob(x))
def copy(self):
return ClippedGaussian(_wrap_by_variable(self.mean.data),
_wrap_by_variable(self.var.data),
self.low,
self.high)
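# Editor's note: a minimal usage sketch (assumed shapes, not part of the original
# file):
#
#   import numpy as np
#   from chainer import Variable
#   mean = Variable(np.zeros((1, 2), dtype=np.float32))
#   var = Variable(np.ones((1, 2), dtype=np.float32))
#   low = Variable(-np.ones((1, 2), dtype=np.float32))
#   high = Variable(np.ones((1, 2), dtype=np.float32))
#   d = ClippedGaussian(mean, var, low, high)
#   x = d.sample()           # Gaussian sample clipped to [low, high]
#   logp = d.log_prob(x)     # pdf inside the interval, cdf/sf mass at the bounds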
|
pfnet-research/capg
|
clipped_gaussian.py
|
clipped_gaussian.py
|
py
| 3,811 |
python
|
en
|
code
| 28 |
github-code
|
6
|
40744293644
|
import chess
import math
def scoreCalcBasic(board: chess.Board):
currentScore = 0
for i in range(1,7):
if i == 1:
currentScore += len(board.pieces(i, chess.COLORS[0]))
if i == 2:
currentScore += 3*len(board.pieces(i, chess.COLORS[0]))
if i == 3:
currentScore += 3*len(board.pieces(i, chess.COLORS[0]))
if i == 4:
currentScore += 5*len(board.pieces(i, chess.COLORS[0]))
if i == 5:
currentScore += 9*len(board.pieces(i, chess.COLORS[0]))
for i in range(1,7):
if i == 1:
currentScore -= len(board.pieces(i, chess.COLORS[1]))
if i == 2:
currentScore -= 3*len(board.pieces(i, chess.COLORS[1]))
if i == 3:
currentScore -= 3*len(board.pieces(i, chess.COLORS[1]))
if i == 4:
currentScore -= 5*len(board.pieces(i, chess.COLORS[1]))
if i == 5:
currentScore -= 9*len(board.pieces(i, chess.COLORS[1]))
return currentScore
def scoreCalcBoard(board):
return scoreCalcBasic(board)
def AI(board : chess.Board, depth, alpha, beta):
#Minimax AI that beats you
#White maximizing, Black minimizing
outcome = board.outcome()
if outcome != None:
if(outcome.winner == True):
return float('inf'), None
elif(outcome.winner == False):
return float('-inf'), None
else:
if(board.turn == True):
return float('-inf'), None
else:
return float('inf'), None
if depth == 0:
return scoreCalcBoard(board), None
if board.turn == True:
#Maximizing White
maxScore = float('-inf')
maxMove = None
for move in board.legal_moves:
board.push(move)
score = AI(board, depth-1, alpha, beta)[0]
board.pop()
maxScore = max(score, maxScore)
if maxScore == score:
maxMove = move
alpha = max(alpha, score)
if beta <= alpha:
break
return maxScore, maxMove
else:
#Minimizing Black
minScore = float('inf')
minMove = None
for move in board.legal_moves:
board.push(move)
score = AI(board, depth-1, alpha, beta)[0]
board.pop()
minScore = min(score, minScore)
if minScore == score:
minMove = move
beta = min(beta, score)
if beta <= alpha:
break
return minScore, minMove
def bestMove(board: chess.Board):
score, bestMove = AI(board, 5, float('-inf'), float('inf'))
if bestMove == None:
for move in board.legal_moves:
return move
return bestMove
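# Editor's note: minimal usage sketch (not part of the original file); the depth-5
# search can take a while in positions with many legal moves.
#
#   board = chess.Board()
#   move = bestMove(board)   # alpha-beta minimax defined above
#   board.push(move)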
|
azkung/AI_Chess_5.5
|
python/bots/bot_basic.py
|
bot_basic.py
|
py
| 3,043 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9084085834
|
import os
import logging, requests
from rdflib import Namespace, Literal, Graph
from rdflib.namespace import DCTERMS, RDF, RDFS
from rdflib.plugins.stores.sparqlstore import SPARQLUpdateStore
from rdflib.graph import DATASET_DEFAULT_GRAPH_ID as default
from oslcapi.api.helpers.service_api import get_bucket
log = logging.getLogger('tester.sub')
OSLC = Namespace('http://open-services.net/ns/core#')
OSLC_EVENT = Namespace('http://open-services.net/ns/events#')
# Connect to fuseki triplestore.
FUSEKI_USER = os.getenv("FUSEKI_USER")
FUSEKI_PWD = os.getenv("FUSEKI_PWD")
fuseki_store = SPARQLUpdateStore(auth=(FUSEKI_USER,FUSEKI_PWD))
query_endpoint = 'http://fuseki.demos.gsi.upm.es/oslc-gc2/query'
update_endpoint = 'http://fuseki.demos.gsi.upm.es/oslc-gc2/update'
fuseki_data_endpoint = 'http://fuseki.demos.gsi.upm.es/oslc-gc2/data'
fuseki_store.open((query_endpoint, update_endpoint))
def generate_creation_event(resource, store):
log.warning('Creation event generated')
store.trs.generate_change_event(resource, 'Creation')
# Generate OSLC Event Resource for Fuseki Endpoint
g = Graph(fuseki_store, identifier=default)
g.add((resource.uri, RDF.type, OSLC_EVENT.Event))
g.add((resource.uri, DCTERMS.description, Literal('Creation Event')))
# Generate OSLC Event Resource for Kafka Topic
g2 = Graph()
g2.add((resource.uri, RDF.type, OSLC_EVENT.Event))
g2.add((resource.uri, DCTERMS.description, Literal('Creation Event')))
return g2
def generate_modification_event(payload, store):
log.warning('Modification event generated')
bucket = get_bucket(payload['bucket'])
service_provider = next(service_provider for service_provider in store.catalog.service_providers if
Literal(bucket.id) in service_provider.rdf.objects(None, DCTERMS.identifier))
resource = next(resource for resource in service_provider.oslc_resources if
Literal(bucket.number) in resource.rdf.objects(None, DCTERMS.identifier))
service_provider.oslc_resources.remove(resource)
resource = store.add_resource(service_provider, bucket)
store.trs.generate_change_event(resource, 'Modification')
return
def generate_deletion_event(resource, store):
log.warning('Deletion event generated')
log.warning(resource)
store.trs.generate_change_event(resource, 'Deletion')
# Generate OSLC Event Resource for Fuseki Endpoint
g = Graph(fuseki_store, identifier=default)
g.add((resource.uri, RDF.type, OSLC_EVENT.Event))
g.add((resource.uri, DCTERMS.description, Literal('Deletion Event')))
# Generate OSLC Event Resource for Kafka Topic
g2 = Graph()
g2.add((resource.uri, RDF.type, OSLC_EVENT.Event))
g2.add((resource.uri, DCTERMS.description, Literal('Deletion Event')))
return g2
|
AlexVaPe/pyOSLC_GCP
|
oslcapi/api/helpers/service_events.py
|
service_events.py
|
py
| 2,832 |
python
|
en
|
code
| 1 |
github-code
|
6
|
30367879511
|
from numpy import linspace, sin
from enable.api import ColorTrait, marker_trait
from chaco.api import ArrayPlotData, Plot
from enable.api import ComponentEditor
from traits.api import HasTraits, Instance, Int
from traitsui.api import Group, Item, View
class ScatterPlotTraits(HasTraits):
plot = Instance(Plot)
color = ColorTrait("blue")
marker = marker_trait
marker_size = Int(4)
traits_view = View(
Group(
Item("color", label="Color", style="custom"),
Item("marker", label="Marker"),
Item("marker_size", label="Size"),
Item("plot", editor=ComponentEditor(), show_label=False),
orientation="vertical",
),
width=800,
height=600,
resizable=True,
title="Chaco Plot",
)
def __init__(self):
# Create the data and the PlotData object
x = linspace(-14, 14, 100)
y = sin(x) * x ** 3
plotdata = ArrayPlotData(x=x, y=y)
# Create a Plot and associate it with the PlotData
plot = Plot(plotdata)
# Create a line plot in the Plot
self.renderer = plot.plot(("x", "y"), type="scatter", color="blue")[0]
self.plot = plot
def _color_changed(self):
self.renderer.color = self.color
def _marker_changed(self):
self.renderer.marker = self.marker
def _marker_size_changed(self):
self.renderer.marker_size = self.marker_size
# ===============================================================================
# demo object that is used by the demo.py application.
# ===============================================================================
demo = ScatterPlotTraits()
if __name__ == "__main__":
demo.configure_traits()
|
enthought/chaco
|
examples/tutorials/scipy2008/traits_example.py
|
traits_example.py
|
py
| 1,759 |
python
|
en
|
code
| 286 |
github-code
|
6
|
35445381053
|
from dexy.tests.utils import assert_output
from dexy.tests.utils import assert_in_output
from dexy.tests.utils import assert_output_cached
from dexy.tests.utils import wrap
from dexy.tests.utils import TEST_DATA_DIR
from dexy.doc import Doc
import os
import shutil
R_SECTIONS = """\
### @export "assign-vars"
x <- 6
y <- 7
### @export "multiply"
x * y
"""
def test_rint_mock():
with wrap() as wrapper:
doc = Doc("example.R|idio|rintmock",
contents=R_SECTIONS,
wrapper=wrapper)
wrapper.run_docs(doc)
assert doc.output().is_cached()
assert doc.output().as_sectioned()['assign-vars'] == "> x <- 6\n> y <- 7\n> \n"
assert doc.output().as_sectioned()['multiply'] == "> x * y\n[1] 42\n> \n"
def test_ht_latex():
assert_output_cached("htlatex", LATEX)
def test_r_batch():
assert_output('rout', 'print(1+1)', "[1] 2\n")
def test_r_int_batch():
assert_output('rintbatch', '1+1', "> 1+1\n[1] 2\n> \n")
def test_ragel_ruby_filter():
assert_in_output('rlrb', RAGEL, "_keys = _hello_and_welcome_key_offsets[cs]", ext=".rl")
def test_ps2pdf_filter():
with wrap() as wrapper:
doc = Doc("hello.ps|ps2pdf",
contents = PS,
wrapper=wrapper)
wrapper.run_docs(doc)
assert doc.output().is_cached()
assert doc.output().filesize() > 1000
def test_html2pdf_filter():
assert_output_cached("html2pdf", "<p>hello</p>", min_filesize=1000)
def test_dot_filter():
assert_output_cached("dot", "digraph { a -> b }", min_filesize=1000, ext=".dot")
def test_pdf2img_filter():
with wrap() as wrapper:
orig = os.path.join(TEST_DATA_DIR, 'color-graph.pdf')
shutil.copyfile(orig, 'example.pdf')
doc = Doc("example.pdf|pdf2img",
wrapper=wrapper)
wrapper.run_docs(doc)
assert doc.output().is_cached()
assert doc.output().filesize() > 1000
def test_pdf2jpg_filter():
with wrap() as wrapper:
orig = os.path.join(TEST_DATA_DIR, 'color-graph.pdf')
shutil.copyfile(orig, 'example.pdf')
doc = Doc("example.pdf|pdf2jpg",
wrapper=wrapper)
wrapper.run_docs(doc)
assert doc.output().is_cached()
def test_bw_filter():
with wrap() as wrapper:
orig = os.path.join(TEST_DATA_DIR, 'color-graph.pdf')
shutil.copyfile(orig, 'example.pdf')
doc = Doc("example.pdf|bw",
wrapper=wrapper)
wrapper.run_docs(doc)
assert doc.output().is_cached()
def test_asciidoc_filter():
assert_in_output("asciidoc", "hello", """<div class="paragraph"><p>hello</p></div>""")
def test_pandoc_filter():
assert_output("pandoc", "hello", "<p>hello</p>\n", ext=".md")
def test_espeak_filter():
assert_output_cached("espeak", "hello", min_filesize = 1000)
PS = """%!PS
1.00000 0.99083 scale
/Courier findfont 12 scalefont setfont
0 0 translate
/row 769 def
85 {/col 18 def 6 {col row moveto (Hello World)show /col col 90 add def}
repeat /row row 9 sub def} repeat
showpage save restore"""
RD = """
\\name{load}
\\alias{load}
\\title{Reload Saved Datasets}
\description{
Reload the datasets written to a file with the function
\code{save}.
}
"""
RAGEL = """%%{
machine hello_and_welcome;
main := ( 'h' @ { puts "hello world!" }
| 'w' @ { puts "welcome" }
)*;
}%%
data = 'whwwwwhw'
%% write data;
%% write init;
%% write exec;
"""
LATEX = """\
\documentclass{article}
\\title{Hello, World!}
\\begin{document}
\maketitle
Hello!
\end{document}
"""
|
gotosprey/dexy
|
dexy/tests/plugins/test_subprocess_filters.py
|
test_subprocess_filters.py
|
py
| 3,617 |
python
|
en
|
code
| null |
github-code
|
6
|
40866976474
|
def Count(value):
    Esum = 0
    Osum = 0
    iDigit = 0
    while value != 0:
        iDigit = value % 10
        if iDigit % 2 == 0:
            Esum = Esum + iDigit
        else:
            Osum = Osum + iDigit
        value = value // 10
    return Esum - Osum


def main():
    print("Enter Number")
    no = int(input())
    ret = Count(no)
    print(ret)


if __name__ == "__main__":
    main()
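# Editor's note (illustration): for input 1234 the even digits sum to 2 + 4 = 6 and
# the odd digits to 1 + 3 = 4, so Count(1234) returns 6 - 4 = 2.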
|
Snehal-Patil72/Development
|
Python Module/DigitEvenOddSum__Module.py
|
DigitEvenOddSum__Module.py
|
py
| 337 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10355901647
|
import pandas as pd
MIN_PPL = 125
MAX_PPL = 300
def baseline(data):
# family indexed
choices = data[[col for col in data.columns if "choice_" in col]] # families' preferences
sizes = data["n_people"] # families' sizes
assignments = pd.Series(name="assigned_day") # holds assigned day for each family
# day indexed
potentials = pd.Series(0, index=range(1, 101)) # max number of people possibly attending for each day
occupancies = pd.Series([0] * 100, index=range(1, 101)) # holds number of people per day
# STEP 1 : initialize potentials
for fam, fam_choices in choices.iterrows():
potentials.loc[fam_choices] += sizes.loc[fam]
potentials = potentials.sort_values(ascending=True)
# STEP 2 : fill at least MIN_PPL for each day starting by least wanted days
for day, _ in potentials.iteritems():
possible_families = choices[choices == day].dropna(how="all").index.tolist()
for fam in possible_families:
if fam not in assignments.index:
assignments.loc[fam] = day
occupancies.loc[day] += sizes.loc[fam]
if occupancies.loc[day] > MIN_PPL:
break
# STEP 3 : assign remaining families with their most wanted choice (if possible)
remaining_fam_choices = choices.loc[~choices.index.isin(assignments.index)]
for fam, fam_choices in remaining_fam_choices.iterrows():
for day in fam_choices:
if occupancies.loc[day] + sizes.loc[fam] <= MAX_PPL:
assignments.loc[fam] = day
occupancies.loc[day] += sizes.loc[fam]
break
assignments = assignments.reset_index().rename({"index": "family_id"}, axis=1)
return assignments
def random_pick(data):
data = data["n_people"]
assignments = pd.Series(name="assigned_day")
occupancies = pd.Series(0, index=range(1, 101))
day = 1
min_constraint_satisfied = False
for family_id, family_size in data.iteritems():
assignments.loc[family_id] = day
occupancies.loc[day] += family_size
# while min_constraint is not satisfied, we fill days in the ascending order
if occupancies.loc[day] > MIN_PPL and not min_constraint_satisfied:
day += 1
# case where we have satisfied the min_constraint but there are still families to be placed
if occupancies.loc[100] > MIN_PPL and not min_constraint_satisfied:
min_constraint_satisfied = True
day = 1
# to place the remaining families, we just add them uniformly on the calendar
if min_constraint_satisfied:
day = max((day + 1) % 100, 1)
assignments = assignments.reset_index().rename({"index": "family_id"}, axis=1)
return assignments
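# Editor's note: minimal usage sketch (assumed Kaggle "Santa's Workshop Tour 2019"
# CSV layout with columns choice_0..choice_9 and n_people, indexed by family_id;
# not part of the original file):
#
#   import pandas as pd
#   data = pd.read_csv("family_data.csv", index_col="family_id")
#   assignments = baseline(data)        # or random_pick(data)
#   assignments.to_csv("submission.csv", index=False)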
|
remit0/workshop
|
workshop/models.py
|
models.py
|
py
| 2,796 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35207313249
|
import shutil
from PyQt5.QtCore import QPropertyAnimation, QEasingCurve
import sys
from PyQt5.QtWidgets import QSlider, QLabel
from PyQt5.QtGui import QFont
from PyQt5.QtCore import QSignalMapper
from classes.FrequencyDomain import *
from classes.TimeGraph import *
from collections import namedtuple
from Dialog import *
from PyQt5.QtWidgets import QMainWindow, QApplication, QPushButton, QVBoxLayout, QDialog
Slider_tuple = namedtuple("Slider", ["min_frequency", "max_frequency", "slider_object", "window_curve"])
FORM_CLASS, _ = loadUiType(path.join(path.dirname(__file__), "mainWindow.ui"))
class MainApp(QMainWindow, FORM_CLASS):
def __init__(self, parent=None):
super(MainApp, self).__init__(parent)
QMainWindow.__init__(self, parent=None)
self.setupUi(self)
self.standard_deviation = 0
self.playing = None
self.animation = None
self.mode_name = None
self.dialog_window = None
self.setWindowTitle("Equalizer")
self.timer = QtCore.QTimer()
self.timer.setInterval(25)
self.timer.timeout.connect(self.move_line)
self.mode_mapper = QSignalMapper()
self.mode_mapper.mapped[str].connect(self.add_signal_and_make_slider)
self.input_signal_graph = TimeGraph(self.inputAudio)
self.output_signal_graph = TimeGraph(self.outputAudio)
self.outputAudio.setXLink(self.inputAudio)
self.outputAudio.setYLink(self.inputAudio)
self.slider_mapper = QSignalMapper()
self.slider_mapper.mapped[str].connect(self.slider_value_change)
self.mode_dictionary = {
"ECG": {"RBBB": Slider_tuple(0, 17.5, None, None), "Sinus": Slider_tuple(0, 4, None, None),
"ventricular fibrillation": Slider_tuple(17, 150, None, None)},
"Animal": {"Owl": Slider_tuple(0, 800, None, None), "Horse": Slider_tuple(1000, 2200, None, None),
"Bat": Slider_tuple(2500, 5000, None, None), "Goat": Slider_tuple(0, 7000, None, None),
"Dolphin": Slider_tuple(0, 14000, None, None)},
"Musical": {"Guitar": Slider_tuple(0, 900, None, None),
"Piccolo": Slider_tuple(1000, 2000, None, None),
"Xylophone": Slider_tuple(7000, 15000, None, None),
"trianglemod": Slider_tuple(4000, 6000, None, None)},
"Uniform": {}}
self.ui_components = {"reset": self.resetGraph, "clear": self.clearGraph,
"zoom_in": self.zoomInBtn, "zoom_out": self.zoomOutBtn, "speed_up": self.speedUp,
"slow_down": self.slowDown, }
self.frequency_domain = FrequencyDomain(input_spectro_pointer=self.spectroInputLayout,
output_spectro_pointer=self.spectroOutputLayout,
frequency_graph=self.frequency_graph)
handle_graph_buttons(self.ui_components, self.input_signal_graph)
handle_graph_buttons(self.ui_components, self.output_signal_graph)
self.window_signal = "Rectangular window"
self.handle_buttons()
def handle_buttons(self):
self.SideBar.toggled.connect(self.toggle_side_bar)
self.windowComboBox.currentTextChanged.connect(self.window_control)
self.Add_signal.clicked.connect(self.open_add_signal_dialog)
self.mute_input.clicked.connect(self.unmute_input_graph)
self.mute_output.clicked.connect(self.unmute_output_graph)
self.muteAllSounds.clicked.connect(self.mute_all)
self.clearGraph.clicked.connect(self.clear_all)
self.playPauseGraph.clicked.connect(self.pause_play_graph)
self.volumeSlider.valueChanged.connect(self.control_volume)
disable_enable_buttons(self.ui_components, False)
self.saveAudio.clicked.connect(self.save_output_audio_file)
def move_line(self):
self.input_signal_graph.move_line()
self.output_signal_graph.move_line()
def control_volume(self):
volume = self.volumeSlider.value()
pygame.mixer.music.set_volume(volume / 100.0)
if volume == 0:
self.muteAllSounds.setIcon(QIcon('icons/mute.png'))
else:
self.muteAllSounds.setIcon(QIcon('icons/sound.png'))
def window_control(self):
is_play = self.playing
if is_play:
self.pause_graphs()
if self.windowComboBox.currentText() == 'Gaussian window':
self.open_gaussian_window()
self.window_signal = self.windowComboBox.currentText()
self.create_output(False)
if is_play:
self.play_graphs()
def open_gaussian_window(self):
gaussian_window = QDialog(self)
gaussian_window.setWindowTitle('Gaussian Window')
layout = QVBoxLayout(gaussian_window)
label = QLabel('standard deviation = 500', gaussian_window)
layout.addWidget(label)
standard_deviation_slider = QSlider(gaussian_window)
standard_deviation_slider.setOrientation(1)
standard_deviation_slider.setMinimum(50)
standard_deviation_slider.setMaximum(1000)
standard_deviation_slider.setValue(500)
standard_deviation_slider.valueChanged.connect(
lambda: label.setText(f'standard deviation = {standard_deviation_slider.value()}'))
layout.addWidget(standard_deviation_slider)
ok_button = QPushButton('OK', gaussian_window)
ok_button.clicked.connect(gaussian_window.accept)
layout.addWidget(ok_button)
result = gaussian_window.exec_()
if result == QDialog.Accepted:
self.standard_deviation = standard_deviation_slider.value()
def add_signal_and_make_slider(self, file_path, mode_name):
self.clear_all()
disable_enable_buttons(self.ui_components, True)
self.mode_name = mode_name
data, sample_rate = self.input_signal_graph.add_wav_file(file_path, "input")
self.frequency_domain.add_new_file(data, sample_rate)
if mode_name == "Uniform":
self.add_uniform_signal(self.frequency_domain.frequencies)
mode_slider_ranges = self.mode_dictionary[mode_name]
position_index = 1
for slider_name, slider_parameter in mode_slider_ranges.items():
label = QLabel(slider_name)
label.setFont(QFont('Helvetica [Cronyx]', 10))
slider = QSlider()
slider.setOrientation(0)
slider.setMinimum(0)
slider.setMaximum(50)
slider.setMinimumSize(20, 250)
slider.setValue(10)
slider.setTickPosition(QSlider.TicksAbove)
self.slider_mapper.setMapping(slider, slider_name)
slider.valueChanged.connect(self.slider_mapper.map)
self.sliderLayout.addWidget(slider)
self.sliderLayout.addWidget(label)
line_color = random_color_generator()
frequency_start_line = pg.InfiniteLine(pos=slider_parameter.min_frequency, movable=False,
markers=[('>|', (1 - 0.25) / len(mode_slider_ranges.keys()), 10.0)],
pen=line_color)
frequency_end_line = pg.InfiniteLine(pos=slider_parameter.max_frequency, movable=False,
markers=[('|<', (1 - 0.25) / len(mode_slider_ranges.keys()), 10.0)],
pen=line_color)
pg.InfLineLabel(frequency_start_line, text=slider_name,
position=(1 - 0.2) * position_index / len(mode_slider_ranges.keys()))
window_on_frequency_graph = pg.PlotCurveItem()
self.mode_dictionary[mode_name][slider_name] = slider_parameter._replace(slider_object=slider,
window_curve=window_on_frequency_graph)
self.frequency_domain.frequency_graph.addItem(window_on_frequency_graph)
self.frequency_domain.frequency_graph.addItem(frequency_start_line)
self.frequency_domain.frequency_graph.addItem(frequency_end_line)
position_index += 1
self.create_output(True)
self.pause_play_graph()
self.unmute_input_graph()
def add_uniform_signal(self, frequencies):
band_length = len(frequencies) // 10
for i in range(10):
self.mode_dictionary["Uniform"][f"{i + 1}"] = Slider_tuple(frequencies[i * band_length],
frequencies[(i + 1) * band_length],
None, None)
def open_add_signal_dialog(self):
self.dialog_window = Dialog()
self.dialog_window.submitClicked.connect(self.add_signal_and_make_slider)
self.dialog_window.show()
def toggle_side_bar(self):
if self.SideBar.isChecked():
new_width = 500
else:
new_width = 0
self.animation = QPropertyAnimation(self.sideBarFrame, b"minimumWidth")
self.animation.setDuration(20)
self.animation.setEndValue(new_width)
self.animation.setEasingCurve(QEasingCurve.InOutQuart)
self.animation.start()
self.sideBarFrame.update()
def unmute_input_graph(self):
self.output_signal_graph.pygame_play_mute()
self.input_signal_graph.pygame_play_unmute()
self.mute_input.setIcon(QIcon('icons/sound.png'))
self.mute_output.setIcon(QIcon('icons/mute.png'))
def unmute_output_graph(self):
self.input_signal_graph.pygame_play_mute()
self.output_signal_graph.pygame_play_unmute()
self.mute_output.setIcon(QIcon('icons/sound.png'))
self.mute_input.setIcon(QIcon('icons/mute.png'))
def mute_all(self):
self.output_signal_graph.pygame_play_mute()
self.input_signal_graph.pygame_play_unmute(False)
self.mute_input.setIcon(QIcon('icons/mute.png'))
self.mute_output.setIcon(QIcon('icons/mute.png'))
self.muteAllSounds.setIcon(QIcon('icons/mute.png'))
def clear_all(self):
self.frequency_domain.clear()
# Clear all sliders in the layout
for widget in reversed(range(self.sliderLayout.count())):
widget = self.sliderLayout.itemAt(widget).widget()
if isinstance(widget, QSlider) or isinstance(widget, QLabel):
widget.deleteLater()
disable_enable_buttons(self.ui_components, False)
def create_output(self, new):
for slider in self.mode_dictionary[self.mode_name].keys():
self.change_frequency_domain_amplitudes(slider)
self.mode_dictionary[self.mode_name][slider].window_curve.setData(
np.linspace(self.mode_dictionary[self.mode_name][slider].min_frequency,
self.mode_dictionary[self.mode_name][slider].max_frequency, 500),
window_function(std=self.standard_deviation, name=self.window_signal,
amplitude=self.frequency_domain.max_amplitudes), pen="black")
self.create_output_wav_file(new)
def slider_value_change(self, slider_name):
self.change_frequency_domain_amplitudes(slider_name)
self.create_output_wav_file()
def change_frequency_domain_amplitudes(self, slider_name):
frequency_array = self.frequency_domain.frequencies
modified_band = (frequency_array > self.mode_dictionary[self.mode_name][slider_name].min_frequency) & (
frequency_array < self.mode_dictionary[self.mode_name][slider_name].max_frequency)
window_array = window_function(n=len(frequency_array[modified_band]),
amplitude=self.mode_dictionary[self.mode_name][
slider_name].slider_object.value() / 10,
std=self.standard_deviation,
name=self.window_signal)
self.frequency_domain.output_amplitudes[modified_band] = self.frequency_domain.amplitudes[
modified_band] * window_array
def create_output_wav_file(self, new=False):
playing_status = self.input_signal_graph.playing
if playing_status:
self.pause_graphs()
reconstructed_signal = get_inverse_fourier_transform(self.frequency_domain.output_amplitudes)
wav.write("played_audio/reconstructed.wav", self.frequency_domain.sampling_rate,
reconstructed_signal.astype(np.int16))
if new:
data, sample_rate = self.output_signal_graph.add_wav_file("played_audio/reconstructed.wav", "output")
else:
data, sample_rate = self.output_signal_graph.update_wave_file("played_audio/reconstructed.wav", "output")
self.frequency_domain.update_output_spectrogram(data, sample_rate)
if playing_status:
self.play_graphs()
def pause_graphs(self):
self.timer.stop()
self.playing = False
self.input_signal_graph.pause()
self.output_signal_graph.pause()
def play_graphs(self):
self.input_signal_graph.play()
self.output_signal_graph.play()
self.timer.start()
self.playing = True
def pause_play_graph(self):
if self.playing:
self.pause_graphs()
self.playPauseGraph.setIcon(QIcon('icons/play.png'))
else:
self.play_graphs()
self.playPauseGraph.setIcon(QIcon('icons/pause.png'))
def save_output_audio_file(self):
save_path = QFileDialog.getSaveFileName(self, 'Save File', "audio file", "wav Files (*.wav)")[0]
shutil.copyfile("played_audio/output.wav", f"{save_path}")
def main():
app = QApplication(sys.argv)
window = MainApp()
window.show()
app.exec_()
if __name__ == '__main__':
main()
|
Zoz-HF/Ikoraiza
|
main.py
|
main.py
|
py
| 14,129 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25409835667
|
import cv2
import numpy as np
# Load video
cap = cv2.VideoCapture('lift.mp4')
# Define output video properties
output_file = 'output.avi'
fourcc = cv2.VideoWriter_fourcc(*'XVID')
fps = cap.get(cv2.CAP_PROP_FPS)
frame_size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
# Create VideoWriter object
out = cv2.VideoWriter(output_file, fourcc, fps, frame_size)
# Select ROI
ret, frame = cap.read()
r = cv2.selectROI(frame)
# Initialize tracker
tracker = cv2.legacy.TrackerMOSSE_create()
tracker.init(frame, r)
# Initialize variables
positions = []
speeds = []
# Create blank image for line overlay
overlay = np.zeros_like(frame)
# Process video frame by frame
while True:
# Read frame
ret, frame = cap.read()
if not ret:
break
# Track object
ok, bbox = tracker.update(frame)
# Draw bounding box and center point
if ok:
# Convert bounding box to integers
bbox = np.int0(bbox)
# Draw bounding box
cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[0] + bbox[2], bbox[1] + bbox[3]), (0, 255, 0), 2)
# Calculate center point
cx = bbox[0] + bbox[2] // 2
cy = bbox[1] + bbox[3] // 2
# Draw center point
cv2.circle(frame, (cx, cy), 4, (0, 255, 0), -1)
# Save position
positions.append((cx, cy))
# Draw line to previous center point
if len(positions) > 1:
cv2.line(overlay, positions[-1], positions[-2], (0, 0, 255), 2)
# Calculate speed
if len(positions) > 1:
distance = np.sqrt((positions[-1][0] - positions[-2][0]) ** 2 + (positions[-1][1] - positions[-2][1]) ** 2)
speed = distance / (1 / cap.get(cv2.CAP_PROP_FPS))
speeds.append(speed)
# Add line overlay to frame
frame = cv2.addWeighted(frame, 1, overlay, 0.5, 0)
# Display frame
cv2.imshow('Frame', frame)
# Write frame to output video
out.write(frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# Calculate maximum, minimum, and average speed
if len(speeds) > 0:
max_speed = max(speeds)
min_speed = min(speeds)
avg_speed = sum(speeds) / len(speeds)
print(f"Max speed: {max_speed:.2f} pixels per second")
print(f"Min speed: {min_speed:.2f} pixels per second")
print(f"Avg speed: {avg_speed:.2f} pixels per second")
else:
print("No speed data available")
# Release resources
cap.release()
out.release()
cv2.destroyAllWindows()
|
taoofstefan/BB-Tracking
|
main.py
|
main.py
|
py
| 2,502 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39868886992
|
'''
Criar um sistema bancário com as operações: sacar, depositar
e visualizar extrato.
'''
import textwrap
def menu():
# Define as opções do menu
menu_options = {
'd': 'Depositar',
's': 'Sacar',
'e': 'Extrato',
'nc': 'Nova conta',
'lc': 'Listar contas',
'nu': 'Novo usuário',
'lu': 'Listar usuários',
'q': 'Sair'
}
# Constrói a string do menu
menu = (
"\n=============== MENU ================\n"
+ "\n".join(f"[{key}] {value}" for key, value in menu_options.items())
+ "\n=> "
)
return input(menu).strip()
def depositar(contas):
numero_conta = input("Informe o número da conta: ")
# Verifica se a conta existe
if numero_conta in contas:
valor = float(input("Informe o valor do depósito: "))
conta = contas[numero_conta]
saldo_anterior = conta['saldo']
# Atualiza o saldo da conta e adiciona o depósito ao extrato
conta['saldo'] += valor
conta['extrato'] += f"Depósito:\tR$ {valor:.2f}\n"
print("\n=== Depósito realizado com sucesso! ===")
print(f"Saldo anterior:\tR$ {saldo_anterior:.2f}")
print(f"Novo saldo:\tR$ {conta['saldo']:.2f}")
else:
print("\n@@@ Conta não encontrada! Verifique o número da conta informado. @@@")
def sacar(contas):
numero_conta = input("Informe o número da conta: ")
# Verifica se a conta existe
if numero_conta in contas:
valor = float(input("Informe o valor do saque: "))
conta = contas[numero_conta]
saldo = conta['saldo']
extrato = conta['extrato']
limite = conta['limite']
numero_saques = conta['numero_saques']
limite_saques = conta['limite_saques']
# Verifica as condições de saque
excedeu_saldo = valor > saldo
excedeu_limite = valor > limite
excedeu_saques = numero_saques >= limite_saques
if excedeu_saldo:
print("\n@@@ Operação falhou! Você não tem saldo suficiente. @@@")
elif excedeu_limite:
print("\n@@@ Operação falhou! O valor do saque excede o limite. @@@")
elif excedeu_saques:
print("\n@@@ Operação falhou! Número máximo de saques excedido. @@@")
elif valor > 0:
saldo -= valor
extrato += f"Saque:\t\tR$ {valor:.2f}\n"
numero_saques += 1
print("\n=== Saque realizado com sucesso! ===")
else:
print("\n@@@ Operação falhou! O valor informado é inválido. @@@")
# Atualiza os dados da conta
conta['saldo'] = saldo
conta['extrato'] = extrato
conta['numero_saques'] = numero_saques
else:
print("\n@@@ Conta não encontrada! @@@")
def exibir_extrato(contas):
numero_conta = input("Informe o número da conta: ")
# Verifica se a conta existe
if numero_conta in contas:
conta = contas[numero_conta]
saldo = conta['saldo']
extrato = conta['extrato']
print("\n================ EXTRATO ================")
print("Não foram realizadas movimentações." if not extrato else extrato)
print(f"\nSaldo:\t\tR$ {saldo:.2f}")
print("==========================================")
else:
print("\n@@@ Conta não encontrada! @@@")
def criar_usuario(usuarios):
cpf = input("Informe o CPF (somente número): ")
# Verifica se já existe um usuário com o CPF informado
if filtrar_usuario(cpf, usuarios):
print("\n@@@ Já existe usuário com esse CPF! @@@")
return
nome = input("Informe o nome completo: ")
data_nascimento = input("Informe a data de nascimento (dd-mm-aaaa): ")
endereco = input("Informe o endereço (logradouro, nro - bairro - cidade/sigla estado): ")
usuarios.append({
"nome": nome,
"data_nascimento": data_nascimento,
"cpf": cpf,
"endereco": endereco
})
print("=== Usuário criado com sucesso! ===")
def listar_usuarios(usuarios):
if not usuarios:
print("\n@@@ Não existem usuários cadastrados! @@@")
return
else:
print("\n================ USUÁRIOS ================")
for usuario in usuarios:
linha = f"""
Nome:\t\t{usuario['nome']}
Data Nasc.:\t{usuario['data_nascimento']}
CPF:\t\t{usuario['cpf']}
Endereço:\t{usuario['endereco']}
"""
print(textwrap.dedent(linha))
def filtrar_usuario(cpf, usuarios):
# Filtra os usuários pelo CPF
usuarios_filtrados = [usuario for usuario in usuarios if usuario["cpf"] == cpf]
return usuarios_filtrados[0] if usuarios_filtrados else None
def criar_conta(contas, usuarios):
cpf = input("Informe o CPF do usuário: ")
usuario = filtrar_usuario(cpf, usuarios)
# Verifica se o usuário existe
if usuario:
numero_conta = str(len(contas) + 1)
saldo = 0
extrato = ""
limite = 5000
numero_saques = 0
limite_saques = 5
conta = {
'agencia': "0001",
'numero_conta': numero_conta,
'usuario': usuario,
'saldo': saldo,
'extrato': extrato,
'limite': limite,
'numero_saques': numero_saques,
'limite_saques': limite_saques
}
# Armazena a conta no dicionário contas usando o número da conta como chave
contas[numero_conta] = conta
print("\n=== Conta criada com sucesso! ===")
else:
print("\n@@@ Usuário não encontrado, fluxo de criação de conta encerrado! @@@")
def listar_contas(contas):
if not contas:
print("\n@@@ Não existem contas cadastradas! @@@")
return
else:
print("\n================ CONTAS ================")
for conta in contas.values():
linha = f"""
Agência:\t\t{conta['agencia']}
Número Conta:\t{conta['numero_conta']}
CPF:\t\t{conta['usuario']['cpf']}
Nome:\t\t{conta['usuario']['nome']}
Saldo:\t\tR$ {conta['saldo']:.2f}
"""
print(textwrap.dedent(linha))
def main():
# Dicionário para armazenar as contas bancárias
contas = {}
# Lista para armazenar os usuários
usuarios = []
while True:
opcao = menu()
if opcao == 'd':
depositar(contas)
elif opcao == 's':
sacar(contas)
elif opcao == 'e':
exibir_extrato(contas)
elif opcao == 'nc':
criar_conta(contas, usuarios)
elif opcao == 'lc':
listar_contas(contas)
elif opcao == 'nu':
criar_usuario(usuarios)
elif opcao == 'lu':
listar_usuarios(usuarios)
elif opcao == 'q':
print("\n=== Sessão encerrada. Até mais! ===")
break
else:
print("\n@@@ Opção inválida! Tente novamente. @@@")
if __name__ == '__main__':
main()
|
iurimega13/banqPy
|
version_2.0/banqPyRefactored.py
|
banqPyRefactored.py
|
py
| 7,160 |
python
|
pt
|
code
| 0 |
github-code
|
6
|