metadata | text
---|---|
{
"source": "15517965908/MY_WDSR",
"score": 2
} |
#### File: 15517965908/MY_WDSR/MY_wdsr.py
```python
from model.wdsr import wdsr_b
from callback import learning_rate, model_checkpoint_after, tensor_board
from keras.losses import mean_absolute_error, mean_squared_error
from train import psnr as psnr_tf
# import tensorflow as tf
import cv2
import os
import numpy as np
import keras
from keras.optimizers import Adam
from MY_datasets import read_img
from keras.callbacks import ModelCheckpoint
scale = 4
loss = mean_squared_error
weight = 'weight/wdsr.h5'
logs = 'logs/wdsr/'
learning_rate_step_size = 1e-4
learning_rate_decay = 200
lr_path = 'images/train/LR/'
hr_path = 'images/train/HR/'
lr_validation = 'images/test/LR/'
hr_validation = 'images/test/HR/'
def main():
sr = wdsr_b(scale)
sr.compile(optimizer=Adam(lr=1e-4), loss=loss, metrics=[psnr_tf])
sr.summary()
# sr.save_weights(weight)
sr.load_weights(weight)
callbacks = [
tensor_board(logs),
learning_rate(step_size=learning_rate_step_size, decay=learning_rate_decay),
ModelCheckpoint(weight, monitor='val_loss', verbose=1,
save_best_only=True, save_weights_only=True,)
# model_checkpoint_after(save_models_after_epoch, models_dir, monitor=f'val_psnr',
# save_best_only=save_best_models_only or benchmark)
]
sr.fit_generator(read_img(lr_path, hr_path),
epochs=10000, steps_per_epoch=32,
validation_data=read_img(lr_validation, hr_validation),
validation_steps=5,
callbacks=callbacks)
# sr.save_weights(weight)
def psnr(hr, sr):
    # PSNR in dB: 10 * log10(MAX^2 / MSE); cast to float so the uint8 difference cannot wrap around.
    mse = np.mean(np.square(hr.astype(np.float64) - sr.astype(np.float64)))
    return 10 * np.log10(255 ** 2 / mse)
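# Worked example: an MSE of 1.0 gives 10 * log10(255**2 / 1.0) ≈ 48.13 dB; identical images give MSE 0 (infinite PSNR).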
def test_model(hr, lr, bic, model):
# main()
sr = model.predict(np.expand_dims(lr, axis=0))
sr = sr[0, :, :, :].astype(np.uint8)
lr_hr = psnr(hr, sr)
bic_sr = psnr(hr, bic)
print('psnr_sr:', lr_hr, 'psnr_bic:', bic_sr)
# cv2.imshow('sr', sr)
# cv2.imshow('hr', hr)
# cv2.imshow('lr', lr)
# cv2.imshow('bic', bic)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
def show_psnr(img_name, model):
lr_img_path = 'images/test/LR/'
hr_img_path = 'images/test/HR/'
img_lr_path = lr_img_path + img_name
img_hr_path = hr_img_path + img_name
lr = cv2.imread(img_lr_path)
bic = cv2.resize(lr, (lr.shape[1]*scale, lr.shape[0]*scale))
hr = cv2.imread(img_hr_path)
test_model(hr, lr, bic, model)
def use_it_test_model():
model = wdsr_b(scale)
model.load_weights(weight)
for name in os.listdir(lr_validation):
print(name)
show_psnr(name, model)
if __name__ == '__main__':
main()
``` |
{
"source": "15532th/rss2jbr",
"score": 3
} |
#### File: 15532th/rss2jbr/dl_module.py
```python
import asyncio
import logging
import os
URL_PLACEHOLDER = '{url}'
class YT2DL():
def __init__(self, download_command):
self.downloads = {}
self.command = download_command
def add(self, url, save_path, on_failure=lambda: None):
if save_path is not None:
if not os.path.exists(save_path):
logging.warning('download directory {} does not exist, creating'.format(save_path))
os.makedirs(save_path)
if self.downloads.get(url) is None:
self.downloads[url] = asyncio.get_event_loop().create_task(self.start_downloader(url, save_path, on_failure))
else:
logging.debug('downloader for {} was called already'.format(url))
async def start_downloader(self, url, save_path=None, on_failure=lambda: None):
args = self.command.replace(URL_PLACEHOLDER, url).split()
logging.info('starting download subprocess for {}'.format(url))
process = await asyncio.create_subprocess_exec(*args, cwd=save_path)
await process.wait()
logging.debug('download subprocess for {} finished with exit code {}'.format(url, process.returncode))
self.downloads.pop(url)
if process.returncode != 0:
on_failure()
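# Minimal usage sketch (the download command below is hypothetical; any CLI that accepts a URL works):
#     yt = YT2DL('yt-dlp {url}')
#     yt.add('https://example.com/stream', save_path='downloads/')
#     asyncio.get_event_loop().run_forever()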
``` |
{
"source": "15629069885/xiao_feixia",
"score": 3
} |
#### File: xiao_feixia/models/caffe_cifar.py
```python
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
import math
## http://torch.ch/blog/2015/07/30/cifar.html
class CifarCaffeNet(nn.Module):
def __init__(self, num_classes):
super(CifarCaffeNet, self).__init__()
self.num_classes = num_classes
self.block_1 = nn.Sequential(
nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.ReLU(),
nn.BatchNorm2d(32))
self.block_2 = nn.Sequential(
nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1),
nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.AvgPool2d(kernel_size=3, stride=2),
nn.BatchNorm2d(64))
self.block_3 = nn.Sequential(
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
nn.Conv2d(64,128, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.AvgPool2d(kernel_size=3, stride=2),
nn.BatchNorm2d(128))
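        # For 32x32 CIFAR inputs, the three stride-2 pools shrink the feature map to 3x3,
        # so the flattened classifier input is 128 channels * 9 spatial positions.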
self.classifier = nn.Linear(128*9, self.num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
                init.kaiming_normal_(m.weight)
m.bias.data.zero_()
def forward(self, x):
x = self.block_1.forward(x)
x = self.block_2.forward(x)
x = self.block_3.forward(x)
x = x.view(x.size(0), -1)
#print ('{}'.format(x.size()))
return self.classifier(x)
def caffe_cifar(num_classes=10):
model = CifarCaffeNet(num_classes)
return model
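# Shape-check sketch: caffe_cifar(10)(torch.randn(2, 3, 32, 32)) returns logits of shape (2, 10).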
``` |
{
"source": "15653391491/black-broadcast-back-end",
"score": 2
} |
#### File: apps/con_control/views.py
```python
import json
import re
from django.http import JsonResponse
from django.views import View
from django.core.paginator import Paginator
import traceback
import logging
from big_screen.utils import sys_setting as code
from big_screen.serialization.allSerialization import serMobile, serMonitor, serUserRecord, serMobileNewLocation, \
serDistrict, serWhiteCategory, serWhiteList, serRedioTest, serMobileToPlatform
from big_screen.utils import re_format as f
from big_screen.utils.turn_format import time_formatter
from big_screen.redisOpration.AllOpration import MobListOp
errlog = logging.getLogger("Process")
# Create your views here.
# Form dropdown options
class FromSelectView(View):
# √
@classmethod
def get(cls, request):
"""
        Fetch the form's dropdown option data.
:param request:
:return:
"""
try:
# ---------------- 接收 ------------------
# ---------------- 验证 ------------------
# ---------------- 处理 ------------------
# ******* 序列化器 **********
dis = serDistrict()
wc = serWhiteCategory()
# ******** 组织数据 *********
all_info = {"name": "全部", "num": "0"}
content_monitor = list()
content_mobile = list()
content_monitor.append(all_info)
content_mobile.append(all_info)
all_info_copy = {"label": "全部", "value": 0}
content_monitor_copy = list()
content_mobile_copy = list()
content_monitor_copy.append(all_info_copy)
content_mobile_copy.append(all_info_copy)
# *********** 监测人员 ***************
query_monitor = dis.mon.filter(is_delete=0).values()
for info in list(query_monitor):
con = dict()
con["name"] = info.get("name")
con["num"] = str(info.get("id"))
content_monitor.append(con)
query_monitor_copy = dis.mon.filter(is_delete=0).values()
for info in list(query_monitor_copy):
con = dict()
con["label"] = info.get("name")
con["value"] = info.get("id")
content_monitor_copy.append(con)
# *********** 手机 *******************
query_mobile = dis.mob.filter(is_delete=0).values()
for info in list(query_mobile):
con = dict()
con["name"] = info.get("name")
con["num"] = str(info.get("id"))
content_mobile.append(con)
query_mobile_copy = dis.mob.filter(is_delete=0).values()
for info in list(query_mobile_copy):
con = dict()
con["label"] = info.get("name")
con["value"] = info.get("id")
content_mobile_copy.append(con)
# ********** 区域 *************
content_district = dis.get_info_select()
content_district_bak = dis.get_info_select_bak()
# ********* 台站 **************
content_taizhan = dis.get_taizhan_select()
content_taizhan_copy = dis.get_taizhan_select_copy()
# ********* 汇总 ***************
content_freq_category = list()
content_freq_category_result = wc.get_info_select()
for fc_info in content_freq_category_result:
num = fc_info.get("num")
if str(num) == "4":
continue
else:
content_freq_category.append(fc_info)
content_freq_category_copy = list()
content_freq_category_result_copy = wc.get_info_select_copy()
for fc_info in content_freq_category_result_copy:
num = fc_info.get("value")
                if str(num) == "4":
continue
else:
content_freq_category_copy.append(fc_info)
info = dict()
info["district"] = content_district
info["district_copy"] = content_district_bak
info["freq_category"] = content_freq_category
info["freq_category_copy"] = content_freq_category_copy
info["mobile"] = content_mobile
info["mobile_copy"] = content_mobile_copy
info["monitor"] = content_monitor
info["monitor_copy"] = content_monitor_copy
info["taizhan"] = content_taizhan
info["taizhan_copy"] = content_taizhan_copy
# ---------------- 返回 ------------------
con = code.con
con["data"] = info
return JsonResponse(con)
except Exception:
e = traceback.format_exc()
errlog.info(e)
# Mobile terminal list
class ControlTextView(View):
# √
@classmethod
def get(cls, request):
"""
        Get mobile terminal records.
:param request:
:return:
"""
# ------------ 接收 -------------
ret = request.GET.dict()
select_dict = ret.get("0")
page = ret.get("page")
limit = ret.get("limit")
if page is None:
page = 1
limit = 10
is_select = False
# ------------ 验证 -------------
if select_dict is None:
pass
else:
select_dict = json.loads(select_dict)
is_select = True
# ------------ 处理 -------------
# ******** 序列化器 ********
mob = serMobile()
mobl = MobListOp()
mobl.update_mob_list()
# ******** 查询结果 **********
if is_select:
result = mob.select_info(select_dict)
else:
result = mob.get_info()
# ********* 分页 **********
content = mob.page(query=result, page=page, limit=limit)
# ------------ 返回 -------------
con = code.con
con["data"] = content
con["count"] = len(result)
return JsonResponse(con)
# √
@classmethod
def post(cls, request):
"""
        Add a mobile terminal.
:param request:
:return:
"""
try:
# -------------- 接收 ----------------
ret = request.body.decode()
ret = eval(ret)
district = ret.get("district")
name = ret.get("name")
mobile = ret.get("mobile")
phonenumber = ret.get("phonenumber")
time = ret.get("time")
# -------------- 验证 ----------------
time_result = re.fullmatch(f.DATE_FORMATTER_RE, time)
# -------------- 处理 ----------------
# ******** 格式转化器 ***********
tf = time_formatter()
if time_result is None:
time = tf.now_time_str
# ******* 序列化器 **********
mob = serMobile()
mp = serMobileToPlatform()
# ******** 插入设备 ************
insert_dict = dict()
insert_dict["district"] = district
insert_dict["name"] = name
insert_dict["mobile"] = mobile
insert_dict["phonenumber"] = phonenumber
insert_dict["time"] = time
result = mob.insert_info(**insert_dict)
# *********** 注册版本管理 ************
mp_info = {
"mobile": mobile,
"platform": 1,
"name": "贵州"
}
mp.insert_info(**mp_info)
# ******* 更新redis中mobile 列表 ***********
moblist_op = MobListOp()
moblist_op.update_mob_list()
# -------------- 返回 ----------------
con = code.con
con["data"] = result
return JsonResponse(con)
except Exception:
e = traceback.format_exc()
errlog.warning(e)
# √
@classmethod
def delete(cls, request):
"""
        Delete a mobile terminal.
:param request:
:return:
"""
# ----------- 接收 -----------------
ret = request.GET.dict()
del_id = ret.get("id")
# ----------- 验证 -----------------
# ----------- 处理 -----------------
# ******** 序列化器 *********
mob = serMobile()
# ******** 组织数据 ********
delete_dict = dict()
delete_dict["id"] = del_id
# ******** 处理 *********
result = mob.delete_info(delete_dict)
# ******* 更新redis中mobile 列表 ***********
moblist_op = MobListOp()
moblist_op.update_mob_list()
# ----------- 返回 -----------------
con = code.con
con["data"] = result
return JsonResponse(con)
# √
@classmethod
def patch(cls, request):
"""
        Update device information.
:param request:
:return:
"""
# -------------- 接收 ----------------
ret = request.body.decode()
ret = eval(ret)
district = ret.get("district")
name = ret.get("name")
phonenumber = ret.get("phonenumber")
time = ret.get("time")
mob_id = ret.get("id")
# -------------- 验证 ----------------
# -------------- 处理 ----------------
# ************ 序列化器 ***********
mob = serMobile()
# ************ 组织数据 ***********
update_dict = dict()
update_dict["id"] = mob_id
update_dict["district"] = district
update_dict["name"] = name
update_dict["phonenumber"] = phonenumber
update_dict["time"] = time
errlog.info(update_dict)
# ************* 更新数据 ************
result = mob.update_info(update_dict)
# -------------- 返回 ----------------
con = code.con
con["data"] = result
return JsonResponse(con)
# Mobile terminal location
class MobileLocationView(View):
@classmethod
def post(cls, request):
"""
        Query a phone's working track from the submitted form; parameters are the start time, end time and device id.
:param request:
:return:
"""
# ---------------- 接收 --------------
ret = request.body.decode()
if ret == "":
pass
else:
ret = eval(ret)
mobile = ret.get("mobile")
s_time = ret.get("s_time")
e_time = ret.get("e_time")
# ---------------- 验证 --------------
# ---------------- 处理 --------------
# ********* 序列化器 **********
s = serMobileNewLocation()
# ********* 查询条件 *************
select_dict = dict()
select_dict["mobile"] = mobile
select_dict["s_time"] = s_time
select_dict["e_time"] = e_time
# ********** 查询 ***********
content = s.select_info(select_dict)
content = list(map(lambda info: info.split(","), content))
# ******* 去掉错误数据 ***************
if ['x', 'x'] in content:
content.remove(['x', 'x'])
if ['0.0', '0.0'] in content:
content.remove(['0.0', '0.0'])
# ---------------- 返回 --------------
con = code.con
con["data"] = content
return JsonResponse(con)
# Frequency-point categories
class FreqCategoryView(View):
@classmethod
def get(cls, request):
"""
        Get frequency-point categories.
:param request:
:return:
"""
# --------------- 接收 --------------------
ret = request.GET.dict()
select_dict = ret.get("msg")
page = ret.get("page")
limit = ret.get("limit")
# --------------- 验证 --------------------
is_select = False
if select_dict is None:
pass
else:
select_dict = json.loads(select_dict)
is_select = True
# --------------- 处理 --------------------
# ******* 序列化器 *********
wh = serWhiteList()
# ******* 结果查询 ********
if is_select:
result = wh.select_info(select_dict)
else:
result = wh.get_info()
# ******* 分页 *********
content = wh.page(query=result, page=page, limit=limit)
# --------------- 返回 --------------------
con = code.con
con["data"] = content
con["count"] = len(result)
return JsonResponse(con)
@classmethod
def post(cls, request):
"""
        Add a frequency-point category.
:param request:
:return:
"""
try:
# --------------- 接收 --------------------
ret = request.body.decode()
ret = eval(ret)
district = ret.get("district")
freq = ret.get("freq")
time = ret.get("time")
freq_type = ret.get("type")
name = ret.get("name")
# --------------- 验证 --------------------
# --------------- 处理 --------------------
# ******* 序列化器 *********
wh = serWhiteList()
# ****** 组织数据 *********
insert_dict = dict()
insert_dict["district"] = district
insert_dict["type"] = freq_type
insert_dict["time"] = time
insert_dict["freq"] = freq
insert_dict["name"] = name
# ****** 操作数据 *********
result = wh.insert_info(insert_dict)
# --------------- 返回 --------------------
con = code.con
con["data"] = result
return JsonResponse(con)
except Exception:
traceback.print_exc()
@classmethod
def patch(cls, request):
"""
        Update a frequency-point category.
:param request:
:return:
"""
# --------------- 接收 --------------------
ret = request.body.decode()
if ret == "":
pass
else:
ret = eval(ret)
district = ret.get("district")
freq_type = ret.get("type")
name = ret.get("name")
time = ret.get("time")
freq_id = ret.get("id")
# --------------- 验证 --------------------
# --------------- 处理 --------------------
# ******* 序列化器 *********
wh = serWhiteList()
# ****** 组织数据 *********
update_dict = dict()
update_dict["district"] = district
update_dict["type"] = freq_type
update_dict["name"] = name
update_dict["time"] = time
update_dict["id"] = freq_id
# ****** 更新数据 *******
result = wh.update_info(update_dict)
# --------------- 返回 --------------------
con = code.con
con["data"] = result
return JsonResponse(con)
@classmethod
def delete(cls, request):
"""
        Delete the given frequency-point category.
:param request:
:return:
"""
# ----------- 接收 -----------------
ret = request.GET.dict()
del_id = ret.get("id")
# ----------- 验证 -----------------
# ----------- 处理 -----------------
# ******** 序列化器 *********
mob = serWhiteList()
# ******** 组织数据 ********
delete_dict = dict()
delete_dict["id"] = del_id
# ******** 处理 *********
result = mob.delete_info(delete_dict)
# ----------- 返回 -----------------
con = code.con
con["data"] = result
return JsonResponse(con)
# Monitoring staff
class MonitorView(View):
# √
@classmethod
def get(cls, request):
"""
        Get monitoring staff records.
:param request:
:return:
"""
# --------------- 接收 --------------------
ret = request.GET.dict()
select_dict = ret.get("msg")
page = ret.get("page")
limit = ret.get("limit")
# --------------- 验证 --------------------
is_select = False
if select_dict is None:
pass
else:
select_dict = json.loads(select_dict)
is_select = True
# --------------- 处理 --------------------
# ******* 序列化器 *********
mon = serMonitor()
# 查询
if is_select:
result = mon.select_info(select_dict)
else:
result = mon.get_info()
# ****** 分页 *******
content = mon.page(query=result, page=page, limit=limit)
# --------------- 返回 --------------------
con = code.con
con["data"] = content
con["count"] = len(result)
return JsonResponse(con)
# √
@classmethod
def post(cls, request):
"""
        Add a monitoring staff record.
:param request:
:return:
"""
# --------------- 接收 --------------------
ret = request.body.decode()
if ret == "":
pass
else:
ret = eval(ret)
district = ret.get("district")
idcard = ret.get("idcard")
name = ret.get("name")
# --------------- 验证 --------------------
# --------------- 处理 --------------------
ff = time_formatter()
# ******* 序列化器 *********
mon = serMonitor()
# ****** 组织数据 *********
insert_dict = dict()
insert_dict["district"] = district
insert_dict["idcard"] = idcard
insert_dict["time"] = ff.now_time_str
insert_dict["name"] = name
# ****** 操作数据 *********
result = mon.insert_info(**insert_dict)
# --------------- 返回 --------------------
con = code.con
con["data"] = result
return JsonResponse(con)
@classmethod
def delete(cls, request):
"""
        Delete a monitoring staff record.
:param request:
:return:
"""
# ---------- 接收 -----------
ret = request.GET.dict()
idcard = ret.get("idcard")
# ---------- 验证 -----------
# ****** 序列化器 ****
mon = serMonitor()
# ---------- 处理 -----------
#
try:
mon_id = mon.table.get(idcard=idcard).id
except mon.table.model.DoesNotExist:
result = {"code": code.STATUSCODE_UNSUCCESS, "msg": "无该设备"}
else:
#
delete_dict = dict()
delete_dict["id"] = mon_id
#
result = mon.delete_info(delete_dict)
# ---------- 返回 -----------
con = code.con
con["data"] = result
return JsonResponse(con)
# √
@classmethod
def patch(cls, request):
"""
        Update a monitoring staff record.
:param request:
:return:
"""
# ----------------- 接收 ------------------
ret = request.body.decode()
ret = eval(ret)
district = ret.get("district")
time = ret.get("time")
name = ret.get("name")
mon_id = ret.get("id")
# ----------------- 验证 ------------------
# ----------------- 处理 ------------------
mon = serMonitor()
update_dict = dict()
update_dict["name"] = name
update_dict["time"] = time
update_dict["district"] = district
update_dict["id"] = mon_id
result = mon.update_info(update_dict)
# ----------------- 返回 ------------------
con = code.con
con["data"] = result
return JsonResponse(con)
# Usage records
class UseRecord(View):
# √
@classmethod
def get(cls, request):
"""
        Query phone usage records.
"""
# -------------- 接收 ------------------
ret = request.GET.dict()
page = ret.get("page")
limit = ret.get("limit")
select_dict = ret.get("msg")
# -------------- 验证 ------------------
is_select = False
if select_dict is None:
pass
else:
select_dict = json.loads(select_dict)
is_select = True
# -------------- 处理 ------------------
ur = serUserRecord()
if is_select:
result = ur.select_info(select_dict)
else:
result = ur.get_info()
# ********* 分页************
content = ur.page(query=result, page=page, limit=limit)
# -------------- 返回 ------------------
con = code.con
con["data"] = content
con["count"] = len(result)
return JsonResponse(con)
class RedioTestView(View):
def get(self, request):
"""
:param request:
:return:
"""
# ----------- 接收 ---------------
ret = request.GET.dict()
mobile = ret.get("mobile")
page = ret.get("page")
limit = ret.get("limit")
# ----------- 验证 ---------------
# ----------- 处理 ---------------
rt = serRedioTest()
result = rt.get_by_mobile(mobile)
content = rt.page(query=result, limit=limit, page=page)
# ----------- 返回 ---------------
con = code.con
con["data"] = content
con["count"] = len(result)
return JsonResponse(con)
```
#### File: apps/mobile/middlewares.py
```python
from django.utils.deprecation import MiddlewareMixin
from django.http import JsonResponse
import traceback
import logging
from big_screen.utils import sys_setting as code
from big_screen.redisOpration.AllOpration import MobListOp
errlog = logging.getLogger("Process")
class MD1(MiddlewareMixin):
def process_request(self, request):
try:
path = request.path.split("/")
re_type = path[2]
if re_type == "phone":
moblist_op = MobListOp()
mob_list = moblist_op.get_mob_list()
re_method = request.method
if re_method == "GET":
ret = request.GET.dict()
mobile = ret.get("phoneid")
if mobile not in mob_list:
errlog.warning(mobile)
con = code.con_false
return JsonResponse(con)
if re_method == "POST" and path[3] != "record":
ret = request.body.decode()
ret = eval(ret)
mobile = ret.get("phoneid")
if mobile not in mob_list:
errlog.warning(mobile)
con = code.con_false
return JsonResponse(con)
except Exception:
e = traceback.format_exc()
errlog.warning(e)
```
#### File: utils/box/broadcast.py
```python
from django_redis import get_redis_connection
import json
from big_screen.utils import tools as t
try:
from con_brocast.models import BlackCategory
except Exception as e:
print(e)
def broadcast_to_redis(bro_list):
"""
    Filter black broadcasts against the whitelist and push the processed records onto
    the redis queues for the mass-marker layer, the heat map and the scrolling table.
:param bro_list:
:return:
"""
# ---------------- 验证 --------------------------
if type(bro_list) is not list:
return '需要输入黑广播列表'
# ---------------- 建立连接 ----------------------
massmark_con = get_redis_connection('massmark')
bro_con = get_redis_connection('broadcast')
# 广播种类
category_obj = BlackCategory.objects.all()
# 坐标
info = bro_list[0]
lnglat_str = t.getLocation(info['location'])
# ---------------- 组织数据 ----------------------
for b_d in bro_list:
# ------------------------ 组织数据 ---------------------------------------
time = t.get_time(b_d['time']) # 时间
freq = round(float(b_d['freq']) / 10, 2) # 频点
        category = category_obj.get(num=b_d['category']).Name  # category name
lnglat = lnglat_str.split(',')
address = t.getaddress(lnglat_str)['formatted_address']
# -------------------------- massmark缓存 --------------------------------------
mass_message = dict()
mass_message['Channel'] = freq
mass_message['Time'] = time
mass_message['Category__Name'] = category
massmark_con.lpush(lnglat_str, json.dumps(mass_message))
# --------------------------- 热力图缓存 ----------------------------------------
heat_message = dict()
heat_message['lng'] = lnglat[0]
heat_message['lat'] = lnglat[1]
heat_message['count'] = 1
heat_message['time'] = time
bro_con.lpush('heatmap_n', json.dumps(heat_message))
# --------------------------- 轮播表缓存 ----------------------------------------
scroll_message = list()
scroll_message.append(time)
scroll_message.append(freq)
scroll_message.append(category)
scroll_message.append(address)
print(address)
bro_con.lpush('scroll_n', json.dumps(scroll_message))
# ------------------------------ 结束 -----------------------------------------------
return '缓存成功'
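# Usage sketch (keys mirror those read above; the exact value formats depend on big_screen.utils.tools):
#     broadcast_to_redis([{'time': ..., 'freq': '981', 'category': 1, 'location': ...}])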
``` |
{
"source": "15696/async-web-framework",
"score": 2
} |
#### File: async-web-framework/examples/parameters.py
```python
from subway import Application, Request
app = Application()
@app.route('/hello/{name}')
async def say_hello(request: Request[Application], name: str):
return f'Hello, {name}'
app.run()
```
#### File: async-web-framework/examples/templates.py
```python
from subway import Application, Request
# If templates_dir is not specified, the default is 'templates'
app = Application()
@app.route('/home')
async def home(request: Request[Application]):
# This works the same way as flask's render_template
return await request.app.render('home.html', title='Home')
app.run()
```
#### File: extensions/sqlalchemy/errors.py
```python
from typing import Tuple
from subway.errors import RailwayException
class SQLAlchemyException(RailwayException):
pass
class EngineException(SQLAlchemyException):
pass
class InvalidDatabase(EngineException):
def __init__(self, name: str) -> None:
self.name = name
super().__init__(f'{name!r} is not supported')
class InvalidDialect(EngineException):
def __init__(self, dialect: str) -> None:
self.dialect = dialect
super().__init__(f'{dialect!r} is not supported')
class NoDriverFound(EngineException):
def __init__(self, drivers: Tuple[str, ...]):
self.drivers = drivers
super().__init__(f'Could not find any of the following drivers: {", ".join(drivers)}')
```
#### File: extensions/sqlalchemy/schemas.py
```python
from __future__ import annotations
from typing import TYPE_CHECKING, AsyncIterator, Callable, Generic, ItemsView, Optional, Union, Type, Tuple, List, Dict, Any, Mapping, Sequence
import importlib
import inspect
import sqlalchemy
from sqlalchemy import sql
from subway.models import Model
from subway.app import Application
from subway.response import JSONResponse
from .results import CursorResult, Row, TypedCursorResult
from .engine import Connection
from .sqltypes import Column
from .types import SchemaT, Bind, Entity
from .filters import SelectFilter, ColumnFilter
__all__ = (
'MetaData',
'SchemaMeta',
'Schema',
'create_schema'
)
class ConnectionContext(Generic[SchemaT]):
connection: Connection
def __init__(self, bind: Bind, schema: Type[SchemaT]) -> None:
self.bind = bind
self.schema = schema
self.should_close = False
async def execute(self, query: Any, *args: Any, **kwargs: Any):
result = await self.raw_execute(query, *args, **kwargs)
return TypedCursorResult.from_cursor_result(result, type=self.schema)
async def raw_execute(self, *args: Any, **kwargs: Any) -> CursorResult:
return await self.connection.execute(*args, **kwargs)
async def run(self, fn: Callable[..., Any], *args: Any, **kwargs: Any) -> None:
await self.connection.run(fn, *args, **kwargs)
async def __aenter__(self) -> ConnectionContext[SchemaT]:
if isinstance(self.bind, Connection):
self.connection = self.bind
else:
self.connection = await self.bind.acquire()
self.should_close = True
return self
async def __aexit__(self, *args: Any) -> Any:
await self.connection.commit()
if self.should_close:
await self.connection.close()
class MetaData:
def __init__(self, bind: Optional[Bind]=None) -> None:
self.wrapped = sqlalchemy.MetaData()
self._schemas: List[Type[Schema]] = []
self.bind = bind
@classmethod
def from_file(cls, *files: str, bind: Optional[Bind] = None):
schemas = []
for file in files:
module = importlib.import_module(file)
for _, obj in inspect.getmembers(module):
if inspect.isclass(obj) and issubclass(obj, Schema):
schemas.append(obj)
metadata = cls(bind=bind)
for schema in schemas:
metadata.add_schema(schema)
return metadata
@property
def schemas(self) -> Tuple[Type[Schema], ...]:
return tuple(self._schemas)
def add_schema(self, schema: Type[Schema]) -> None:
if self.bind:
schema.query.bind = self.bind
self._schemas.append(schema)
def clear(self) -> None:
self._schemas.clear()
def update_schema_connections(self, bind: Bind) -> None:
for schema in self._schemas:
schema.query.bind = bind
def set_bind(self, bind: Bind) -> None:
        self.bind = bind
self.update_schema_connections(bind)
async def create_all(self, *, bind: Optional[Bind]=None) -> None:
if bind is not None:
self.set_bind(bind)
for schema in self.schemas:
await schema.query.create_schema()
async def drop_all(self) -> None:
for schema in self.schemas:
await schema.query.drop_schema()
class SchemaQuery(Generic[SchemaT]):
def __init__(self, schema: Type[SchemaT], *, bind: Optional[Bind]=None) -> None:
self.schema = schema
self.bind = bind
def context(self) -> ConnectionContext[SchemaT]:
if self.bind is None:
raise ValueError('A bind must be set')
return ConnectionContext(self.bind, self.schema)
def select(self, *args: Any) -> sql.Select:
return self.schema.table.select(*args)
async def execute(self, *args: Any, **kwargs: Any) -> TypedCursorResult[SchemaT]:
async with self.context() as context:
return await context.execute(*args, **kwargs)
async def create_schema(self) -> None:
async with self.context() as context:
await context.run(self.schema.table.create, checkfirst=True)
async def drop_schema(self) -> None:
async with self.context() as context:
await context.run(self.schema.table.drop, checkfirst=True)
async def all(self) -> List[SchemaT]:
select = self.schema.table.select()
cursor = await self.execute(select)
return await cursor.fetchall()
async def first(self) -> Optional[SchemaT]:
select = self.schema.table.select().limit(1)
cursor = await self.execute(select)
return await cursor.first()
async def get(self, *conditions: Any) -> Optional[SchemaT]:
select = self.schema.table.select().where(*conditions)
cursor = await self.execute(select)
return await cursor.fetchone()
async def getall(self, *conditions: Any) -> List[SchemaT]:
select = self.schema.table.select()
        select = select.where(*conditions)
async with self.context() as context:
cursor = await context.execute(select)
return await cursor.fetchall()
async def put(self, entity: Entity[SchemaT]) -> Optional[Dict[str, Any]]:
insert = self.schema.table.insert()
values: Union[Mapping[str, Any], Sequence[Any]]
if isinstance(entity, Model):
values = entity.to_dict()
elif isinstance(entity, (Mapping, Sequence)):
values = entity
else:
values = entity.get_column_values()
insert = insert.values(values)
await self.execute(insert)
primary_key = self.schema.get_primary_key()
if primary_key is not None:
select = self.schema.table.select().order_by(primary_key.desc()).limit(1)
async with self.context() as context:
cursor = await context.raw_execute(select)
row = await cursor.fetchone()
assert row is not None
return row.as_dict()
return None
async def putall(self, *entities: Entity[SchemaT]) -> List[SchemaT]:
objects = []
for entity in entities:
obj = await self.put(entity)
objects.append(obj)
if any(objects):
return objects
return []
async def insert(self, *entities: Entity[SchemaT]) -> List[SchemaT]:
return await self.putall(*entities)
async def update(self, *where: Any, **attrs: Any) -> None:
update = self.schema.table.update().where(*where).values(**attrs)
async with self.context() as context:
await context.execute(update)
async def delete(self, *where: Any) -> None:
delete = self.schema.table.delete().where(*where)
async with self.context() as context:
await context.execute(delete)
async def exists(self, *where: Any) -> bool:
select = self.schema.table.select().where(*where)
async with self.context() as context:
cursor = await context.execute(select)
return await cursor.fetchone() is not None
def filter(self) -> SelectFilter[SchemaT]:
return SelectFilter(self)
def with_columns(self, *columns: Column[Any]) -> ColumnFilter[SchemaT]:
return ColumnFilter(self, columns)
def __aiter__(self) -> AsyncIterator[SchemaT]:
return self.filter().__aiter__() # type: ignore
class SchemaMeta(type):
if TYPE_CHECKING:
def __getattr__(self, name: str) -> Column[Any]: ...
__all_schemas__: Dict[str, Type[Schema]] = {}
__columns__: List[sqlalchemy.Column]
__metadata__: sqlalchemy.MetaData
__table__: sqlalchemy.Table
__query__: SchemaQuery[Any]
def __new__(cls, cls_name: str, bases: Tuple[Any, ...], attrs: Dict[str, Any], **kwargs: Any) -> Any:
name = kwargs.get('name', attrs.get('__tablename__', cls_name))
metadata = kwargs.get('metadata', MetaData())
columns: List[sqlalchemy.Column] = []
pk_found = False
for attr, value in attrs.items():
if isinstance(value, sqlalchemy.Column):
value.name = attr
if value.primary_key:
if pk_found:
raise ValueError('Only one primary key is allowed')
pk_found = True
columns.append(value)
attrs['__columns__'] = columns
attrs['__metadata__'] = metadata
attrs['__table__'] = sqlalchemy.Table(name, metadata.wrapped, *columns)
schema = super().__new__(cls, cls_name, bases, attrs)
schema.__query__ = SchemaQuery(schema) # type: ignore
metadata.add_schema(schema)
cls.__all_schemas__[name] = schema # type: ignore
return schema
def get_schema(self, name: str) -> Optional[Type[Schema]]:
return self.__all_schemas__.get(name)
@property
def columns(self) -> Tuple[sqlalchemy.Column[Any], ...]:
return tuple(self.__columns__)
@property
def table(self) -> sqlalchemy.Table:
return self.__table__
@property
def query(self: Type[SchemaT]) -> SchemaQuery[SchemaT]: # type: ignore
return self.__query__
@property
def metadata(self) -> sqlalchemy.MetaData:
return self.__metadata__
def get_primary_key(self) -> Optional[Column[Any]]:
primary_key = None
if self.table.primary_key.columns:
primary_key = self.table.primary_key.columns.values()[0]
return primary_key
def get_primary_keys(self) -> List[Column[Any]]:
return self.table.primary_key.columns.values()
class Schema(metaclass=SchemaMeta):
if TYPE_CHECKING:
__columns__: List[sqlalchemy.Column[Any]]
def __init__(self, **kwargs: Any):
self.update_attributes(**kwargs)
@classmethod
def from_row(cls: Type[SchemaT], row: Row) -> SchemaT:
return cls(**row.as_dict())
@classmethod
async def create(cls: Type[SchemaT], **attrs: Any) -> SchemaT:
instance = cls(**attrs)
await instance.save()
return instance
def to_dict(self) -> Dict[str, Any]:
data = {}
for column in self.__columns__:
value = getattr(self, column.name)
data[column.name] = value
return data
def items(self) -> ItemsView[str, Any]:
return ItemsView(self.to_dict())
def update_attributes(self, **values: Any) -> None:
for key, value in values.items():
if not hasattr(self, key):
raise AttributeError(f'{key} is not a valid attribute')
if getattr(self, key) != value:
setattr(self, key, value)
def get_primary_key_conditions(self) -> Tuple[Any, ...]:
cls = type(self)
columns = cls.get_primary_keys()
return tuple(column == getattr(self, column.name) for column in columns)
def get_execute_conditions(self) -> Tuple[Any, ...]:
cls = type(self)
if cls.get_primary_keys():
return self.get_primary_key_conditions()
return tuple(column == getattr(self, column.name) for column in cls.columns)
def get_column_values(self) -> Dict[str, Any]:
cls = type(self)
values = {}
for column in cls.columns:
if column.primary_key:
continue
value = getattr(self, column.name)
values[column.name] = value
return values
async def update(
self,
**attrs: Any
) -> None:
cls = type(self)
where = self.get_execute_conditions()
values = self.get_column_values()
values.update(attrs)
await cls.query.update(*where, **attrs)
self.update_attributes(**attrs)
async def save(self) -> None:
cls = type(self)
attrs = await cls.query.put(self)
if attrs is not None:
self.update_attributes(**attrs)
async def delete(self) -> None:
where = self.get_execute_conditions()
cls = type(self)
await cls.query.delete(*where)
def create_schema(name: str, *columns: Column, metadata: Optional[MetaData]=None) -> Type[Schema]:
namespace = {
column.name: column for column in columns
}
kwargs = {}
if metadata:
kwargs['metadata'] = metadata
bases = (Schema,)
return SchemaMeta(name, bases, namespace, **kwargs) # type: ignore
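# Usage sketch (assumes a bind from .engine has been set on the metadata; columns are plain sqlalchemy Columns):
#     class User(Schema):
#         id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
#         name = sqlalchemy.Column(sqlalchemy.String)
#
#     await User.query.create_schema()
#     await User.create(id=1, name='spam')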
```
#### File: extensions/sqlalchemy/types.py
```python
from __future__ import annotations
from typing import (
Mapping,
Sequence,
TypeVar,
TYPE_CHECKING,
Union,
Tuple,
Callable,
Any,
List,
Protocol,
Optional,
AsyncIterator,
KeysView
)
from sqlalchemy.engine.cursor import ResultProxy
from sqlalchemy.ext.asyncio import AsyncResult
from sqlalchemy.sql.visitors import Visitable
if TYPE_CHECKING:
from .schemas import Schema
from .engine import Engine, Connection
from .results import Row
from subway.models import Model
AnyRow = Union['Row', Tuple[Any, ...]]
UniqueStrategy = Callable[..., Any]
Bind = Union['Engine', 'Connection']
MappingValue = Mapping[str, Any]
ResultT = TypeVar('ResultT', bound='SupportsRow')
SchemaT = TypeVar("SchemaT", bound="Schema")
SupportsWhereT = TypeVar('SupportsWhereT', bound="SupportsWhere")
Entity = Union[SchemaT, 'Model', Mapping[str, Any], Sequence[Any]]
class SupportsRow(Protocol):
@classmethod
def from_row(cls, row: Row) -> SupportsRow:
...
class IteratorResult(ResultProxy):
pass
class FrozenResult(Protocol):
def __init__(self, result: AsyncResult) -> None: ...
def rewrite_rows(self) -> List[AnyRow]: ...
def with_new_rows(self, rows: List[AnyRow]) -> FrozenResult: ...
def __call__(self) -> IteratorResult: ...
class MappingResult(Protocol):
def keys(self) -> KeysView[str]: ...
def unique(self, strategy: Optional[UniqueStrategy]=None) -> MappingResult: ...
async def fetchone(self) -> Optional[MappingValue]: ...
async def fetchmany(self, size: Optional[int]=None) -> List[MappingValue]: ...
async def fetchall(self) -> List[MappingValue]: ...
async def all(self) -> List[MappingValue]: ...
async def first(self) -> Optional[MappingValue]: ...
async def one(self) -> MappingValue: ...
async def one_or_none(self) -> Optional[MappingValue]: ...
async def partitions(self, size: Optional[int]=None) -> AsyncIterator[MappingValue]: ...
async def __aiter__(self) -> AsyncIterator[MappingValue]: ...
class ScalarsResult(Protocol):
def unique(self, strategy: Optional[UniqueStrategy]=None) -> ScalarsResult: ...
async def fetchone(self) -> Optional[Any]: ...
async def fetchmany(self, size: Optional[int]=None) -> List[Any]: ...
async def fetchall(self) -> List[Any]: ...
async def all(self) -> List[Any]: ...
async def first(self) -> Optional[Any]: ...
async def one(self) -> Any: ...
async def one_or_none(self) -> Optional[Any]: ...
async def partitions(self, size: Optional[int]=None) -> AsyncIterator[Any]: ...
async def __aiter__(self) -> AsyncIterator[Any]: ...
class SupportsWhere(Protocol):
def where(self, whereclause: Union[str, bool, Visitable]) -> SupportsWhere: ...
```
#### File: async-web-framework/subway/formdata.py
```python
from __future__ import annotations
from typing import IO, Any, Dict, Iterator, List, NamedTuple, Optional, TYPE_CHECKING, Tuple, TypeVar, Union, Iterable
import itertools
import string
import random
import re
from .files import File
from .utils import parse_http_data, CLRF
if TYPE_CHECKING:
from .app import Application
from .request import Request
T = TypeVar('T')
BOUNDARY_LIMITER = b'--'
BOUNDARY_REGEX = re.compile(r'.*;\sboundary=(?P<boundary>\S{1,70})')
__all__ = (
'Disposition',
'FormData',
'FormDataField'
)
def _get(iterable: List[T], index: int) -> Optional[T]:
try:
return iterable[index]
except IndexError:
return None
def pairwise(iterable: Iterable[T]) -> Iterable[Tuple[T, T]]:
left, right = itertools.tee(iterable)
next(right, None)
return itertools.zip_longest(left, right)
def find_fields(boundary: bytes, data: bytes) -> Iterator[bytes]:
for match, next in pairwise(re.finditer(boundary, data)):
start = match.end()
end = next.start() if next else len(data)
chunk = data[start:end]
if chunk == boundary + BOUNDARY_LIMITER:
continue
yield chunk.strip(CLRF)
def get_boundary(content_type: str) -> bytes:
match = BOUNDARY_REGEX.match(content_type)
assert match, 'No boundary found in content-type'
return match.group('boundary').encode('ascii')
def unquote(text: str) -> str:
return re.sub(r'"|\'', '', text)
class DispositionNotFound(Exception):
pass
class InvalidDisposition(Exception):
pass
class FormDataField(NamedTuple):
"""
A named tuple representing a form data field.
Attributes
----------
file: :class:`~.File`
The file object.
headers: :class:`dict`
The headers of the field.
disposition: :class:`~.Disposition`
The disposition of the field.
"""
file: File
headers: Dict[str, str]
disposition: Disposition
@property
def name(self) -> str:
return self.disposition.name
@property
def filename(self) -> Optional[str]:
return self.disposition.filename
@property
def content_type(self) -> str:
return self.disposition.content_type
class Disposition:
"""
A Content-Disposition header.
Parameters
----------
name: :class:`str`
The name of the field.
filename: Optional[:class:`str`]
The filename of the field.
content_type: Optional[:class:`str`]
The content type of the field. Defaults to ``application/octet-stream``.
"""
def __init__(self, *, name: str, filename: Optional[str]=None, content_type: Optional[str]=None) -> None:
self.content_type = content_type or 'application/octet-stream'
self.name = name
self.filename = filename
@classmethod
def from_headers(cls, headers: Dict[str, str]) -> Disposition:
"""
Create a Disposition object from a Content-Disposition header.
Parameters
----------
headers: :class:`dict`
A dictionary of headers.
Returns
-------
:class:`~subway.formdata.Disposition`
A Disposition object.
"""
disposition = headers.get('Content-Disposition')
if not disposition:
raise DispositionNotFound('Content-Disposition header not found.')
disposition = disposition.split('; ')
if disposition[0] != 'form-data':
raise InvalidDisposition('Invalid Content-Disposition header.')
name = unquote(disposition[1])
filename = _get(disposition, 2)
if filename:
filename = unquote(filename)
content_type = headers.get('Content-Type')
return cls(name=name, filename=filename, content_type=content_type)
def to_header(self) -> str:
"""
Create a Content-Disposition header.
Returns
-------
:class:`str`
A Content-Disposition header.
"""
header = f'form-data; name="{self.name}"'
if self.filename:
header += f'; filename="{self.filename}"'
return header
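# Example: Disposition(name='file', filename='a.txt').to_header() -> 'form-data; name="file"; filename="a.txt"'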
class FormData(Dict[str, FormDataField]):
"""
A form data object.
"""
def __init__(self) -> None:
self._boundary: Optional[bytes] = None
@property
def boundary(self) -> Optional[bytes]:
"""
The boundary string.
Returns
-------
:class:`str`
The boundary string.
"""
return self._boundary
@boundary.setter
def boundary(self, boundary: bytes) -> None:
self._boundary = boundary
def from_bytes(self, data: bytes, headers: Dict[str, Any]):
"""
Parse form data from bytes.
Parameters
----------
data: :class:`bytes`
The data to parse.
headers: :class:`dict`
The headers of the form data.
"""
content_type = headers.get('Content-Type')
if not content_type:
return self
boundary = get_boundary(content_type)
for field in find_fields(BOUNDARY_LIMITER + boundary, data):
result = parse_http_data(field, strip_status_line=False)
disposition = Disposition.from_headers(result.headers)
file = File(result.body)
field = FormDataField(file=file, headers=result.headers, disposition=disposition)
self[field.name] = field
self.boundary = boundary
return self
def generate_boundary(self) -> bytes:
"""
Generate a boundary string.
Returns
-------
:class:`str`
The boundary string.
"""
length = random.randint(1, 70)
return ''.join([random.choice(string.ascii_letters) for _ in range(length)]).encode()
def add_field(
self,
file: Union[File, IO[bytes]],
*,
name: Optional[str] = None,
filename: Optional[str] = None,
content_type: Optional[str] = None,
headers: Optional[Dict[str, str]] = None
) -> FormDataField:
"""
Add a file to the form data.
Parameters
----------
file: Union[:class:`~.File`, :class:`io.IOBase`]
The file object.
name: Optional[:class:`str`]
The name of the field.
filename: Optional[:class:`str`]
The filename of the field.
content_type: Optional[:class:`str`]
The content type of the field. Defaults to ``application/octet-stream``.
headers: Optional[:class:`dict`]
The headers of the field.
"""
if not isinstance(file, File):
file = File(file)
assert file.filename or name, 'A file name or disposition name must be provided'
headers = headers or {}
disposition = Disposition(
name=(name or file.filename),
filename=filename,
content_type=content_type
)
field = FormDataField(file=file, headers=headers, disposition=disposition)
self[field.name] = field
return field
async def _prepare_field(self, field: FormDataField) -> bytes:
assert self.boundary is not None, 'Boundary not set'
disposition = field.disposition
boundary = BOUNDARY_LIMITER + self.boundary
headers = [
f'Content-Disposition: {disposition.to_header()}',
f'Content-Type: {disposition.content_type}',
]
body = boundary
body += CLRF.join([header.encode() for header in headers]) + (CLRF * 2)
body += await field.file.read() + CLRF
return body
async def prepare(self) -> Tuple[bytearray, str]:
"""
Prepare the form data for sending.
"""
self.boundary = boundary = self.generate_boundary()
        content_type = f'multipart/form-data; boundary={boundary.decode()}'
body = bytearray()
for field in self.values():
chunk = await self._prepare_field(field)
body.extend(chunk)
body.extend(BOUNDARY_LIMITER + boundary + BOUNDARY_LIMITER)
return body, content_type
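# Usage sketch (File wraps a raw byte stream; a field name is required when the stream has no filename):
#     form = FormData()
#     form.add_field(io.BytesIO(b'hello'), name='greeting', content_type='text/plain')
#     body, content_type = await form.prepare()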
```
#### File: async-web-framework/subway/headers.py
```python
from typing import Dict, Optional
from functools import cached_property
from .cookies import CookieJar
from . import utils
__all__ = 'Headers',
class Headers(Dict[str, str]):
@property
def content_type(self) -> Optional[str]:
return self.get('Content-Type')
@property
def content_length(self) -> Optional[int]:
length = self.get('Content-Length')
if length:
return int(length)
return None
@property
def charset(self) -> Optional[str]:
content_type = self.content_type
if content_type:
return utils.get_charset(content_type)
return None
@property
def user_agent(self) -> Optional[str]:
return self.get('User-Agent')
@cached_property
def cookies(self) -> CookieJar:
return CookieJar.from_headers(self)
@property
def host(self) -> Optional[str]:
return self.get('Host')
```
#### File: subway/http/abc.py
```python
from __future__ import annotations
from typing import Any, Optional, Dict, TYPE_CHECKING
from abc import ABC, abstractmethod
import copy
import ssl
import asyncio
from subway.utils import parse_headers
from subway.streams import StreamReader, StreamWriter
from subway.types import StrURL
from .errors import HookerAlreadyConnected, HookerClosed
from .response import HTTPResponse, HTTPStatus
from .request import HTTPRequest
if TYPE_CHECKING:
from .sessions import HTTPSession
SSL_SCHEMES = ('https', 'wss')
__all__ = (
'SSL_SCHEMES',
'Hooker'
)
class Hooker(ABC):
def __init__(self, session: HTTPSession) -> None:
self.session = session
self.reader: Optional[StreamReader] = None
self.writer: Optional[StreamWriter] = None
self.connected = False
self.closed = False
def __repr__(self) -> str:
name = self.__class__.__name__
return f'<{name} closed={self.closed} connected={self.connected}>'
@property
def loop(self) -> asyncio.AbstractEventLoop:
return self.session.loop
@staticmethod
def create_default_ssl_context() -> ssl.SSLContext:
context = ssl.create_default_context()
return context
def ensure(self) -> None:
if self.connected:
raise HookerAlreadyConnected(hooker=self)
if self.closed:
raise HookerClosed(hooker=self)
def copy(self):
hooker = copy.copy(self)
return hooker
@abstractmethod
async def connect(self, url: StrURL) -> None:
raise NotImplementedError
@abstractmethod
async def write(self, data: Any) -> None:
raise NotImplementedError
def build_request(
self,
method: str,
host: str,
path: str,
headers: Dict[str, Any],
body: Optional[str]
) -> HTTPRequest:
headers.setdefault('Connection', 'close')
return HTTPRequest(method, path, host, headers, body)
async def read_response(self) -> HTTPResponse:
if self.reader is None:
raise RuntimeError('Not connected')
status_line = await self.reader.readuntil(b'\r\n')
version, status_code, _ = status_line.decode().split(' ', 2)
hdrs = await self.reader.readuntil(b'\r\n\r\n')
status = HTTPStatus(int(status_code))
headers: Dict[str, Any] = dict(parse_headers(hdrs))
return HTTPResponse(
hooker=self,
status=status,
version=version,
headers=headers,
)
@abstractmethod
async def close(self) -> None:
raise NotImplementedError
```
#### File: subway/http/sessions.py
```python
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
import asyncio
import json as _json
from .hooker import TCPHooker, WebSocketHooker, WebSocket
from .response import HTTPResponse
from .utils import RequestContextManager, WebSocketContextManager
from subway import compat, utils
from subway.types import StrURL
if TYPE_CHECKING:
from subway import URL
__all__ = (
'HTTPSession',
'request',
'ws_connect',
)
class HTTPSession:
"""
A class representing an HTTP session.
Parameters
----------
loop: :class:`asyncio.AbstractEventLoop`
The event loop to use.
Attributes
----------
loop: :class:`asyncio.AbstractEventLoop`
The event loop used by the session.
Example
-------
.. code-block:: python3
from subway import http
import asyncio
async def request():
async with http.HTTPSession() as session:
                async with session.request('https://example.com/', 'GET') as response:
                    text = await response.text()
print(text)
headers = response.headers
print(headers)
session.loop.run_until_complete(request())
"""
def __init__(
self,
*,
headers: Optional[Dict[str, Any]] = None,
loop: Optional[asyncio.AbstractEventLoop] = None
) -> None:
self.loop = loop or compat.get_event_loop()
self.headers = headers or {}
self._hookers: List[TCPHooker] = []
@property
def hookers(self) -> List[TCPHooker]:
return self._hookers.copy()
def _ensure_hookers(self) -> None:
self._hookers = [hooker for hooker in self._hookers if not hooker.closed]
async def __aenter__(self):
return self
async def __aexit__(self, *exc: Any):
await self.close()
return self
async def close(self):
"""
Closes the session.
"""
for hooker in self._hookers:
if not hooker.closed:
await hooker.close()
def request(
self,
url: StrURL,
method: str,
*,
headers: Optional[Dict[str, Any]] = None,
body: Any = None,
json: Optional[Dict[str, Any]] = None,
ignore_redirects: bool = False,
hooker: Optional[TCPHooker] = None
) -> RequestContextManager:
"""
Sends an HTTP request with the given method.
Parameters
----------
method: :class:`str`
The HTTP method to use.
url: :class:`str`
The URL to request.
**kwargs: Any
The keyword arguments to pass to the request.
Example
-------
.. code-block:: python3
async with session.request('https://example.com/', 'GET') as response:
text = await response.text()
print(text)
# or you could use it without a context manager, but make sure to close the response yourself
            response = await session.request('https://example.com/', 'GET')
text = await response.text()
print(text)
await response.close()
"""
coro = self._request(
url=url,
method=method,
headers=headers,
body=body,
json=json,
hooker=hooker,
ignore_redirects=ignore_redirects
)
return RequestContextManager(coro)
def ws_connect(self, url: Union[str, URL], **kwargs: Any) -> WebSocketContextManager:
"""
Connects to a URL using websockets.
Parameters
----------
url: :class:`str`
The URL to connect to.
**kwargs: Any
The keyword arguments to pass to the websocket request.
Example
-------
.. code-block:: python3
async with session.ws_connect('ws://echo.websocket.org') as ws:
await ws.send(b'Hello, world!')
data = await ws.receive()
print(data.data)
# or, once again, without a context manager
ws = await session.ws_connect('ws://echo.websocket.org')
await ws.send(b'Hello, world!')
data = await ws.receive()
print(data.data)
"""
return WebSocketContextManager(self._connect(url))
def get(self, url: StrURL, **kwargs: Any):
return self.request(url, 'GET', **kwargs)
def post(self, url: StrURL, **kwargs: Any):
return self.request(url, 'POST', **kwargs)
def put(self, url: StrURL, **kwargs: Any):
return self.request(url, 'PUT', **kwargs)
def delete(self, url: StrURL, **kwargs: Any):
return self.request(url, 'DELETE', **kwargs)
def head(self, url: StrURL, **kwargs: Any) :
return self.request(url, 'HEAD', **kwargs)
async def _request(
self,
url: StrURL,
method: str,
*,
headers: Optional[Dict[str, Any]] = None,
body: Any = None,
json: Optional[Dict[str, Any]] = None,
ignore_redirects: bool = False,
hooker: Optional[TCPHooker] = None
) -> HTTPResponse:
self._ensure_hookers()
url = utils.to_url(url)
if not hooker:
hooker = TCPHooker(self)
if not headers:
headers = {}
if json:
if body:
raise ValueError('body and json cannot be used together')
body = utils.dumps(json)
headers['Content-Type'] = 'application/json'
elif body:
if not isinstance(body, str):
raise TypeError('body must be a string')
if 'Content-Type' not in headers:
headers['Content-Type'] = 'text/plain'
headers['Content-Length'] = len(body) if body else 0
await hooker.connect(url)
assert url.hostname is not None, 'url must have a hostname'
headers.update(self.headers)
request = hooker.build_request(
method=method,
host=url.hostname,
path=url.path or '/',
headers=headers,
body=body
)
await hooker.write(request)
response = await hooker.read_response()
if not ignore_redirects:
if 301 <= response.status <= 308:
location = response.headers['Location']
return await self._request(
url=location,
method=method,
headers=headers,
body=body,
json=json,
)
self._hookers.append(hooker)
return response
async def _connect(self, url: StrURL) -> WebSocket:
url = utils.to_url(url)
hooker = WebSocketHooker(self)
websocket = await hooker.connect(url)
self._hookers.append(hooker)
return websocket
def request(url: StrURL, method: str, **kwargs: Any):
client = HTTPSession(loop=kwargs.pop('loop', None))
return client.request(url, method, **kwargs)
def ws_connect(url: str, **kwargs: Any):
client = HTTPSession(loop=kwargs.pop('loop', None))
return client.ws_connect(url, **kwargs)
```
#### File: subway/models/fields.py
```python
from __future__ import annotations
from collections.abc import Sequence, Mapping, MutableMapping, Iterable
from typing import TYPE_CHECKING, Any, Callable, Optional, Literal, Set, Type, TypeVar, Union, Generic
from .utils import DEFAULT
T = TypeVar('T')
if TYPE_CHECKING:
from .models import Model
__all__ = 'Field', 'field', 'validator'
LIST_LIKE_TYPES = (Iterable, Sequence, list, set, Set, frozenset)
DICT_TYPES = (Mapping, MutableMapping, dict)
def validator(field: str) -> Callable[..., Any]:
def callback(func: Callable[..., Any]) -> Callable[..., Any]:
func.__validator__ = field
return func
return callback
class Field(Generic[T]):
def __init__(
self,
*,
default: Any = DEFAULT,
default_factory: Type[Any] = None,
strict: bool = False,
name: Optional[str] = None,
validator: Optional[Callable[['Model', T, Field[T]], Any]] = None
) -> None:
self.name = name
self.default = default
self.default_factory = default_factory
self.strict = strict
self.validator = validator or (lambda _, value, field: value)
self._annotation: Any = None
def __repr__(self) -> str:
return f'<Field name={self.name!r} default={self.default!r} strict={self.strict!r}>'
@property
def annotation(self) -> Any:
return self._annotation
@annotation.setter
def annotation(self, value: Any) -> None:
self._annotation = value
@staticmethod
def has_args(annotation: Any) -> bool:
args = getattr(annotation, '__args__', None)
if not args:
return False
return True
@staticmethod
def any_or_object(value: Any) -> Any:
return object if value is Any else value
def evaluate(self, value: Any, *, annotation: Optional[Any] = None) -> bool:
ann: Any = annotation or self.annotation
origin = getattr(annotation, '__origin__', None)
if origin is Union:
if type(None) in ann.__args__:
expected, _ = ann.__args__
return isinstance(value, expected) or value is None
return any(self.evaluate(value, annotation=typ) for typ in ann.__args__)
if origin is not None:
if not isinstance(value, origin):
return False
else:
if not self.strict:
return True
if not self.has_args(ann):
return True
else:
return isinstance(value, self.any_or_object(ann))
if origin is Literal:
if value not in ann.__args__:
return False
return True
elif origin in LIST_LIKE_TYPES:
expected = self.any_or_object(ann.__args__[0])
return all(isinstance(v, expected) for v in value)
elif origin is tuple:
has_ellipsis = ann.__args__[-1] is ...
expected = ann.__args__[:-1] if has_ellipsis else ann.__args__
elements = len(expected)
if has_ellipsis:
expected = self.any_or_object(expected[0])
else:
expected = tuple(self.any_or_object(e) for e in expected)
if len(value) != elements and not has_ellipsis:
return False
if has_ellipsis:
return all(isinstance(v, expected) for v in value)
else:
return all(isinstance(value[i], typ) for i, typ in enumerate(expected))
elif origin in DICT_TYPES:
pair = ann.__args__
key_type, value_type = pair[0], self.any_or_object(pair[1])
return all(isinstance(k, key_type) and isinstance(v, value_type) for k, v in value.items())
raise RuntimeError(f'Unsupported annotation type: {origin}')
def field(
*,
default: Any = DEFAULT,
default_factory: Type[Any] = None,
strict: bool = False,
name: Optional[str] = None,
validator: Optional[Callable[[Model, Any, Field[Any]], Any]] = None
) -> Any:
if default is not DEFAULT and default_factory is not None:
raise ValueError('Cannot specify both default and default_factory')
return Field(default=default, default_factory=default_factory, strict=strict, name=name, validator=validator)
```
#### File: async-web-framework/subway/objects.py
```python
from __future__ import annotations
from enum import Enum
from typing import TYPE_CHECKING, Callable, Generic, List, Any, Literal, Optional, Dict, Union, NoReturn, TypeVar, overload
import inspect
import re
from .types import CoroFunc, Coro, ResponseMiddleware, RequestMiddleware
from .responses import HTTPException
from .response import Response
from .request import Request
from .errors import RegistrationError
from . import utils
if TYPE_CHECKING:
from .router import Router
from .resources import Resource
class MiddlewareType(str, Enum):
request = 'request'
response = 'response'
T = TypeVar('T', bound=MiddlewareType)
__all__ = (
'Object',
'Route',
'PartialRoute',
'WebSocketRoute',
'Middleware',
'MiddlewareType',
'Listener',
'route',
'websocket_route',
'middleware',
'listener',
)
class Object:
"""
A base object.
Parameters
----------
callback: Callable[..., Coroutine[Any, Any, Any]]
The coroutine function used by the object.
Attributes
----------
callback: Callable[..., Coroutine[Any, Any, Any]]
The coroutine function used by the object.
"""
callback: CoroFunc[Any]
def __init__(self, callback: CoroFunc) -> None:
self.callback = callback
self.parent: Any = None
def __call__(self, *args: Any, **kwds: Any) -> Any:
if self.parent:
return self.callback(self.parent, *args, **kwds)
else:
return self.callback(*args, **kwds)
class PartialRoute:
"""
A partial route. This object is created whenever an error occurs during the route handling process.
Parameters
----------
path: :class:`str`
The path of the route.
method: :class:`str`
The method of the route.
Attributes
----------
path:
The path of the route.
method:
The method of the route.
"""
def __init__(self, path: str, method: str) -> None:
self.path: str = path
self.method: str = method
def __repr__(self) -> str:
return f'<PartialRoute path={self.path!r} method={self.method!r}>'
class Route(Object):
"""
A route object.
Parameters
----------
path: :class:`str`
The path of the route.
method: :class:`str`
The method of the route.
callback: Callable[..., Coroutine[Any, Any, Any]]
The function used by the route.
router: Optional[:class:`~subway.router.Router`]
The router to register the route with.
This can be ``None`` when the route is created via :func:`~subway.objects.route`.
Attributes
----------
path: :class:`str`
The path of the route.
method: :class:`str`
The method of the route.
callback: Callable[..., Coroutine[Any, Any, Any]]
The coroutine function used by the route.
"""
__cache_control__: Dict[str, Any]
def __init__(
self,
path: str,
method: str,
callback: CoroFunc,
*,
name: Optional[str] = None,
router: Optional['Router']
) -> None:
if hasattr(callback, '__cache_control__'):
self.__cache_control__ = callback.__cache_control__
self._router = router
self.path: str = path
self.method: str = method
self.name = name or callback.__name__.replace('_', ' ').title()
self.raw_path: str = path
self._error_handler = None
self._status_code_handlers: Dict[int, Callable[..., Coro[Any]]] = {}
self._request_middlewares: List[Middleware] = []
self._response_middlewares: List[Middleware] = []
self._after_request = None
self.__doc__ = inspect.getdoc(callback)
super().__init__(callback)
async def dispatch(
self,
request: Request,
exc: Exception
) -> bool:
if isinstance(exc, HTTPException):
callback = self._status_code_handlers.get(exc.status)
if callback:
if self.parent:
response = await callback(self.parent, request, exc, self)
else:
response = await callback(request, exc, self)
await request.send(response)
return True
if self._error_handler:
if self.parent:
response = await self._error_handler(self.parent, request, exc, self)
else:
response = await self._error_handler(request, exc, self)
await request.send(response)
return True
return False
@property
def signature(self) -> inspect.Signature:
"""
The signature of the route.
"""
return inspect.signature(self.callback)
@property
def request_middlewares(self) -> List[Middleware]:
"""
A list of request middlewares registered with the route.
"""
return self._request_middlewares.copy()
@property
def response_middlewares(self) -> List[Middleware]:
"""
A list of response middlewares registered with the route.
"""
return self._response_middlewares.copy()
@property
def router(self) -> Optional[Router]:
"""
The router used to register the route with.
"""
return self._router
@router.setter
def router(self, router: Router) -> None:
self._router = router
def is_websocket(self) -> bool:
"""
Checks if the route is a websocket route.
"""
return isinstance(self, WebSocketRoute)
def match(self, path: str) -> Optional[Dict[str, str]]:
"""
Matches the path with the route.
Parameters
----------
path: :class:`str`
The path to match.
Returns
-------
:class:`dict`
A dictionary of the matched parameters.
"""
match = re.fullmatch(self.path, path)
if match:
return match.groupdict()
return None
def cleanup_middlewares(self):
"""
Clears all the middlewares registered with the route.
"""
self._request_middlewares.clear()
self._response_middlewares.clear()
def add_status_code_handler(
self,
status: int,
callback: Callable[..., Coro[Any]]
):
"""
Adds a specific status code handler to the route.
This only applies to error status codes, for obvious reasons.
Parameters
----------
status: :class:`int`
The status code to handle.
callback: Callable[[:class:`~subway.objects.Request`, :class:`~subway.exceptions.HTTPException`, :class:`~subway.objects.Route`], Coro]
The callback to handle the status code.
"""
if not utils.iscoroutinefunction(callback):
raise RegistrationError('Status code handlers must be coroutine functions')
self._status_code_handlers[status] = callback
return callback
def remove_status_code_handler(self, status: int):
"""
Removes a status code handler from the route.
Parameters
----------
status: :class:`int`
The status code to remove.
"""
callback = self._status_code_handlers.pop(status, None)
return callback
def status_code_handler(self, status: int):
"""
A decorator that adds a status code handler to the route.
The handler function MUST return something.
Parameters
----------
status: :class:`int`
The status code to handle.
Example
---------
.. code-block :: python3
import subway
app = subway.Application()
app.users = {}
@app.route('/users/{id}', 'GET')
async def get_user(request: subway.Request, id: int):
user = app.users.get(id)
if not user:
raise subway.NotFound()
return user
@get_user.status_code_handler(404)
async def handle_404(
request: subway.Request,
exception: subway.HTTPException,
route: subway.Route
):
return {
'message': 'User not found.',
'status': 404
}
app.run()
"""
def decorator(func: Callable[..., Coro[Any]]):
return self.add_status_code_handler(status, func)
return decorator
def on_error(self, callback: Callable[..., Coro[Any]]):
"""
Registers an error handler for the route.
The handler function MUST return something.
Parameters
----------
callback: Callable[[:class:`~.Request`, :class:`~.HTTPException`, :class:`~.Route`], Coro]
The callback to handle errors.
"""
if not utils.iscoroutinefunction(callback):
raise RegistrationError('Error handlers must be coroutine functions')
self._error_handler = callback
return callback
def add_request_middleware(self, callback: RequestMiddleware) -> Middleware:
"""
Registers a middleware with the route.
Parameters
----------
callback: Callable[..., Coroutine[Any, Any, Any]]
The coroutine function used by the middleware.
"""
if not utils.iscoroutinefunction(callback):
raise RegistrationError('All middlewares must be coroutine functions')
middleware = Middleware(MiddlewareType.request, callback, route=self)
self._request_middlewares.append(middleware)
return middleware
def remove_middleware(self, middleware: Middleware) -> Middleware:
"""
Removes a middleware from the route.
Parameters
----------
middleware: :class:`~subway.objects.Middleware`
The middleware to remove.
"""
self._request_middlewares.remove(middleware)
return middleware
def request_middleware(self, callback: RequestMiddleware) -> Middleware:
"""
A decorator that registers a middleware with the route.
Parameters
----------
callback: Callable
The coroutine function used by the middleware.
Example
--------
.. code-block:: python3
import subway
app = subway.Application()
@app.route('/')
async def index(request: subway.Request):
return 'Hello, world!'
@index.request_middleware
async def middleware(route: subway.Route, request: subway.Request, **kwargs):
print('Middleware called')
app.run()
"""
return self.add_request_middleware(callback)
def add_response_middleware(self, callback: ResponseMiddleware) -> Middleware:
"""
Registers a response middleware with the route.
Parameters
----------
callback: Callable[..., Any]
The coroutine function used by the middleware.
"""
if not utils.iscoroutinefunction(callback):
raise RegistrationError('Response middlewares must be coroutine functions')
middleware = Middleware(MiddlewareType.response, callback, route=self)
self._response_middlewares.append(middleware)
return middleware
def remove_response_middleware(self, middleware: Middleware) -> Middleware:
"""
Removes a response middleware from the route.
Parameters
----------
middleware: :class:`~subway.objects.Middleware`
The middleware to remove.
"""
self._response_middlewares.remove(middleware)
return middleware
def response_middleware(self, callback: ResponseMiddleware) -> Middleware:
"""
A decorator that registers a response middleware with the route.
Parameters
----------
callback: Callable[..., Any]
The coroutine function used by the middleware.
Example
--------
.. code-block:: python3
import subway
app = subway.Application()
@app.route('/')
async def index(request: subway.Request):
return 'Hello, world!'
@index.response_middleware
async def middleware(request: subway.Request, response: subway.Response, route: subway.Route):
print('Middleware called')
app.run()
"""
return self.add_response_middleware(callback)
@overload
def middleware(self, type: Literal['request']) -> Callable[[RequestMiddleware], Middleware]:
...
@overload
def middleware(self, type: Literal['response']) -> Callable[[ResponseMiddleware], Middleware]:
...
def middleware(self, type: Literal['request', 'response']) -> Any:
"""
A decorator that registers a middleware with the route.
Parameters
----------
type: :class:`str`
The type of middleware to register.
Example
--------
.. code-block:: python3
import subway
app = subway.Application()
@app.route('/')
async def index(request: subway.Request):
return 'Hello, world!'
@index.middleware('request')
async def middleware(route: subway.Route, request: subway.Request, **kwargs):
print('Middleware called')
app.run()
"""
def decorator(callback: Callable[..., Any]):
if type not in ('request', 'response'):
raise RegistrationError('Invalid middleware type')
if type == 'request':
return self.add_request_middleware(callback)
else:
return self.add_response_middleware(callback)
return decorator
def after_request(self, callback: CoroFunc) -> CoroFunc:
"""
Registers a callback to be called after the route is handled.
Parameters
----------
callback: Callable[..., Coroutine[Any, Any, Any]]
The coroutine function to be called after the route is handled.
"""
if not utils.iscoroutinefunction(callback):
raise RegistrationError('After request callbacks must be coroutine functions')
self._after_request = callback
return callback
def destroy(self):
"""
Destroys the route.
"""
self.clear()
if not self._router:
return
self._router.remove_route(self)
return self
def clear(self):
"""
Clears the route's attached callbacks.
"""
self._after_request = None
self._error_handler = None
self.cleanup_middlewares()
self._status_code_handlers.clear()
def __repr__(self) -> str:
return '<Route path={0.path!r} method={0.method!r}>'.format(self)
class Middleware(Object):
"""
A middleware object.
Parameters
----------
callback: Callable[..., Coroutine[Any, Any, Any]]
The coroutine function used by the middleware.
route: Optional[:class:`~subway.objects.Route`]
The route to register the middleware with.
router: Optional[:class:`~subway.router.Router`]
The router to register the middleware with.
Attributes
----------
callback: Callable[..., Coroutine[Any, Any, Any]]
The coroutine function used by the middleware.
"""
@overload
def __init__(
self,
type: Literal[MiddlewareType.request],
callback: RequestMiddleware,
route: Optional[Route] = None,
router: Optional[Router] = None
) -> None:
...
@overload
def __init__(
self,
type: Literal[MiddlewareType.response],
callback: ResponseMiddleware,
route: Optional[Route] = None,
router: Optional[Router] = None
) -> None:
...
def __init__( # type: ignore
self,
type: Any,
callback: Any,
route: Optional[Route] = None,
router: Optional[Router] = None
) -> None:
self.type = type
super().__init__(callback)
self._router = router
self._route = route
self._is_global = False
@property
def router(self) -> Optional[Router]:
"""
The router used to register the middleware with.
"""
return self._router
@router.setter
def router(self, value):
if not isinstance(value, Router):
raise TypeError('router must be a Router instance')
self._router = value
@property
def route(self) -> Optional[Route]:
"""
The route used to register the middleware with.
"""
return self._route
@route.setter
def route(self, value):
if not isinstance(value, Route):
raise TypeError('route must be a Route instance')
self._route = value
def is_global(self) -> bool:
"""
True if the middleware is registered with the global router.
"""
return self._is_global
def is_route_specific(self) -> bool:
"""
True if the middleware is registered with a route.
"""
return not self.is_global()
def __repr__(self) -> str:
return f'<Middleware is_global={self.is_global()!r}>'
class WebSocketRoute(Route):
"""
A subclass of :class:`~subway.objects.Route` representing a websocket route
"""
pass
class Listener(Object):
"""
A listener object.
Parameters
----------
callback: Callable[..., Coroutine[Any, Any, Any]]
The coroutine function used by the listener.
event: :class:`str`
The event the listener is registered to.
Attributes
----------
callback: Callable[..., Coroutine[Any, Any, Any]]
The coroutine function used by the listener.
event: :class:`str`
The event the listener is registered to.
"""
def __init__(self, callback: CoroFunc[Any], name: str) -> None:
self.event: str = name
super().__init__(callback)
def __repr__(self) -> str:
return '<Listener event={0.event!r}>'.format(self)
def route(path: str, method: str, *, name: Optional[str]=None) -> Callable[[CoroFunc], Route]:
"""
A decorator that returns a :class:`~subway.objects.Route` object.
Parameters
----------
path: :class:`str`
The path to register the route with.
method: :class:`str`
The HTTP method to register the route with.
"""
def decorator(func: CoroFunc) -> Route:
route = Route(path, method, func, name=name, router=None)
route.raw_path = path
return route
return decorator
def websocket_route(path: str, *, name: Optional[str] = None) -> Callable[[CoroFunc], WebSocketRoute]:
"""
A decorator that returns a :class:`~subway.objects.WebSocketRoute` object.
Parameters
----------
path: :class:`str`
The path to register the route with.
"""
def decorator(func: CoroFunc) -> WebSocketRoute:
return WebSocketRoute(path, 'GET', func, name=name, router=None)
return decorator
def middleware(type: MiddlewareType) -> Callable[[CoroFunc[Any]], Middleware]:
"""
A decorator that returns a :class:`~subway.objects.Middleware` object.
Parameters
----------
type: :class:`~subway.objects.MiddlewareType`
The type of middleware to register.
"""
def decorator(callback: CoroFunc) -> Middleware:
middleware = Middleware(type, callback) # type: ignore
middleware._is_global = True
return middleware
return decorator
def listener(event: Optional[str] = None) -> Callable[[CoroFunc], Listener]:
"""
A decorator that returns a :class:`~subway.objects.Listener` object.
Parameters
----------
event: :class:`str`
The event to register the listener to.
"""
def decorator(func: CoroFunc) -> Listener:
return Listener(func, event or func.__name__)
return decorator
```
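A sketch of the standalone decorators defined above. Wiring the resulting `Route`, `Middleware` and `Listener` objects into a `Router`/`Application` happens elsewhere, so only the construction side is shown and the handler bodies are made up.

```python
# Illustrative only: builds the objects with the module-level decorators.
from subway.objects import route, middleware, listener, MiddlewareType

@route('/items/{id}', 'GET', name='Get Item')
async def get_item(request, id):
    return {'id': id}

@get_item.status_code_handler(404)
async def item_not_found(request, exc, rt):
    # status code handlers must return something, per the docstring above
    return {'message': 'item not found', 'status': 404}

@get_item.middleware('request')
async def log_item_requests(rt, request, **kwargs):
    print('request for', rt.path)

@middleware(MiddlewareType.request)   # global middleware: the decorator sets _is_global = True
async def log_everything(rt, request, **kwargs):
    print('incoming request')

@listener('on_startup')
async def on_startup():
    print('application started')
```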
#### File: async-web-framework/subway/response.py
```python
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, overload, AsyncIterator
import enum
import mimetypes
from .cookies import Cookie, CookieJar
from .files import File
from .headers import Headers
from .multidict import MultiDict
from .types import AnyBody, JSONResponseBody, ResponseBody, ResponseHeaders, ResponseStatus
from .utils import CLRF, dumps
if TYPE_CHECKING:
from .objects import Route
__all__ = (
'Response',
'HTTPStatus',
'HTMLResponse',
'JSONResponse',
'FileResponse',
'cache_control',
)
class HTTPStatus(enum.IntEnum):
_description_: str
def __new__(cls, value: int, description: str):
self = int.__new__(cls, value)
self._value_ = value
self._description_ = description
self.__doc__ = description
return self
@property
def status(self):
return self.value
@property
def description(self):
return self._description_
CONTINUE = 100, 'Continue'
SWITCHING_PROTOCOLS = 101, 'Switching Protocols'
PROCESSING = 102, 'Processing'
EARLY_HINTS = 103, 'Early Hints'
OK = 200, 'OK'
CREATED = 201, 'Created'
ACCEPTED = 202, 'Accepted'
NON_AUTHORITATIVE_INFORMATION = 203, 'Non-Authoritative Information'
NO_CONTENT = 204, 'No Content'
RESET_CONTENT = 205, 'Reset Content'
PARTIAL_CONTENT = 206, 'Partial Content'
MULTI_STATUS = 207, 'Multi-Status'
ALREADY_REPORTED = 208, 'Already Reported'
IM_USED = 226, 'IM Used'
MULTIPLE_CHOICES = 300, 'Multiple Choices'
MOVED_PERMANENTLY = 301, 'Moved Permanently'
FOUND = 302, 'Found'
SEE_OTHER = 303, 'See Other'
NOT_MODIFIED = 304, 'Not Modified'
USE_PROXY = 305, 'Use Proxy'
TEMPORARY_REDIRECT = 307, 'Temporary Redirect'
PERMANENT_REDIRECT = 308, 'Permanent Redirect'
BAD_REQUEST = 400, 'Bad Request'
UNAUTHORIZED = 401, 'Unauthorized'
PAYMENT_REQUIRED = 402, 'Payment Required'
FORBIDDEN = 403, 'Forbidden'
NOT_FOUND = 404, 'Not Found'
METHOD_NOT_ALLOWED = 405, 'Method Not Allowed'
NOT_ACCEPTABLE = 406, 'Not Acceptable'
PROXY_AUTHENTICATION_REQUIRED = 407, 'Proxy Authentication Required'
REQUEST_TIMEOUT = 408, 'Request Timeout'
CONFLICT = 409, 'Conflict'
GONE = 410, 'Gone'
LENGTH_REQUIRED = 411, 'Length Required'
PRECONDITION_FAILED = 412, 'Precondition Failed'
REQUEST_ENTITY_TOO_LARGE = 413, 'Request Entity Too Large'
REQUEST_URI_TOO_LONG = 414, 'Request-URI Too Long'
UNSUPPORTED_MEDIA_TYPE = 415, 'Unsupported Media Type'
REQUESTED_RANGE_NOT_SATISFIABLE = 416, 'Requested Range Not Satisfiable'
EXPECTATION_FAILED = 417, 'Expectation Failed'
IM_A_TEAPOT = 418, 'I\'m a Teapot'
MISDIRECTED_REQUEST = 421, 'Misdirected Request'
UNPROCESSABLE_ENTITY = 422, 'Unprocessable Entity'
LOCKED = 423, 'Locked'
FAILED_DEPENDENCY = 424, 'Failed Dependency'
TOO_EARLY = 425, 'Too Early'
UPGRADE_REQUIRED = 426, 'Upgrade Required'
PRECONDITION_REQUIRED = 428, 'Precondition Required'
TOO_MANY_REQUESTS = 429, 'Too Many Requests'
REQUEST_HEADER_FIELDS_TOO_LARGE = 431, 'Request Header Fields Too Large'
UNAVAILABLE_FOR_LEGAL_REASONS = 451, 'Unavailable For Legal Reasons'
INTERNAL_SERVER_ERROR = 500, 'Internal Server Error'
NOT_IMPLEMENTED = 501, 'Not Implemented'
BAD_GATEWAY = 502, 'Bad Gateway'
SERVICE_UNAVAILABLE = 503, 'Service Unavailable'
GATEWAY_TIMEOUT = 504, 'Gateway Timeout'
HTTP_VERSION_NOT_SUPPORTED = 505, 'HTTP Version Not Supported'
VARIANT_ALSO_NEGOTIATES = 506, 'Variant Also Negotiates'
INSUFFICIENT_STORAGE = 507, 'Insufficient Storage'
LOOP_DETECTED = 508, 'Loop Detected'
NOT_EXTENDED = 510, 'Not Extended'
NETWORK_AUTHENTICATION_REQUIRED = 511, 'Network Authentication Required'
class Response:
"""
A class that is used to build a response that is later sent to the client.
Parameters
----------
body: :class:`str`
The body of the response.
status: :class:`int`
The status code of the response.
content_type: :class:`str`
The content type of the response.
headers: :class:`dict`
The headers of the response.
version: :class:`str`
The HTTP version of the response.
Attributes
----------
version: :class:`str`
The HTTP version of the response.
cookies: :class:`~subway.cookies.CookieJar`
A cookie jar that contains all the cookies that should be set on the response.
"""
def __init__(
self,
body: Optional[ResponseBody] = None,
status: Optional[ResponseStatus] = None,
content_type: Optional[str] = None,
headers: Optional[ResponseHeaders] = None,
version: Optional[str] = None
) -> None:
self.version: str = version or '1.1'
self._status = HTTPStatus(status or 200) # type: ignore
self._body = body
self._content_type = content_type or 'text/html'
self._encoding = "utf-8"
if not headers:
headers = {}
self._headers = Headers(headers)
if body is not None:
self._headers['Content-Type'] = self._content_type
self._headers['Content-Length'] = str(len(body))
self.cookies = CookieJar()
@property
def body(self) -> Optional[AnyBody]:
"""
The body of the response.
"""
return self._body
@body.setter
def body(self, value):
self._body = value
self._headers['Content-Type'] = self.content_type
self._headers['Content-Length'] = str(len(value))
@property
def status(self) -> HTTPStatus:
"""
The status code of the response.
"""
return self._status
@property
def content_type(self) -> str:
"""
The content type of the response.
"""
return self._content_type
@property
def headers(self) -> Dict[str, Any]:
"""
The headers of the response.
"""
return self._headers
def add_header(self, key: str, value: str):
"""
Adds a header to the response.
Parameters
----------
key: :class:`str`
The key of the header.
value: :class:`str`
The value of the header.
"""
self._headers[key] = value
def add_cookie(self, name: str, value: str, **kwargs: Any) -> Cookie:
"""
Adds a cookie to the response.
Parameters
----------
name: :class:`str`
The name of the cookie.
value: :class:`str`
The value of the cookie.
domain: Optional[:class:`str`]
The domain of the cookie.
http_only: :class:`bool`
If the cookie should be set as HTTP only. Defaults to ``False``.
is_secure: :class:`bool`
If the cookie should be set as secure. Defaults to ``False``.
"""
return self.cookies.add_cookie(name, value, **kwargs)
def __repr__(self) -> str:
name = self.__class__.__name__
return f'<{name} status={self.status} content_type={self.content_type!r} version={self.version!r}>'
def _prepare(self, body: Any) -> bytes:
response = [f'HTTP/{self.version} {self.status} {self.status.description}']
response.extend(f'{k}: {v}' for k, v in self._headers.items())
if self.cookies:
response.append(self.cookies.encode())
response = CLRF.join(part.encode() for part in response)
response += CLRF * 2
if body is not None:
if not isinstance(body, (bytes, bytearray, str)):
raise TypeError(f'body must be bytes, bytearray or str, not {type(body)}')
if isinstance(body, str):
body = body.encode()
elif isinstance(body, bytearray):
body = bytes(body)
response += body
return response
async def prepare(self) -> bytes:
"""
Encodes the response into a sendable bytes object.
"""
return self._prepare(self.body)
class StreamResponse(Response):
def __init__(
self,
stream: AsyncIterator[ResponseBody],
*,
body: Optional[ResponseBody] = None,
status: Optional[ResponseStatus] = None,
content_type: Optional[str] = None,
headers: Optional[ResponseHeaders] = None,
version: Optional[str] = None
):
super().__init__(body=body, status=status, content_type=content_type, headers=headers, version=version)
self.stream = stream
def __aiter__(self):
return self
async def __anext__(self) -> bytes:
chunk = await self.stream.__anext__()
if isinstance(chunk, str):
chunk = chunk.encode()
return chunk
class HTMLResponse(Response):
"""
A class used to build an HTML response
"""
def __init__(
self,
body: Optional[str] = None,
status: Optional[ResponseStatus] = None,
headers: Optional[ResponseHeaders] = None,
version: Optional[str] = None
) -> None:
super().__init__(
body=body,
status=status,
content_type='text/html',
headers=headers,
version=version
)
class JSONResponse(Response):
"""
A class used to build a JSON response
"""
def __init__(
self,
body: Optional[JSONResponseBody] = None,
status: Optional[ResponseStatus] = None,
headers: Optional[ResponseHeaders] = None,
version: Optional[str] = None
) -> None:
if body is not None:
body = dumps(body, default=self.default_json_dump) # type: ignore
super().__init__(
body=body,  # already serialised above; dumping again would double-encode the JSON string
status=status,
content_type='application/json',
headers=headers,
version=version
)
def default_json_dump(self, obj: Any) -> Any:
if isinstance(obj, MultiDict):
return obj._dict
return obj.__dict__
class FileResponse(Response):
"""
A class used to build a file response
Parameters
----------
file: :class:`~subway.file.File`
The file to send.
status: :class:`int`
The status code of the response.
headers: :class:`dict`
The headers of the response.
version: :class:`str`
The HTTP version of the response.
"""
def __init__(
self,
file: File,
status: Optional[ResponseStatus] = None,
headers: Optional[ResponseHeaders] = None,
version: Optional[str] = None
) -> None:
self.file = file
super().__init__(
status=status,
content_type=self.get_content_type(),
headers=headers,
version=version
)
def get_content_type(self) -> str:
"""
Gets the content type of the response.
You don't have to call this method since it gets called in the constructor.
"""
filename = self.file.filename
content_type = None
if filename:
content_type, _ = mimetypes.guess_type(filename)
if not content_type:
content_type = 'application/octet-stream'
return content_type
async def prepare(self):
"""
Encodes the response into a sendable bytes object.
"""
self.body = body = await self.file.read()
data = self._prepare(body)
await self.file.close()
return data
@overload
def cache_control(
*,
max_age: Optional[int] = ...,
s_maxage: Optional[int] = ...,
no_cache: Optional[bool] = ...,
no_store: Optional[bool] = ...,
no_transform: Optional[bool] = ...,
must_revalidate: Optional[bool] = ...,
must_understand: Optional[bool] = ...,
proxy_revalidate: Optional[bool] = ...,
public: Optional[bool] = ...,
private: Optional[bool] = ...,
immutable: Optional[bool] = ...,
stale_while_revalidate: Optional[int] = ...,
stale_if_error: Optional[int] = ...
) -> Callable[[Route], Route]:
...
@overload
def cache_control(
*,
max_age: Optional[int] = ...,
s_maxage: Optional[int] = ...,
no_cache: Optional[bool] = ...,
no_store: Optional[bool] = ...,
no_transform: Optional[bool] = ...,
must_revalidate: Optional[bool] = ...,
must_understand: Optional[bool] = ...,
proxy_revalidate: Optional[bool] = ...,
public: Optional[bool] = ...,
private: Optional[bool] = ...,
immutable: Optional[bool] = ...,
stale_while_revalidate: Optional[int] = ...,
stale_if_error: Optional[int] = ...
) -> Callable[[Callable[..., Any]], Callable[..., Any]]:
...
def cache_control(**kwargs: Any) -> Callable[..., Any]:
"""
A decorator used to add cache control headers to a route's response.
"""
def decorator(obj: Any) -> Any:
obj.__cache_control__ = kwargs
return obj
return decorator
```
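A sketch of building and encoding responses with the classes above; the cookie keyword arguments follow the `add_cookie` docstring, and the handler decorated with `cache_control` is made up.

```python
# Illustrative sketch of the response classes defined above.
import asyncio
from subway.response import Response, HTMLResponse, JSONResponse, cache_control

async def main():
    plain = Response(body='hello', status=200, content_type='text/plain')
    plain.add_header('X-Example', 'yes')
    plain.add_cookie('session', 'abc123', http_only=True)
    print(await plain.prepare())     # raw HTTP/1.1 status line, headers and body

    data = JSONResponse(body={'ok': True}, status=201)
    print(await data.prepare())

@cache_control(max_age=3600, public=True)
async def cached(request):
    # the decorator only attaches __cache_control__; Route.__init__ copies it over later
    return HTMLResponse('<h1>cached</h1>')

asyncio.run(main())
```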
#### File: subway/server/__init__.py
```python
from typing import Any, Optional, Union
import asyncio
import sys
import ssl
import socket
from subway import compat
from subway.streams import StreamReader, StreamWriter, start_server, start_unix_server
__all__ = [
'BaseServer',
'TCPServer',
]
if sys.platform != 'win32':
__all__.append('UnixServer')
def _get_event_loop(loop: Union[asyncio.AbstractEventLoop, Any]):
if loop:
if not isinstance(loop, asyncio.AbstractEventLoop):
raise TypeError('Invalid argument type for loop argument')
return loop
try:
return compat.get_running_loop()
except RuntimeError:
return compat.get_event_loop()
class BaseServer:
"""
A base server class, All server classes should inherit from this class.
Parameters
----------
loop: :class:`asyncio.AbstractEventLoop`
The event loop used.
is_ssl: :class:`bool`
Whether to use SSL.
ssl_context: :class:`ssl.SSLContext`
The SSL context to use.
Attributes
----------
loop: :class:`asyncio.AbstractEventLoop`
The event loop used.
"""
def __init__(
self,
*,
loop: Optional[asyncio.AbstractEventLoop] = None,
is_ssl: bool = False,
ssl_context: Optional[ssl.SSLContext] = None
) -> None:
self.loop: asyncio.AbstractEventLoop = _get_event_loop(loop)
self._is_ssl = is_ssl
self._ssl_context = ssl_context
if self._is_ssl and not self._ssl_context:
self._ssl_context = self.create_ssl_context()
self._closed = False
self._server: Optional[asyncio.AbstractServer] = None
@staticmethod
def create_ssl_context() -> ssl.SSLContext:
"""
Creates a default SSL context.
"""
context = ssl.create_default_context()
return context
def is_ssl(self) -> bool:
"""
True if the server is using SSL.
"""
return self._is_ssl and isinstance(self._ssl_context, ssl.SSLContext)
def is_serving(self) -> bool:
"""
True if the server is serving.
"""
return self._server is not None
def is_closed(self) -> bool:
"""
True if the server is closed.
"""
return self._closed
def __await__(self):
return self.serve().__await__()
async def __aenter__(self):
await self.serve()
return self
async def __aexit__(self, *exc: Any):
await self.close()
async def serve(self, *, sock: Optional[socket.socket] = None) -> Any:
raise NotImplementedError
async def close(self):
"""
Closes the server.
"""
if self._server:
self._server.close()
await self._server.wait_closed()
self._server = None
self._closed = True
async def on_transport_connect(self, reader: StreamReader, writer: StreamWriter) -> None:
"""
A callback called on a new connection.
To be subclassed and overridden by users.
Parameters
----------
writer: :class:`~subway.streams.StreamWriter`
The writer of the connection.
reader: :class:`~subway.streams.StreamReader`
The reader of the connection.
"""
class TCPServer(BaseServer):
"""
A TCP server
Parameters
----------
host: Optional[:class:`str`]
The host to listen on.
port: Optional[:class:`int`]
The port to listen on.
loop: :class:`asyncio.AbstractEventLoop`
The event loop used.
is_ssl: :class:`bool`
Whether to use SSL.
ssl_context: :class:`ssl.SSLContext`
The SSL context to use.
Attributes
----------
loop: :class:`asyncio.AbstractEventLoop`
The event loop used.
host: :class:`str`
The host to listen on.
port: :class:`int`
The port to listen on.
"""
def __init__(
self,
host: Optional[str] = None,
port: Optional[int] = None,
*,
loop: Optional[asyncio.AbstractEventLoop] = None,
is_ssl: bool = False,
ssl_context: Optional[ssl.SSLContext] = None
) -> None:
self.host = host
self.port = port
super().__init__(
loop=loop,
is_ssl=is_ssl,
ssl_context=ssl_context
)
def __repr__(self) -> str:
name = self.__class__.__name__
return f'<{name} host={self.host!r} port={self.port!r}>'
async def serve(self, *, sock: Optional[socket.socket] = None) -> None:
"""
Starts the server.
Parameters
----------
sock: :class:`socket.socket`
The socket to use.
"""
if sock:
if self.host is not None or self.port is not None:
raise ValueError('Cannot specify both sock and host/port')
if sock.type is not socket.SOCK_STREAM:
raise TypeError('Invalid argument type for sock argument')
self._server = server = await start_server(
host=self.host,
port=self.port,
connection_callback=self.on_transport_connect,
sock=sock,
ssl=self._ssl_context,
start_serving=False,
)
await server.start_serving()
class UnixServer(BaseServer):
"""
A Unix server
Attributes
----------
loop: :class:`asyncio.AbstractEventLoop`
The event loop used.
path: :class:`str`
The path of the socket to listen on.
Parameters
------------
path: :class:`str`
The path of the socket to listen on.
loop: :class:`asyncio.AbstractEventLoop`
The event loop used.
is_ssl: :class:`bool`
Whether to use SSL.
ssl_context: :class:`ssl.SSLContext`
The SSL context to use.
"""
def __init__(
self,
path: Optional[str] = None,
*,
loop: Optional[asyncio.AbstractEventLoop] = None,
is_ssl: bool = False,
ssl_context: Optional[ssl.SSLContext] = None
) -> None:
self.path = path
super().__init__(
loop=loop,
is_ssl=is_ssl,
ssl_context=ssl_context
)
def __repr__(self) -> str:
name = self.__class__.__name__
return f'<{name} path={self.path!r}>'
async def serve(self, *, sock: Optional[socket.socket] = None) -> None:
"""
Starts the UNIX server.
"""
if sock:
if self.path:
raise ValueError('path and sock cannot be specified together')
if sock.type is not socket.SOCK_STREAM:
raise ValueError('sock must be of type SOCK_STREAM')
if sock.family is not socket.AF_UNIX:
raise ValueError('sock must be of family AF_UNIX')
self._server = server = await start_unix_server(
path=self.path,
connection_callback=self.on_transport_connect,
sock=sock,
)
await server.start_serving()
```
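A minimal sketch of a concrete server built on `TCPServer`; `on_transport_connect` is the hook that `BaseServer` exposes for new connections, and the host/port values are placeholders.

```python
# Sketch: echo one line back to each client, then close the connection.
import asyncio
from subway.server import TCPServer
from subway.streams import StreamReader, StreamWriter

class EchoServer(TCPServer):
    async def on_transport_connect(self, reader: StreamReader, writer: StreamWriter) -> None:
        line = await reader.readline()
        writer.write(line)
        writer.close()

async def main():
    async with EchoServer('127.0.0.1', 8080) as server:
        # __aenter__ calls serve(); keep the task alive while connections are handled
        await asyncio.sleep(3600)

asyncio.run(main())
```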
#### File: async-web-framework/subway/sessions.py
```python
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, Optional
from abc import ABC, abstractmethod
if TYPE_CHECKING:
from .app import Application
from .request import Request
__all__ = 'AbstractRequestSession', 'CookieSession',
class AbstractRequestSession(ABC):
@classmethod
@abstractmethod
async def from_request(cls, request: Request[Application]):
raise NotImplementedError
class CookieSession(AbstractRequestSession, Dict[str, Any]):
"""
A session that is managed by a cookie.
"""
cache: Dict[str, CookieSession] = {}
@classmethod
async def from_request(cls, request: Request[Application]) -> CookieSession:
"""
Returns a session from the given request.
Parameters
----------
request: :class:`~subway.request.Request`
The request to get the session from.
"""
cookie = request.get_default_session_cookie()
if not cookie:
return cls(None)
return cls.cache.setdefault(cookie.value, cls(cookie.value))
def __init__(self, session_id: Optional[str]) -> None:
self.id = session_id
super().__init__()
```
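A sketch of using `CookieSession` inside a request handler; the handler itself is made up, only `from_request` and the dict behaviour come from the class above.

```python
# Illustrative handler: sessions behave like dicts and are cached per cookie value.
from subway.sessions import CookieSession

async def profile(request):
    session = await CookieSession.from_request(request)
    session['views'] = session.get('views', 0) + 1
    return {'session_id': session.id, 'views': session['views']}
```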
#### File: async-web-framework/subway/settings.py
```python
from typing import Any, Dict, Optional, TypedDict, Union
import importlib
import os
import ssl as _ssl
import multiprocessing
import json
from .utils import LOCALHOST, LOCALHOST_V6, SETTING_ENV_PREFIX, validate_ip, loads
from .types import StrPath
__all__ = (
'Settings',
)
def default_ssl_context():
context = _ssl.create_default_context(purpose=_ssl.Purpose.CLIENT_AUTH)
return context
class SettingsDict(TypedDict):
host: Optional[str]
port: int
path: Optional[str]
ssl: Optional[_ssl.SSLContext]
ipv6: bool
worker_count: int
session_cookie_name: str
backlog: int
class Settings:
__slots__ = (
'host', 'port', 'path', 'ipv6', 'ssl', 'worker_count', 'session_cookie_name', 'backlog'
)
def __init__(
self,
*,
host: Optional[str] = None,
port: Optional[int] = None,
path: Optional[str] = None,
ipv6: bool = False,
ssl: Union[bool, _ssl.SSLContext] = False,
worker_count: Optional[int] = None,
session_cookie_name: Optional[str] = None,
backlog: Optional[int] = None
):
self.host = host
self.path = path
self.ipv6 = ipv6
self.ssl = ssl if isinstance(ssl, _ssl.SSLContext) else default_ssl_context() if ssl else None
if port is not None:
if not isinstance(port, int):
raise TypeError('port must be an integer')
if not 0 <= port <= 65535:
raise ValueError('port must be in range 0-65535')
else:
port = 8080
self.port = port
if worker_count is not None:
if not isinstance(worker_count, int) or worker_count < 0:
raise TypeError('worker_count must be a positive integer')
else:
worker_count = (multiprocessing.cpu_count() * 2) + 1
self.worker_count = worker_count
if session_cookie_name is not None:
if not isinstance(session_cookie_name, str):
raise TypeError('session_cookie_name must be a str')
else:
session_cookie_name = '__subway'
self.session_cookie_name = session_cookie_name
if backlog is not None:
if not isinstance(backlog, int) or backlog < 0:
raise TypeError('backlog must be a positive integer')
else:
backlog = 200
self.backlog = backlog
self.ensure_host()
def __getitem__(self, item: str):
try:
return self.__getattribute__(item)
except AttributeError:
raise KeyError(item) from None
def __setitem__(self, key: str, value: Any) -> None:
return self.__setattr__(key, value)
@classmethod
def from_env(cls):
kwargs = {}
env = os.environ
settings = cls.__slots__ # silly thing to do, but it works
for setting in settings:
name = SETTING_ENV_PREFIX + setting.casefold()
kwargs[setting] = env.get(name)
return cls(**kwargs)
@classmethod
def from_file(cls, path: StrPath):
module = importlib.import_module(str(path))
kwargs = {}
settings = cls.__slots__
for setting in settings:
value = getattr(module, setting.casefold(), None)
kwargs[setting] = value
return cls(**kwargs)
@classmethod
def from_json(cls, data: Union[StrPath, Dict[str, Any]]):
if isinstance(data, (str, os.PathLike)):
with open(data, 'r') as f:
value = loads(f.read())
else:
value = data
return cls(**value)
def ensure_host(self) -> None:
if self.path is not None:
return None
if self.ipv6:
if not self.host:
self.host = LOCALHOST_V6
else:
validate_ip(self.host, ipv6=True)
else:
if not self.host:
self.host = LOCALHOST
else:
validate_ip(self.host)
return None
def update(
self,
*,
host: Optional[str] = None,
port: Optional[int] = None,
path: Optional[str] = None,
ipv6: bool = False,
ssl: Optional[Union[bool, _ssl.SSLContext]] = None,
worker_count: Optional[int] = None,
session_cookie_name: Optional[str] = None,
backlog: Optional[int] = None
) -> None:
if host is not None:
self.host = host
if port is not None:
self.port = port
if path is not None:
self.path = path
if ssl is not None:
self.ssl = ssl if isinstance(ssl, _ssl.SSLContext) else default_ssl_context() if ssl else None
if worker_count is not None:
self.worker_count = worker_count
if session_cookie_name is not None:
self.session_cookie_name = session_cookie_name
if backlog is not None:
self.backlog = backlog
self.ipv6 = ipv6 or self.ipv6
self.ensure_host()
def to_dict(self) -> SettingsDict:
return {
'host': self.host,
'port': self.port,
'path': self.path,
'ipv6': self.ipv6,
'ssl': self.ssl,
'worker_count': self.worker_count,
'session_cookie_name': self.session_cookie_name,
'backlog': self.backlog
}
class Config(dict):
pass
```
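A construction sketch for `Settings`; port-range, worker-count and host validation happen in `__init__`/`ensure_host` as defined above, and the values used here are placeholders.

```python
# Illustrative only: building Settings directly and from an already-parsed dict.
from subway.settings import Settings

settings = Settings(host='0.0.0.0', port=8000, ssl=True, worker_count=4)
print(settings.to_dict())

# from_json also accepts a path to a JSON file instead of a dict
same = Settings.from_json({'host': '0.0.0.0', 'port': 8000, 'ipv6': False})
print(same['port'])   # __getitem__ proxies attribute access
```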
#### File: async-web-framework/subway/streams.py
```python
from typing import Callable, List, Literal, Tuple, Optional, Any, overload, TypedDict, Union
import asyncio
import socket
import ssl
from . import compat, utils
from .types import BytesLike, Coro
from .errors import PartialRead
class Peercert(TypedDict, total=False):
subject: Tuple[Tuple[Tuple[str, str]]]
issuer: Tuple[Tuple[Tuple[str, str]]]
version: int
serialNumber: int
notBefore: str
notAfter: str
subjectAltName: Tuple[Tuple[str, str], ...]
OCSP: Tuple[Tuple[str, str]]
caIssuers: Tuple[str, ...]
crlDistributionPoints: Tuple[str, ...]
__all__ = (
'StreamWriter',
'StreamReader',
'StreamProtocol',
'open_connection',
'start_server',
'start_unix_server'
)
class StreamWriter:
"""
Parameters
-----------
transport: :class:`asyncio.Transport`
The transport to use.
"""
def __init__(self, transport: asyncio.Transport, close_waiter: asyncio.Future[None]) -> None:
self._transport = transport
self._close_waiter = close_waiter
self._waiter: Optional[asyncio.Future[None]] = None
self._loop = compat.get_running_loop()
async def __aenter__(self):
return self
async def __aexit__(self, *args: Any):
self.close()
await self.wait_closed()
@property
def transport(self) -> asyncio.Transport:
"""
The transport used by this writer
"""
return self._transport
async def _wait_for_drain(self, timeout: Optional[float] = None) -> None:
if self._waiter is None:
return
try:
await asyncio.wait_for(self._waiter, timeout)
finally:
self._waiter = None
def pause_writing(self):
"""
Creates a future that is resolved when :meth:`~.StreamWriter.resume_writing` is called.
This is supposed to be called when :meth:`asyncio.Protocol.pause_writing` is called.
"""
if not self._waiter:
self._waiter = self._loop.create_future()
def resume_writing(self):
"""
Sets the future that was created by :meth:`~.StreamWriter.pause_writing` to be resolved.
This is supposed to be called when :meth:`asyncio.Protocol.resume_writing` is called.
"""
if self._waiter:
self._waiter.set_result(None)
@overload
def write(self, data: BytesLike) -> None:
...
@overload
def write(self, data: BytesLike, *, drain: Literal[True]) -> Coro[None]:
...
@overload
def write(self, data: BytesLike, *, drain: Literal[False]) -> None:
...
def write(self, data: BytesLike, *, drain: bool = False) -> Any:
"""
Writes data to the transport.
Parameters
----------
data: Union[:class:`bytearray`, :class:`bytes`]
data to write.
drain: :class:`bool`
Whether to wait until all data has been written.
"""
self._transport.write(data)
if drain:
return self.drain()
@overload
def writelines(self, data: List[BytesLike]) -> None:
...
@overload
def writelines(self, data: List[BytesLike], *, drain: Literal[True]) -> Coro[None]:
...
@overload
def writelines(self, data: List[BytesLike], *, drain: Literal[False]) -> None:
...
def writelines(self, data: List[BytesLike], *, drain: bool = False) -> Any:
"""
Writes a list of data to the transport.
Parameters
----------
data: List[Union[:class:`bytearray`, :class:`bytes`]]
list of data to write.
drain: :class:`bool`
Whether to wait until all data has been written.
"""
self._transport.writelines(data)
if drain:
return self.drain()
def write_eof(self):
"""
Writes EOF to the transport.
"""
self._transport.write_eof()
def set_write_buffer_limits(self, high: Optional[int] = None, low: Optional[int] = None) -> None:
"""
Sets the high-water and low-water limits for write flow control.
Parameters
------------
high: Optional[:class:`int`]
The high-water limit.
low: Optional[:class:`int`]
The low-water limit.
"""
self._transport.set_write_buffer_limits(high, low)
def get_write_buffer_size(self) -> int:
"""
Gets the size of the write buffer.
"""
return self._transport.get_write_buffer_size()
async def drain(self, *, timeout: Optional[float] = None):
"""
Waits until all data has been written.
"""
if self.transport.is_closing():
await asyncio.sleep(0)
await self._wait_for_drain(timeout)
@overload
def get_extra_info(self, name: Literal['peername', 'sockname']) -> Union[Tuple[str, int], Tuple[str, int, int, int]]: ...
@overload
def get_extra_info(self, name: Literal['socket']) -> socket.socket: ...
@overload
def get_extra_info(self, name: Literal['compression']) -> Optional[str]: ...
@overload
def get_extra_info(self, name: Literal['cipher']) -> Optional[Tuple[str, str, int]]: ...
@overload
def get_extra_info(self, name: Literal['peercert']) -> Optional[Peercert]: ...
@overload
def get_extra_info(self, name: Literal['sslcontext']) -> Optional[ssl.SSLContext]: ...
@overload
def get_extra_info(self, name: Literal['ssl_object']) -> Optional[Union[ssl.SSLObject, ssl.SSLSocket]]: ...
def get_extra_info(self, name: str) -> Any:
"""
Gets extra info about the transport.
Parameters
----------
name: :class:`str`
The name of the extra info.
"""
return self._transport.get_extra_info(name)
def set_protocol(self, protocol: asyncio.BaseProtocol) -> None:
"""
Sets the protocol used by the transport.
Parameters
----------
protocol: :class:`asyncio.BaseProtocol`
The protocol to use.
"""
self._transport.set_protocol(protocol)
def get_protocol(self) -> asyncio.BaseProtocol:
"""
Gets the protocol used by the transport.
"""
return self._transport.get_protocol()
def close(self):
"""
Closes the transport.
"""
self._transport.close()
async def wait_closed(self) -> None:
"""
Waits until the transport is closed.
"""
await self._close_waiter
class StreamReader:
"""
Attributes
----------
buffer: :class:`bytearray`
A bytearray containing the data.
loop: :class:`asyncio.AbstractEventLoop`
A reference to the event loop.
"""
def __init__(self, loop: Optional[asyncio.AbstractEventLoop] = None) -> None:
self.buffer: bytearray = bytearray()
self.loop = loop or compat.get_running_loop()
self._waiter: Optional[asyncio.Future[None]] = None
self._eof = False
def __aiter__(self):
return self
async def __anext__(self):
data = await self.readline(wait=False)
if not data:
raise StopAsyncIteration
return data
async def _wait_for_data(self, timeout: Optional[float] = None):
if self.at_eof():
raise RuntimeError('Cannot wait for data after EOF')
if self._waiter is not None:
raise RuntimeError('Already waiting for data')
self._waiter = self.loop.create_future()
try:
await asyncio.wait_for(self._waiter, timeout)
finally:
self._waiter = None
def at_eof(self) -> bool:
"""
Returns whether the reader has reached EOF.
"""
return self._eof
def reset(self) -> bytes:
"""
Resets the reader's buffer.
"""
data = self.buffer
self.buffer = bytearray()
return bytes(data)
def feed_data(self, data: BytesLike) -> None:
"""
Feeds the data to the reader.
Parameters
----------
data: Union[:class:`bytearray`, :class:`bytes`]
data to be fed.
"""
if self._eof:
raise RuntimeError('Cannot feed data after EOF')
self.buffer.extend(data)
if self._waiter:
try:
self._waiter.set_result(None)
except asyncio.InvalidStateError:
pass
def feed_eof(self):
"""
Feeds EOF to the reader.
"""
if self._waiter:
try:
self._waiter.set_result(None)
except asyncio.InvalidStateError:
pass
self._eof = True
async def read(
self,
nbytes: Optional[int] = None,
*,
timeout: Optional[float] = None,
wait: bool = True
) -> bytes:
"""
Reads ``nbytes`` off the stream. If ``nbytes`` is not provided, reads the whole stream.
Parameters
----------
nbytes: :class:`int`
Number of bytes to read.
timeout: Optional[:class:`float`]
Timeout to wait for the read to complete.
wait: :class:`bool`
Whether to wait for data to be available.
Raises
------
asyncio.TimeoutError: If the timeout expires.
"""
if not self.buffer:
if wait:
await self._wait_for_data(timeout=timeout)
else:
return b''
if not nbytes:
return self.reset()
while nbytes > len(self.buffer):
if self.at_eof():
buffer = self.reset()
raise PartialRead(buffer, nbytes)
await self._wait_for_data(timeout=timeout)
data = self.buffer[:nbytes]
self.buffer = self.buffer[nbytes:]
return bytes(data)
async def readuntil(
self,
delimiter: BytesLike,
*,
timeout: Optional[float] = None,
wait: bool = True,
include: bool = False
) -> bytes:
"""
Reads until the delimiter is found.
Parameters
----------
delimiter: Union[:class:`bytearray`, :class:`bytes`]
The delimiter to read until.
timeout: Optional[:class:`float`]
Timeout to wait for the read to complete.
wait: :class:`bool`
Whether to wait for data to be available.
include: :class:`bool`
Whether to include the delimiter in the returned data.
Raises
------
asyncio.TimeoutError: If the timeout expires.
"""
if not self.buffer:
if wait:
await self._wait_for_data(timeout=timeout)
else:
return b''
pos = self.buffer.find(delimiter)
while pos == -1:
if self.at_eof():
buffer = self.reset()
raise PartialRead(buffer, None)
await self._wait_for_data(timeout=timeout)
pos = self.buffer.find(delimiter)
if include:
data = self.buffer[:pos + len(delimiter)]
else:
data = self.buffer[:pos]
self.buffer = self.buffer[pos + len(delimiter):]
return bytes(data)
async def readline(
self,
*,
timeout: Optional[float] = None,
wait: bool = True,
include: bool = False
) -> bytes:
"""
Reads a line off the stream.
Parameters
----------
timeout: Optional[:class:`float`]
Timeout to wait for the read to complete.
wait: :class:`bool`
Whether to wait for data to be available.
include: :class:`bool`
Whether to include the delimiter in the returned data.
Raises
------
asyncio.TimeoutError: If the timeout expires.
"""
try:
return await self.readuntil(b'\n', timeout=timeout, wait=wait, include=include)
except PartialRead as e:
return e.partial
async def readlines(
self,
hint: Optional[int] = None,
*,
timeout: Optional[float] = None,
wait: bool = False
) -> List[bytes]:
"""
Reads all lines off the stream.
Parameters
----------
hint: Optional[:class:`int`]
Hint to the number of lines to read.
timeout: Optional[:class:`float`]
Timeout to wait for the read to complete.
wait: :class:`bool`
Whether to wait for data to be available.
Raises
------
asyncio.TimeoutError: If the timeout expires.
"""
lines = []
while True:
try:
line = await self.readline(timeout=timeout, wait=wait)
except asyncio.TimeoutError:
break
if (hint is not None and len(lines) >= hint) or not line:
break
lines.append(line)
return lines
class StreamProtocol(asyncio.Protocol):
def __init__(
self,
loop: asyncio.AbstractEventLoop,
connection_callback: Callable[[StreamReader, StreamWriter], Any],
) -> None:
self.loop = loop
self.connection_callback = connection_callback
self.reader = StreamReader(loop)
self.writer: Optional[StreamWriter] = None
self.paused = False
self.waiter = loop.create_future()
def __call__(self) -> Any:
return self.__class__(self.loop, self.connection_callback)
def connection_made(self, transport: Any) -> None:
self.writer = writer = StreamWriter(transport, self.waiter)
if utils.iscoroutinefunction(self.connection_callback):
self.loop.create_task(self.connection_callback(self.reader, writer))
else:
self.connection_callback(self.reader, writer)
def connection_lost(self, exc: Optional[BaseException]) -> None:
if exc:
self.waiter.set_exception(exc)
else:
self.waiter.set_result(None)
self.writer = None
self.reader.reset()
def data_received(self, data: bytes) -> None:
self.reader.feed_data(data)
def eof_received(self) -> None:
self.reader.feed_eof()
def resume_writing(self) -> None:
if not self.writer or not self.paused:
return
self.paused = False
self.writer.resume_writing()
def pause_writing(self) -> None:
if not self.writer or self.paused:
return
self.paused = True
self.writer.pause_writing()
async def open_connection(
host: Optional[str] = None, port: Optional[int] = None, **kwargs: Any
) -> Tuple[StreamReader, StreamWriter]:
"""
Opens a connection to a remote host.
Parameters
-----------
host: Optional[:class:`str`]
The host to connect to.
port: Optional[:class:`int`]
The port to connect to.
**kwargs: Any
Additional keyword arguments to pass to :meth:`asyncio.loop.create_connection`.
"""
loop = kwargs.pop('loop', None) or compat.get_running_loop()
protocol = StreamProtocol(loop, lambda reader, writer: None)
_, proto = await loop.create_connection(protocol, host=host, port=port, **kwargs) # type: ignore
return proto.reader, proto.writer
async def start_server(
connection_callback: Callable[[StreamReader, StreamWriter], Any],
host: Optional[str] = None,
port: Optional[int] = None,
**kwargs: Any
) -> asyncio.AbstractServer:
"""
Starts a server.
Parameters
-----------
connection_callback: Callable[[:class:`~StreamReader`, :class:`~StreamWriter`], Any]
The callback to call when a connection is made.
host: Optional[:class:`str`]
The host to listen on.
port: Optional[:class:`int`]
The port to listen on.
**kwargs: Any
Additional keyword arguments to pass to :meth:`asyncio.loop.create_server`.
"""
loop = kwargs.pop('loop', None) or compat.get_running_loop()
protocol = StreamProtocol(loop, connection_callback)
server = await loop.create_server(protocol, host=host, port=port, **kwargs) # type: ignore
return server
async def start_unix_server(
connection_callback: Callable[[StreamReader, StreamWriter], Any],
path: Optional[str] = None,
**kwargs: Any
) -> asyncio.AbstractServer:
"""
Starts a Unix server.
Note
----
This only works on unix based systems.
Parameters
-----------
connection_callback: Callable[[:class:`~StreamReader`, :class:`~StreamWriter`], Any]
The callback to call when a connection is made.
path: Optional[:class:`str`]
The path of the unix domain socket.
**kwargs: Any
Additional keyword arguments to pass to :meth:`asyncio.loop.create_unix_server`.
"""
loop = kwargs.pop('loop', None) or compat.get_running_loop()
protocol = StreamProtocol(loop, connection_callback)
server = await loop.create_unix_server(protocol, path=path, **kwargs) # type: ignore
return server
```
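An end-to-end sketch using only this module: a server that echoes one line and a client opened with `open_connection`. The address and port are placeholders.

```python
# Echo sketch built directly on start_server / open_connection.
import asyncio
from subway.streams import start_server, open_connection, StreamReader, StreamWriter

async def on_connect(reader: StreamReader, writer: StreamWriter) -> None:
    line = await reader.readline(include=True)   # keep the trailing newline when echoing
    await writer.write(line, drain=True)         # write(..., drain=True) returns drain()'s coroutine
    writer.close()

async def main():
    server = await start_server(on_connect, host='127.0.0.1', port=9000)
    reader, writer = await open_connection('127.0.0.1', 9000)
    writer.write(b'ping\n')
    print(await reader.readline())               # b'ping' -- the delimiter is stripped by default
    writer.close()
    server.close()

asyncio.run(main())
```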
#### File: async-web-framework/subway/testclient.py
```python
from typing import Any
from .http import HTTPSession
from .app import Application
__all__ = (
'TestClient',
)
class TestClient:
"""
Test client for the application.
Attributes
----------
app: :class:`~.Application`
The application to test.
session: :class:`~.http.HTTPSession`
The HTTP session used.
"""
def __init__(self, app: Application) -> None:
self.app: Application = app
self.session = HTTPSession()
async def __aenter__(self):
if not self.app.is_serving():
await self.app.start()
return self
async def __aexit__(self, *args):
if self.app.is_serving():
await self.app.close()
await self.session.close()
@property
def host(self) -> str:
return self.app.host
@property
def port(self) -> int:
return self.app.port
def ws_connect(self, path: str):
"""
Performs a websocket connection.
Parameters
-------------
path: :class:`str`
The path to the websocket resource.
Example
---------
.. code-block:: python3
import subway
from subway import websockets
app = subway.Application()
client = subway.TestClient(app)
@app.websocket('/ws')
async def handler(request: subway.Request, ws: websockets.ServerWebSocket):
await ws.send(b'Hello!')
data = await ws.receive()
print(data.data)
await ws.close()
async def main():
async with client:
async with client.ws_connect('/ws') as ws:
message = await ws.receive_str()
print(message)
await ws.send(b'Hi!')
app.loop.run_until_complete(main())
"""
url = self.app.url_for(path, is_websocket=True)
return self.session.ws_connect(str(url))
def request(self, path: str, method: str, **kwargs: Any):
"""
Sends a request to the application.
Parameters
----------
path: :class:`str`
The path to the resource.
method: :class:`str`
The HTTP method to use.
**kwargs: Any
The keyword arguments to pass to the request.
Example
---------
.. code-block:: python3
import subway
app = subway.Application()
@app.route('/')
async def index(request: subway.Request):
return 'another creative response'
async def main():
async with subway.TestClient(app) as client:
async with client.get('/') as response:
print(response.status)
text = await response.text()
print(text)
app.loop.run_until_complete(main())
"""
url = self.app.url_for(path)
return self.session.request(url=str(url), method=method, **kwargs)
def get(self, path: str, **kwargs: Any):
return self.request(path, 'GET', **kwargs)
def post(self, path: str, **kwargs: Any):
return self.request(path, 'POST', **kwargs)
def put(self, path: str, **kwargs: Any):
return self.request(path, 'PUT', **kwargs)
def delete(self, path: str, **kwargs: Any):
return self.request(path, 'DELETE', **kwargs)
```
#### File: subway/websockets/enums.py
```python
from typing import Tuple
import enum
__all__ = (
'WebSocketState',
'WebSocketOpcode',
'WebSocketCloseCode',
'VALID_OPCODES',
'VALID_CLOSE_CODES',
'UNASSIGNED_NON_CONTROL_OPCODES',
'UNASSIGNED_CONTROL_OPCODES'
)
class WebSocketState(enum.Enum):
"""
An enumeration.
"""
CONNECTING = 0
OPEN = 1
SENDING = 2
RECEIVING = 3
CLOSED = 4
CLOSING = 5
class WebSocketOpcode(enum.IntEnum):
"""
An enumeration.
"""
CONTINUATION = 0x0
TEXT = 0x1
BINARY = 0x2
CLOSE = 0x8
PING = 0x9
PONG = 0xA
class WebSocketCloseCode(enum.IntEnum):
"""
An enumeration. \
Taken from https://developer.mozilla.org/en-US/docs/Web/API/CloseEvent/code#value
Attributes
----------
value: :class:`int`
The value of the enum.
name: :class:`str`
The name of the enum.
description: :class:`str`
A description of the close code.
reason: :class:`str`
The reason for the close code.
"""
description: str
reason: str
def __new__(cls, value: int, reason: str='', description: str='') -> 'WebSocketCloseCode':
obj = int.__new__(cls, value)
obj._value_ = value
obj.reason = reason
obj.description = description
obj.__doc__ = description
return obj
NORMAL = 1000, 'Normal Closure', \
'Normal closure; the connection successfully completed whatever purpose for which it was created.'
GOING_AWAY = 1001, 'Going Away', \
'The endpoint is going away, either because of a server failure or because the browser is navigating away from the page that opened the connection.'
PROTOCOL_ERROR = 1002, 'Protocol Error', \
'The endpoint is terminating the connection due to a protocol error.'
UNSUPPORTED = 1003, 'Unsupported Data', \
'The connection is being terminated because the endpoint received data of a type it cannot accept (for example, a text-only endpoint received binary data).'
RESERVED = 1004, 'Reserved', \
'Reserved for future use by the WebSocket standard.'
NO_STATUS = 1005, 'No Status Received', \
'Indicates that no status code was provided even though one was expected.'
ABNORMAL = 1006, 'Abnormal Closure', \
'Used to indicate that a connection was closed abnormally (that is, with no close frame being sent) when a status code is expected.'
UNSUPPORTED_PAYLOAD = 1007, 'Unsupported Payload Data', \
'Indicates that an endpoint is terminating the connection because it received a message that contained inconsistent data (e.g., non-UTF-8 data within a text message).'
POLICY_VIOLATION = 1008, 'Policy Violation', \
'Indicates that an endpoint is terminating the connection because it received a message that violates its policy. This is a generic status code, used when codes 1003 and 1009 are not suitable.'
TOO_LARGE = 1009, 'Message Too Large', \
'Indicates that an endpoint is terminating the connection because it received a message that is too big for it to process.'
MANDATORY_EXTENSION = 1010, 'Mandatory Extension', \
'Indicates that an endpoint (client) is terminating the connection because it expected the server to negotiate one or more extension, but the server didn\'t.'
INTERNAL_ERROR = 1011, 'Internal Error', \
'Indicates that an endpoint is terminating the connection because it encountered an unexpected condition that prevented it from fulfilling the request.'
SERVICE_RESTART = 1012, 'Service Restart', \
'Indicates that the service will be restarted.'
TRY_AGAIN_LATER = 1013, 'Try Again Later', \
'Indicates that the server is terminating the connection due to a temporary condition (e.g. it is overloaded); the client may try again later.'
BAD_GATEWAY = 1014, 'Bad Gateway', \
'Indicates that the server, while acting as a gateway or proxy, received an invalid response from the upstream server it accessed in attempting to fulfill the request.'
TLS_HANDSHAKE = 1015, 'TLS Handshake', \
'Indicates that the connection was closed due to a failure to perform a TLS handshake (e.g., the server certificate can\'t be verified).'
UNASSIGNED_NON_CONTROL_OPCODES: Tuple[int, ...] = (0x4, 0x5, 0x6, 0x7)
UNASSIGNED_CONTROL_OPCODES: Tuple[int, ...] = (0xB, 0xC, 0xD, 0xE, 0xF)
VALID_OPCODES = {opcode.value for opcode in WebSocketOpcode}
VALID_OPCODES.update(
UNASSIGNED_NON_CONTROL_OPCODES,
UNASSIGNED_CONTROL_OPCODES
)
VALID_CLOSE_CODES = {code.value for code in WebSocketCloseCode}
``` |
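The three-element tuples above work because of the custom `__new__`, which attaches a reason and a description to each member. A minimal sketch of what that gives you at runtime (the import path is assumed from the file header, not confirmed by the source):
```python
# Hedged sketch, not part of the original file; import path assumed from the
# "#### File: subway/websockets/enums.py" header above.
from subway.websockets.enums import WebSocketCloseCode, VALID_CLOSE_CODES

code = WebSocketCloseCode.NORMAL
print(int(code))         # 1000 -- IntEnum members compare equal to plain ints
print(code.reason)       # 'Normal Closure'
print(code.description)  # 'Normal closure; the connection successfully completed ...'
print(1006 in VALID_CLOSE_CODES)  # True, the set is built from the enum values above
```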
{
"source": "157239n/k1lib",
"score": 2
} |
#### File: k1lib/docs/conf.py
```python
import os, sys, k1lib
sys.path.insert(0, os.path.abspath('../k1lib'))
# -- Project information -----------------------------------------------------
project = 'k1lib'
copyright = '2021, <NAME>'
author = '<NAME>'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx_autorun",
"sphinx_toolbox.collapse",
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
]
autodoc_member_order = 'bysource'
def skip(app, what, name, obj, would_skip, options):
if name == "__init__":
if (doc := obj.__doc__) is None: return True
return doc.strip() == ""
if name in {"__ror__", "__invert__"}: return False
return would_skip
def setup(app):
app.connect("autodoc-skip-member", skip)
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '**/.ipynb_checkpoints', '.ipynb_checkpoints', '**/_*', '_*', "literals/*", "literals/**/*"]
# -- Options for HTML output -------------------------------------------------
html_theme = 'sphinx_rtd_theme' # 'alabaster'
html_theme_options = {"navigation_depth": 20}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('https://numpy.org/doc/stable', None),
'torch': ('https://pytorch.org/docs/master/', None),
'matplotlib': ('https://matplotlib.org/stable/', None),
'graphviz': ('https://graphviz.readthedocs.io/en/stable/', None),
'PIL': ('https://pillow.readthedocs.io/en/stable/', None),
}
# generating literals
with open("literals/settings.rst", "w") as f:
with k1lib.captureStdout() as out:
print(k1lib.settings.__repr__())
f.write(".. code-block:: text\n\n" + "\n".join([f" {e}" for e in out]))
```
#### File: k1lib/callbacks/callbacks.py
```python
import k1lib, time, os, logging, numpy as np, matplotlib.pyplot as plt
from typing import Set, List, Union, Callable, ContextManager, Iterator
from collections import OrderedDict
__all__ = ["Callback", "Callbacks", "Cbs"]
class Callback:
r"""Represents a callback. Define specific functions
inside to intercept certain parts of the training
loop. Can access :class:`k1lib.Learner` like this::
self.l.xb = self.l.xb[None]
This takes x batch of learner, unsqueeze it at
the 0 position, then sets the x batch again.
Normally, you will define a subclass of this and
define specific intercept functions, but if you
want to create a throwaway callback, then do this::
Callback().withCheckpoint("startRun", lambda: print("start running"))
You can use :attr:`~k1lib.callbacks.callbacks.Cbs` (automatically exposed) for
a list of default Callback classes, for any particular needs.
**order**
You can also use `.order` to set the order of execution of the callback.
The higher, the later it gets executed. Value suggestions:
- 7: pre-default runs, like LossLandscape
- 10: default runs, like DontTrainValid
- 13: custom mods, like ModifyBatch
- 15: pre-recording mod
- 17: recording mods, like Profiler.memory
- 20: default recordings, like Loss
- 23: post-default recordings, like ParamFinder
- 25: guards, like TimeLimit, CancelOnExplosion
Just leave as default (10) if you don't know what values to choose.
**dependsOn**
If you're going to extend this class, you can also specify dependencies
like this::
class CbC(k1lib.Callback):
def __init__(self):
super().__init__()
self.dependsOn = {"Loss", "Accuracy"}
This is so that if somewhere, ``Loss`` callback class is temporarily
suspended, then CbC will be suspended also, therefore avoiding errors.
**Suspension**
If your Callback is mainly dormant, then you can do something like this::
class CbD(k1lib.Callback):
def __init__(self):
super().__init__()
self.suspended = True
def startBatch(self):
# these types of methods will only execute
# if ``self.suspended = False``
pass
def analyze(self):
self.suspended = False
# do something that sometimes calls ``startBatch``
self.suspended = True
cbs = k1lib.Callbacks().add(CbD())
# dormant phase:
cbs("startBatch") # does not execute CbD.startBatch()
# active phase
cbs.CbD.analyze() # does execute CbD.startBatch()
So yeah, you can easily make every checkpoint active/dormant by changing
a single variable, how convenient. See over :meth:`Callbacks.suspend`
for more."""
def __init__(self):
self.l = None; self.cbs = None; self.suspended = False
self.name = self.__class__.__name__; self.dependsOn:Set[str] = set()
self.order = 10 # can be modified by subclasses. A smaller order will be executed first
def suspend(self):
"""Checkpoint, called when the Callback is temporarily suspended. Overridable"""
pass
def restore(self):
"""Checkpoint, called when the Callback is back from suspension. Overridable"""
pass
def __getstate__(self):
state = dict(self.__dict__)
del state["l"]; del state["cbs"]; return state
def __setstate__(self, state): self.__dict__.update(state)
def __repr__(self): return f"{self._reprHead}, can...\n{self._reprCan}"
@property
def _reprHead(self): return f"Callback `{self.name}`"
@property
def _reprCan(self): return """- cb.something: to get specific attribute "something" from learner if not available
- cb.withCheckpoint(checkpoint, f): to quickly insert an event handler
- cb.detach(): to remove itself from its parent Callbacks"""
def withCheckpoint(self, checkpoint:str, f:Callable[["Callback"], None]):
"""Quickly set a checkpoint, for simple, inline-able functions
:param checkpoint: checkpoints like "startRun"
:param f: function that takes in the Callback itself"""
setattr(self, checkpoint, lambda: f(self)); return self
def __call__(self, checkpoint):
if not self.suspended and hasattr(self, checkpoint):
return getattr(self, checkpoint)() != None
def attached(self):
"""Called when this is added to a :class:`Callback`. Overrides this to
do custom stuff when this happens."""
pass
def detach(self):
"""Detaches from the parent :class:`Callbacks`"""
self.cbs.remove(self.name); return self
Cbs = k1lib.Object()
Callback.lossCls = k1lib.Object()
class Timings:
"""List of checkpoint timings. Not intended to be instantiated by the end user.
Used within :class:`~k1lib.callbacks.callbacks.Callbacks`, accessible via
:attr:`Callbacks.timings` to record time taken to execute a single
checkpoint. This is useful for profiling stuff."""
@property
def state(self):
answer = dict(self.__dict__); answer.pop("getdoc", None); return answer
@property
def checkpoints(self) -> List[str]:
"""List of all checkpoints encountered"""
return [cp for cp in self.state if k1lib.isNumeric(self[cp])]
def __getattr__(self, attr):
if attr.startswith("_"): raise AttributeError()
self.__dict__[attr] = 0; return 0
def __getitem__(self, idx): return getattr(self, idx)
def __setitem__(self, idx, value): setattr(self, idx, value)
def plot(self):
"""Plot all checkpoints' execution times"""
plt.figure(dpi=100); checkpoints = self.checkpoints
timings = np.array([self[cp] for cp in checkpoints])
maxTiming = timings.max()
if maxTiming >= 1:
plt.bar(checkpoints, timings); plt.ylabel("Time (s)")
elif maxTiming >= 1e-3 and maxTiming < 1:
plt.bar(checkpoints, timings*1e3); plt.ylabel("Time (ms)")
elif maxTiming >= 1e-6 and maxTiming < 1e-3:
plt.bar(checkpoints, timings*1e6); plt.ylabel("Time (us)")
plt.xticks(rotation="vertical"); plt.show()
def clear(self):
"""Clears all timing data"""
for cp in self.checkpoints: self[cp] = 0
def __repr__(self):
cps = '\n'.join([f'- {cp}: {self[cp]}' for cp in self.checkpoints])
return f"""Timings object. Checkpoints:\n{cps}\n
Can...
- t.startRun: to get specific checkpoint's execution time
- t.plot(): to plot all checkpoints"""
_time = time.time
class Callbacks:
def __init__(self):
self._l: k1lib.Learner = None; self.cbsDict = {}; self._timings = Timings()
@property
def timings(self) -> Timings:
"""Returns :class:`~k1lib.callbacks.callbacks.Timings` object"""
return self._timings
@property
def l(self) -> "k1lib.Learner":
""":class:`k1lib.Learner` object. Will be set automatically when
you set :attr:`k1lib.Learner.cbs` to this :class:`Callbacks`"""
return self._l
@l.setter
def l(self, learner):
self._l = learner
for cb in self.cbs: cb.l = learner
@property
def cbs(self) -> List[Callback]:
"""List of :class:`Callback`"""
return [*self.cbsDict.values()] # convenience method for looping over stuff
def _sort(self) -> "Callbacks":
self.cbsDict = OrderedDict(sorted(self.cbsDict.items(), key=(lambda o: o[1].order))); return self
def add(self, cb:Callback, name:str=None):
"""Adds a callback to the collection."""
if cb in self.cbs: cb.l = self.l; cb.cbs = self; return self
cb.l = self.l; cb.cbs = self; name = name or cb.name
if name in self.cbsDict:
i = 0
while f"{name}{i}" in self.cbsDict: i += 1
name = f"{name}{i}"
cb.name = name; self.cbsDict[name] = cb; self._sort()
self._appendContext_append(cb); cb("attached"); return self
def __contains__(self, e:str) -> bool:
"""Whether a specific Callback name is in this :class:`Callback`."""
return e in self.cbsDict
def remove(self, *names:List[str]):
"""Removes a callback from the collection."""
for name in names:
if name not in self.cbsDict: return print(f"Callback `{name}` not found")
cb = self.cbsDict[name]; del self.cbsDict[name]; cb("detached")
self._sort(); return self
def removePrefix(self, prefix:str):
"""Removes any callback with the specified prefix"""
for cb in self.cbs:
if cb.name.startswith(prefix): self.remove(cb.name)
return self
def __call__(self, *checkpoints:List[str]) -> bool:
"""Calls a number of checkpoints one after another.
Returns True if any of the checkpoints return anything at all"""
self._checkpointGraph_call(checkpoints)
answer = False
for checkpoint in checkpoints:
beginTime = _time()
answer |= any([cb(checkpoint) for cb in self.cbs])
self._timings[checkpoint] += _time() - beginTime
return answer
def __getitem__(self, idx:Union[int, str]) -> Callback:
"""Get specific cbs.
:param idx: if :class:`str`, then get the Callback with this specific name,
if :class:`int`, then get the Callback in that index."""
return self.cbs[idx] if isinstance(idx, int) else self.cbsDict[idx]
def __iter__(self) -> Iterator[Callback]:
"""Iterates through all :class:`Callback`."""
for cb in self.cbsDict.values(): yield cb
def __len__(self):
"""How many :class:`Callback` are there in total?"""
return len(self.cbsDict)
def __getattr__(self, attr):
if attr == "cbsDict": raise AttributeError(attr)
if attr in self.cbsDict: return self.cbsDict[attr]
else: raise AttributeError(attr)
def __getstate__(self):
state = dict(self.__dict__); del state["_l"]; return state
def __setstate__(self, state):
self.__dict__.update(state)
for cb in self.cbs: cb.cbs = self
def __dir__(self):
answer = list(super().__dir__())
answer.extend(self.cbsDict.keys())
return answer
def __repr__(self):
return "Callbacks:\n" + '\n'.join([f"- {cbName}" for cbName in self.cbsDict if not cbName.startswith("_")]) + """\n
Use...
- cbs.add(cb[, name]): to add a callback with a name
- cbs("startRun"): to trigger a specific checkpoint, this case "startRun"
- cbs.Loss: to get a specific callback by name, this case "Loss"
- cbs[i]: to get specific callback by index
- cbs.timings: to get callback execution times
- cbs.checkpointGraph(): to graph checkpoint calling orders
- cbs.context(): context manager that will detach all Callbacks attached inside the context
- cbs.suspend("Loss", "Cuda"): context manager to temporarily prevent triggering checkpoints"""
def withBasics(self):
"""Adds a bunch of very basic Callbacks that's needed for everything. Also
includes Callbacks that are not necessary, but don't slow things down"""
self.add(Cbs.CoreNormal()).add(Cbs.Profiler()).add(Cbs.Recorder())
self.add(Cbs.ProgressBar()).add(Cbs.Loss()).add(Cbs.Accuracy()).add(Cbs.DontTrainValid())
return self.add(Cbs.CancelOnExplosion()).add(Cbs.ParamFinder())
def withQOL(self):
"""Adds quality of life Callbacks."""
return self
def withAdvanced(self):
"""Adds advanced Callbacks that do fancy stuff, but may slow things
down if not configured specifically."""
return self.add(Cbs.HookModule().withMeanRecorder().withStdRecorder()).add(Cbs.HookParam())
@k1lib.patch(Callbacks)
def _resolveDependencies(self):
for cb in self.cbs:
cb._dependents:Set[Callback] = set()
cb.dependsOn = set(cb.dependsOn)
for cb in self.cbs:
for cb2 in self.cbs:
if cb2.__class__.__name__ in cb.dependsOn:
cb2._dependents.add(cb)
class SuspendContext:
def __init__(self, cbs:Callbacks, cbsNames:List[str], cbsClasses:List[str]):
self.cbs = cbs; self.cbsNames = cbsNames; self.cbsClasses = cbsClasses
self.cbs.suspendStack = getattr(self.cbs, "suspendStack", [])
def __enter__(self):
cbsClasses = set(self.cbsClasses); cbsNames = set(self.cbsNames)
self._resolveDependencies()
def explore(cb:Callback):
for dept in cb._dependents:
cbsClasses.add(dept.__class__.__name__); explore(dept)
[explore(cb) for cb in self.cbs if cb.__class__.__name__ in cbsClasses or cb.name in cbsNames]
stackFrame = {cb:cb.suspended for cb in self.cbs if cb.__class__.__name__ in cbsClasses or cb.name in cbsNames}
for cb in stackFrame: cb.suspend(); cb.suspended = True
self.suspendStack.append(stackFrame)
def __exit__(self, *ignored):
for cb, oldValue in self.suspendStack.pop().items():
cb.suspended = oldValue; cb.restore()
def __getattr__(self, attr): return getattr(self.cbs, attr)
@k1lib.patch(Callbacks)
def suspend(self, *cbNames:List[str]) -> ContextManager:
"""Creates suspension context for specified Callbacks. Matches callbacks with
their name. Works like this::
cbs = k1lib.Callbacks().add(CbA()).add(CbB()).add(CbC())
with cbs.suspend("CbA", "CbC"):
pass # inside here, only CbB will be active, and its checkpoints executed
# CbA, CbB and CbC are all active
.. seealso:: :meth:`suspendClasses`"""
return SuspendContext(self, cbNames, [])
@k1lib.patch(Callbacks)
def suspendClasses(self, *classNames:List[str]) -> ContextManager:
"""Like :meth:`suspend`, but matches callbacks' class names to the given list,
instead of matching names. Meaning::
cbs = k1lib.Callbacks().add(Cbs.Loss()).add(Cbs.Loss())
# cbs now has 2 callbacks "Loss" and "Loss0"
with cbs.suspendClasses("Loss"):
pass # now both of them are suspended"""
return SuspendContext(self, [], classNames)
@k1lib.patch(Callbacks)
def suspendEval(self, more:List[str]=[], less:List[str]=[]) -> ContextManager:
"""Same as :meth:`suspendClasses`, but suspend some default classes typical
used for evaluation callbacks. Just convenience method really. Currently includes:
- HookModule, HookParam, ProgressBar
- ParamScheduler, Loss, Accuracy, Autosave
- ConfusionMatrix
:param more: include more classes to be suspended
:param less: exclude classes supposed to be suspended by default"""
classes = {"HookModule", "HookParam", "ProgressBar", "ParamScheduler", "Loss", "Accuracy", "Autosave", "ConfusionMatrix"}
classes.update(more); classes -= set(less)
return self.suspendClasses(*classes)
class AppendContext:
def __init__(self, cbs:Callbacks): self.cbs = cbs
def __enter__(self): self.cbs.contexts.append([])
def __exit__(self, *ignored):
[cb.detach() for cb in self.cbs.contexts.pop()]
@k1lib.patch(Callbacks)
def _appendContext_append(self, cb):
if "contexts" not in self.__dict__: self.contexts = [[]]
self.contexts[-1].append(cb)
@k1lib.patch(Callbacks)
def context(self) -> ContextManager:
"""Add context.
Works like this::
cbs = k1lib.Callbacks().add(CbA())
# CbA is available
with cbs.context():
cbs.add(CbB())
# CbA and CbB available
cbs.add(CbC())
# all 3 are available
# only CbA is available
"""
return AppendContext(self)
@k1lib.patch(Callbacks)
def _checkpointGraph_call(self, checkpoints:List[str]):
if not hasattr(self, "_checkpointGraphDict"):
self._checkpointGraphDict = k1lib.Object().withAutoDeclare(lambda: k1lib.Object().withAutoDeclare(lambda: 0))
self._lastCheckpoint = "<root>"
for cp in checkpoints:
self._checkpointGraphDict[self._lastCheckpoint][cp] += 1
self._lastCheckpoint = cp
@k1lib.patch(Callbacks)
def checkpointGraph(self, highlightCb:Union[str, Callback]=None):
"""Graphs what checkpoints follows what checkpoints. Has to run at least once
first. Requires graphviz package though. Example::
cbs = Callbacks()
cbs("a", "b", "c", "d", "b")
cbs.checkpointGraph() # returns graph object. Will display image if using notebooks
.. image:: ../images/checkpointGraph.png
:param highlightCb: if available, will highlight the checkpoints the callback
uses. Can be name/class-name/class/self of callback."""
g = k1lib.digraph(); s = set()
for cp1, cp1o in self._checkpointGraphDict.state.items():
for cp2, v in cp1o.state.items():
g.edge(cp1, cp2, label=f" {v} "); s.add(cp2)
if highlightCb != None:
_cb = None
if isinstance(highlightCb, Callback): _cb = highlightCb
elif isinstance(highlightCb, type) and issubclass(highlightCb, Callback): # find cb that has the same class
for cbo in self.cbs:
if isinstance(cbo, highlightCb): _cb = cbo; break
if _cb is None: raise AttributeError(f"Can't find any Callback inside this Callbacks which is of type `{cb.__name__}`")
elif isinstance(highlightCb, str):
for cbName, cbo in self.cbsDict.items():
if cbName == highlightCb: _cb = cbo; break
if type(cbo).__name__ == highlightCb: _cb = cbo; break
if _cb is None: raise AttributeError(f"Can't find any Callback inside this Callbacks with name or class `{cb}`")
else: raise AttributeError(f"Don't understand {cb}")
print(f"Highlighting callback `{_cb.name}`, of type `{type(_cb)}`")
for cp in s:
if hasattr(_cb, cp): g.node(cp, color="red")
return g
```
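Since `Callback.__call__` treats any non-None return value from a checkpoint handler as a signal, and `Callbacks.__call__` ORs those signals together, here is a minimal sketch of that convention (assumed usage, mirroring the docstrings above, not part of the original file):
```python
# Hedged sketch of the checkpoint convention above; assumed usage only.
import k1lib

class Skipper(k1lib.Callback):
    def startStep(self):
        # returning anything non-None makes cbs("startStep") evaluate to True,
        # which a training loop can use to skip the optimizer step (see DontTrainValid)
        return True

cbs = k1lib.Callbacks().add(Skipper())
cbs.add(k1lib.Callback().withCheckpoint("startRun", lambda cb: print("run started")))
print(cbs("startRun"))   # prints "run started", then False (handler returned None)
print(cbs("startStep"))  # True, because Skipper.startStep returned something
```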
#### File: k1lib/callbacks/hookParam.py
```python
from .callbacks import Callback, Callbacks, Cbs
import k1lib, torch.nn as nn
import matplotlib.pyplot as plt
from functools import partial
from typing import List, Tuple, Callable, Union
__all__ = ["HookParam"]
class ParamData(k1lib.Object):
def __init__(self):
super().__init__()
self.means = []; self.stds = []
self.mins = []; self.maxs = []
def update(self, torchParam:nn.Parameter):
self.means.append(torchParam.mean().item())
self.stds.append(torchParam.std().item())
self.mins.append(torchParam.min().item())
self.maxs.append(torchParam.max().item())
def __len__(self): return len(self.means)
def __repr__(self):
return f"""Param's saved data. Use...
- d.means: to get list of means
- d.stds: to get list of means
- d.mins: to get list of mins
- d.maxs: to get list of maxs"""
class Param:
def __init__(self, name:str, torchParam:nn.Parameter):
self.name = name
self.torchParam = torchParam
self.data = ParamData()
self.every = k1lib.Every(3)
def update(self):
if self.every(): self.data.update(self.torchParam.detach())
def __repr__(self):
return f"""Param `{self.name}`. Use...
- p.torchParam: to get actual underlying parameter
- p.data: to get data stored
- cb.plot(): to quickly look at everything"""
@k1lib.patch(Cbs)
class HookParam(Callback):
"""Records means and stds of all parameters"""
def __init__(self):
""
super().__init__(); self.params:List[Param] = []
def __getitem__(self, idx:Union[int, slice]):
if type(idx) == int: return self.params[idx]
answer = HookParam(); answer.params = self.params[idx]; return answer
def __len__(self): return len(self.params)
def _selected(self, paramName:str):
splits = paramName.split(".")
try:
mS = self.l.selector
for split in splits[:-1]: mS = mS[split]
return "HookParam" in mS and hasattr(mS, splits[-1])
except KeyError: return False
def startRun(self):
if len(self) == 0: # set things up first time only
self.params = [Param(k, v) for k, v in self.l.model.named_parameters() if self._selected(k)]
def startBatch(self): [param.update() for param in self.params]
def css(self, css:str):
"""Creates a new HookParam object with selected modules. May be useful
for displaying a subset of the recorded data"""
oldSelector = self.l.selector; answer = HookParam()
self.l.selector = k1lib.selector.select(self.l.model, css)
answer.params = [param for param in self.params if self._selected(param.name)]
self.l.selector = oldSelector; return answer
def __repr__(self):
s = f", {len(self[0].data)} means and stds each" if len(self) > 0 else ""
names = "\n".join([f" {i}. {p.name}" for i, p in enumerate(self)])
return f"""{super()._reprHead}: {len(self)} params{s}:\n{names}\n
Use...
- p.plot(): to quickly look at everything
- p[i]: to view a single param
- p[a:b]: to get a new HookParam with selected params
- p.css("..."): to select a specific subset of modules only
{super()._reprCan}"""
def plotF(params:Union[HookParam, Param, List[Param]], rangeSlice:slice):
if type(params) == Param: params = [params]
fields = params[0].data.state.keys(); step = rangeSlice.step or 1
fig, axes = plt.subplots(2, 2, figsize=(10, 6), dpi=100)
axes = axes.flatten()
for field, ax in zip(fields, axes):
for param in params:
fieldData = param.data[field]
r = k1lib.Range(len(fieldData))[rangeSlice]
ax.plot(r.range_[::step], fieldData[r.slice_][::step])
ax.set_title(field.capitalize())
plt.figlegend([p.name for p in params], loc='right')
@k1lib.patch(HookParam)
@k1lib.patch(Param)
def plot(self): return k1lib.viz.SliceablePlot(partial(plotF, self))
```
#### File: callbacks/lossFunctions/accuracy.py
```python
from ..callbacks import Callback, Callbacks, Cbs
from typing import Callable, Tuple
import torch, k1lib
__all__ = ["AccF"]
AccFSig = Callable[[Tuple[torch.Tensor, torch.Tensor]], float]
PredFSig = Callable[[torch.Tensor], torch.Tensor]
@k1lib.patch(Cbs)
class AccF(Callback):
" "
def __init__(self, predF:PredFSig=None, accF:AccFSig=None, integrations:bool=True):
"""Generic accuracy function.
Built in default accuracies functions are fine, if you don't do something too
dramatic/different. Expected variables in :class:`~k1lib.Learner`:
- y: :class:`~torch.Tensor` of shape (\*N, C)
- yb: :class:`~torch.Tensor` of shape (\*N,)
Deposits variables into :class:`~k1lib.Learner`:
- preds: detached, batched tensor output of ``predF``
- accuracies: detached, batched tensor output of ``accF``
- accuracy: detached, single float, mean of ``accuracies``
Where:
- N is the batch size. Can be multidimensional, but has to agree between ``y`` and ``yb``
- C is the number of categories
:param predF: takes in ``y``, returns predictions (tensor with int elements indicating the categories)
:param accF: takes in ``(predictions, yb)``, returns accuracies (tensor with 0 or 1 elements)
:param integrations: whether to integrate :class:`~k1lib.callbacks.confusionMatrix.ConfusionMatrix` or not."""
super().__init__(); self.order = 10; self.integrations = integrations; self.ownsConMat = False; self.conMatCb = None
self.predF = predF or (lambda y: y.argmax(-1))
self.accF = accF or (lambda p, yb: (p == yb)+0)
def attached(self):
if self.integrations:
if "ConfusionMatrix" not in self.cbs:
self.conMatCb = Cbs.ConfusionMatrix()
self.cbs.add(self.conMatCb); self.ownsConMat = True
else: self.conMatCb = self.cbs.ConfusionMatrix
def endLoss(self):
preds = self.predF(self.l.y); self.l.preds = preds.detach()
accs = self.accF(preds, self.l.yb); self.l.accuracies = accs.detach()
self.l.accuracy = accs.float().mean().item()
def detach(self):
super().detach()
if self.conMatCb != None:
if self.ownsConMat: self.conMatCb.detach()
self.conMatCb = None
```
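For reference, this is what the default `predF`/`accF` described above compute, restated on made-up tensors (a hedged sketch, not from the original file):
```python
# Hedged sketch of the default predF/accF above, on toy tensors.
import torch

y  = torch.tensor([[0.1, 0.9], [0.8, 0.2]])  # (N, C) model outputs
yb = torch.tensor([1, 1])                     # (N,) labels
preds = y.argmax(-1)                          # default predF -> tensor([1, 0])
accs  = (preds == yb) + 0                     # default accF  -> tensor([1, 0])
print(accs.float().mean().item())             # 0.5, what endLoss() stores as l.accuracy
```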
#### File: callbacks/profilers/computation.py
```python
from k1lib.callbacks import Callback, Cbs
import k1lib, numpy as np; from torch import nn
_spacing = lambda s: f"{s} "; # inserted at end of everything, if that element existed
_lcomp = 14; _lp1 = 8; _lp2 = 15; _lp3 = 14
class ComputationData:
def __init__(self, cProfiler, mS:k1lib.selector.ModuleSelector):
self.cProfiler = cProfiler; self.mS = mS; self.flop = 0
self.handle = None; self.hook()
self.flops = 0; self.tS = None # corresponding time selector
def hook(self):
def hk(m, i, o):
i = k1lib.squeeze(i)
if isinstance(m, nn.Linear): self.flop += i.numel() * m.out_features
elif isinstance(m, nn.Conv2d):
self.flop += m.out_channels * i.shape.numel() * np.prod(m.kernel_size)
elif isinstance(m, (nn.LeakyReLU, nn.ReLU, nn.Sigmoid)):
self.flop += i.numel()
self.handle = self.mS.nn.register_forward_hook(hk)
def unhook(self):
self.cProfiler.totalFlop += self.flop; self.handle.remove()
def __getstate__(self):
answer = dict(self.__dict__)
del answer["mS"]; del answer["cProfiler"]; return answer
def __setstate__(self, state): self.__dict__.update(dict(state))
def __str__(self):
if self.flop <= 0: return ""
a = _spacing(f"{k1lib.fmt.comp(self.flop)}".ljust(_lcomp))
b = _spacing(f"{round(100 * self.flop / self.cProfiler.totalFlop)}%".rjust(_lp1))
c = ""
if self.cProfiler._tpAvailable:
self.flops = self.flop / self.tS.data.time
c = _spacing(f"{k1lib.fmt.compRate(self.flops)}".ljust(_lp2))
d = ""
if self.cProfiler.selected:
if "_compProf_" in self.mS:
d = f"{round(100 * self.flop / self.cProfiler.selectedTotalFlop)}%"
d = _spacing(d.rjust(_lp3))
return f"{a}{b}{c}{d}"
class ComputationProfiler(Callback):
"""Profiles computation. Only provide reports on well known
layers only, and thus can't really be universal. Example::
l = k1lib.Learner.sample()
l.cbs.add(Cbs.Profiler())
# views table
l.Profiler.computation
# views table highlighted
l.Profiler.computation.css("#lin1 > #lin")
"""
def __init__(self, profiler:"Profiler"):
super().__init__(); self.profiler = profiler
def startRun(self):
if not hasattr(self, "selector"): # if no selectors found
self.selector = self.l.model.select("")
for m in self.selector.modules(): m.data = ComputationData(self, m)
self.selector.displayF = lambda m: (k1lib.fmt.txt.red if "_compProf_" in m else k1lib.fmt.txt.identity)(m.data)
self.totalFlop = 0; self.selectedTotalFlop = None
@property
def selected(self): return self.selectedTotalFlop != None
@property
def _tpAvailable(self) -> bool:
"""Whether TimeProfiler's results are available"""
try: self.profiler._time(); return True
except Exception as e: return False
def startStep(self): return True
def _run(self):
"""Runs everything"""
with self.cbs.context(), self.cbs.suspendEval():
self.cbs.add(Cbs.Cpu()); self.l.run(1, 1)
for m in self.selector.modules(): m.data.unhook()
def detached(self): # time profiler integration, so that flops can be displayed
if self._tpAvailable:
for cS, tS in zip(self.selector.modules(), self.profiler.time.selector.modules()):
cS.data.tS = tS # injecting dependency
def css(self, css:str):
"""Selects a small part of the network to highlight. See also: :mod:`k1lib.selector`."""
self.selector.parse(k1lib.selector.preprocess(css, "_compProf_"))
self.selectedTotalFlop = 0
for m in self.selector.modules():
if "_compProf_" in m:
self.selectedTotalFlop += m.data.flop
print(self.__repr__())
self.selector.clearProps(); self.selectedTotalFlop = None
def __repr__(self):
header = _spacing("computation".ljust(_lcomp))
header += _spacing("% total".rjust(_lp1))
header += _spacing("rate".ljust(_lp2)) if self._tpAvailable else ""
header += _spacing("% selected".rjust(_lp3)) if self.selected else ""
footer = _spacing(f"{k1lib.fmt.comp(self.totalFlop)}".ljust(_lcomp))
footer += _spacing("".rjust(_lp1))
footer += _spacing("".ljust(_lp2)) if self._tpAvailable else ""
footer += _spacing(f"{k1lib.fmt.comp(self.selectedTotalFlop)}".rjust(_lp3)) if self.selected else ''
footer = ("Total", footer)
return f"""ComputationProfiler:
{k1lib.tab(self.selector.__repr__(intro=False, header=header, footer=footer))}
The "rate" column will appear if integration with Profiler.time is
possible, showing actual ops/s
Can...
- cp.css("..."): highlights a particular part of the network
- cp.selector: to get internal k1lib.ModuleSelector object"""
```
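The FLOP counts in `ComputationData.hook` follow a simple per-layer rule; here is a worked restatement of that arithmetic (illustrative only, it is an approximation rather than an exact FLOP model, and not part of the original file):
```python
# Worked restatement of the counting rule in ComputationData.hook above (illustrative only).
import numpy as np, torch, torch.nn as nn

x = torch.randn(2, 3)                            # batch of 2, 3 features
lin = nn.Linear(3, 4)
flop_linear = x.numel() * lin.out_features       # 6 * 4 = 24

img = torch.randn(1, 3, 8, 8)
conv = nn.Conv2d(3, 16, 3)
flop_conv = conv.out_channels * img.shape.numel() * np.prod(conv.kernel_size)
# 16 * (1*3*8*8) * 9 = 27648, the same expression the Conv2d branch uses
print(flop_linear, flop_conv)
```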
#### File: k1lib/callbacks/shorts.py
```python
from .callbacks import Callback, Callbacks, Cbs
import k1lib, os, torch
__all__ = ["Autosave", "DontTrainValid", "InspectLoss", "ModifyLoss", "Cpu", "Cuda",
"DType", "InspectBatch", "ModifyBatch", "InspectOutput", "ModifyOutput",
"Beep"]
@k1lib.patch(Cbs)
class Autosave(Callback):
"""Autosaves 3 versions of the network to disk"""
def __init__(self): super().__init__(); self.order = 23
def endRun(self):
os.system("mv autosave-1.pth autosave-0.pth")
os.system("mv autosave-2.pth autosave-1.pth")
self.l.save("autosave-2.pth")
@k1lib.patch(Cbs)
class DontTrainValid(Callback):
"""If is not training, then don't run m.backward() and opt.step().
The core training loop in k1lib.Learner don't specifically do this,
cause there may be some weird cases where you want to also train valid."""
def _common(self):
if not self.l.model.training: return True
def startBackward(self): return self._common()
def startStep(self): return self._common()
@k1lib.patch(Cbs)
class InspectLoss(Callback):
"""Expected `f` to take in 1 float."""
def __init__(self, f): super().__init__(); self.f = f; self.order = 15
def endLoss(self): self.f(self.loss.detach())
@k1lib.patch(Cbs)
class ModifyLoss(Callback):
"""Expected `f` to take in 1 float and return 1 float."""
def __init__(self, f): super().__init__(); self.f = f; self.order = 13
def endLoss(self): self.l.loss = self.f(self.loss)
@k1lib.patch(Cbs)
class Cuda(Callback):
"""Moves batch and model to the default GPU"""
def startRun(self): self.l.model.cuda()
def startBatch(self):
self.l.xb = self.l.xb.cuda()
self.l.yb = self.l.yb.cuda()
@k1lib.patch(Cbs)
class Cpu(Callback):
"""Moves batch and model to CPU"""
def startRun(self): self.l.model.cpu()
def startBatch(self):
self.l.xb = self.l.xb.cpu()
self.l.yb = self.l.yb.cpu()
@k1lib.patch(Cbs)
class DType(Callback):
"""Moves batch and model to a specified data type"""
def __init__(self, dtype): super().__init__(); self.dtype = dtype
def startRun(self): self.l.model = self.l.model.to(self.dtype)
def startBatch(self):
self.l.xb = self.l.xb.to(self.dtype)
self.l.yb = self.l.yb.to(self.dtype)
@k1lib.patch(Cbs)
class InspectBatch(Callback):
"""Expected `f` to take in 2 tensors."""
def __init__(self, f:callable): super().__init__(); self.f = f; self.order = 15
def startBatch(self): self.f(self.l.xb, self.l.yb)
@k1lib.patch(Cbs)
class ModifyBatch(Callback):
"""Modifies xb and yb on the fly. Expected `f`
to take in 2 tensors and return 2 tensors."""
def __init__(self, f): super().__init__(); self.f = f; self.order = 13
def startBatch(self): self.l.xb, self.l.yb = self.f(self.l.xb, self.l.yb)
@k1lib.patch(Cbs)
class InspectOutput(Callback):
"""Expected `f` to take in 1 tensor."""
def __init__(self, f): super().__init__(); self.f = f; self.order = 15
def endPass(self): self.f(self.y)
@k1lib.patch(Cbs)
class ModifyOutput(Callback):
"""Modifies output on the fly. Expected `f` to take
in 1 tensor and return 1 tensor"""
def __init__(self, f): super().__init__(); self.f = f; self.order = 13
def endPass(self): self.l.y = self.f(self.y)
@k1lib.patch(Cbs)
class Beep(Callback):
"""Plays a beep sound when the run is over"""
def endRun(self): k1lib.beep()
```
#### File: k1lib/cli/gb.py
```python
from k1lib import cli
__all__ = ["feats", "origin"]
class feats(cli.BaseCli):
"""Fetches features, each on a separate stream"""
def __ror__(self, it):
it = it | cli.grep("FEATURES", 0, 1e9).till("ORIGIN") | cli.rows()[1:-1]
cache = []
for line in it:
if line[4:9] != " ": # new section detected
if len(cache) > 0: yield iter(cache)
cache = []
cache.append(line)
if len(cache) > 0: yield iter(cache)
@staticmethod
def filt(*terms:str) -> cli.BaseCli:
"""Filters for specific terms in all the features texts. If there
are multiple terms, then filters for first term, then second, then third,
so the term's order might matter to you"""
if len(terms) == 0: return cli.identity()
if len(terms) > 1: return cli.deref() | cli.init.serial(*(feats.filt(term) for term in terms))
return cli.toList().all() | cli.filt(lambda F: F | cli.grep(terms[0]) | cli.shape(0) > 0)
@staticmethod
def tag(tag:str) -> cli.BaseCli:
"""Gets a single tag out. Applies this on a single feature only"""
class _tag(cli.BaseCli):
def __ror__(self, it):
lines = it | cli.grep(f"/{tag}").till(f"/(?!{tag})") | cli.deref()
# check if on same line
if len(lines) > 1 and lines[-1].lstrip().startswith("/"): lines.pop()
return (lines | cli.op().split(f"/{tag}=\"").all() | cli.joinStreams() | ~cli.head(1)\
| cli.op().strip().all() | cli.join("")).rstrip("\"")
return _tag()
class origin(cli.BaseCli):
"""Return the origin section of the genbank file"""
def __ror__(self, it):
return it | cli.grep("ORIGIN", 0, 1e9) | ~cli.head(1) | cli.op().strip().all()\
| cli.op().split(" ").all() | cli.cut()[1:] | cli.join("").all()\
| cli.remove("/") | cli.join("")
```
#### File: k1lib/cli/others.py
```python
__all__ = ["crissCross"]
from typing import Callable, Iterator, Any, Union, List
from k1lib.cli import BaseCli; from k1lib import cli
import torch
def crissCross():
"""Like the monkey-patched function :meth:`torch.crissCross`.
Example::
# returns another Tensor
[torch.randn(3, 3), torch.randn(3)] | crissCross()"""
return cli.applyS(lambda x: torch.crissCross(*x))
#torch.stack = cli.applyS(torch.stack)
#torch.stack.__doc__ = "Stacks tensors together"
```
#### File: k1lib/cli/sam.py
```python
from k1lib import cli; import k1lib
__all__ = ["cat", "header", "flag"]
settings = k1lib.Settings()
k1lib.settings.cli.add("sam", settings, "from k1lib.cli.sam module");
def cat(bamFile:str, header:bool=True):
"""Get sam file outputs from bam file.
Example::
sam.cat("file.bam") | display()
:param header: whether to include headers or not"""
return None | cli.cmd(f"samtools view {'-h' if header else ''} {bamFile}") | cli.table("\t")
settings.add("header", k1lib.Settings()
.add("short", ["qname", "flag", "rname", "pos", "mapq", "cigar", "rnext", "pnext", "tlen", "seq", "qual"])
.add("long", ["Query template name", "Flags", "Reference sequence name", "Position", "Mapping quality", "CIGAR string", "Rname of next read", "Position of next read", "Template length", "Sequence", "Quality"]), "sam headers")
class header(cli.BaseCli):
def __init__(self, long=True):
"""Adds a header to the table.
Example::
sam.cat("file.bam") | sam.header() | display()
You can change the header labels like this::
settings.cli.sam.header.long = ["Query template name", ...]
:param long: whether to use a long descriptive header, or a short one"""
super().__init__(); self.long = long
def __ror__(self, it):
return it | cli.insertRow(*(settings.header.long if self.long else settings.header.short))
settings.add("flags", ['PAIRED', 'PROPER_PAIR', 'UNMAP', 'MUNMAP', 'REVERSE', 'MREVERSE', 'READ1', 'READ2', 'SECONDARY', 'QCFAIL', 'DUP', 'SUPPLEMENTARY'], "list of flags")
class flag(cli.bindec):
def __init__(self, f=None):
"""Decodes flags attribute.
Example::
# returns ['PAIRED', 'UNMAP']
5 | flag()
# returns 'PAIRED, UNMAP'
5 | flag(cli.join(", "))
You'll mostly use this in this format::
sam.cat("file.bam", False) | apply(sam.flag(), 1) | display()
You can change the flag labels like this::
settings.cli.sam.flags = ["paired", ...]
:param f: transform function fed into :class:`~k1lib.cli.utils.bindec`, defaulted
to `join(", ")`"""
super().__init__(k1lib.settings.cli.sam.flags, f or cli.join(", "))
```
#### File: k1lib/cli/trace.py
```python
import k1lib
from k1lib.cli import *
__all__ = ["trace"]
traceIdxAuto = k1lib.AutoIncrement(prefix="TD_")
class TraceData:
def __init__(self, _cli, inS, outS, name=None):
"""
:param inS: in and out strings to be displayed in the edges"""
self.idx = traceIdxAuto()
self.inS = inS; self.outS = outS; self.cli = _cli
self.name = name or _cli.__class__.__name__
def __str__(self):
return f"<TraceData idx='{self.idx}' inS='{self.inS}' outS='{self.outS}' name='{self.name}' cli='{self.cli}'>"
def isMTM(c):
if not isinstance(c, BaseCli): return False
if isinstance(c, (manyToMany, applyMp, applyTh)): return True
if isinstance(c, apply) and isinstance(c.f, BaseCli) and c.column is None: return True
return False
class TraceException(Exception): pass
clusterAuto = k1lib.AutoIncrement()
emptyInputSentinel = object()
class _trace(BaseCli):
def __init__(self, inp, f, g=None, depth=None):
"""
Some notes. startTd will always try to grab the first thing, lastTd will only
grab the last thing at the end of __ror__, hence "last" and not "end".
:param inp: initial input to pipe into other cli tools
:param f: function to display result of cli tools, default just shows the shape of the stream
:param g: :class:`graphviz.dot.Digraph` to use (hence subgraph, hence no "start" and "end")"""
if depth is None: depth = k1lib.MaxDepth(float("inf"), 0)
self.inp = inp # will change constantly as new clis are being piped into by trace
self.f = f; self.depth = depth; self._reprRO = k1lib.RunOnce()
if g is None:
self.lastTd = TraceData(None, None, None, "\\<start\\>")
self.g = k1lib.digraph(); self._formNode(self.lastTd)
else: self.g = g; self.lastTd = None
self.firstTime = True # every other time other than the first should not record any data. It should just pass data through
def _formNode(self, td:TraceData, g=None): (g or self.g).node(td.idx, td.name)
def _formEdge(self, td1:TraceData, td2:TraceData, g=None, label:str=None):
if td1 is None or td2 is None: return
(g or self.g).edge(td1.idx, td2.idx, label=f" {label or td2.inS or td1.outS}")
def _run(self, c, inp, cliName=None):
"""Takes in cli tool and input, runs it, and get trace data and output"""
if isinstance(c, op): c.op_solidify()
out = c(inp) | deref() # why not "inp | c"? Cause we want to serve plain old functions inside apply too
return TraceData(c, f"{self.f(inp)}", f"{self.f(out)}", cliName), out
def __repr__(self):
try: from IPython.core import display as dis
except: raise RuntimeError("You have to install IPython/execute in a notebook first!")
if not self._reprRO():
td = TraceData(None, self.lastTd.outS, None, "\\<end\\>")
self._formNode(td); self._formEdge(self.lastTd, td)
try: svg = self.g._repr_svg_()
except:
try: svg = self.g._repr_image_svg_xml()
except: pass
dis.display(dis.SVG(k1lib.scaleSvg(svg))); return "<trace object>"
def __ror__(self, it):
"""Alternative way to specify input."""
#if self.inp != emptyInputSentinel: raise TraceException("Input to trace has already been set, but it's being set again (possibly due to `.all()`). Check last trace using ``trace.last``")
if self.inp != emptyInputSentinel: self.firstTime = False
self.inp = it | deref(); return self
def __iter__(self): return iter(self.inp)
@k1lib.patch(_trace)
def __or__(self, c):
if self.inp is emptyInputSentinel:
# do this to separate out potentially a serial right after this, so that trace() is actually in control, and not merge with the outside serial
if isinstance(c, serial): return serial(self, c)
return super(_trace, self).__or__(c)
if not isinstance(c, BaseCli): return NotImplemented
if self._reprRO.value: raise RuntimeError("Can't pipe this trace() into another cli tool, as it is used! Make a new trace instead.")
td, out = self._run(c, self.inp) # runs through the entire thing, then decides whether to go into the details or not
if not self.firstTime: return out
if not hasattr(self, "startTd"): self.startTd = td; startTdSet = True
else: startTdSet = False # whether startTd is set lately
def bypass(): # default connection case, don't go into clis and explore
self._formNode(td); self._formEdge(self.lastTd, td); self.lastTd = td
if self.depth and isinstance(c, serial):
with self.g.subgraph(name=f"cluster_{clusterAuto()}") as subG:
subG.attr(label="|, serial")
t = _trace(self.inp, self.f, subG, self.depth.enter())
for _c in c.clis: t = t | _c
self._formEdge(self.lastTd, t.startTd); self.lastTd = t.lastTd
if startTdSet: self.startTd = t.startTd
elif self.depth and isMTM(c):
if isinstance(c, (apply, applyMp)): _c = c.f
elif isinstance(c, manyToMany): _c = c.cli
try: singleInp = self.inp | item()
except StopIteration: bypass() # no items at all, can't trace!
else:
with self.g.subgraph(name=f"cluster_{clusterAuto()}") as subG:
subG.attr(label=".all(), manyToMany, apply")
t = _trace(self.inp | item(), self.f, subG, self.depth.enter())
o1Td = TraceData(None, self.f(self.inp), None, "*"); self._formNode(o1Td, g=subG); t = t | _c
o2Td = TraceData(None, self.f(t.inp), None, "*"); self._formNode(o2Td, g=subG)
t._formEdge(o1Td, t.startTd); t._formEdge(t.lastTd, o2Td); o2Td.outS = self.f(out)
self._formEdge(self.lastTd, o1Td); self.lastTd = o2Td
if startTdSet: self.startTd = o1Td
elif self.depth and isinstance(c, oneToMany):
with self.g.subgraph(name=f"cluster_{clusterAuto()}") as subG:
subG.attr(label="&, oneToMany")
o1Td = TraceData(None, self.f(self.inp), None, "*"); self._formNode(o1Td, g=subG)
o2Td = TraceData(None, None, None, "*"); self._formNode(o2Td, g=subG)
for _c in c.clis:
t = _trace(self.inp, self.f, subG, self.depth.enter()) | _c
self._formEdge(o1Td, t.startTd); self._formEdge(t.lastTd, o2Td)
self._formEdge(self.lastTd, o1Td); self.lastTd = o2Td; o2Td.outS = self.f(out)
if startTdSet: self.startTd = o1Td
elif self.depth and isinstance(c, mtmS):
with self.g.subgraph(name=f"cluster_{clusterAuto()}") as subG:
subG.attr(label="+, mtmS")
o1Td = TraceData(None, self.f(self.inp), None, "*"); self._formNode(o1Td, g=subG)
o2Td = TraceData(None, None, self.f(out), "*"); self._formNode(o2Td, g=subG)
for _c, _it in zip(c.clis, self.inp):
t = _trace(_it, self.f, subG, self.depth.enter()) | _c
self._formEdge(o1Td, t.startTd); self._formEdge(t.lastTd, o2Td)
self._formEdge(self.lastTd, o1Td); self.lastTd = o2Td; o2Td.outS = self.f(out)
if startTdSet: self.startTd = o1Td
elif self.depth and isinstance(c, apply) and isinstance(c.f, BaseCli) and c.column is not None:
try: singleInp = self.inp | item()
except StopIteration: bypass()
else:
with self.g.subgraph(name=f"cluster_{clusterAuto()}") as subG:
subG.attr(label=f"apply (column: {c.column})")
singleInp = singleInp[c.column]
t = _trace(singleInp, self.f, subG, self.depth.enter()) | c.f
o1Td = TraceData(None, self.f(self.inp), None, "*"); self._formNode(o1Td, g=subG)
o2Td = TraceData(None, self.f(t.inp), None, "*"); self._formNode(o2Td, g=subG)
t._formEdge(o1Td, t.startTd); t._formEdge(t.lastTd, o2Td); o2Td.outS = self.f(out)
self._formEdge(self.lastTd, o1Td); self.lastTd = o2Td
if startTdSet: self.startTd = o1Td
elif self.depth and isinstance(c, filt) and isinstance(c.predicate, BaseCli):
try: singleInp = self.inp | item()
except StopIteration: bypass()
else:
with self.g.subgraph(name=f"cluster_{clusterAuto()}") as subG:
subG.attr(label=f"filt (column: {c.column})")
self._formNode(td, g=subG); self._formEdge(self.lastTd, td) # main filt node
if c.column is not None: singleInp = singleInp[c.column]
t = _trace(singleInp, self.f, subG, self.depth.enter()) | c.predicate
tdEndFilt = td; #tdEndFilt = TraceData(None, None, None, "*"); self._formNode(tdEndFilt, g=subG) # can switch between styles
self._formEdge(td, t.startTd); self._formEdge(t.lastTd, tdEndFilt, label=f"{t.lastTd.outS}")
self.lastTd = td
if startTdSet: self.startTd = td
else: bypass()
self.inp = out; return self
class trace(_trace):
last = None
"""Last instantiated trace object. Access this to view the previous (possibly nested) trace."""
def __init__(self, f=shape(), maxDepth=float("inf")):
"""Traces out how the data stream is transformed through complex cli tools.
Example::
# returns [1, 4, 9, 16], normal command
range(1, 5) | apply(lambda x: x**2) | deref()
# traced command, will display how the shapes evolve through cli tools
range(1, 5) | trace() | apply(lambda x: x**2) | deref()
There're a lot more instructions and code examples over the tutorial section. Go check it out!
:param f: function to display the data stream. Defaulted to :class:`~k1lib.cli.utils.shape`,
and to :class:`~k1lib.cli.utils.iden` if it is None."""
f = f or iden()
g = lambda x: f"{f(x)}".split("\n")[:2] | apply(lambda s: f"{s[:50]}..." if len(s) > 50 else s) | join("\n")
super().__init__(emptyInputSentinel, g, depth=k1lib.MaxDepth(maxDepth))
trace.last = self
```
#### File: k1lib/cli/utils.py
```python
from k1lib.cli.init import patchDefaultDelim, BaseCli, Table, T
import k1lib.cli as cli, numbers, torch, numpy as np
from typing import overload, Iterator, Any, List, Set, Union
import k1lib
__all__ = ["size", "shape", "item", "identity", "iden",
"toStr", "join", "toNumpy", "toTensor",
"toList", "wrapList", "toSet", "toIter", "toRange", "toType",
"equals", "reverse", "ignore",
"toSum", "toAvg", "toMean", "toMax", "toMin", "toPIL",
"toBin", "toIdx",
"lengths", "headerIdx", "deref", "bindec"]
settings = k1lib.settings.cli
def exploreSize(it):
"""Returns first element and length of array"""
if isinstance(it, str): raise TypeError("Just here to terminate shape()")
sentinel = object(); it = iter(it)
o = next(it, sentinel); count = 1
if o is sentinel: return None, 0
try:
while True: next(it); count += 1
except StopIteration: pass
return o, count
class size(BaseCli):
def __init__(self, idx=None):
"""Returns number of rows and columns in the input.
Example::
# returns (3, 2)
[[2, 3], [4, 5, 6], [3]] | size()
# returns 3
[[2, 3], [4, 5, 6], [3]] | size(0)
# returns 2
[[2, 3], [4, 5, 6], [3]] | size(1)
# returns (2, 0)
[[], [2, 3]] | size()
# returns (3,)
[2, 3, 5] | size()
# returns 3
[2, 3, 5] | size(0)
# returns (3, 2, 2)
[[[2, 1], [0, 6, 7]], 3, 5] | size()
# returns (1,) and not (1, 3)
["abc"] | size()
# returns (1, 2, 3)
[torch.randn(2, 3)] | size()
# returns (2, 3, 5)
size()(np.random.randn(2, 3, 5))
There's also :class:`lengths`, which is sort of a simplified/faster version of
this, but only use it if you are sure that ``len(it)`` can be called.
If encounter PyTorch tensors or Numpy arrays, then this will just get the shape
instead of actually looping over them.
:param idx: if idx is None return (rows, columns). If 0 or 1, then rows or
columns"""
super().__init__(); self.idx = idx
def __ror__(self, it:Iterator[str]):
if self.idx is None:
answer = []
try:
while True:
if isinstance(it, (torch.Tensor, np.ndarray)):
return tuple(answer + list(it.shape))
it, s = exploreSize(it); answer.append(s)
except TypeError: pass
return tuple(answer)
else:
return exploreSize(it | cli.item(self.idx))[1]
shape = size
noFill = object()
class item(BaseCli):
def __init__(self, amt:int=1, fill=noFill):
"""Returns the first row.
Example::
# returns 0
iter(range(5)) | item()
# returns torch.Size([5])
torch.randn(3,4,5) | item(2) | shape()
# returns 3
[] | item(fill=3)
:param amt: how many times do you want to call item() back to back?
:param fill: if iterator length is 0, return this"""
self.amt = amt; self.fill = fill
self.fillP = [fill] if fill != noFill else [] # preprocessed, to be faster
def __ror__(self, it:Iterator[str]):
if self.amt != 1:
return it | cli.serial(*(item(fill=self.fill) for _ in range(self.amt)))
return next(iter(it), *self.fillP)
class identity(BaseCli):
"""Yields whatever the input is. Useful for multiple streams.
Example::
# returns range(5)
range(5) | identity()"""
def __ror__(self, it:Iterator[Any]): return it
iden = identity
class toStr(BaseCli):
def __init__(self, column:int=None):
"""Converts every line to a string.
Example::
# returns ['2', 'a']
[2, "a"] | toStr() | deref()
# returns [[2, 'a'], [3, '5']]
[[2, "a"], [3, 5]] | toStr(1) | deref()"""
super().__init__(); self.column = column
def __ror__(self, it:Iterator[str]):
c = self.column
if c is None:
for line in it: yield str(line)
else:
for row in it:
yield [e if i != c else str(e) for i, e in enumerate(row)]
class join(BaseCli):
def __init__(self, delim:str=None):
r"""Merges all strings into 1, with `delim` in the middle. Basically
:meth:`str.join`. Example::
# returns '2\na'
[2, "a"] | join("\n")"""
super().__init__(); self.delim = patchDefaultDelim(delim)
def __ror__(self, it:Iterator[str]):
return self.delim.join(it | toStr())
class toNumpy(BaseCli):
"""Converts generator to numpy array. Essentially ``np.array(list(it))``"""
def __ror__(self, it:Iterator[float]) -> np.array:
return np.array(list(it))
class toTensor(BaseCli):
def __init__(self, dtype=torch.float32):
"""Converts generator to :class:`torch.Tensor`. Essentially
``torch.tensor(list(it))``.
Also checks if the input is a PIL Image. If so, turns it into a :class:`torch.Tensor`
and returns it."""
self.dtype = dtype
def __ror__(self, it:Iterator[float]) -> torch.Tensor:
try:
import PIL; pic=it
if isinstance(pic, PIL.Image.Image): # stolen from torchvision ToTensor transform
mode_to_nptype = {'I': np.int32, 'I;16': np.int16, 'F': np.float32}
img = torch.from_numpy(np.array(pic, mode_to_nptype.get(pic.mode, np.uint8), copy=True))
if pic.mode == '1': img = 255 * img
img = img.view(pic.size[1], pic.size[0], len(pic.getbands()))
return img.permute((2, 0, 1)).contiguous().to(self.dtype) # put it from HWC to CHW format
except: pass
return torch.tensor(list(it)).to(self.dtype)
class toList(BaseCli):
"""Converts generator to list. :class:`list` would do the
same, but this is just to maintain the style"""
def __ror__(self, it:Iterator[Any]) -> List[Any]:
return list(it)
class wrapList(BaseCli):
"""Wraps inputs inside a list. There's a more advanced cli tool
built from this, which is :meth:`~k1lib.cli.structural.unsqueeze`."""
def __ror__(self, it:T) -> List[T]:
return [it]
class toSet(BaseCli):
"""Converts generator to set. :class:`set` would do the
same, but this is just to maintain the style"""
def __ror__(self, it:Iterator[T]) -> Set[T]:
return set(it)
class toIter(BaseCli):
"""Converts object to iterator. `iter()` would do the
same, but this is just to maintain the style"""
def __ror__(self, it:List[T]) -> Iterator[T]:
return iter(it)
class toRange(BaseCli):
"""Returns iter(range(len(it))), effectively"""
def __ror__(self, it:Iterator[Any]) -> Iterator[int]:
for i, _ in enumerate(it): yield i
class toType(BaseCli):
"""Converts object to its type.
Example::
# returns [int, float, str, torch.Tensor]
[2, 3.5, "ah", torch.randn(2, 3)] | toType() | deref()"""
def __ror__(self, it:Iterator[T]) -> Iterator[type]:
for e in it: yield type(e)
class _EarlyExp(Exception): pass
class equals:
"""Checks if all incoming columns/streams are identical"""
def __ror__(self, streams:Iterator[Iterator[str]]):
streams = list(streams)
for row in zip(*streams):
sampleElem = row[0]
try:
for elem in row:
if sampleElem != elem: yield False; raise _EarlyExp()
yield True
except _EarlyExp: pass
class reverse(BaseCli):
"""Reverses incoming list.
Example::
# returns [3, 5, 2]
[2, 5, 3] | reverse() | deref()"""
def __ror__(self, it:Iterator[str]) -> List[str]:
return reversed(list(it))
class ignore(BaseCli):
r"""Just loops through everything, ignoring the output.
Example::
# will just return an iterator, and not print anything
[2, 3] | apply(lambda x: print(x))
# will prints "2\n3"
[2, 3] | apply(lambda x: print(x)) | ignore()"""
def __ror__(self, it:Iterator[Any]):
for _ in it: pass
class toSum(BaseCli):
"""Calculates the sum of list of numbers. Can pipe in :class:`torch.Tensor`.
Example::
# returns 45
range(10) | toSum()"""
def __ror__(self, it:Iterator[float]):
if isinstance(it, torch.Tensor): return it.sum()
return sum(it)
class toAvg(BaseCli):
"""Calculates average of list of numbers. Can pipe in :class:`torch.Tensor`.
Example::
# returns 4.5
range(10) | toAvg()
# returns nan
[] | toAvg()"""
def __ror__(self, it:Iterator[float]):
if isinstance(it, torch.Tensor): return it.mean()
s = 0; i = -1
for i, v in enumerate(it): s += v
i += 1
if not k1lib.settings.cli.strict and i == 0: return float("nan")
return s / i
toMean = toAvg
class toMax(BaseCli):
"""Calculates the max of a bunch of numbers. Can pipe in :class:`torch.Tensor`.
Example::
# returns 6
[2, 5, 6, 1, 2] | toMax()"""
def __ror__(self, it:Iterator[float]) -> float:
if isinstance(it, torch.Tensor): return it.max()
return max(it)
class toMin(BaseCli):
"""Calculates the min of a bunch of numbers. Can pipe in :class:`torch.Tensor`.
Example::
# returns 1
[2, 5, 6, 1, 2] | toMin()"""
def __ror__(self, it:Iterator[float]) -> float:
if isinstance(it, torch.Tensor): return it.min()
return min(it)
class toPIL(BaseCli):
def __init__(self):
"""Converts a path to a PIL image.
Example::
ls(".") | toPIL().all() | item() # get first image"""
import PIL; self.PIL = PIL
def __ror__(self, path) -> "PIL.Image.Image":
return self.PIL.Image.open(path)
class toBin(BaseCli):
"""Converts integer to binary string.
Example::
# returns "101"
5 | toBin()"""
def __ror__(self, it):
return bin(int(it))[2:]
class toIdx(BaseCli):
def __init__(self, chars:str):
"""Get index of characters according to a reference.
Example::
# returns [1, 4, 4, 8]
"#&&*" | toIdx("!#$%&'()*+") | deref()"""
self.chars = {v:k for k, v in enumerate(chars)}
def __ror__(self, it):
chars = self.chars
for e in it: yield chars[e]
class lengths(BaseCli):
"""Returns the lengths of each element.
Example::
[range(5), range(10)] | lengths() == [5, 10]
This is a simpler (and faster!) version of :class:`shape`. It assumes each element
can be called with ``len(x)``, while :class:`shape` iterates through every elements
to get the length, and thus is much slower."""
def __ror__(self, it:Iterator[List[Any]]) -> Iterator[int]:
for e in it: yield len(e)
def headerIdx():
"""Cuts out first line, put an index column next to it, and prints it
out. Useful when you want to know what your column's index is to cut it
out. Also sets the context variable "header", in case you need it later.
Example::
# returns [[0, 'a'], [1, 'b'], [2, 'c']]
["abc"] | headerIdx() | deref()"""
return item() | cli.wrapList() | cli.transpose() | cli.insertIdColumn(True)
settings.atomic.add("deref", (numbers.Number, np.number, str, dict, bool, bytes, torch.nn.Module), "used by deref")
Tensor = torch.Tensor; atomic = settings.atomic
class inv_dereference(BaseCli):
def __init__(self, ignoreTensors=False):
"""Kinda the inverse to :class:`dereference`"""
super().__init__(); self.ignoreTensors = ignoreTensors
def __ror__(self, it:Iterator[Any]) -> List[Any]:
for e in it:
if e is None or isinstance(e, atomic.deref): yield e
elif isinstance(e, Tensor):
if not self.ignoreTensors and len(e.shape) == 0: yield e.item()
else: yield e
else:
try: yield e | self
except: yield e
class deref(BaseCli):
def __init__(self, maxDepth=float("inf"), ignoreTensors=True):
"""Recursively converts any iterator into a list. Only :class:`str`,
:class:`numbers.Number` and :class:`~torch.nn.Module` are not converted. Example::
# returns something like "<range_iterator at 0x7fa8c52ca870>"
iter(range(5))
# returns [0, 1, 2, 3, 4]
iter(range(5)) | deref()
# returns [2, 3], yieldSentinel stops things early
[2, 3, yieldSentinel, 6] | deref()
You can also specify a ``maxDepth``::
# returns something like "<list_iterator at 0x7f810cf0fdc0>"
iter([range(3)]) | deref(0)
# returns [range(3)]
iter([range(3)]) | deref(1)
# returns [[0, 1, 2]]
iter([range(3)]) | deref(2)
There are a few classes/types that are considered atomic, and :class:`deref`
will never try to iterate over it. If you wish to change it, do something like::
settings.cli.atomic.deref = (int, float, ...)
.. warning::
Can work well with PyTorch Tensors, but not Numpy arrays as they screw things up
with the __ror__ operator, so do torch.from_numpy(...) first. Don't worry about
unnecessary copying, as numpy and torch both utilizes the buffer protocol.
:param maxDepth: maximum depth to dereference. Starts at 0 for not doing anything
at all
:param ignoreTensors: if True, then don't loop over :class:`torch.Tensor`
internals"""
super().__init__(); self.ignoreTensors = ignoreTensors
self.maxDepth = maxDepth; self.depth = 0
def __ror__(self, it:Iterator[T]) -> List[T]:
ignoreTensors = self.ignoreTensors
if self.depth >= self.maxDepth: return it
elif isinstance(it, atomic.deref): return it
elif isinstance(it, Tensor):
if ignoreTensors: return it
if len(it.shape) == 0: return it.item()
try: iter(it)
except: return it
self.depth += 1; answer = []
for e in it:
if e is cli.yieldSentinel: return answer
answer.append(self.__ror__(e))
self.depth -= 1; return answer
def __invert__(self) -> BaseCli:
"""Returns a :class:`~k1lib.cli.init.BaseCli` that makes
everything an iterator. Not entirely sure when this comes in handy, but it's
there."""
return inv_dereference(self.ignoreTensors)
class bindec(BaseCli):
def __init__(self, cats:List[Any], f=None):
"""Binary decodes the input.
Example::
# returns ['a', 'c']
5 | bindec("abcdef")
# returns 'a,c'
5 | bindec("abcdef", join(","))
:param cats: categories
        :param f: transformation function of the selected elements. Defaults to :class:`toList`, but others like :class:`join` are useful too"""
self.cats = cats; self.f = f or toList()
def __ror__(self, it):
it = bin(int(it))[2:][::-1]
return (e for i, e in zip(it, self.cats) if i == '1') | self.f
```
#### File: k1lib/k1lib/fmt.py
```python
import k1lib, math; from k1lib import cli
from typing import Dict, Iterator, Tuple
__all__ = ["size", "sizeOf", "comp", "compRate", "time", "item", "txt"]
metricPrefixes = {-8:"y",-7:"z",-6:"a",-5:"f",-4:"p",-3:"n",-2:"u",-1:"m",0:"",1:"k",2:"M",3:"G",4:"T",5:"P",6:"E",7:"Z",8:"Y"}
#metricPrefixes = ["", "k", "M", "G", "T", "P", "E", "Z", "Y"]
def _formatScale(x, units:Dict[int, str]):
for i, unit in units.items():
upperBound = 1000 * 1000**i
if abs(x) < upperBound:
return f"{round(1e3*x/upperBound, 2)} {unit}"
return (f"{round(1e3*x/upperBound, 2)} {unit}").strip()
sizes = {i: f"{p}B" for i, p in metricPrefixes.items() if i >= 0}; sizes[0] = "bytes"
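# Worked example (added note) of the helper above: _formatScale(1.2e7, sizes) stops at
# i=2 (upperBound = 1000 * 1000**2 = 1e9) and returns round(1e3*1.2e7/1e9, 2) -> 12.0,
# i.e. "12.0 MB", which matches the size() docstring below.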
def size(_bytes=0):
"""Formats disk size.
Example::
# returns "50.0 bytes"
fmt.size(50)
# returns "12.0 MB"
fmt.size(1.2e7)
"""
return _formatScale(_bytes, sizes)
def sizeOf(l:Iterator[float]) -> Tuple[str, Iterator[float]]:
"""Figures out appropriate scale, scales back the Iterator, and return both.
Example::
x = torch.abs(torch.randn(2)) * 1e4 + 1e5
label, t = fmt.sizeOf(x) # label is "kB"
(t | toTensor()).min() # min value should be close to 100"""
l = list(l | cli.apply(lambda n: abs(n)))
v = l | cli.toMax()
v = math.log10(v) if v > 0 else -math.log10(-v)
idx = math.floor(v/3)
coef = 1.0/1000**idx
return sizes[idx], l | cli.apply(lambda x: x * coef) | cli.deref()
computations = {i: f"{p}FLOPs" for i, p in metricPrefixes.items() if i >= 0}
def comp(flop=0):
"""Formats computation amount.
Example::
# returns "50.0 FLOPs"
fmt.computation(50)
# returns "50.0 MFLOPs"
fmt.computation(5e7)
"""
return _formatScale(flop, computations)
computationRates = {i: f"{p}FLOPS" for i, p in metricPrefixes.items() if i >= 0}
def compRate(flops=0):
"""Formats computation rate.
Example::
# returns "50.0 FLOPS"
fmt.computationRate(50)
# returns "50.0 MFLOPS"
fmt.computationRate(5e7)
"""
return _formatScale(flops, computationRates)
times = {i:f"{p}s" for i, p in metricPrefixes.items() if i <= 0}
def time(seconds=0):
"""Formats small times.
Example::
fmt.time(50) # returns "50.0 s"
fmt.time(4000) # returns "4000.0 s"
fmt.time(0.02) # returns "20.0 ms"
fmt.time(1e-5) # returns "10.0 us"
"""
return _formatScale(seconds, times)
items = {0: "", 1: "k", 2: "M", 3: "B", 4: "T"}
def item(n=0):
"""Formats generic item.
Example::
# returns "50.0"
fmt.item(50)
# returns "500.0 k"
fmt.item(5e5)
"""
return _formatScale(n, items).strip()
_esc = '\033['
_end = f'{_esc}0m'
class txt:
"""Text formatting.
Example::
# will print out red text
print(fmt.txt.red("some text"))"""
@staticmethod
def darkcyan(s:str): return f"{_esc}36m{s}{_end}"
@staticmethod
def red(s:str): return f"{_esc}91m{s}{_end}"
@staticmethod
def green(s:str): return f"{_esc}92m{s}{_end}"
@staticmethod
def yellow(s:str): return f"{_esc}93m{s}{_end}"
@staticmethod
def blue(s:str): return f"{_esc}94m{s}{_end}"
@staticmethod
def purple(s:str): return f"{_esc}95m{s}{_end}"
@staticmethod
def cyan(s:str): return f"{_esc}96m{s}{_end}"
@staticmethod
def bold(s:str): return f"{_esc}1m{s}{_end}"
@staticmethod
def grey(s:str): return f"{_esc}38;2;150;150;150m{s}{_end}"
@staticmethod
def darkgrey(s:str): return f"{_esc}38;2;100;100;100m{s}{_end}"
@staticmethod
def underline(s:str): return f"{_esc}4m{s}{_end}"
@staticmethod
def identity(s:str): return f"{s}"
```
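A small, hypothetical usage sketch (not part of the original file) for the formatters above; the expected strings follow from the `_formatScale` logic and the docstrings, and it assumes `k1lib.fmt` imports as shown:
```python
from k1lib import fmt

print(fmt.size(3.2e6))    # expected "3.2 MB"
print(fmt.comp(5e7))      # expected "50.0 MFLOPs"
print(fmt.time(2.5e-4))   # expected "250.0 us"
print(fmt.item(1.5e6))    # expected "1.5 M"
```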
#### File: k1lib/k1lib/kdata.py
```python
import torch, numpy as np; from k1lib.cli import *
from typing import Callable, Union, Iterator
import matplotlib.pyplot as plt
__all__ = ["FunctionData", "tfImg", "tfFloat", "analyzeFloat"]
class FunctionData:
@staticmethod
def main(f:Callable, bs:int=32, epochs:int=300):
"""Constructs 2 dataloaders, train and valid, for a particular function.
Example::
trainDl, validDl = kdata.FunctionData.main(torch.exp, 32, 300)
for epoch in range(3):
for xb, yb in trainDl:
model(xb)"""
x = torch.linspace(-5, 5, 1000)
ds = [x, f(x)] | transpose() | randomize(None)
        return ds | splitList(8, 2) | (repeatFrom() | randomize() | batched(bs)\
| (transpose() | toTensor()).all()).all()\
| (stagger(epochs*.8) + stagger(epochs*.2)) | toList()
@staticmethod
def exp(bs, epochs): return FunctionData.main(torch.exp, bs, epochs)
@staticmethod
def log(bs, epochs): return FunctionData.main(torch.log, bs, epochs)
@staticmethod
def inverse(bs, epochs): return FunctionData.main(lambda x: 1/x, bs, epochs)
@staticmethod
def linear(bs, epochs): return FunctionData.main(lambda x: 2*x+8, bs, epochs)
@staticmethod
def sin(bs, epochs): return FunctionData.main(torch.sin, bs, epochs)
aS = applyS
def tfImg(size:int=None, flip=True) -> BaseCli:
"""Get typical image transforms.
Example::
"path/img.png" | toPIL() | kdata.tfImg(224)"""
import torchvision.transforms as tf
op = identity()
if size: op |= aS(tf.Resize(size)) | aS(tf.CenterCrop(size))
op |= aS(tf.ColorJitter(0.2, 0.2, 0.2)) | aS(tf.RandomAffine(5))
if flip: op |= aS(tf.RandomHorizontalFlip())
return op
def tensorGuard(t, force:bool):
if isinstance(t, np.ndarray): t = torch.tensor(t)
if not isinstance(t, torch.Tensor): t = t | toFloat(force=force) | deref() | toTensor()
return t
def tfFloat(t:Union[Iterator[float], torch.Tensor], force=True) -> BaseCli:
"""Suggested float input transformation function.
Example::
# before training
data = torch.randn(10, 20) * 100 + 20 # weird data with weird hist distribution
f = kdata.tfFloat(data)
# while training
newData = torch.randn(10, 20) * 105 + 15
newData | f # nicely formatted Tensor, going uniformly from -1 to 1
:param force: if True, forces weird values to 0.0, else filters out all weird rows."""
t = tensorGuard(t, force); bounds = t.histBounds()
return applyS(lambda t: tensorGuard(t, force).histScaled(0, bounds)*2 - 1)
@applyS
def analyzeFloat(l:Iterator[float]):
"""Preliminary input float stream analysis.
Example::
torch.linspace(-2, 2, 50) | kdata.analyzeFloat"""
l = l | deref(ignoreTensors=False); lf = l | toFloat() | toTensor()
nl = l | shape(0); nlf = len(lf)
print(f"Percent of useful data: {nlf}/{nl} ({round(100*nlf/nl)}%)")
print(f"- Mean: {lf.mean()}"); print(f"- Std: {lf.std()}")
print(f"- Min: {lf.min()}"); print(f"- Max: {lf.max()}")
plt.hist(lf.numpy(), bins=30); plt.title("Values histogram"); plt.ylabel("Frequency"); plt.show()
plt.hist(lf.histScaled().numpy(), bins=30); plt.ylabel("Frequency"); plt.title("Scaled histogram")
```
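A hypothetical usage sketch for the helpers above (not in the original file); it follows the docstring examples and assumes `k1lib.kdata` is importable:
```python
import torch
from k1lib import kdata

# function-fitting dataloaders, mirroring the FunctionData docstring
trainDl, validDl = kdata.FunctionData.main(torch.sin, bs=32, epochs=10)

# squash weirdly-distributed floats into roughly [-1, 1]
data = torch.randn(10, 20) * 100 + 20
f = kdata.tfFloat(data)                        # fit the histogram bounds once
scaled = (torch.randn(10, 20) * 105 + 15) | f  # later data reuses the same bounds
```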
#### File: k1lib/k1lib/_learner.py
```python
import os, k1lib, torch.nn as nn, torch, dill, traceback
from k1lib.callbacks import Cbs
from time import time as _time
__all__ = ["CancelRunException", "CancelEpochException", "CancelBatchException",
"Learner"]
class CancelRunException(Exception):
"""Used in core training loop, to skip the run entirely"""
pass
class CancelEpochException(Exception):
"""Used in core training loop, to skip to next epoch"""
pass
class CancelBatchException(Exception):
"""Used in core training loop, to skip to next batch"""
pass
class Learner:
def __init__(self):
self._model = None; self._data = None; self._opt = None
self._cbs = None; self.fileName = None
self.css = "*"; self.exceptionRaised = None # slowly pops
self.cbs = k1lib.Callbacks().withBasics().withQOL().withAdvanced()
@property
def model(self):
"""Set this to change the model to run"""
return self._model
@model.setter
def model(self, model): self._model = model
@property
def data(self):
"""Set this to change the data (list of 2 dataloader) to run against."""
return self._data
@data.setter
def data(self, data): self._data = data
@property
def opt(self):
"""Set this to change the optimizer. If you're making your own
optimizers, beware to follow the PyTorch's style guide as there are callbacks
that modifies optimizer internals while training like
:class:`k1lib.schedule.ParamScheduler`."""
return self._opt
@opt.setter
def opt(self, opt): self._opt = opt
@property
def cbs(self):
"""The :class:`~k1lib.callbacks.callbacks.Callbacks` object. Initialized to
include all the common callbacks. You can set a new one if you want to."""
return self._cbs
@cbs.setter
def cbs(self, cbs): cbs.l = self; self._cbs = cbs
@property
def css(self) -> str:
"""The css selector string. Set this to select other parts of the network.
After setting, you can access the selector like this: :code:`l.selector`
See also: :class:`~k1lib.selector.ModuleSelector`"""
return self._css
@css.setter
def css(self, css:str):
self._css = css
if self.model != None: self.selector = k1lib.selector.select(self.model, self.css)
@property
def lossF(self):
"""Set this to specify a loss function."""
raise NotImplementedError("lossF actually doesn't really exist. Used to exist as a core part of Learner, but then has been converted to Cbs.LossF")
@lossF.setter
def lossF(self, lossF):
if hasattr(self.cbs, "LossF"): self.cbs.LossF.lossF = lossF
else: self.cbs.add(Cbs.LossF(lossF))
def __getattr__(self, attr):
if attr == "cbs": raise AttributeError()
return getattr(self.cbs, attr)
def __getstate__(self):
answer = dict(self.__dict__); del answer["selector"]; return answer
def __setstate__(self, state):
self.__dict__.update(state)
self.css = self.css; self.cbs.l = self
def evaluate(self): pass # supposed to be overriden, to provide functionality here
@property
def _warnings(self):
warnings = "Warning: no model yet. Set using `l.model = ...`\n" if self.model == None else ""
lossClasses = tuple([*k1lib.Callback.lossCls])
lossFnCbs = [True for cb in self.cbs if isinstance(cb, lossClasses)]
warnings += "Warning: no loss function callback detected (or you set `lossF` already but then erased all callbacks)! Set using `l.lossF = ...` or `l.cbs.add(Cbs.LossF(...))`\n" if len(lossFnCbs) == 0 else ""
warnings += "Warning: no data yet. Set using `l.data = ...`\n" if self.data == None else ""
warnings += "Warning: no optimizer yet. Set using `l.opt = ...`\n" if self.opt == None else ""
if warnings != "": warnings += "\n\n"
return warnings
def __dir__(self):
answer = list(super().__dir__())
answer.extend(self.cbs.cbsDict.keys()); return answer
def __repr__(self):
return f"""{self._warnings}l.model:\n{k1lib.tab(k1lib.limitLines(str(self.model)))}
l.opt:\n{k1lib.tab(k1lib.limitLines(str(self.opt)))}
l.cbs:\n{k1lib.tab(k1lib.limitLines(self.cbs.__repr__()))}
Use...
- l.model = ...: to specify a nn.Module object
- l.data = ...: to specify data object
- l.opt = ...: to specify an optimizer
- l.lossF = ...: to specify a loss function
- l.css = ...: to select modules using CSS. "#root" for root model
- l.cbs = ...: to use a custom `Callbacks` object
- l.selector: to get the modules selected by `l.css`
- l.run(epochs): to run the network
- l.Loss: to get a specific callback, this case "Loss"\n\n"""
@k1lib.patch(Learner)
def save(self, fileName:str=None):
"""Saves this :class:`Learner` to file. See also: :meth:`load`
:param fileName: if empty, then will save as "learner-0.pth", with 0
changeable to avoid conflicts. If resave this exact :class:`Learner`, then
use the old name generated before"""
self.fileName = fileName or self.fileName
if self.fileName == None:
files = [file for file in os.listdir() if file.startswith("learner") and file.endswith(".pth")]
files = set([int(file.split(".pth")[0].split("learner-")[1]) for file in files])
count = 0;
while count in files: count += 1
self.fileName = f"l-{count}.pth"
torch.save(self, self.fileName, pickle_module=dill)
print(f"Saved to {self.fileName}")
@k1lib.patch(Learner, static=True)
def load(fileName:str=None):
"""Loads a :class:`Learner` from a file. See also: :meth:`save`
:param fileName: if empty, then will prompt for file name"""
f = fileName or input("Enter learner file name to load:")
print(f"Loaded from {f}"); return torch.load(f, pickle_module=dill)
@k1lib.patch(Learner)
def _run1Batch(self):
self.cbs("startBatch")
try:
self.cbs("startPass", "inPass", "endPass")
self.cbs("startLoss", "inLoss", "endLoss")
if not self.cbs("startBackward"): self.lossG.backward()
if not self.cbs("startStep"): self.opt.step()
if not self.cbs("startZeroGrad"): self.opt.zero_grad(set_to_none=True)
except k1lib.CancelBatchException as ex:
self.cbs("cancelBatch"); print(f"Batch cancelled: {ex}.")
except (k1lib.CancelEpochException, k1lib.CancelRunException) as ex:
# makes sure cancelBatch and endBatch gets called, for potential
# cleanups, then reraise the exception
self.cbs("cancelBatch", "endBatch"); raise ex
self.cbs("endBatch")
class DI: # data interceptor, just to record data loading times
def __init__(self, l:Learner, data): self.l = l; self.data = data
def __len__(self): return len(self.data)
def __iter__(self):
try:
data = iter(self.data); timings = self.l.cbs.timings
while True:
beginTime = _time(); d = next(data)
timings.loadData += _time() - beginTime; yield d
except StopIteration: pass
@k1lib.patch(Learner)
def _run1Epoch(self):
self.cbs("startEpoch")
try:
train, valid = self.data; train = DI(self, train); valid = DI(self, valid)
try: self.batches = len(train) + len(valid)
except: pass
self.model.train()
for self.batch, (self.xb, self.yb, *self.metab) in enumerate(train):
self._run1Batch()
trainLen = self.batch + 1
if not self.cbs("startValidBatches"):
self.model.eval();
for self.batch, (self.xb, self.yb, *self.metab) in enumerate(valid):
self.batch += trainLen; self._run1Batch()
if self.batches is None: self.batches = self.batch + 1
except k1lib.CancelEpochException as ex:
self.cbs("cancelEpoch"); print(f"Epoch cancelled: {ex}.")
except k1lib.CancelRunException as ex:
self.cbs("cancelEpoch", "endEpoch"); raise ex
self.cbs("endEpoch")
@k1lib.patch(Learner)
def run(self, epochs:int, batches:int=None):
"""Main run function.
:param epochs: number of epochs to run. 1 epoch is the length of the dataset
:param batches: if set, then cancels the epoch after reaching the specified batch"""
if self._warnings != "":
if not input(f"""You still have these warnings:\n\n{self._warnings}
Do you want to continue? (y/n) """).lower().startswith("y"):
print("Run ended"); return
self.epochs = epochs; self.batches = None
self.css = self.css # update module selector
with self.cbs.context():
if batches != None: self.cbs.add(Cbs.BatchLimit(batches))
self.cbs("startRun")
try:
for self.epoch in range(epochs): self._run1Epoch()
except k1lib.CancelRunException as ex:
self.cbs("cancelRun"); print(f"Run cancelled: {ex}.")
self.cbs("endRun"); return self
@k1lib.patch(Learner)
def __call__(self, xb, yb=None):
"""Executes just a small batch. Convenience method to query how the network is
doing.
:param xb: x batch
:param yb: y batch. If specified, return (y, loss), else return y alone
"""
oldData = self.data; self.data = [[(xb, (yb or torch.tensor(0)))], []]
with self.cbs.suspendEval(), self.cbs.context():
ex = lambda _: k1lib.raiseEx(k1lib.CancelBatchException)
self.cbs.add(k1lib.Callback().withCheckpoint("startLoss" if yb is None else "startBackward", ex))
self.run(1, 1)
self.data = oldData; return self.y if yb is None else (self.y, self.loss)
@k1lib.patch(Learner)
def evaluate(self):
"""Function to visualize quickly how the network is doing. Undefined by default,
just placed here as a convention, so you have to do something like this::
l = k1lib.Learner()
def evaluate(self):
xbs, ybs, ys = self.Recorder.record(1, 3)
plt.plot(torch.vstack(xbs), torch.vstack(ys))
        l.evaluate = partial(evaluate, l)
"""
raise NotImplementedError("You have to define evaluate() by yourself")
from k1lib.cli import *
@k1lib.patch(Learner, static=True)
def sample() -> Learner:
"""Creates an example learner, just for simple testing stuff anywhere. The
network tries to learn the function y=x. Only bare minimum callbacks are included."""
l = Learner(); l.data = k1lib.kdata.FunctionData.main(lambda x: x)
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.lin1 = k1lib.knn.LinBlock(1, 3)
self.lin2 = nn.Linear(3, 1)
def forward(self, x):
return ((x[:, None] + 2) | self.lin1 | self.lin2).squeeze()
l.model = Model(); l.cbs = k1lib.Callbacks().add(Cbs.CoreNormal()).add(Cbs.Loss()).add(Cbs.ProgressBar())
l.lossF = lambda y, yb: ((y - yb) ** 2).sum()
l.opt = torch.optim.Adam(l.model.parameters(), lr=3e-3); return l
```
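A minimal sketch of how the pieces above fit together, using the built-in sample learner; the file name is a hypothetical example, and what `l(...)` returns is whatever the callbacks record (see the `__call__` docstring above):
```python
import torch, k1lib

l = k1lib.Learner.sample()        # model, data, loss and optimizer preconfigured
l.run(5)                          # full training loop, driven by the callbacks
y = l(torch.linspace(-1, 1, 4))   # tiny forward pass; output recorded by the callbacks
l.save("demo-learner.pth")        # hypothetical name; reload with k1lib.Learner.load(...)
```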
#### File: k1lib/_mo/atom.py
```python
import k1lib
from typing import Dict, List
settings = k1lib.Settings().add("overOctet", False, "whether to allow making bonds that exceeds the octet rule")
k1lib.settings.add("mo", settings, "from k1lib.mo module")
__all__ = ["Atom", "substances", "NoFreeElectrons", "OctetFull"]
class NoFreeElectrons(RuntimeError): pass
class OctetFull(RuntimeError): pass
# if Atom's gDepth is smaller than this, then it means that it has not been visited
_depthAuto = k1lib.AutoIncrement()
_idxAuto = k1lib.AutoIncrement()
class Atom:
"""Just an atom really. Has properties, can bond to other atoms, and can
generate a :class:`System` for simulation."""
def __init__(self, name:str, atomicN:int, massN:float, valenceE:int, radius:List[float]=[], octetE:int=8):
"""Creates a new atom. Not intended to be used by the end user. If you
wish to get a new atom, just do stuff like this::
c1 = mo.C
c2 = mo.C
c1 == c2 # returns False, demonstrating that these are different atoms
If you wish to register new substances with the module, you can do this::
genF = lambda: Atom(...)
mo.registerSubstance("elementName", genF)
            mo.elementName # should execute `genF` and return the result
:param name: element name (eg. "C")
:param atomicN: atomic number (eg. 6)
:param massN: atomic mass in g/mol (eg. 12)
:param valenceE: how many valence electrons initially?
        :param radius: covalent radii (in pm) for single, double and triple bonds
:param octetE: how many electrons in a full octet? Default 8, but can be 2 for H and He"""
self.name = name; self.atomicN = atomicN; self.massN = massN
self.ogValenceE = valenceE # original
self.valenceE = valenceE; self.octetE = octetE; self.radius = radius
self._bonds = [] # list of Atoms this Atom is bonded to
self.gDepth = -1 # graph depth, for graph traversal stuff. Values will be updated from _depthAuto
self.idx = f"A{_idxAuto()}" # unique value for Atoms everywhere
# contracts:
# - valenceE = eClouds * 2 + freeE + len(bonds) * 2
# - valenceE <= octetE. "<" happens when octet not full
# can only form a new bond if freeE >= 1. Can dec eClouds to inc freeE
if name != "_e":
self.eClouds = []; self.freeE = valenceE % 2
for i in range(valenceE//2): self.eClouds.append(mo._e)
else: self.eClouds = []; self.freeE = 0
@property
def bonds(self):
"""List of Atoms bonded to this Atom"""
return self._bonds
@bonds.setter
def bonds(self, v): self._bonds = v
@property
def nonHBonds(self) -> List["Atom"]:
"""All atoms this atom is bonded to, minus the Hydrogens."""
return [a for a in self.bonds if a.name != "H"]
@property
def HBonds(self) -> List["Atom"]:
"""All hydrogens this atom is bonded to."""
return [a for a in self.bonds if a.name == "H"]
@property
def uniqueBonds(self) -> List["Atom"]:
"""All unique bonds. Meaning, if there's a double bond, only return 1
atom, not 2."""
return list(set(self.bonds))
@property
def uniqueNonHBonds(self) -> List["Atom"]:
"""All unique non Hydrogen bonds."""
return list(set(self.nonHBonds))
def nBonds(self, atom:"Atom"):
"""Get number of bonds between this and another atom."""
return len([bond for bond in self.bonds if bond == atom])
@property
def availableBonds(self) -> int:
"""Available bonds. This includes electron clouds, radical electrons, and
Hydrogen bonds."""
return len(self.eClouds) * 2 + self.freeE + len([a for a in self.bonds if a.name == "H"])
def __repr__(self):
return f"""<Atom {self.name} ({self.atomicN}), {len(self.bonds)} bonds, {self.valenceE}/{self.octetE} valence electrons, {len(self.eClouds)} electron clouds, {self.freeE} free (radical) electrons>"""
@k1lib.patch(Atom)
def _show(self, g=None, gDepth=-1, H:bool=True, GVKwargs={}):
self.gDepth = gDepth
if not H:
nH = len(self.HBonds); nH = "" if nH==0 else ("H" if nH == 1 else f"H{nH}")
g.node(self.idx, f"{self.name}{nH}", **GVKwargs)
else: g.node(self.idx, self.name, **GVKwargs)
for atom in self.bonds:
if atom.gDepth >= gDepth or (not H and atom.name == "H"): continue
# all this complexity just to determine arrow direction
d1 = (self.nonHBonds[0] == atom) if len(self.nonHBonds) > 0 else False
d2 = (atom.nonHBonds[0] == self) if len(atom.nonHBonds) > 0 else False
if d1 and d2: g(self.idx, atom.idx, dir="both")
elif d1: g(self.idx, atom.idx)
elif d2: g(atom.idx, self.idx)
else: g(self.idx, atom.idx, arrowhead="none")
if H: [atom._show(g, gDepth, H) for atom in self.bonds if atom.gDepth < gDepth]
else: [atom._show(g, gDepth, H) for atom in self.nonHBonds if atom.gDepth < gDepth]
@k1lib.patch(Atom)
def show(self, H:bool=True):
"""Show the molecule graph this atom is a part of. Meant for debugging
simple substances only, as graphs of big molecules look unwieldy. This also
highlights the current :class:`Atom`, and each bond is an arrow, indicating
where :meth:`next` will go next.
:param H: whether to display hydrogens as separate atoms, or bunched into the main atom"""
g = k1lib.digraph(); self._show(g, _depthAuto(), H, {"style": "filled"}); return g
@k1lib.patch(Atom)
def _addFreeE(self, amt:int=1):
"""Adds free electron to atom."""
    if amt > 1: [self._addFreeE() for i in range(amt)]; return
    self.freeE += 1
if self.freeE >= 2: self.eClouds.append(mo._e); self.freeE -= 2
@k1lib.patch(Atom)
def _subFreeE(self, amt:int=1) -> bool:
"""Tries to use ``amt`` free electrons. Returns successful or not."""
if amt > 1: [self._subFreeE() for i in range(amt)]
elif self.freeE > 0: self.freeE -= 1
elif len(self.eClouds) > 0:
self.freeE += 1; self.eClouds.pop()
else: raise RuntimeError(f"Can't give away any more free electrons on atom {self.name}!")
@k1lib.patch(Atom)
def _makeRoom(self, nBonds:int):
"""Tries to remove bonds with Hydrogen to make room for ``nBonds`` more bonds."""
nBondsToRemove = self.valenceE + nBonds - self.octetE
if nBondsToRemove > 0:
Hs = [bond for bond in self.bonds if bond.name == "H"]
if len(Hs) >= nBondsToRemove:
for i in range(nBondsToRemove): self.removeBond(Hs[i])
elif not settings.overOctet:
ans = input(f"Can't remove Hydrogen bonds to make room for new bond! Do you want to do anyway (y/n): ")
print("Btw, you can auto accept this by doing `settings.mo.overOctet = True`")
if ans.lower()[0] != "y": raise OctetFull("Stopping...")
availableE = len(self.eClouds) * 2 + self.freeE
if availableE < nBonds: raise NoFreeElectrons(f"Can't make room for {nBonds} new bonds on {self.name}. Only {availableE} electrons left for bonds!")
@k1lib.patch(Atom)
def __call__(self, atom:Atom, nBonds:int=1, main=False) -> Atom:
"""Forms a bond with another atom. If valence electrons are full, will
attempt to disconnect Hydrogens from self to make room.
    :param nBonds: number of bonds. 2 for double, 3 for triple
:param main: whether to put this bond in front of existing bonds, to
signify the "main" chain, so that it works well with :meth:`next`
:return: self"""
self._makeRoom(nBonds); atom._makeRoom(nBonds)
if main: self.bonds = [atom] * nBonds + self.bonds
else: self.bonds += [atom] * nBonds
atom.bonds += [self] * nBonds
self.valenceE += nBonds; self._subFreeE(nBonds)
atom.valenceE += nBonds; atom._subFreeE(nBonds)
return self
@k1lib.patch(Atom)
def bond(self, atom:Atom, nBonds:int=1, main=False) -> Atom:
"""Like :meth:`__call__`, but returns the atom passed in instead, so you
can form the main loop quickly."""
self(atom, nBonds, main); return atom
@k1lib.patch(Atom)
def main(self, atom:Atom, nBonds:int=1) -> Atom:
"""Like :meth:`bond`, but with ``main`` param defaulted to True."""
return self.bond(atom, nBonds, True)
@k1lib.patch(Atom)
def removeBond(self, atom:"Atom"):
"""Removes all bonds between this and another atom"""
nBonds = self.nBonds(atom)
self.bonds = [bond for bond in self.bonds if bond != atom]
self.valenceE -= nBonds; self._addFreeE(nBonds)
atom.bonds = [bond for bond in atom.bonds if bond != self]
atom.valenceE -= nBonds; atom._addFreeE(nBonds)
@k1lib.patch(Atom, "next")
def _next(self, offset=0, times:int=1) -> "Atom":
"""Returns the next atom bonded to this. Tries to avoid going into Hydrogens.
This is the main way to navigate around the molecule.
You kinda have to make sure that your molecule's bonding order is appropriate by
choosing between :meth:`bond` and :meth:`main`. Check the bonding order with
:meth:`show`.
:param offset: if there are multiple non-Hydrogen atoms, which ones should I pick?
:param times: how many times do you want to chain ``.next()``?"""
if times < 0: raise RuntimeError("Can't do .next() with negative `times`")
if times == 0: return self
atoms = self.nonHBonds + self.HBonds
if len(atoms) == 0: return None
_next = atoms[offset]
if times == 1: return _next
else: return _next.next(offset, times-1)
@k1lib.patch(Atom)
def nexts(self, atoms:int=2) -> List[Atom]:
"""Kinda like :meth:`next`, but fetches multiple atoms on the backbone.
Example::
c1, c2 = mo.CH4(mo.CH4).nexts()"""
if atoms < 1: raise RuntimeError(f"Zero or negative ({atoms}) number of atoms does not make sense!")
if atoms == 1: return [self]
return [self, *(self.next().nexts(atoms-1))]
empiricalOrder = ["C", "H", "O", "N"]
def em1(e:str, n:int):
if n == 1: return e
else: return f"{e}{n}"
@k1lib.patch(Atom)
def _empirical(self, d:Dict[str, int], gDepth:int):
if self.gDepth >= gDepth: return
self.gDepth = gDepth; d[self.name] += 1
for atom in self.bonds: atom._empirical(d, gDepth)
@k1lib.patch(Atom)
def empirical(self) -> str:
"""Returns an empirical formula for the molecule this :class:`Atom` is attached to."""
d = k1lib.Object().withAutoDeclare(lambda: 0)
self._empirical(d, _depthAuto()); answer = ""
for e in empiricalOrder:
if e in d: answer += em1(e,d[e]); del d[e]
for e in d.state.keys(): answer += em1(e,d[e])
return answer
@k1lib.patch(Atom)
def _atoms(self, l, gDepth):
if self.gDepth >= gDepth: return
self.gDepth = gDepth; l.append(self)
for atom in self.bonds: atom._atoms(l, gDepth)
@k1lib.patch(Atom)
def atoms(self) -> List[Atom]:
"""Returns a list of Atoms in the molecule this specific Atom is attached to."""
l = []; self._atoms(l, _depthAuto()); return l
@k1lib.patch(Atom, "endChain")
@property
def endChain(a) -> Atom:
"""Do a bunch of .next() until reached the end of the carbon chain.
Example::
c1 = mo.alcohol(3, 1)
c3 = c1.endChain
c3(mo.NH3)
c1.show() # displays in cell"""
lastA = None
for i in range(200): # for loop to prevent infinite recursion
nextA = a.next()
if nextA == lastA: return a
lastA = a; a = nextA
@k1lib.patch(Atom)
def moveLastCTo2ndC(a:Atom) -> Atom:
"""Move last carbon to 2nd carbon. Useful in constructing iso- and tert-."""
end = a.endChain; nearEnd = end.next()
end.removeBond(nearEnd); nearEnd(mo.H); a.next()(mo.CH4); return a
_a = {} # dict of atoms, which will be used to patch the entire module
class _Mo:
def __init__(self): self._MoWrap_dirs = []
def registerSubstance(self, name:str, _f):
setattr(_Mo, name, property(lambda self: _f()))
self._MoWrap_dirs.append(name)
def __dir__(self):
return super().__dir__() + self._MoWrap_dirs
pass
mo = _Mo() # internal convenience object so that I can use the same style as the module
def _atom(name, *args, **kwargs):
_a[name] = f = lambda: Atom(name, *args, **kwargs)
mo.registerSubstance(name, f)
def substances() -> List[str]:
"""Get a list of builtin substances. To register new substances, check over
:class:`Atom`."""
return [k for k in _a.keys() if not k.startswith("_")]
# covalent radius taken from (Pyykko & Atsumi) https://chem.libretexts.org/@api/deki/pages/2182/pdf/A3%253A%2bCovalent%2bRadii.pdf?stylesheet=default
_atom("_e", 0, 0.1, 0, [25]) # electron cloud, for internal use
_atom("H", 1, 1.008, 1, [32], octetE=2)
_atom("Li", 3, 6.94, 1, [133, 124])
_atom("Be", 4, 9.0122, 2, [102, 90, 85])
_atom("B", 5, 10.81, 3, [85, 78, 73])
_atom("C", 6, 12.011, 4, [75, 67, 60])
_atom("N", 7, 14.007, 5, [71, 60, 54])
_atom("O", 8, 15.999, 6, [63, 57, 53])
_atom("F", 9, 18.998, 7, [64, 59, 53])
_atom("Na", 11, 22.990, 1, [155, 160])
_atom("Mg", 12, 24.305, 2, [139, 132, 127])
_atom("Al", 13, 26.982, 3, [126, 113, 111])
_atom("Si", 14, 28.085, 4, [116, 107, 102])
_atom("P", 15, 30.974, 5, [111, 102, 94])
_atom("S", 16, 32.06, 6, [103, 94, 95])
_atom("Cl", 17, 35.45, 7, [99, 95, 93])
_atom("K", 19, 39.098, 1, [196, 193])
_atom("Ca", 20, 40.078, 2, [171, 147, 133])
_atom("Ga", 31, 69.723, 3, [124, 117, 121])
_atom("Ge", 32, 72.630, 4, [121, 111, 114])
_atom("As", 33, 74.922, 5, [121, 114, 106])
_atom("Se", 34, 78.971, 6, [116, 107, 107])
_atom("Br", 35, 79.904, 7, [114, 109, 110])
_atom("I", 53, 126.9, 7, [133, 129, 125])
```
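A hypothetical bonding sketch for the API above; `mo` is the registry object defined at the bottom of the file, and the printed strings are assumptions based on `__repr__` and `empirical()`:
```python
c1, c2, o = mo.C, mo.C, mo.O   # three fresh atoms from the registry
c1(c2)                         # single C-C bond
c2(o, 2)                       # double C=O bond
print(c1)                      # repr shows bond / valence-electron bookkeeping
print(c1.empirical())          # atoms reachable from c1, expected "C2O"
```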
#### File: k1lib/k1lib/_perlin.py
```python
import numpy as np, matplotlib.pyplot as plt, matplotlib as mpl
from typing import Any, List, Union, Tuple
__all__ = ["perlin3d"]
def interpolant(t):
return t*t*t*(t*(t*6 - 15) + 10)
def perlin3d(shape=(100, 100, 100), res=(2, 2, 2), tileable=(False, False, False), interpolant=interpolant):
"""Generate a 3D numpy array of perlin noise. Not my code! All credits go
to the author of this library: https://github.com/pvigier/perlin-numpy
:param shape: The shape of the generated array (tuple of three ints).
This must be a multiple of res.
:param res: The number of periods of noise to generate along each
axis (tuple of three ints). Note shape must be a multiple
of res.
:param tileable: If the noise should be tileable along each axis
(tuple of three bools). Defaults to (False, False, False).
:param interpolant: The interpolation function, defaults to
t*t*t*(t*(t*6 - 15) + 10).
:return: A numpy array of shape shape with the generated noise.
:raises ValueError: If shape is not a multiple of res."""
delta = (res[0] / shape[0], res[1] / shape[1], res[2] / shape[2])
d = (shape[0] // res[0], shape[1] // res[1], shape[2] // res[2])
grid = np.mgrid[0:res[0]:delta[0],0:res[1]:delta[1],0:res[2]:delta[2]]
grid = grid.transpose(1, 2, 3, 0) % 1
# Gradients
theta = 2*np.pi*np.random.rand(res[0] + 1, res[1] + 1, res[2] + 1)
phi = 2*np.pi*np.random.rand(res[0] + 1, res[1] + 1, res[2] + 1)
gradients = np.stack(
(np.sin(phi)*np.cos(theta), np.sin(phi)*np.sin(theta), np.cos(phi)),
axis=3
)
if tileable[0]:
gradients[-1,:,:] = gradients[0,:,:]
if tileable[1]:
gradients[:,-1,:] = gradients[:,0,:]
if tileable[2]:
gradients[:,:,-1] = gradients[:,:,0]
gradients = gradients.repeat(d[0], 0).repeat(d[1], 1).repeat(d[2], 2)
g000 = gradients[ :-d[0], :-d[1], :-d[2]]
g100 = gradients[d[0]: , :-d[1], :-d[2]]
g010 = gradients[ :-d[0],d[1]: , :-d[2]]
g110 = gradients[d[0]: ,d[1]: , :-d[2]]
g001 = gradients[ :-d[0], :-d[1],d[2]: ]
g101 = gradients[d[0]: , :-d[1],d[2]: ]
g011 = gradients[ :-d[0],d[1]: ,d[2]: ]
g111 = gradients[d[0]: ,d[1]: ,d[2]: ]
# Ramps
n000 = np.sum(np.stack((grid[:,:,:,0] , grid[:,:,:,1] , grid[:,:,:,2] ), axis=3) * g000, 3)
n100 = np.sum(np.stack((grid[:,:,:,0]-1, grid[:,:,:,1] , grid[:,:,:,2] ), axis=3) * g100, 3)
n010 = np.sum(np.stack((grid[:,:,:,0] , grid[:,:,:,1]-1, grid[:,:,:,2] ), axis=3) * g010, 3)
n110 = np.sum(np.stack((grid[:,:,:,0]-1, grid[:,:,:,1]-1, grid[:,:,:,2] ), axis=3) * g110, 3)
n001 = np.sum(np.stack((grid[:,:,:,0] , grid[:,:,:,1] , grid[:,:,:,2]-1), axis=3) * g001, 3)
n101 = np.sum(np.stack((grid[:,:,:,0]-1, grid[:,:,:,1] , grid[:,:,:,2]-1), axis=3) * g101, 3)
n011 = np.sum(np.stack((grid[:,:,:,0] , grid[:,:,:,1]-1, grid[:,:,:,2]-1), axis=3) * g011, 3)
n111 = np.sum(np.stack((grid[:,:,:,0]-1, grid[:,:,:,1]-1, grid[:,:,:,2]-1), axis=3) * g111, 3)
# Interpolation
t = interpolant(grid)
n00 = n000*(1-t[:,:,:,0]) + t[:,:,:,0]*n100
n10 = n010*(1-t[:,:,:,0]) + t[:,:,:,0]*n110
n01 = n001*(1-t[:,:,:,0]) + t[:,:,:,0]*n101
n11 = n011*(1-t[:,:,:,0]) + t[:,:,:,0]*n111
n0 = (1-t[:,:,:,1])*n00 + t[:,:,:,1]*n10
n1 = (1-t[:,:,:,1])*n01 + t[:,:,:,1]*n11
return ((1-t[:,:,:,2])*n0 + t[:,:,:,2]*n1)
```
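A small usage sketch (not in the original file): generate a noise volume whose shape is a multiple of `res`, then look at one slice; the import path assumes the module layout shown above:
```python
import matplotlib.pyplot as plt
from k1lib._perlin import perlin3d

noise = perlin3d(shape=(32, 32, 32), res=(2, 2, 2))   # shape must be a multiple of res
print(noise.shape)                                    # (32, 32, 32)
plt.imshow(noise[0], cmap="gray"); plt.title("One slice of 3D Perlin noise"); plt.show()
```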
#### File: k1lib/k1lib/selector.py
```python
from torch import nn; import k1lib, re, torch
from typing import List, Tuple, Dict, Union, Any, Iterator, Callable
from contextlib import contextmanager; from functools import partial
__all__ = ["ModuleSelector", "preprocess", "select"]
def preprocess(selectors:str, defaultProp="*") -> List[str]:
r"""Removes all quirkly features allowed by the css
language, and outputs nice lines. Example::
# returns ["a:f", "a:g,h", "b:g,h", "t:*"]
selector.preprocess("a:f; a, b: g,h; t")
:param selectors: single css selector string. Statements separated
by "\\n" or ";"
:param defaultProp: default property, if statement doesn't have one"""
# filtering unwanted characters and quirky spaces
lines = [e for l in selectors.split("\n") for e in l.split(";")]
selectors = [re.sub("(^\s+)|(\s+$)", "", re.sub("\s\s+", " ", line)).replace(" >", ">").replace("> ", ">").replace(" :", ":").replace(": ", ":").replace(" ,", ",").replace(", ", ",").replace(";", "\n").replace(" \n", "\n").replace("\n ", "\n") for line in lines if line != ""]
# adding "*" to all selectors with no props specified
selectors = [selector if ":" in selector else f"{selector}:{defaultProp}" for selector in selectors]
# expanding comma-delimited selectors
return [f"{segment}:{selector.split(':')[1]}" for selector in selectors for segment in selector.split(":")[0].split(",")]
def _getParts(s:str): return [a for elem in s.split(":")[0].split(">") if elem for a in elem.split(" ") if a]
def _getProps(s:str): return [elem for elem in s.split(":")[1].split(",") if elem]
_idxAuto = k1lib.AutoIncrement()
class ModuleSelector: # empty methods so that Sphinx generates the docs in order
props:List[str]
"""Properties of this :class:`ModuleSelector`"""
idx:int
"""Unique id of this :class:`ModuleSelector` in the entire script. May be useful
for module recognition"""
nn:"torch.nn.Module"
"""The associated :class:`torch.nn.Module` of this :class:`ModuleSelector`"""
def __init__(self, parent:"ModuleSelector", name:str, nn:"torch.nn.Module"):
self.parent = parent; self.name = name; self.nn = nn
self._children:Dict["ModuleSelector"] = {}
self.props:List[str] = []; self.depth:int = 0
self.directSelectors:List[str] = []
self.indirectSelectors:List[str] = []
self.displayF:Callable[["ModuleSelector"], str] = lambda mS: ', '.join(mS.props)
self.idx = _idxAuto()
def deepestDepth(self): pass
def highlight(self, prop:str):
"""Highlights the specified prop when displaying the object."""
self.displayF = lambda self: (k1lib.fmt.txt.red if prop in self else k1lib.fmt.txt.identity)(', '.join(self.props))
return self
def __call__(self, *args, **kwargs):
"""Calls the internal :class:`torch.nn.Module`"""
return self.nn(*args, **kwargs)
def __contains__(self): pass
def named_children(self): pass
def children(self): pass
def named_modules(self): pass
def modules(self): pass
def directParams(self): pass
def parse(self): pass
def apply(self): pass
def clearProps(self): pass
@property
def displayF(self):
"""Function to display each ModuleSelector's lines.
Default is just::
lambda mS: ", ".join(mS.props) """
return self._displayF
@displayF.setter
def displayF(self, f):
def applyF(self): self._displayF = f
self.apply(applyF)
def __getattr__(self, attr):
if attr.startswith("_"): raise AttributeError(attr)
if attr in self._children: return self._children[attr]
return self.directParams[attr]
def __getitem__(self, idx): return getattr(self, str(idx))
@staticmethod
def sample() -> "ModuleSelector":
"""Create a new example :class:`ModuleSelector` that has a bit of
        hierarchy to it, with no css."""
return nn.Sequential(nn.Linear(3, 4), nn.Sequential(nn.Conv2d(3, 8, 3, 2), nn.ReLU(), nn.Linear(5, 6)), nn.Linear(7, 8)).select("")
def hookF(self): pass
def hookFp(self): pass
def hookB(self): pass
def freeze(self): pass
def unfreeze(self): pass
@k1lib.patch(nn.Module)
def select(model:"torch.nn.Module", css:str="*") -> "k1lib.selector.ModuleSelector":
"""Creates a new ModuleSelector, in sync with a model.
Example::
mS = selector.select(nn.Linear(3, 4), "#root:propA")
Or, you can do it the more direct way::
mS = nn.Linear(3, 4).select("#root:propA")
:param model: the :class:`torch.nn.Module` object to select from
:param css: the css selectors"""
root = ModuleSelector(None, "root", model)
root.parse(preprocess(css)); return root
@k1lib.patch(ModuleSelector, name="apply")
def _apply(self, f:Callable[[ModuleSelector], None]):
"""Applies a function to self and all child :class:`ModuleSelector`"""
f(self)
for child in self._children.values(): child.apply(f)
@k1lib.patch(ModuleSelector, name="parse")
def _parse(self, selectors:Union[List[str], str]) -> ModuleSelector:
"""Parses extra selectors. Clears all old selectors, but retain
the props. Returns self. Example::
mS = selector.ModuleSelector.sample().parse("Conv2d:propA")
# returns True
"propA" in mS[1][0]
:param selectors: can be the preprocessed list, or the unprocessed css string"""
if isinstance(selectors, str): selectors = preprocess(selectors)
self.directSelectors = []; self.indirectSelectors = []
ogSelectors = selectors
if self.parent != None:
selectors = [] + selectors + self.parent.indirectSelectors + self.parent.directSelectors
self.indirectSelectors += self.parent.indirectSelectors
self.depth = self.parent.depth + 1
for selector in selectors:
parts = _getParts(selector)
matches = parts[0] == self.nn.__class__.__name__ or parts[0] == "#" + self.name or parts[0] == "*"
if len(parts) == 1:
if matches: self.props += _getProps(selector)
else:
a = selector.find(">"); a = a if a > 0 else float("inf")
b = selector.find(" "); b = b if b > 0 else float("inf")
direct = a < b
if matches:
if direct: self.directSelectors.append(selector[a+1:])
else: self.indirectSelectors.append(selector[b+1:])
for name, mod in self.nn.named_children():
if name not in self._children:
self._children[name] = ModuleSelector(self, name, mod)
self._children[name].parse(ogSelectors)
self.props = list(set(self.props)); return self
@k1lib.patch(ModuleSelector)
def __contains__(self, prop:str=None) -> bool:
"""Whether this :class:`ModuleSelector` has a specific prop.
Example::
# returns True
"b" in nn.Linear(3, 4).select("*:b")
# returns False
"h" in nn.Linear(3, 4).select("*:b")
# returns True, "*" here means the ModuleSelector has any properties at all
"*" in nn.Linear(3, 4).select("*:b")"""
if "*" in self.props: return True
if prop in self.props: return True
if prop == "*" and len(self.props) > 0: return True
return False
@k1lib.patch(ModuleSelector)
def named_children(self, prop:str=None) -> Iterator[Tuple[str, ModuleSelector]]:
"""Get all named direct childs.
:param prop: Filter property. See also: :meth:`__contains__`"""
if prop is None: return self._children.items()
return ((k, v) for k, v in self._children.items() if prop in v)
@k1lib.patch(ModuleSelector)
def children(self, prop:str=None) -> Iterator[ModuleSelector]:
"""Get all direct childs.
:param prop: Filter property. See also: :meth:`__contains__`"""
return (x for _, x in self.named_children(prop))
@k1lib.patch(ModuleSelector, "directParams")
@property
def directParams(self) -> Dict[str, nn.Parameter]:
"""Dict params directly under this module"""
return {name: param for name, param in self.nn.named_parameters() if "." not in name}
@k1lib.patch(ModuleSelector)
def named_modules(self, prop:str=None) -> Iterator[Tuple[str, ModuleSelector]]:
"""Get all named child recursively.
Example::
modules = list(nn.Sequential(nn.Linear(3, 4), nn.ReLU()).select().named_modules())
# return 3
len(modules)
# return tuple ('0', <ModuleSelector of Linear>)
modules[1]
:param prop: Filter property. See also: :meth:`__contains__`"""
if prop != None:
yield from ((name, m) for name, m in self.named_modules() if prop in m)
return
yield self.name, self
for child in self._children.values(): yield from child.named_modules()
@k1lib.patch(ModuleSelector)
def modules(self, prop:str=None) -> Iterator[ModuleSelector]:
"""Get all child recursively.
:param prop: Filter property. See also: :meth:`__contains__`"""
for name, x in self.named_modules(prop): yield x
@k1lib.patch(ModuleSelector)
def clearProps(self) -> "ModuleSelector":
"""Clears all existing props of this and all descendants
:class:`ModuleSelector`. Example::
# returns False
"b" in nn.Linear(3, 4).select("*:b").clearProps()"""
def applyF(self): self.props = []
self.apply(applyF); return self
@k1lib.patch(ModuleSelector, name="deepestDepth")
@property
def deepestDepth(self):
"""Deepest depth of the tree. If self doesn't
have any child, then depth is 0"""
if len(self._children) == 0: return 0
return 1 + max([child.deepestDepth for child in self._children.values()])
@k1lib.patch(ModuleSelector)
def __repr__(self, intro:bool=True, header:Union[str, Tuple[str]]="", footer="", tabs:int=None):
"""
:param intro: whether to include a nice header and footer info
:param header:
str: include a header that starts where `displayF` will start
Tuple[str, str]: first one in tree, second one in displayF section
:param footer: same thing with header, but at the end
:param tabs: number of tabs at the beginning. Best to leave this empty
"""
if tabs == None: tabs = 5 + self.deepestDepth
answer = "ModuleSelector:\n" if intro else ""
if header:
h1, h2 = ("", header) if isinstance(header, str) else header
answer += h1.ljust(tabs*4, " ") + h2 + "\n"
answer += f"{self.name}: {self.nn.__class__.__name__}".ljust(tabs*4, " ")
answer += self.displayF(self) + ("\n" if len(self._children) > 0 else "")
answer += k1lib.tab("\n".join([child.__repr__(tabs=tabs-1, intro=False) for name, child in self._children.items()]))
if footer:
f1, f2 = ("", footer) if isinstance(footer, str) else footer
answer += "\n" + f1.ljust(tabs*4, " ") + f2
if intro: answer += f"""\n\nCan...
- mS.deepestDepth: get deepest depth possible
- mS.nn: get the underlying nn.Module object
- mS.apply(f): apply to self and all descendants
- "HookModule" in mS: whether this module has a specified prop
- mS.highlight(prop): highlights all modules with specified prop
- mS.parse([..., ...]): parses extra css
- mS.directParams: get Dict[str, nn.Parameter] that are directly under this module"""
return answer
def _strTensor(t): return "None" if t is None else f"{t.shape}"
def strTensorTuple(ts):
if len(ts) > 1:
shapes = "\n".join(f"- {_strTensor(t)}" for t in ts)
return f"tensors ({len(ts)} total) shapes:\n{shapes}"
else:
return f"tensor shape: {_strTensor(ts[0])}"
@k1lib.patch(ModuleSelector)
@contextmanager
def hookF(self, f:Callable[[ModuleSelector, "torch.nn.Module", Tuple[torch.Tensor], torch.Tensor], None]=None, prop:str="*"):
"""Context manager for applying forward hooks.
Example::
def f(mS, m, i, o):
print(i, o)
m = nn.Linear(3, 4)
with m.select().hookF(f):
m(torch.randn(2, 3))
    :param f: hook callback, should accept the :class:`ModuleSelector`, the hooked :class:`torch.nn.Module`, its inputs and its output.
        If not specified, defaults to printing out input and output tensor shapes
    :param prop: filter property of module to hook onto"""
    if f is None: f = lambda mS, m, i, o: print(f"Forward hook {m}:\n" + k1lib.tab(f"Input {strTensorTuple(i)}\nOutput tensor shape: {o.shape}"))
    g = lambda m, i, o: f(self, m, i, o)
handles = [m.nn.register_forward_hook(g) for m in self.modules(prop)]
try: yield
finally:
for h in handles: h.remove()
@k1lib.patch(ModuleSelector)
@contextmanager
def hookFp(self, f=None, prop:str="*"):
"""Context manager for applying forward pre hooks.
Example::
def f(mS, m, i):
print(i)
m = nn.Linear(3, 4)
with m.select().hookFp(f):
m(torch.randn(2, 3))
    :param f: hook callback, should accept the :class:`ModuleSelector`, the hooked :class:`torch.nn.Module` and its inputs.
        If not specified, defaults to printing out input tensor shapes
    :param prop: filter property of module to hook onto"""
    if f is None: f = lambda mS, m, i: print(f"Forward pre hook {m}:\n" + k1lib.tab(f"Input {strTensorTuple(i)}"))
    g = lambda m, i: f(self, m, i)
handles = [m.nn.register_forward_pre_hook(g) for m in self.modules(prop)]
try: yield
finally:
for h in handles: h.remove()
@k1lib.patch(ModuleSelector)
@contextmanager
def hookB(self, f=None, prop:str="*"):
"""Context manager for applying backward hooks.
Example::
def f(mS, m, i, o):
print(i, o)
m = nn.Linear(3, 4)
with m.select().hookB(f):
m(torch.randn(2, 3)).sum().backward()
    :param f: hook callback, should accept the :class:`ModuleSelector`, the hooked :class:`torch.nn.Module`, its grad inputs and grad outputs.
        If not specified, defaults to printing out gradient tensor shapes
    :param prop: filter property of module to hook onto"""
    if f is None: f = lambda mS, m, i, o: print(f"Backward hook {m}:\n" + k1lib.tab(f"Input {strTensorTuple(i)}\nOutput {strTensorTuple(o)}"))
    g = lambda m, i, o: f(self, m, i, o)
handles = [m.nn.register_full_backward_hook(g) for m in self.modules(prop)]
try: yield
finally:
for h in handles: h.remove()
from contextlib import ExitStack
@contextmanager
def _freeze(self, value:bool, prop:str):
modules = [m for m in self.modules(prop)]
with ExitStack() as stack:
for m in self.modules(prop):
stack.enter_context(m.nn.gradContext())
m.nn.requires_grad_(value)
try: yield
finally: pass
@k1lib.patch(ModuleSelector)
def freeze(self, prop:str="*"):
"""Returns a context manager that freezes (set requires_grad to False) parts of
the network. Example::
l = k1lib.Learner.sample()
w = l.model.lin1.lin.weight.clone() # weights before
with l.model.select("#lin1").freeze():
l.run(1)
# returns True
(l.model.lin1.lin.weight == w).all()"""
return _freeze(self, False, prop)
@k1lib.patch(ModuleSelector)
def unfreeze(self, prop:str="*"):
"""Returns a context manager that unfreezes (set requires_grad to True) parts of
the network. Example::
l = k1lib.Learner.sample()
w = l.model.lin1.lin.weight.clone() # weights before
with l.model.select("#lin1").freeze():
with l.model.select("#lin1 > #lin").unfreeze():
l.run(1)
# returns False
(l.model.lin1.lin.weight == w).all()"""
return _freeze(self, True, prop)
``` |
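A hypothetical end-to-end sketch of the selector API above; it assumes `k1lib` has been imported so the `nn.Module.select` patch is active, and uses the four-argument callback convention from the `hookF` docstring:
```python
import torch, torch.nn as nn, k1lib

model = nn.Sequential(nn.Linear(3, 4), nn.ReLU(), nn.Linear(4, 2))
mS = model.select("Linear: watched")       # tag every Linear module with the "watched" prop
print(mS)                                  # tree view of the model with its props
with mS.hookF(lambda mS, m, i, o: print(m, o.shape), prop="watched"):
    model(torch.randn(5, 3))               # prints each hooked Linear and its output shape
```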
{
"source": "1572990942/meiduoshop",
"score": 3
} |
#### File: meiduoshop/utils/functions.py
```python
import time
import jwt
from django.conf import settings
def jwt_encode(payload: dict, max_age: int) -> str:
"""
使用PyJWT加密数据
"""
payload['exp'] = int(time.time())+max_age
return jwt.encode(payload, key=settings.SECRET_KEY, algorithm='HS256')
def jwt_decode(payload: str) -> dict:
"""
使用PyJWT解密数据
如果过期:ExpiredSignatureError
如果篡改:InvalidSignatureError
"""
    r = jwt.decode(payload, key=settings.SECRET_KEY, algorithms=['HS256'])
return r
``` |
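A hypothetical round-trip example (not part of the original file); it assumes Django settings are configured so `SECRET_KEY` is available:
```python
token = jwt_encode({"user_id": 42}, max_age=3600)   # valid for one hour
data = jwt_decode(token)                            # {"user_id": 42, "exp": ...}
assert data["user_id"] == 42
```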
{
"source": "157601/Group-Project-Game",
"score": 2
} |
#### File: Group-Project-Game/users/views.py
```python
from django.shortcuts import redirect, render
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
from django.contrib.auth.forms import AuthenticationForm
from .forms import RegistrationForm
#index page for the exeguesser site
def index(request):
context = {}
return render(request, 'users/index.html', context)
def loginPage(request):
#checking if a user has tried to login, if valid they get redirected to the home page
if request.method == "POST":
form = AuthenticationForm(request, data=request.POST)
if form.is_valid():
username = form.cleaned_data.get('username')
password = form.cleaned_data.get('password')
user = authenticate(username=username, password=password)
if user is not None:
login(request, user)
messages.info(request, f"You are now logged in as {username}.")
return redirect("home")
else:
messages.error(request,"Invalid username or password.")
else:
messages.error(request,"Invalid username or password.")
form = AuthenticationForm()
return render(request=request, template_name="users/login.html", context={"login_form":form})
def registerPage(request):
#checks if form submitted and is valid, adds user to database and redirects to home
if request.method == 'POST':
form = RegistrationForm(request.POST)
if form.is_valid():
user = form.save()
user.refresh_from_db()
# load the profile instance created by the signal
user.save()
raw_password = form.cleaned_data.get('<PASSWORD>')
# login user after signing up
user = authenticate(username=user.username, password=raw_password)
login(request, user)
# redirect user to home page
return redirect('home')
else:
form = RegistrationForm()
return render(request, 'users/register.html', {'form': form})
#Allows the user to logout and then redirects them to index page
def logout_request(request):
logout(request)
messages.info(request, "You have successfully logged out.")
return redirect("index")
``` |
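A hypothetical `urls.py` sketch showing how these views could be wired up; the index/login/register/logout route names match the `redirect()` targets used above, while 'home' is assumed to be defined elsewhere in the project:
```python
from django.urls import path
from . import views

urlpatterns = [
    path('', views.index, name='index'),
    path('login/', views.loginPage, name='login'),
    path('register/', views.registerPage, name='register'),
    path('logout/', views.logout_request, name='logout'),
]
```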
{
"source": "15805383399/miditoolkit",
"score": 2
} |
#### File: miditoolkit/midi/parser.py
```python
import re
import mido
import warnings
import functools
import collections
import numpy as np
from copy import deepcopy
from .containers import KeySignature, TimeSignature, Lyric, Note, PitchBend, ControlChange, Instrument, TempoChange, Marker
DEFAULT_BPM = int(120)
class MidiFile(object):
def __init__(self, filename=None, file=None, ticks_per_beat=480, clip=False, charset ='latin1'):
# create empty file
if (filename is None and file is None):
self.ticks_per_beat = ticks_per_beat
self.max_tick = 0
self.tempo_changes = []
self.time_signature_changes = []
self.key_signature_changes = []
self.lyrics = []
self.markers = []
self.instruments = []
# load
else:
if filename:
# filename
mido_obj = mido.MidiFile(filename=filename, clip=clip, charset=charset)
else:
mido_obj = mido.MidiFile(file=file, clip=clip, charset=charset)
# ticks_per_beat
self.ticks_per_beat = mido_obj.ticks_per_beat
# convert delta time to cumulative time
mido_obj = self._convert_delta_to_cumulative(mido_obj)
# load tempo changes
self.tempo_changes = self._load_tempo_changes(mido_obj)
# load key signatures
self.key_signature_changes = self._load_key_signatures(mido_obj)
# load time signatures
self.time_signature_changes = self._load_time_signatures(mido_obj)
# load markers
self.markers = self._load_markers(mido_obj)
# load lyrics
self.lyrics = self._load_lyrics(mido_obj)
# sort events by time
self.time_signature_changes.sort(key=lambda ts: ts.time)
self.key_signature_changes.sort(key=lambda ks: ks.time)
self.lyrics.sort(key=lambda lyc: lyc.time)
# compute max tick
self.max_tick = max([max([e.time for e in t]) for t in mido_obj.tracks]) + 1
# load instruments
self.instruments = self._load_instruments(mido_obj)
# tick and sec mapping
def _convert_delta_to_cumulative(self, mido_obj):
for track in mido_obj.tracks:
tick = int(0)
for event in track:
event.time += tick
tick = event.time
return mido_obj
def _load_tempo_changes(self, mido_obj):
# default bpm
tempo_changes = [TempoChange(DEFAULT_BPM, 0)]
# traversing
for track in mido_obj.tracks:
for event in track:
if event.type == 'set_tempo':
# convert tempo to BPM
tempo = mido.tempo2bpm(event.tempo)
tick = event.time
if tick == 0:
tempo_changes = [TempoChange(tempo, 0)]
else:
last_tempo = tempo_changes[-1].tempo
if tempo != last_tempo:
tempo_changes.append(TempoChange(tempo, tick))
return tempo_changes
def _load_time_signatures(self, mido_obj):
# no default
time_signature_changes = []
# traversing
for track in mido_obj.tracks:
for event in track:
if event.type == 'time_signature':
ts_obj = TimeSignature(
event.numerator,
event.denominator,
event.time)
time_signature_changes.append(ts_obj)
return time_signature_changes
def _load_key_signatures(self, mido_obj):
# no default
key_signature_changes = []
# traversing
for track in mido_obj.tracks:
for event in track:
if event.type == 'key_signature':
key_obj = KeySignature(
event.key,
event.time)
key_signature_changes.append(key_obj)
return key_signature_changes
def _load_markers(self, mido_obj):
# no default
markers = []
# traversing
for track in mido_obj.tracks:
for event in track:
if event.type == 'marker':
markers.append(Marker(event.text, event.time))
return markers
def _load_lyrics(self, mido_obj):
# no default
lyrics = []
# traversing
for track in mido_obj.tracks:
for event in track:
if event.type == 'lyrics':
lyrics.append(Lyric(event.text, event.time))
return lyrics
def _load_instruments(self, midi_data):
instrument_map = collections.OrderedDict()
# Store a similar mapping to instruments storing "straggler events",
# e.g. events which appear before we want to initialize an Instrument
stragglers = {}
# This dict will map track indices to any track names encountered
track_name_map = collections.defaultdict(str)
def __get_instrument(program, channel, track, create_new):
"""Gets the Instrument corresponding to the given program number,
drum/non-drum type, channel, and track index. If no such
instrument exists, one is created.
"""
# If we have already created an instrument for this program
# number/track/channel, return it
if (program, channel, track) in instrument_map:
return instrument_map[(program, channel, track)]
# If there's a straggler instrument for this instrument and we
# aren't being requested to create a new instrument
if not create_new and (channel, track) in stragglers:
return stragglers[(channel, track)]
# If we are told to, create a new instrument and store it
if create_new:
is_drum = (channel == 9)
instrument = Instrument(
program, is_drum, track_name_map[track_idx])
# If any events appeared for this instrument before now,
# include them in the new instrument
if (channel, track) in stragglers:
straggler = stragglers[(channel, track)]
instrument.control_changes = straggler.control_changes
instrument.pitch_bends = straggler.pitch_bends
# Add the instrument to the instrument map
instrument_map[(program, channel, track)] = instrument
# Otherwise, create a "straggler" instrument which holds events
# which appear before we actually want to create a proper new
# instrument
else:
# Create a "straggler" instrument
                instrument = Instrument(program, name=track_name_map[track_idx])
# Note that stragglers ignores program number, because we want
# to store all events on a track which appear before the first
# note-on, regardless of program
stragglers[(channel, track)] = instrument
return instrument
for track_idx, track in enumerate(midi_data.tracks):
# Keep track of last note on location:
# key = (instrument, note),
# value = (note-on tick, velocity)
last_note_on = collections.defaultdict(list)
# Keep track of which instrument is playing in each channel
# initialize to program 0 for all channels
            current_instrument = np.zeros(16, dtype=int)
for event in track:
# Look for track name events
if event.type == 'track_name':
# Set the track name for the current track
track_name_map[track_idx] = event.name
# Look for program change events
if event.type == 'program_change':
# Update the instrument for this channel
current_instrument[event.channel] = event.program
# Note ons are note on events with velocity > 0
elif event.type == 'note_on' and event.velocity > 0:
# Store this as the last note-on location
note_on_index = (event.channel, event.note)
last_note_on[note_on_index].append((
event.time, event.velocity))
# Note offs can also be note on events with 0 velocity
elif event.type == 'note_off' or (event.type == 'note_on' and
event.velocity == 0):
# Check that a note-on exists (ignore spurious note-offs)
key = (event.channel, event.note)
if key in last_note_on:
# Get the start/stop times and velocity of every note
# which was turned on with this instrument/drum/pitch.
# One note-off may close multiple note-on events from
# previous ticks. In case there's a note-off and then
# note-on at the same tick we keep the open note from
# this tick.
end_tick = event.time
open_notes = last_note_on[key]
notes_to_close = [
(start_tick, velocity)
for start_tick, velocity in open_notes
if start_tick != end_tick]
notes_to_keep = [
(start_tick, velocity)
for start_tick, velocity in open_notes
if start_tick == end_tick]
for start_tick, velocity in notes_to_close:
start_time = start_tick
end_time = end_tick
# Create the note event
note = Note(velocity, event.note, start_time,
end_time)
# Get the program and drum type for the current
# instrument
program = current_instrument[event.channel]
# Retrieve the Instrument instance for the current
# instrument
# Create a new instrument if none exists
instrument = __get_instrument(
program, event.channel, track_idx, 1)
# Add the note event
instrument.notes.append(note)
if len(notes_to_close) > 0 and len(notes_to_keep) > 0:
# Note-on on the same tick but we already closed
# some previous notes -> it will continue, keep it.
last_note_on[key] = notes_to_keep
else:
# Remove the last note on for this instrument
del last_note_on[key]
# Store pitch bends
elif event.type == 'pitchwheel':
# Create pitch bend class instance
bend = PitchBend(event.pitch, event.time)
# Get the program for the current inst
program = current_instrument[event.channel]
# Retrieve the Instrument instance for the current inst
# Don't create a new instrument if none exists
instrument = __get_instrument(
program, event.channel, track_idx, 0)
# Add the pitch bend event
instrument.pitch_bends.append(bend)
# Store control changes
elif event.type == 'control_change':
control_change = ControlChange(
event.control, event.value, event.time)
# Get the program for the current inst
program = current_instrument[event.channel]
# Retrieve the Instrument instance for the current inst
# Don't create a new instrument if none exists
instrument = __get_instrument(
program, event.channel, track_idx, 0)
# Add the control change event
instrument.control_changes.append(control_change)
# Initialize list of instruments from instrument_map
        instruments = list(instrument_map.values())
return instruments
def get_tick_to_time_mapping(self):
return _get_tick_to_time_mapping(
self.ticks_per_beat,
self.max_tick,
self.tempo_changes)
def __repr__(self):
return self.__str__()
def __str__(self):
output_list = [
"ticks per beat: {}".format(self.ticks_per_beat),
"max tick: {}".format(self.max_tick),
"tempo changes: {}".format(len(self.tempo_changes)),
"time sig: {}".format(len(self.time_signature_changes)),
"key sig: {}".format(len(self.key_signature_changes)),
'markers: {}'.format(len(self.markers)),
"lyrics: {}".format(bool(len(self.lyrics))),
"instruments: {}".format(len(self.instruments))
]
output_str = "\n".join(output_list)
return output_str
def dump(self,
filename=None,
file=None,
segment=None,
shift=True,
instrument_idx=None,
charset ='latin1'):
# comparison function
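        # At the same tick, ties are broken by event type: tempo, time/key
        # signature, markers and lyrics come first, then program changes,
        # pitch bends and control changes, then note-offs before note-ons,
        # and end_of_track last, so state changes take effect before the
        # notes that depend on them.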
def event_compare(event1, event2):
secondary_sort = {
'set_tempo': lambda e: (1 * 256 * 256),
'time_signature': lambda e: (2 * 256 * 256),
'key_signature': lambda e: (3 * 256 * 256),
'marker': lambda e: (4 * 256 * 256),
'lyrics': lambda e: (5 * 256 * 256),
'program_change': lambda e: (6 * 256 * 256),
'pitchwheel': lambda e: ((7 * 256 * 256) + e.pitch),
'control_change': lambda e: (
(8 * 256 * 256) + (e.control * 256) + e.value),
'note_off': lambda e: ((9 * 256 * 256) + (e.note * 256)),
'note_on': lambda e: (
(10 * 256 * 256) + (e.note * 256) + e.velocity),
'end_of_track': lambda e: (11 * 256 * 256)
}
if (event1.time == event2.time and
event1.type in secondary_sort and
event2.type in secondary_sort):
return (secondary_sort[event1.type](event1) -
secondary_sort[event2.type](event2))
return event1.time - event2.time
if (filename is None) and (file is None):
            raise IOError('Please specify an output filename or file object.')
if instrument_idx is None:
pass
elif len(instrument_idx)==0:
return
elif isinstance(instrument_idx, int):
instrument_idx = [instrument_idx]
elif isinstance(instrument_idx, list):
pass
else:
raise ValueError('Invalid instrument index')
# crop segment
if segment is not None:
if not isinstance(segment, list) and not isinstance(segment, tuple):
raise ValueError('Invalid segment format')
start_tick = segment[0]
            end_tick = segment[1]
# Create file
midi_parsed = mido.MidiFile(ticks_per_beat=self.ticks_per_beat, charset=charset)
# Create track 0 with timing information
meta_track = mido.MidiTrack()
# -- meta track -- #
# 1. Time signature
# add default
add_ts = True
ts_list = []
if self.time_signature_changes:
add_ts = min([ts.time for ts in self.time_signature_changes]) > 0.0
if add_ts:
ts_list.append(
mido.MetaMessage(
'time_signature',
time=0,
numerator=4,
denominator=4))
# add each
for ts in self.time_signature_changes:
ts_list.append(
mido.MetaMessage(
'time_signature',
time=ts.time,
numerator=ts.numerator,
denominator=ts.denominator))
# 2. Tempo
# - add default
add_t = True
tempo_list = []
if self.tempo_changes:
add_t = min([t.time for t in self.tempo_changes]) > 0.0
if add_t:
tempo_list.append(
mido.MetaMessage(
'set_tempo',
time=0,
tempo=mido.bpm2tempo(DEFAULT_BPM)))
# - add each
for t in self.tempo_changes:
t.tempo = mido.bpm2tempo(t.tempo)
tempo_list.append(
mido.MetaMessage(
'set_tempo',
time=t.time,
tempo=int(t.tempo)))
# 3. Lyrics
lyrics_list = []
for l in self.lyrics:
lyrics_list.append(
mido.MetaMessage(
'lyrics',
time=l.time,
text=l.text))
# 4. Markers
markers_list = []
for m in self.markers:
markers_list.append(
mido.MetaMessage(
'marker',
time=m.time,
text=m.text))
# 5. Key
key_number_to_mido_key_name = [
'C', 'Db', 'D', 'Eb', 'E', 'F', 'F#', 'G', 'Ab', 'A', 'Bb', 'B',
'Cm', 'C#m', 'Dm', 'D#m', 'Em', 'Fm', 'F#m', 'Gm', 'G#m', 'Am',
'Bbm', 'Bm']
key_list = []
for ks in self.key_signature_changes:
key_list.append(mido.MetaMessage(
'key_signature', time=ks.time,
key=key_number_to_mido_key_name[ks.key_number]))
if segment:
ts_list = _include_meta_events_within_range(ts_list, start_tick, end_tick, shift=shift, front=True)
tempo_list = _include_meta_events_within_range(tempo_list, start_tick, end_tick, shift=shift, front=True)
lyrics_list = _include_meta_events_within_range(lyrics_list, start_tick, end_tick, shift=shift, front=False)
markers_list = _include_meta_events_within_range(markers_list, start_tick, end_tick, shift=shift, front=False)
key_list = _include_meta_events_within_range(key_list, start_tick, end_tick, shift=shift, front=True)
meta_track = ts_list + tempo_list + lyrics_list+ markers_list + key_list
# sort
meta_track.sort(key=functools.cmp_to_key(event_compare))
# end of meta track
meta_track.append(mido.MetaMessage(
'end_of_track', time=meta_track[-1].time + 1))
midi_parsed.tracks.append(meta_track)
# -- instruments -- #
channels = list(range(16))
        channels.remove(9)  # channel 9 is reserved for drums
for cur_idx, instrument in enumerate(self.instruments):
if instrument_idx:
if cur_idx not in instrument_idx:
continue
track = mido.MidiTrack()
# segment-free
# track name
if instrument.name:
track.append(mido.MetaMessage(
'track_name', time=0, name=instrument.name))
# If it's a drum event, we need to set channel to 9
if instrument.is_drum:
channel = 9
# Otherwise, choose a channel from the possible channel list
else:
channel = channels[cur_idx % len(channels)]
# Set the program number
track.append(mido.Message(
'program_change', time=0, program=instrument.program,
channel=channel))
# segment-related
# Add all pitch bend events
bend_list = []
for bend in instrument.pitch_bends:
bend_list.append(mido.Message(
'pitchwheel', time=bend.time,
channel=channel, pitch=bend.pitch))
# Add all control change events
cc_list = []
for control_change in instrument.control_changes:
cc_list.append(mido.Message(
'control_change',
time=control_change.time,
channel=channel, control=control_change.number,
value=control_change.value))
if segment:
bend_list = _include_meta_events_within_range(bend_list, start_tick, end_tick, shift=shift, front=True)
cc_list = _include_meta_events_within_range(cc_list, start_tick, end_tick, shift=shift, front=True)
track += (bend_list + cc_list)
# Add all note events
for note in instrument.notes:
if segment:
note = _check_note_within_range(note, start_tick, end_tick, shift=True)
if note:
track.append(mido.Message(
'note_on', time=note.start,
channel=channel, note=note.pitch, velocity=note.velocity))
# Also need a note-off event (note on with velocity 0)
track.append(mido.Message(
'note_on', time=note.end,
channel=channel, note=note.pitch, velocity=0))
track = sorted(track, key=functools.cmp_to_key(event_compare))
# If there's a note off event and a note on event with the same
# tick and pitch, put the note off event first
for n, (event1, event2) in enumerate(zip(track[:-1], track[1:])):
if (event1.time == event2.time and
event1.type == 'note_on' and
event2.type == 'note_on' and
event1.note == event2.note and
event1.velocity != 0 and
event2.velocity == 0):
track[n] = event2
track[n + 1] = event1
# Finally, add in an end of track event
track.append(mido.MetaMessage(
'end_of_track', time=track[-1].time + 1))
# Add to the list of output tracks
midi_parsed.tracks.append(track)
# Cumulative timing to delta
for track in midi_parsed.tracks:
tick = 0
for event in track:
event.time -= tick
tick += event.time
# Write it out
if filename:
midi_parsed.save(filename=filename)
else:
midi_parsed.save(file=file)
def _check_note_within_range(note, st, ed, shift=True):
tmp_st = max(st, note.start)
tmp_ed = max(st, min(note.end, ed))
if (tmp_ed - tmp_st) <= 0:
return None
if shift:
tmp_st -= st
tmp_ed -= st
note.start = int(tmp_st)
note.end = int(tmp_ed)
return note
def _include_meta_events_within_range(events, st, ed, shift=True, front=True):
'''
    For meta events (e.g. time signature, key signature): keep the events that
    fall inside [st, ed) and, when front=True, the closest preceding event.
'''
proc_events = []
num = len(events)
if not events:
return events
# include events from back
i = num - 1
while i >= 0:
event = events[i]
if event.time < st:
break
if event.time < ed:
proc_events.append(event)
i -= 1
# if the first tick has no event, add the previous one
if front and (i >= 0):
if not proc_events:
proc_events = [events[i]]
elif proc_events[-1].time != st:
proc_events.append(events[i])
else:
pass
# reverse
proc_events = proc_events[::-1]
# shift
    result = []
    offset = st if shift else 0
    for event in proc_events:
        event.time -= offset
        event.time = int(max(event.time, 0))
        result.append(event)
    return result
def _find_nearest_np(array, value):
return (np.abs(array - value)).argmin()
def _get_tick_index_by_seconds(sec, tick_to_time):
    if isinstance(sec, (list, tuple)):
        return [_find_nearest_np(tick_to_time, s) for s in sec]
    if not isinstance(sec, float):
        raise ValueError('Seconds should be a float or a list/tuple of floats')
    return _find_nearest_np(tick_to_time, sec)
def _get_tick_to_time_mapping(ticks_per_beat, max_tick, tempo_changes):
tick_to_time = np.zeros(max_tick + 1)
num_tempi = len(tempo_changes)
    final_tick = max_tick
acc_time = 0
for idx in range(num_tempi):
start_tick = tempo_changes[idx].time
cur_tempo = tempo_changes[idx].tempo
# compute tick scale
seconds_per_beat = 60 / cur_tempo
seconds_per_tick = seconds_per_beat / float(ticks_per_beat)
# set end tick of interval
        end_tick = tempo_changes[idx + 1].time if (idx + 1) < num_tempi else final_tick
        # write interval
ticks = np.arange(end_tick - start_tick + 1)
tick_to_time[start_tick:end_tick + 1] = (acc_time + seconds_per_tick *ticks)
acc_time = tick_to_time[end_tick]
return tick_to_time
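
# A minimal usage sketch (hypothetical values), assuming `tempo_changes` holds
# objects exposing `.time` (tick position) and `.tempo` (BPM) as used above:
#   tick_to_time = _get_tick_to_time_mapping(ticks_per_beat=480,
#                                            max_tick=1920,
#                                            tempo_changes=tempo_changes)
#   seconds_at_tick = tick_to_time[960]  # elapsed seconds at tick 960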
``` |
{
"source": "1582795529/KGNet",
"score": 2
} |
#### File: data/transforms/build.py
```python
import torchvision.transforms as T
import torch
import numpy
from .transforms import RandomErasing
def build_transforms(cfg, is_train=True):
normalize_transform = T.Normalize(mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD)
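    # As read from the sizes used below: transform_hr prepares the
    # high-resolution target at SIZE_TRAIN, transform_lr goes through
    # SIZE_TEST and down to LR_TRAIN (the low-resolution input), and
    # transform_lr_hr downsamples via SIZE_TEST and then resizes back up
    # to SIZE_TRAIN.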
if is_train:
transform_hr = T.Compose([
T.Resize(cfg.INPUT.SIZE_TRAIN),
# T.RandomHorizontalFlip(p=cfg.INPUT.PROB),
# T.Pad(cfg.INPUT.PADDING),
T.RandomCrop(cfg.INPUT.SIZE_TRAIN),
T.ToTensor(),
normalize_transform,
# RandomErasing(probability=cfg.INPUT.RE_PROB, mean=cfg.INPUT.PIXEL_MEAN)
])
transform_lr = T.Compose([
T.Resize(cfg.INPUT.SIZE_TEST),
T.Resize(cfg.INPUT.LR_TRAIN),
# T.RandomHorizontalFlip(p=cfg.INPUT.PROB),
# T.Pad(cfg.INPUT.PADDING),
T.RandomCrop(cfg.INPUT.LR_TRAIN),
T.ToTensor(),
normalize_transform,
# RandomErasing(probability=cfg.INPUT.RE_PROB, mean=cfg.INPUT.PIXEL_MEAN)
])
transform_lr_hr = T.Compose([
T.Resize(cfg.INPUT.SIZE_TEST),
T.Resize(cfg.INPUT.SIZE_TRAIN),
#T.RandomHorizontalFlip(p=cfg.INPUT.PROB),
# T.Pad(cfg.INPUT.PADDING),
# T.RandomCrop(cfg.INPUT.LR_TRAIN),
T.RandomCrop(cfg.INPUT.SIZE_TRAIN),
T.ToTensor(),
normalize_transform,
# RandomErasing(probability=cfg.INPUT.RE_PROB, mean=cfg.INPUT.PIXEL_MEAN)
])
else:
transform_hr = T.Compose([
T.Resize(cfg.INPUT.SIZE_TRAIN),
T.ToTensor(),
normalize_transform
])
transform_lr = T.Compose([
T.Resize(cfg.INPUT.SIZE_TEST),
T.Resize(cfg.INPUT.LR_TRAIN),
T.ToTensor(),
normalize_transform
])
transform_lr_hr = T.Compose([
T.Resize(cfg.INPUT.SIZE_TEST),
T.Resize(cfg.INPUT.SIZE_TRAIN),
T.ToTensor(),
normalize_transform
])
return transform_hr,transform_lr,transform_lr_hr
# problem****
def build_unnorm(cfg,img):
img = img.numpy()
mean = numpy.array(cfg.INPUT.PIXEL_MEAN)
std = numpy.array(cfg.INPUT.PIXEL_STD)
img = img*std+mean
img = torch.from_numpy(img)
return img
```
#### File: KGNet/engine/inference_visual.py
```python
import logging
import torch
import torch.nn as nn
from ignite.engine import Engine
import cv2
import os
from .visual import calc_metrics,torch_vis_color,save_featmap,draw_heatmap_gaussi,apply_attention,showPointSingleModal,showclassifier
from data.solver import solve_invisible_heatmap
import matplotlib.pyplot as plt
from utils.reid_metric import R1_mAP, R1_mAP_reranking
from data import get_gt_landmark_pair,save_current_SR,load_test_query,load_test_gallary,get_current_visuals
from modeling.modules.srfbn_hg_arch import merge_heatmap_4
def create_supervised_evaluator(name_list,total_id_set,feat_total,id_total,psnr_list,ssim_list,model, metrics,
device=None):
"""
Factory function for creating an evaluator for supervised models
Args:
model (`torch.nn.Module`): the model to train
metrics (dict of str - :class:`ignite.metrics.Metric`): a map of metric names to Metrics
device (str, optional): device type specification (default: None).
Applies to both model and batches.
Returns:
Engine: an evaluator engine with supervised inference function
"""
if device:
if torch.cuda.device_count() > 1:
model = nn.DataParallel(model)
model.to(device)
def _inference(engine, batch):
model.eval()
with torch.no_grad():
hr, lr, lr_hr, landmark, gt_heatmap, pids, camid, img_path = batch
hr = hr.to(device) if torch.cuda.device_count() >= 1 else hr
lr = lr.to(device) if torch.cuda.device_count() >= 1 else lr
lr_hr = lr_hr.to(device) if torch.cuda.device_count() >= 1 else lr_hr
save_dir = '/home/fei/fei/firstwork/picture/554'
# load_test_query(model_test_query,model)
# load_test_gallary(model_test_gallary, model)
name = img_path[0].split('/')[-1].split('.')[0]
id = pids[0]
if img_path[0].split('/')[-2] == 'image_query':
if id in total_id_set and name not in name_list:
print(len(id_total), name)
feat, visual_feat, sr, heatmap = model.Baseline_LR(lr_hr, lr)
feat_total.append(feat)
id_total.append(id)
name_list.append(name)
elif img_path[0].split('/')[-2] == 'image_test':
if id in total_id_set and name not in name_list:
print("ccccc",len(id_total),name)
visual_feat, feat1 = model.Baseline_HR(hr)
# feat1 = model.Baseline_HR(hr)
feat_total.append(feat1)
id_total.append(id)
name_list.append(name)
# return feat, pids, camid
engine = Engine(_inference)
# for name, metric in metrics.items():
# metric.attach(engine, name)
return engine,feat_total,id_total
def inference_visual(
cfg,
model,
val_loader,
num_query,
psnr,
ssim,
ssim_max,
feat_total,
id,
total_id_set,
name_list
):
device = cfg.MODEL.DEVICE
logger = logging.getLogger("reid_baseline.inference")
logger.info("Enter inferencing")
if cfg.TEST.RE_RANKING == 'no':
print("Create evaluator")
evaluator,feat,id = create_supervised_evaluator(name_list,total_id_set,feat_total,id,psnr,ssim,model, metrics={'r1_mAP': R1_mAP(num_query, max_rank=50, feat_norm=cfg.TEST.FEAT_NORM)},
device=device)
elif cfg.TEST.RE_RANKING == 'yes':
print("Create evaluator for reranking")
evaluator,feat,id = create_supervised_evaluator(model, metrics={'r1_mAP': R1_mAP_reranking(num_query, max_rank=50, feat_norm=cfg.TEST.FEAT_NORM)},
device=device)
else:
print("Unsupported re_ranking config. Only support for no or yes, but got {}.".format(cfg.TEST.RE_RANKING))
evaluator.run(val_loader)
# cmc, mAP = evaluator.state.metrics['r1_mAP']
logger.info('Validation Results')
logger.info(len(feat))
logger.info(len(id))
feat1 = feat[0]
print(type(feat1))
for i in range(1,len(feat)):
feat1 = torch.cat((feat1,feat[i]),dim=0)
showclassifier(feat1,id,r'E:\Dataset_test\firstwork\picture\classifer\motivationours.png')
# logger.info(sum(psnr)/len(psnr))
# logger.info(sum(ssim) / len(ssim))
# logger.info("mAP: {:.1%}".format(mAP))
# for r in [1, 5, 10]:
# logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1]))
```
#### File: KGNet/engine/visual.py
```python
import os
import torch
import math
from datetime import datetime
import numpy as np
from PIL import Image
import cv2
import matplotlib.pyplot as plt
import torch.nn.functional as F
import numpy
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from sklearn.manifold import TSNE
from torchvision import transforms
def rgb2ycbcr(img, only_y=True):
'''same as matlab rgb2ycbcr
only_y: only return Y channel
Input:
uint8, [0, 255]
float, [0, 1]
'''
in_img_type = img.dtype
img.astype(np.float32)
if in_img_type != np.uint8:
img *= 255.
# convert
if only_y:
rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0
else:
rlt = np.matmul(img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],
[24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128]
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
return rlt.astype(in_img_type)
def ycbcr2rgb(img):
'''same as matlab ycbcr2rgb
Input:
uint8, [0, 255]
float, [0, 1]
'''
in_img_type = img.dtype
img.astype(np.float32)
if in_img_type != np.uint8:
img *= 255.
# convert
rlt = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], [0, -0.00153632, 0.00791071],
[0.00625893, -0.00318811, 0]]) * 255.0 + [-222.921, 135.576, -276.836]
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
return rlt.astype(in_img_type)
####################
# metric
####################
def calc_metrics(img1, img2, crop_border, test_Y=True):
#
if test_Y and img1.shape[2] == 3: # evaluate on Y channel in YCbCr color space
im1_in = rgb2ycbcr(img1)
im2_in = rgb2ycbcr(img2)
else:
im1_in = img1
im2_in = img2
if im1_in.ndim == 3:
cropped_im1 = im1_in[crop_border:-crop_border, crop_border:-crop_border, :]
cropped_im2 = im2_in[crop_border:-crop_border, crop_border:-crop_border, :]
elif im1_in.ndim == 2:
cropped_im1 = im1_in[crop_border:-crop_border, crop_border:-crop_border]
cropped_im2 = im2_in[crop_border:-crop_border, crop_border:-crop_border]
else:
raise ValueError('Wrong image dimension: {}. Should be 2 or 3.'.format(im1_in.ndim))
psnr = calc_psnr(cropped_im1 * 255, cropped_im2 * 255)
ssim = calc_ssim(cropped_im1 * 255, cropped_im2 * 255)
return psnr, ssim
def calc_psnr(img1, img2):
# img1 and img2 have range [0, 255]
img1 = img1.astype(np.float64)
img2 = img2.astype(np.float64)
mse = np.mean((img1 - img2)**2)
if mse == 0:
return float('inf')
return 20 * math.log10(255.0 / math.sqrt(mse))
def ssim(img1, img2):
C1 = (0.01 * 255)**2
C2 = (0.03 * 255)**2
img1 = img1.astype(np.float64)
img2 = img2.astype(np.float64)
kernel = cv2.getGaussianKernel(11, 1.5)
window = np.outer(kernel, kernel.transpose())
mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5] # valid
mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
mu1_sq = mu1**2
mu2_sq = mu2**2
mu1_mu2 = mu1 * mu2
sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2
ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
(sigma1_sq + sigma2_sq + C2))
return ssim_map.mean()
def calc_ssim(img1, img2):
'''calculate SSIM
the same outputs as MATLAB's
img1, img2: [0, 255]
'''
if not img1.shape == img2.shape:
raise ValueError('Input images must have the same dimensions.')
if img1.ndim == 2:
return ssim(img1, img2)
elif img1.ndim == 3:
if img1.shape[2] == 3:
ssims = []
for i in range(3):
                ssims.append(ssim(img1[..., i], img2[..., i]))
return np.array(ssims).mean()
elif img1.shape[2] == 1:
return ssim(np.squeeze(img1), np.squeeze(img2))
else:
raise ValueError('Wrong input image dimensions.')
# draw feature map
def torch_vis_color(name, pic, feature_tensor, col, raw, save_path, colormode=2, margining=1):
'''
COLORMAP_AUTUMN = 0,
COLORMAP_BONE = 1,
COLORMAP_JET = 2,
COLORMAP_WINTER = 3,
COLORMAP_RAINBOW = 4,
COLORMAP_OCEAN = 5,
COLORMAP_SUMMER = 6,
COLORMAP_SPRING = 7,
COLORMAP_COOL = 8,
COLORMAP_HSV = 9,
COLORMAP_PINK = 10,
COLORMAP_HOT = 11
:param feature_tensor: torch.Tensor [1,c,w,h]
:param col: col num
:param raw: raw num
:param save_path: save path
:param colormode: cv2.COLORMAP
:return:None
'''
# print(feature_tensor.shape)
# pdb.set_trace()
# show_k = col * raw # total num
# #print('111',feature_tensor.shape)
# f = feature_tensor[0, :show_k, :, :] # n,c,h,w
# #print('2',f.shape)
# size = f[0, :, :].shape # h*w
# #print('3333',size)
# f = f.data.cpu().numpy()
# fmin = np.min(f)
# fmax = np.max(f)
# #print(fmax, fmin)
# for i in range(raw):
# f = (f - fmin) / (fmax - fmin + 0.0001)
# tem = f[i * col, :, :] * 255 / (np.max(f[i * col, :, :] + 1e-14))
# # print("tem",tem.shape)
# tem = cv2.applyColorMap(np.array(tem, dtype=np.uint8), colormode)
# for j in range(col):
# if not j == 0:
# tem = np.concatenate((tem, np.ones((size[0], margining, 3), dtype=np.uint8) * 255), 1)
# tem2 = cv2.applyColorMap(
# np.array(f[i * col + j, :, :] * 255 / (np.max(f[i * col + j, :, :]) + 1e-14), dtype=np.uint8),
# colormode)
# tem = np.concatenate((tem, tem2), 1)
# if i == 0:
# final = tem
# else:
# final = np.concatenate(
# (final, np.ones((margining, size[1] * col + (col - 1) * margining, 3), dtype=np.uint8) * 255), 0)
# final = np.concatenate((final, tem), 0)
# print(final.shape)
# cv2.imwrite(save_path+name+'.jpg',final)
# cv2.imwrite(save_path+name+str(col)+'*'+str(raw)+'.png',final)
# feature mean
feature_mean = feature_tensor.mean(dim=1, keepdim=True) # n,c,h,w
feature_mean = feature_mean * 255 / torch.max(feature_mean + 1e-14)
feature_mean = F.interpolate(feature_mean, size=(256, 256), mode='bilinear', align_corners=False)
feature_mean = feature_mean.squeeze().data.cpu().numpy()
feature_mean = cv2.applyColorMap(np.array(feature_mean, dtype=np.uint8), colormode)
un_norm = transforms.Normalize(
mean=[-0.485 / 0.229, -0.456 / 0.224, -0.406 / 0.225],
std=[1 / 0.229, 1 / 0.224, 1 / 0.225]
)
pic = un_norm(pic.data.float().cpu())
pic = pic * 255 / torch.max(pic + 1e-14)
pic = F.interpolate(pic, size=(256, 256), mode='bilinear', align_corners=False)
pic = pic.squeeze().data.cpu().numpy().transpose(1,2,0)
draw_pic = 0.8*pic + 0.4*feature_mean
# draw_pic = draw_pic / np.max(draw_pic)
feature_name = 'KG120_'+name+'.jpg'
print(os.path.join(save_path, feature_name))
cv2.imwrite(os.path.join(save_path, feature_name), draw_pic)
def save_featmap(feat, name, output_dir, colormode=2):
# pdb.set_trace()
feat = feat.squeeze()
if not os.path.exists(output_dir):
p = os.path.abspath(output_dir)
os.mkdir(p)
print("dir dose not exist, make it:" + p)
shape = feat.shape
# if len(shape) != 3:
# raise Exception("input feat should be a 3-dim tensor")
C, H, W = shape
target_H, target_W = H, W
flag_resize = False
if H < 32 or W < 32:
flag_resize = True
feat = feat.cuda().data.cpu().numpy()
fmin = np.min(feat)
fmax = np.max(feat)
print(fmax, fmin)
for i in range(C):
# pdb.set_trace()
map_name = name + '_c{}'.format(i)
featmap = feat[i, :, :]
featmap = (featmap - fmin) / (fmax - fmin + 0.0001)
featmap = (featmap * 255).astype(np.uint8)
featmap = cv2.applyColorMap(np.array(featmap, dtype=np.uint8), colormode)
if flag_resize:
featmap = cv2.resize(featmap, (W * 5, H * 5), interpolation=cv2.INTER_LINEAR)
map_name += '_upsamp'
map_name += '.jpg'
cv2.imwrite(os.path.join(output_dir, map_name), featmap)
def draw_heatmap_gaussi(heatmap,name):
#heatmap = F.interpolate(heatmap, size=(128, 128), mode='bilinear', align_corners=False)
heatmap = heatmap.squeeze(0)
heatmaps = heatmap.cpu().numpy()
C, H, W = heatmap.shape
flag = 0
for j in range(C):
yy, xx = np.where(heatmaps[j] == heatmaps[j].max())
y = yy[0]
x = xx[0]
if y == 0 and x == 0:
if flag == 0:
flag = 1
continue
heat = torch.from_numpy(heatmaps[j])
heat = torch.zeros_like(heat).numpy()
heatmaps[j] = heat
# else:
# heatmaps[j] = np.maximum(heatmaps[i, j], 0)
# print('11111',heatmaps.shape)
heat = heatmaps.sum(0)
# print('2222',heat.shape)
# heat = heatmaps[4]
C, H, W = heatmaps.shape
fig = plt.figure()
ax = fig.gca(projection='3d')
np.set_printoptions(threshold=100000)
# Make data.
# w, h = (H,W)
# X = np.arange(start=0, stop=w, dtype=int)
# Y = np.arange(start=0, stop=h, dtype=int)
# X, Y = np.meshgrid(X, Y)
X = np.arange(0, 32, 1)
Y = np.arange(0, 32, 1)
X, Y = np.meshgrid(X, Y)
    Z = heat
# Plot the surface.
surf = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm,
linewidth=0, antialiased=False)
# surf = ax.plot_trisurf(X, Y, Z, linewidth=0, antialiased=False)
# Customize the z axis.
ax.set_zlim(-1.01, 1.01)
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
# Add a color bar which maps values to colors.
fig.colorbar(surf, shrink=0.5, aspect=5)
fig.savefig(
os.path.join('./outputs/img', 'GT_Landmark_'+name+'.png'))
plt.close(fig)
# plt.show()
def apply_attention(heatmap,img,sr_feat,dir,name):
# heatmap_ori = heatmap
# fig = get_gt_landmark_pair(img, heatmap_ori, heatmap)
# fig.savefig(
# os.path.join('/home/zhang/work/pic/pic_4attention/landmark.png') )
# plt.close(fig)
print(sr_feat.shape)
sr_feat = sr_feat.cpu().numpy()[0]
sr_feat = sr_feat.transpose(0, 2, 3, 1)
N,C,H,W = sr_feat.shape
# for i in range(N):
# cv2.imwrite(os.path.join(dir, 'cam_{}.jpg'.format(i)), np.uint8(255 * sr_feat[i]))
heatmap = F.interpolate(heatmap, size=[128, 128], mode="bilinear")
# img_lr = F.interpolate(heatmap, size=[128, 128], mode="bilinear")
heatmaps = heatmap.cpu().numpy()
img = img.cpu()
pic = img.numpy()[0]
img = img.permute(0,2,3,1)
heatmap = heatmaps[0]
C,H,W = heatmap.shape
heatmap_all = heatmap.sum(0)
img = img[0]
for i in range(C): # 20heatmap
heatmap_i = cv2.applyColorMap(np.uint8(255 * heatmap[i]), cv2.COLORMAP_JET)
heatmap_i = np.float32(heatmap_i) / 255
heatmap_i = heatmap_i/np.max(heatmap_i)
# cam = np.float32(img)
# cam = cam + heatmap_i
cam = heatmap_i
cam = cam / np.max(cam)
cv2.imwrite(os.path.join(dir,name+'_dir{}.jpg'.format(i)), np.uint8(255 * cam))
cam = cam[:, :, ::-1]
plt.figure(figsize=(10, 10))
plt.imshow(np.uint8(255 * cam))
# for i in range(C): # 3heatmap front,back,side
# heatmap_i = cv2.applyColorMap(np.uint8(255 * heatmap[i]), cv2.COLORMAP_JET)
# heatmap_i = np.float32(heatmap_i) / 255
# heatmap_i = heatmap_i/np.max(heatmap_i)
#
# pic_i = cv2.applyColorMap(np.uint8(255 * pic[i]), cv2.COLORMAP_JET)
# pic_i = np.float32(pic_i) / 255
# pic_i = pic_i / np.max(pic_i)
#
# image = pic_i
# image = image / np.max(image)
# # cam = np.float32(img)
# # cam = cam + heatmap_i
# cam = heatmap_i
# cam = cam / np.max(cam)
# if i==0:
# cv2.imwrite(os.path.join(dir,'cam_'+name+'front.jpg'), np.uint8(255 * cam))
# cv2.imwrite(os.path.join(dir, 'image_' + name + 'r.jpg'), np.uint8(255 * image))
# elif i==1:
# cv2.imwrite(os.path.join(dir,'cam_'+name+'back.jpg'), np.uint8(255 * cam))
# cv2.imwrite(os.path.join(dir, 'image_' + name + 'g.jpg'), np.uint8(255 * image))
# else:
# cv2.imwrite(os.path.join(dir, 'cam_' + name + 'side.jpg'), np.uint8(255 * cam))
# cv2.imwrite(os.path.join(dir, 'image_' + name + 'b.jpg'), np.uint8(255 * image))
# cam = cam[:, :, ::-1]
# plt.figure(figsize=(10, 10))
# plt.imshow(np.uint8(255 * cam))
heatmap_all = cv2.applyColorMap(np.uint8(255*heatmap_all),cv2.COLORMAP_JET)
heatmap_all = np.float32(heatmap_all)/255
heatmap_all = heatmap_all / np.max(heatmap_all)
cam = np.float32(img)
cam = cam + heatmap_all
cam = cam / np.max(cam)
cv2.imwrite(os.path.join(dir, 'attention_'+name+'.png'),np.uint8(255*cam))
cam = cam[:,:,::-1]
plt.figure(figsize=(10,10))
plt.imshow(np.uint8(255*cam))
def showPointSingleModal(features, label, save_path):
# label = self.relabel(label)
tsne = TSNE(n_components=2, init='pca', random_state=501)
features_tsne = tsne.fit_transform(features.cpu())
COLORS = ['darkorange', 'limegreen', 'royalblue', 'red', 'darkviolet', 'black', 'blue','pink','yellow','green']
MARKS = ['x', 'o', '+', '^', 's','D','d','1','8']
features_min, features_max = features_tsne.min(0), features_tsne.max(0)
features_norm = (features_tsne - features_min) / (features_max - features_min)
plt.figure(figsize=(20, 20))
for i in range(features_norm.shape[0]):
plt.scatter(features_norm[i, 0], features_norm[i, 1], s=60, color=COLORS[label[i] % 10],
marker=MARKS[label[i] % 9])
# plt.scatter(features_norm[i, 0], features_norm[i, 1], str(label[i]), color=COLORS[label[i] % 6],
# marker=MARKS[label[i] % 5])
# plt.text(features_norm[i, 0], features_norm[i, 1], str(label[i]), color=COLORS[label[i] % 6],
# fontdict={'weight':'bold', 'size':9})
plt.savefig(save_path)
plt.show()
plt.close()
def showclassifier(features, label, save_path):
# label = self.relabel(label)
tsne = TSNE(n_components=2, init='pca', random_state=501)
features_tsne = tsne.fit_transform(features.cpu())
COLORS = ['black', 'blue', 'pink', 'gold', 'green', 'darkorange', 'firebrick', 'teal', 'olivedrab', 'rosybrown', 'chocolate', 'indigo']
MARKS = ['x', 'o']
features_min, features_max = features_tsne.min(0), features_tsne.max(0)
features_norm = (features_tsne - features_min) / (features_max - features_min)
plt.figure(figsize=(20, 20))
for i in range(features_norm.shape[0]):
if i < 131:
if label[i] == 2:
plt.scatter(features_norm[i, 0], features_norm[i, 1], s=60, color=COLORS[0],
marker=MARKS[0])
elif label[i] == 5:
plt.scatter(features_norm[i, 0], features_norm[i, 1], s=60, color=COLORS[1],
marker=MARKS[0])
elif label[i] == 6:
plt.scatter(features_norm[i, 0], features_norm[i, 1], s=60, color=COLORS[2],
marker=MARKS[0])
elif label[i] == 9:
plt.scatter(features_norm[i, 0], features_norm[i, 1], s=60, color=COLORS[3],
marker=MARKS[0])
elif label[i] == 14:
plt.scatter(features_norm[i, 0], features_norm[i, 1], s=60, color=COLORS[4],
marker=MARKS[0])
elif label[i] == 118:
plt.scatter(features_norm[i, 0], features_norm[i, 1], s=60, color=COLORS[5],
marker=MARKS[0])
elif label[i] == 134:
plt.scatter(features_norm[i, 0], features_norm[i, 1], s=60, color=COLORS[6],
marker=MARKS[0])
elif label[i] == 177:
plt.scatter(features_norm[i, 0], features_norm[i, 1], s=60, color=COLORS[7],
marker=MARKS[0])
elif label[i] == 192:
plt.scatter(features_norm[i, 0], features_norm[i, 1], s=60, color=COLORS[8],
marker=MARKS[0])
elif label[i] == 273:
plt.scatter(features_norm[i, 0], features_norm[i, 1], s=60, color=COLORS[9],
marker=MARKS[0])
elif label[i] == 310:
plt.scatter(features_norm[i, 0], features_norm[i, 1], s=60, color=COLORS[10],
marker=MARKS[0])
elif label[i] == 402:
plt.scatter(features_norm[i, 0], features_norm[i, 1], s=60, color=COLORS[11],
marker=MARKS[0])
else:
if label[i] == 2:
plt.scatter(features_norm[i, 0], features_norm[i, 1], s=60, color=COLORS[0],
marker=MARKS[1])
elif label[i] == 5:
plt.scatter(features_norm[i, 0], features_norm[i, 1], s=60, color=COLORS[1],
marker=MARKS[1])
elif label[i] == 6:
plt.scatter(features_norm[i, 0], features_norm[i, 1], s=60, color=COLORS[2],
marker=MARKS[1])
elif label[i] == 9:
plt.scatter(features_norm[i, 0], features_norm[i, 1], s=60, color=COLORS[3],
marker=MARKS[1])
elif label[i] == 14:
plt.scatter(features_norm[i, 0], features_norm[i, 1], s=60, color=COLORS[4],
marker=MARKS[1])
elif label[i] == 118:
plt.scatter(features_norm[i, 0], features_norm[i, 1], s=60, color=COLORS[5],
marker=MARKS[1])
elif label[i] == 134:
plt.scatter(features_norm[i, 0], features_norm[i, 1], s=60, color=COLORS[6],
marker=MARKS[1])
elif label[i] == 177:
plt.scatter(features_norm[i, 0], features_norm[i, 1], s=60, color=COLORS[7],
marker=MARKS[1])
elif label[i] == 192:
plt.scatter(features_norm[i, 0], features_norm[i, 1], s=60, color=COLORS[8],
marker=MARKS[1])
elif label[i] == 273:
plt.scatter(features_norm[i, 0], features_norm[i, 1], s=60, color=COLORS[9],
marker=MARKS[1])
elif label[i] == 310:
plt.scatter(features_norm[i, 0], features_norm[i, 1], s=60, color=COLORS[10],
marker=MARKS[1])
elif label[i] == 402:
plt.scatter(features_norm[i, 0], features_norm[i, 1], s=60, color=COLORS[11],
marker=MARKS[1])
# plt.scatter(features_norm[i, 0], features_norm[i, 1], str(label[i]), color=COLORS[label[i] % 6],
# marker=MARKS[label[i] % 5])
# plt.text(features_norm[i, 0], features_norm[i, 1], str(label[i]), color=COLORS[label[i] % 6],
# fontdict={'weight':'bold', 'size':9})
plt.savefig(save_path)
# plt.show()
plt.close()
#####################################################################
#Show result
def imshow(path, title=None):
"""Imshow for Tensor."""
im = Image.open(path)
im = im.resize((128, 128))
im = np.array(im)
plt.imshow(im)
if title is not None:
plt.title(title)
# plt.pause(0.001) # pause a bit so that plots are updated
#######################################################################
# sort the images
def sort_img(qf, ql, qc, gf, gl, gc):
query = qf.view(-1,1)
# print(query.shape)
score = torch.mm(gf,query)
score = score.squeeze(1).cpu()
score = score.numpy()
# predict index
index = np.argsort(score) #from small to large
index = index[::-1]
# index = index[0:2000]
# good index
query_index = np.argwhere(gl==ql)
#same camera
camera_index = np.argwhere(gc==qc)
#good_index = np.setdiff1d(query_index, camera_index, assume_unique=True)
junk_index1 = np.argwhere(gl==-1)
junk_index2 = np.intersect1d(query_index, camera_index)
junk_index = np.append(junk_index2, junk_index1)
mask = np.in1d(index, junk_index, invert=True)
index = index[mask]
return index
########################################################################
# Visualize the rank result
def rank_visual(query_i, qf, ql, qc, qi, gf, gl, gc, gi):
    # choose which query image to visualize
index = sort_img(qf[query_i], ql[query_i], qc[query_i], gf, gl, gc)
try: # Visualize Ranking Result
# Graphical User Interface is needed
fig = plt.figure(figsize=(16, 4))
ax = plt.subplot(1, 11, 1)
ax.axis('off')
imshow(qi[query_i][0], 'query')
for i in range(10):
ax = plt.subplot(1, 11, i + 2)
ax.axis('off')
img_path = gi[index[i]][0]
label = gl[index[i]]
imshow(img_path)
if label == ql[query_i]:
ax.set_title('%d' % (i + 1), color='green')
else:
ax.set_title('%d' % (i + 1), color='red')
# print(img_path)
except RuntimeError:
for i in range(10):
img_path = gi[index[i]]
print(img_path[0])
print('If you want to see the visualization of the ranking result, graphical user interface is needed.')
name = str(query_i)+'.jpg'
save_path = r"E:\Dataset_test\firstwork\picture\rank_map\KG"
path = os.path.join(save_path,name)
fig.savefig(path)
```
#### File: modeling/modules/architecture.py
```python
import torch
import torch.nn as nn
import torchvision
from collections import OrderedDict
import sys
from .StackedHourGlass import StackedHourGlass, FeedbackHourGlass
from .light_cnn import LightCNN_9Layers as LigntCNN
from .unet import UNet
# Assume input range is [0, 1], RGB
class VGGFeatureExtractor(nn.Module):
def __init__(self,
feature_layer=34,
use_bn=False,
use_input_norm=True,
device=torch.device('cpu')):
super(VGGFeatureExtractor, self).__init__()
if use_bn:
model = torchvision.models.vgg19_bn(pretrained=True)
else:
model = torchvision.models.vgg19(pretrained=True)
self.use_input_norm = use_input_norm
if self.use_input_norm:
mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(device)
# [0.485-1, 0.456-1, 0.406-1] if input in range [-1,1]
std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(device)
# [0.229*2, 0.224*2, 0.225*2] if input in range [-1,1]
self.register_buffer('mean', mean)
self.register_buffer('std', std)
self.features = nn.Sequential(*list(model.features.children())[:(feature_layer + 1)])
# No need to BP to variable
for k, v in self.features.named_parameters():
v.requires_grad = False
def forward(self, x):
# x: [0, 1]
if self.use_input_norm:
x = (x - self.mean) / self.std
output = self.features(x)
return output
class LightCNNFeatureExtractor(nn.Module):
def __init__(self):
super(LightCNNFeatureExtractor, self).__init__()
model = LigntCNN()
self.features = nn.Sequential(*list(model.features.children()))
for k, v in self.features.named_parameters():
v.requires_grad = False
def forward(self, x):
'''
x: NCHW [0, 1]
'''
output = self.features(x)
return output
class UNetFeatureDiscriminator(nn.Module):
def __init__(self, feature_only=False):
super().__init__()
self.model = UNet(3, 3, feature_only)
self.scale_num = 3
def forward(self, x):
feature_list, recon = self.model(x)
return feature_list, recon
```
#### File: modeling/modules/loss.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
# Define GAN loss: [vanilla | lsgan | wgan-gp]
class GANLoss(nn.Module):
def __init__(self, gan_type, real_label_val=1.0, fake_label_val=0.0):
super(GANLoss, self).__init__()
self.gan_type = gan_type.lower()
self.real_label_val = real_label_val
self.fake_label_val = fake_label_val
if self.gan_type == 'vanilla':
self.loss = nn.BCEWithLogitsLoss()
elif self.gan_type == 'lsgan':
self.loss = nn.MSELoss()
elif self.gan_type == 'wgan-gp':
def wgan_loss(input, target):
# target is boolean
return -1 * input.mean() if target else input.mean()
self.loss = wgan_loss
else:
raise NotImplementedError('GAN type [{:s}] is not found'.format(self.gan_type))
def get_target_label(self, input, target_is_real):
if self.gan_type == 'wgan-gp':
return target_is_real
if target_is_real:
return torch.empty_like(input).fill_(self.real_label_val)
else:
return torch.empty_like(input).fill_(self.fake_label_val)
def forward(self, input, target_is_real):
target_label = self.get_target_label(input, target_is_real)
loss = self.loss(input, target_label)
return loss
class GradientPenaltyLoss(nn.Module):
def __init__(self, device=torch.device('cpu')):
super(GradientPenaltyLoss, self).__init__()
self.register_buffer('grad_outputs', torch.Tensor())
self.grad_outputs = self.grad_outputs.to(device)
def get_grad_outputs(self, input):
if self.grad_outputs.size() != input.size():
self.grad_outputs.resize_(input.size()).fill_(1.0)
return self.grad_outputs
def forward(self, interp, interp_crit):
grad_outputs = self.get_grad_outputs(interp_crit)
grad_interp = torch.autograd.grad(outputs=interp_crit, inputs=interp, \
grad_outputs=grad_outputs, create_graph=True, retain_graph=True, only_inputs=True)[0]
grad_interp = grad_interp.view(grad_interp.size(0), -1)
grad_interp_norm = grad_interp.norm(2, dim=1)
loss = ((grad_interp_norm - 1)**2).mean()
return loss
class StructureLoss(nn.Module):
def __init__(self):
super().__init__()
def forward(self, feat_list_1, feat_list_2, index_list):
assert len(feat_list_1) == len(feat_list_2)
loss = 0.0
for i in range(len(feat_list_1)):
feat1 = feat_list_1[i]
feat2 = feat_list_2[i]
index = index_list[i]
struc_vec1 = calc_struc_vec(feat1, index)
struc_vec2 = calc_struc_vec(feat2, index)
loss += (struc_vec1 - struc_vec2).abs().mean()
loss /= len(feat_list_1)
# loss in between [0, 2]
return loss
def calc_struc_vec(feat, index):
'''
@param
feat: N * C * H * W
index: N * num_anchor * 9 * 2, [:, :, 0, :] is center point
[:, :, 1:, :] are surrounding points
@return
        struc_vec: N * num_anchor * 8, structure vector
'''
assert feat.size(0) == index.size(0)
bsize = feat.size(0)
num_anchor = index.size(1)
num_c = feat.size(1)
# pad feat and index
feat = F.pad(feat, (1, 1, 1, 1), mode='reflect')
h, w = feat.shape[-2:]
pad_index = index + 1
# select feature vector
index_x = pad_index[:, :, :, 0].view(bsize, 1, num_anchor * 9, 1).repeat(1, num_c, 1, h)
index_y = pad_index[:, :, :, 1].view(bsize, 1, num_anchor * 9, 1).repeat(1, num_c, 1, 1)
feat_x = feat.gather(-2, index_x)
feat_selected = feat_x.gather(-1, index_y).squeeze(-1).squeeze(-1)
feat_selected = feat_selected.transpose(1, 2).contiguous().view(bsize, num_anchor, 9, num_c)
feat_selected = feat_selected
# calculate dot product
round_vec = feat_selected.view(bsize * num_anchor, 9, num_c)[:, 1:, :]
center_vec = feat_selected.view(bsize * num_anchor, 9, num_c)[:, [0], :]
struc_vec = torch.bmm(round_vec, center_vec.transpose(1, 2))
norm = round_vec.pow(2).sum(-1, keepdim=True).sqrt() * center_vec.pow(2).sum(-1, keepdim=True).sqrt()
struc_vec /= norm
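    # after dividing by the product of L2 norms, struc_vec holds the cosine
    # similarity between each of the 8 surrounding vectors and the centre vector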
struc_vec = struc_vec.view(bsize, num_anchor, 8)
return struc_vec
```
#### File: modeling/modules/srfbn_hg_arch.py
```python
import torch
import torch.nn as nn
from .blocks import ConvBlock, DeconvBlock, FeatureHeatmapFusingBlock
from .architecture import StackedHourGlass
from .srfbn_arch import FeedbackBlock
from data.solver import solve_invisible_heatmap
# heatmap_in 32*32*68 detach false
def merge_heatmap_4(heatmap_in, detach):
'''
    merge 20 vehicle-landmark heatmaps into 4 (front / rear / left / right faces)
heatmap: B*N*32*32
'''
# heatmap 4*20*32*32
heatmap = heatmap_in.clone()
heatmap = solve_invisible_heatmap(heatmap)
# the size of max_heat is 4*20*1*1
max_heat = heatmap.max(dim=2, keepdim=True)[0].max(dim=3, keepdim=True)[0]
max_heat = torch.max(max_heat, torch.ones_like(max_heat) * 0.05)
heatmap /= max_heat
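    # each channel is normalised by its own peak response; the peak is clamped
    # at 0.05 so near-empty (invisible) heatmaps are not blown up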
if heatmap.size(1) == 20:
new_heatmap = torch.zeros_like(heatmap[:,:4])
tmp_id = torch.cat((torch.arange(4, 10), torch.arange(12, 14)))
new_heatmap[:, 0] = heatmap[:, tmp_id].sum(1) # front face
new_heatmap[:, 1] = heatmap[:, 14:20].sum(1) # rear face
tmp_id = torch.cat((torch.arange(0, 2), torch.arange(5, 6),torch.arange(7, 8)
,torch.arange(10, 11),torch.arange(13, 15),torch.arange(16, 17)))
new_heatmap[:, 2] = heatmap[:, tmp_id].sum(1) # left face
tmp_id = torch.cat((torch.arange(2,5),torch.arange(6,7),torch.arange(11,13),
torch.arange(15,16),torch.arange(17,18)))
new_heatmap[:, 3] = heatmap[:, tmp_id].sum(1) # right face
return new_heatmap.detach() if detach else new_heatmap
else:
raise NotImplementedError('Fusion for face landmark number %d not implemented!' % heatmap.size(1))
def merge_heatmap_3(heatmap_in, detach):
'''
merge 68 heatmap to 4
heatmap: B*N*32*32
'''
# heatmap 4*20*32*32
heatmap = heatmap_in.clone()
heatmap = solve_invisible_heatmap(heatmap)
new_heatmap = torch.zeros_like(heatmap[:,:3])
tmp_id = torch.cat((torch.arange(4, 10), torch.arange(12, 14)))
new_heatmap[:, 0] = heatmap[:, tmp_id].sum(1) # front face
new_heatmap[:, 1] = heatmap[:, 14:20].sum(1) # rear face
tmp_id = torch.cat((torch.arange(0, 8), torch.arange(10, 18)))
new_heatmap[:, 2] = heatmap[:, tmp_id].sum(1) # side face
return new_heatmap.detach() if detach else new_heatmap
class FeedbackBlockHeatmapAttention(FeedbackBlock):
def __init__(self,
num_features,
num_groups,
upscale_factor,
act_type,
norm_type,
num_heatmap,
num_fusion_block,
device=torch.device('cuda')):
super().__init__(num_features,
num_groups,
upscale_factor,
act_type,
norm_type,
device)
self.fusion_block = FeatureHeatmapFusingBlock(num_features, # 48
num_heatmap, # 4
num_fusion_block) # 7
def forward(self, x, heatmap):
# if self.should_reset:
# self.last_hidden = torch.zeros(x.size()).to(self.device)
# self.last_hidden.copy_(x)
# self.should_reset = True
x = torch.cat((x, x), dim=1)
x = self.compress_in(x)
# fusion
x = self.fusion_block(x, heatmap)
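        # the landmark heatmaps modulate the LR features here, before the
        # iterative up-/down-projection groups below refine them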
lr_features = []
hr_features = []
lr_features.append(x)
for idx in range(self.num_groups):
LD_L = torch.cat(tuple(lr_features), 1) # when idx == 0, lr_features == [x]
if idx > 0:
LD_L = self.uptranBlocks[idx-1](LD_L)
LD_H = self.upBlocks[idx](LD_L)
hr_features.append(LD_H)
LD_H = torch.cat(tuple(hr_features), 1)
if idx > 0:
LD_H = self.downtranBlocks[idx-1](LD_H)
LD_L = self.downBlocks[idx](LD_H)
lr_features.append(LD_L)
del hr_features
output = torch.cat(tuple(lr_features[1:]), 1) # leave out input x, i.e. lr_features[0]
output = self.compress_out(output)
self.last_hidden = output
return output
class FeedbackBlockCustom(FeedbackBlock):
def __init__(self, num_features, num_groups, upscale_factor, act_type,
norm_type, num_features_in):
super(FeedbackBlockCustom, self).__init__(
num_features, num_groups, upscale_factor, act_type, norm_type)
self.compress_in = ConvBlock(num_features_in, num_features,
kernel_size=1,
act_type=act_type, norm_type=norm_type)
def forward(self, x):
x = self.compress_in(x)
lr_features = []
hr_features = []
lr_features.append(x)
for idx in range(self.num_groups):
LD_L = torch.cat(tuple(lr_features), 1) # when idx == 0, lr_features == [x]
if idx > 0:
LD_L = self.uptranBlocks[idx-1](LD_L)
LD_H = self.upBlocks[idx](LD_L)
hr_features.append(LD_H)
LD_H = torch.cat(tuple(hr_features), 1)
if idx > 0:
LD_H = self.downtranBlocks[idx-1](LD_H)
LD_L = self.downBlocks[idx](LD_H)
lr_features.append(LD_L)
del hr_features
output = torch.cat(tuple(lr_features[1:]), 1) # leave out input x, i.e. lr_features[0]
output = self.compress_out(output)
return output
class SRFBN_HG(nn.Module):
def __init__(self, opt):
super().__init__()
in_channels = opt['in_channels']
out_channels = opt['out_channels']
num_groups = opt['num_groups']
hg_num_feature = opt['hg_num_feature']
hg_num_stack = opt['hg_num_stack']
hg_num_keypoints = opt['hg_num_keypoints']
hg_connect_type = opt['hg_connect_type']
act_type = 'prelu'
norm_type = None
self.num_steps = opt['num_steps']
num_features = opt['num_features']
self.upscale_factor = opt['scale']
if self.upscale_factor == 4:
stride = 4
padding = 2
kernel_size = 8
elif self.upscale_factor == 8:
stride = 8
padding = 2
kernel_size = 12
else:
raise NotImplementedError("Upscale factor %d not implemented!" % self.upscale_factor)
# LR feature extraction block
self.conv_in = ConvBlock(
in_channels,
4 * num_features,
kernel_size=3,
act_type=act_type,
norm_type=norm_type)
self.feat_in = ConvBlock(
4 * num_features,
num_features,
kernel_size=1,
act_type=act_type,
norm_type=norm_type)
# basic block
# first block takes only original LR feature as input, coarse SR
self.first_block = FeedbackBlockCustom(num_features, num_groups, self.upscale_factor,
act_type, norm_type, num_features)
# second block takes LR feature, last FB output and heatmap as input
self.block = FeedbackBlockCustom(num_features, num_groups, self.upscale_factor,
act_type, norm_type, 2 * num_features + hg_num_keypoints)
# reconstruction block
# self.upsample = nn.Upsample(scale_factor=upscale_factor, mode='bilinear')
self.out = DeconvBlock(
num_features,
num_features,
kernel_size=kernel_size,
stride=stride,
padding=padding,
act_type='prelu',
norm_type=norm_type)
self.conv_out = ConvBlock(
num_features,
out_channels,
kernel_size=3,
act_type=None,
norm_type=norm_type)
self.HG = StackedHourGlass(hg_num_feature, hg_num_stack, hg_num_keypoints, hg_connect_type)
if self.upscale_factor == 4:
self.HG_out = None
elif self.upscale_factor == 8:
self.HG_out = nn.MaxPool2d(2, stride=2)
def forward(self, x):
inter_res = nn.functional.interpolate(
x,
scale_factor=self.upscale_factor,
mode='bilinear',
align_corners=False)
x = self.conv_in(x)
x = self.feat_in(x)
sr_outs = []
heatmap_outs = []
hg_last_hidden = None
f_in = x
for step in range(self.num_steps):
if step == 0:
# use first FB to do coarse SR
FB_out = self.first_block(f_in)
else:
FB_out = self.block(f_in)
h = torch.add(inter_res, self.conv_out(self.out(FB_out)))
            # detach, stop heatmap loss from propagating to SR
heatmap, hg_last_hidden = self.HG(h, hg_last_hidden)
if self.HG_out:
heatmap_out = self.HG_out(heatmap)
else:
                heatmap_out = heatmap
f_in = torch.cat((x, FB_out, heatmap_out), dim=1)
sr_outs.append(h)
heatmap_outs.append(heatmap)
return sr_outs, heatmap_outs # return output of every timesteps
``` |
{
"source": "15871722713/pytest_web_demo",
"score": 3
} |
#### File: pytest_web_demo/utils/logger.py
```python
import logging
from config.conf import cm
class Log:
def __init__(self):
self.logger = logging.getLogger()
if not self.logger.handlers:
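            # configure handlers only once: logging.getLogger() returns the same
            # logger on every call, so re-adding handlers would duplicate output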
self.logger.setLevel(logging.DEBUG)
            # create a handler that writes to the log file
fh = logging.FileHandler(cm.log_file, encoding='utf-8')
fh.setLevel(logging.INFO)
            # create a handler that logs to the console
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
            # define the output format
formatter = logging.Formatter(self.fmt)
fh.setFormatter(formatter)
ch.setFormatter(formatter)
            # attach the handlers to the logger
self.logger.addHandler(fh)
self.logger.addHandler(ch)
@property
def fmt(self):
return '%(levelname)s\t%(asctime)s\t[%(filename)s:%(lineno)d]\t%(message)s'
log = Log().logger
if __name__ == '__main__':
log.info('hello world')
``` |
{
"source": "15921483570/python_data_structures_and_algorithms",
"score": 4
} |
#### File: docs/02/array_and_list.py
```python
from array import array # python 提供的比较原始的 array 类
arr = array('u', 'asdf')
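# typecode 'u' stores unicode characters; all elements of an array share one fixed C type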
print(arr[0], arr[1], arr[2], arr[3])
# 实现定长的 Array ADT,省略了边界检查等
class Array(object):
def __init__(self, size=32):
self._size = size
self._items = [None] * size
def __getitem__(self, index):
return self._items[index]
def __setitem__(self, index, value):
self._items[index] = value
def __len__(self):
return self._size
def clear(self, value=None):
for i in range(len(self._items)):
self._items[i] = value
def __iter__(self):
for item in self._items:
yield item
def test_array():
size = 10
a = Array(size)
a[0] = 1
assert a[0] == 1
assert len(a) == 10
# py.test array_and_list.py
```
#### File: docs/09/set_adt.py
```python
class Array(object):
def __init__(self, size=32, init=None):
self._size = size
self._items = [init] * size
def __getitem__(self, index):
return self._items[index]
def __setitem__(self, index, value):
self._items[index] = value
def __len__(self):
return self._size
def clear(self, value=None):
for i in range(len(self._items)):
self._items[i] = value
def __iter__(self):
for item in self._items:
yield item
class Slot(object):
    """A slot in the hash table's underlying array.

    Note that a slot can be in one of three states; compared with separate
    chaining, removing a key under open addressing (probing) is a bit trickier.
    1. Never used: HashTable.UNUSED. The slot has never held a key or been hit
       by a collision, so a lookup can stop probing once it reaches UNUSED.
    2. Used but removed: HashTable.EMPTY. Elements after this probe point may
       still hold keys, so probing has to continue past it.
    3. In use: the slot currently holds a Slot node.
"""
def __init__(self, key, value):
self.key, self.value = key, value
class HashTable(object):
    UNUSED = None  # never used
    EMPTY = Slot(None, None)  # used before, but the key was removed
def __init__(self):
        self._table = Array(8, init=HashTable.UNUSED)  # keep the size a power of two
self.length = 0
@property
def _load_factor(self):
        # reallocate (rehash) once load_factor exceeds 0.8
return self.length / float(len(self._table))
def __len__(self):
return self.length
def _hash(self, key):
return abs(hash(key)) % len(self._table)
def _find_key(self, key):
index = self._hash(key)
_len = len(self._table)
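        # open addressing: probe with index = (index*5 + 1) % table_size, the
        # same style of recurrence CPython's dict uses (without the perturb term)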
while self._table[index] is not HashTable.UNUSED:
if self._table[index] is HashTable.EMPTY:
index = (index*5 + 1) % _len
continue
elif self._table[index].key == key:
return index
else:
index = (index*5 + 1) % _len
return None
def _find_slot_for_insert(self, key):
index = self._hash(key)
_len = len(self._table)
while not self._slot_can_insert(index):
index = (index*5 + 1) % _len
return index
def _slot_can_insert(self, index):
return (self._table[index] is HashTable.EMPTY or self._table[index] is HashTable.UNUSED)
def __contains__(self, key): # in operator
index = self._find_key(key)
return index is not None
def add(self, key, value):
if key in self:
index = self._find_key(key)
self._table[index].value = value
return False
else:
index = self._find_slot_for_insert(key)
self._table[index] = Slot(key, value)
self.length += 1
if self._load_factor >= 0.8:
self._rehash()
return True
def _rehash(self):
old_table = self._table
newsize = len(self._table) * 2
self._table = Array(newsize, HashTable.UNUSED)
self.length = 0
for slot in old_table:
if slot is not HashTable.UNUSED and slot is not HashTable.EMPTY:
index = self._find_slot_for_insert(slot.key)
self._table[index] = slot
self.length += 1
def get(self, key, default=None):
index = self._find_key(key)
if index is None:
return default
else:
return self._table[index].value
def remove(self, key):
index = self._find_key(key)
if index is None:
raise KeyError()
value = self._table[index].value
self.length -= 1
self._table[index] = HashTable.EMPTY
return value
def __iter__(self):
for slot in self._table:
if slot not in (HashTable.EMPTY, HashTable.UNUSED):
yield slot.key
#########################################
# The code above is copied from the hash-table chapter; we implement the set ADT by inheriting from HashTable
#########################################
class SetADT(HashTable):
def add(self, key):
        # a set is just a dict under the hood; we simply fix every value to a constant (True)
return super(SetADT, self).add(key, True)
    def __and__(self, other_set):
        """Intersection: A & B"""
new_set = SetADT()
for element_a in self:
if element_a in other_set:
new_set.add(element_a)
return new_set
    def __sub__(self, other_set):
        """Difference: A - B"""
new_set = SetADT()
for element_a in self:
if element_a not in other_set:
new_set.add(element_a)
return new_set
    def __or__(self, other_set):
        """Union: A | B"""
new_set = SetADT()
for element_a in self:
new_set.add(element_a)
for element_b in other_set:
new_set.add(element_b)
return new_set
def test_set_adt():
sa = SetADT()
sa.add(1)
sa.add(2)
sa.add(3)
    assert 1 in sa  # exercises __contains__; with add and __contains__ in place, the most basic set behaviour works
sb = SetADT()
sb.add(3)
sb.add(4)
sb.add(5)
assert sorted(list(sa & sb)) == [3]
assert sorted(list(sa - sb)) == [1, 2]
assert sorted(list(sa | sb)) == [1, 2, 3, 4, 5]
if __name__ == '__main__':
test_set_adt()
```
#### File: docs/14/btree.py
```python
from collections import deque
class Queue(object):  # with the built-in deque we can quickly implement a Queue
def __init__(self):
self._items = deque()
def append(self, value):
return self._items.append(value)
def pop(self):
return self._items.popleft()
def empty(self):
return len(self._items) == 0
class Stack(object):
def __init__(self):
self._items = deque()
def push(self, value):
return self._items.append(value)
def pop(self):
return self._items.pop()
def empty(self):
return len(self._items) == 0
class BinTreeNode(object):
def __init__(self, data, left=None, right=None):
self.data, self.left, self.right = data, left, right
class BinTree(object):
def __init__(self, root=None):
self.root = root
@classmethod
def build_from(cls, node_list):
"""build_from
:param node_list: {'data': 'A', 'left': None, 'right': None, 'is_root': False}
"""
node_dict = {}
for node_data in node_list:
data = node_data['data']
node_dict[data] = BinTreeNode(data)
for node_data in node_list:
data = node_data['data']
node = node_dict[data]
if node_data['is_root']:
root = node
node.left = node_dict.get(node_data['left'])
node.right = node_dict.get(node_data['right'])
return cls(root)
def preorder_trav(self, subtree):
if subtree is not None:
print(subtree.data)
self.preorder_trav(subtree.left)
self.preorder_trav(subtree.right)
    def preorder_trav_use_stack(self, subtree):
        """The recursive version relies on the call stack; here we make that stack explicit."""
s = Stack()
if subtree:
s.push(subtree)
while not s.empty():
top_node = s.pop()
            print(top_node.data)  # print is used here; you could yield values instead and build a list at the call site
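            # push the right child first so the left child is popped (visited) first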
if top_node.right:
s.push(top_node.right)
if top_node.left:
s.push(top_node.left)
def inorder_trav(self, subtree):
if subtree is not None:
self.inorder_trav(subtree.left)
print(subtree.data)
self.inorder_trav(subtree.right)
def yield_inorder(self, subtree): # for val in yield_inorder(root): print(val)
if subtree:
            yield from self.yield_inorder(subtree.left)
            yield subtree.data
            yield from self.yield_inorder(subtree.right)
def reverse(self, subtree):
if subtree is not None:
subtree.left, subtree.right = subtree.right, subtree.left
self.reverse(subtree.left)
self.reverse(subtree.right)
def layer_trav(self, subtree):
cur_nodes = [subtree]
next_nodes = []
while cur_nodes or next_nodes:
for node in cur_nodes:
print(node.data)
if node.left:
next_nodes.append(node.left)
if node.right:
next_nodes.append(node.right)
            cur_nodes = next_nodes  # move on to the next level
next_nodes = []
def layer_trav_use_queue(self, subtree):
q = Queue()
q.append(subtree)
while not q.empty():
cur_node = q.pop()
print(cur_node.data)
if cur_node.left:
q.append(cur_node.left)
if cur_node.right:
q.append(cur_node.right)
node_list = [
{'data': 'A', 'left': 'B', 'right': 'C', 'is_root': True},
{'data': 'B', 'left': 'D', 'right': 'E', 'is_root': False},
{'data': 'D', 'left': None, 'right': None, 'is_root': False},
{'data': 'E', 'left': 'H', 'right': None, 'is_root': False},
{'data': 'H', 'left': None, 'right': None, 'is_root': False},
{'data': 'C', 'left': 'F', 'right': 'G', 'is_root': False},
{'data': 'F', 'left': None, 'right': None, 'is_root': False},
{'data': 'G', 'left': 'I', 'right': 'J', 'is_root': False},
{'data': 'I', 'left': None, 'right': None, 'is_root': False},
{'data': 'J', 'left': None, 'right': None, 'is_root': False},
]
btree = BinTree.build_from(node_list)
print('==== preorder traversal ====')
btree.preorder_trav(btree.root)
print('==== preorder traversal with an explicit stack ====')
btree.preorder_trav_use_stack(btree.root)
print('==== level-order traversal ====')
btree.layer_trav(btree.root)
print('==== level-order traversal with a queue ====')
btree.layer_trav_use_queue(btree.root)
btree.reverse(btree.root)
print('==== result after reversing the tree ====')
btree.preorder_trav(btree.root)
```
#### File: docs/15/topk.py
```python
import heapq
class TopK:
    """Get the top-k largest elements from a large collection using fixed memory.

    Idea:
    1. Put the first k elements into a min-heap.
    2. For each remaining element:
        if it is smaller than the heap top, skip it (it cannot be in the top k);
        otherwise replace the heap top with it and re-heapify.
"""
def __init__(self, iterable, k):
self.minheap = []
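        # heapq maintains a min-heap, so self.minheap[0] is always the smallest
        # of the elements currently kept (the weakest of the running top-k)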
self.capacity = k
self.iterable = iterable
def push(self, val):
if len(self.minheap) >= self.capacity:
min_val = self.minheap[0]
            if val < min_val:  # you could equivalently write `if val > min_val` and swap branches; this form makes the skip explicit
pass
else:
                heapq.heapreplace(self.minheap, val)  # pop the current smallest, push the new value, and adjust the heap in one step
else:
            heapq.heappush(self.minheap, val)  # the first k elements go straight into the min-heap
def get_topk(self):
for val in self.iterable:
self.push(val)
return self.minheap
def test():
import random
    i = list(range(1000))  # this could be any iterable, which saves memory
random.shuffle(i)
_ = TopK(i, 10)
print(_.get_topk()) # [990, 991, 992, 996, 994, 993, 997, 998, 999, 995]
if __name__ == '__main__':
test()
``` |
{
"source": "15c-ap/Raphael-Bot",
"score": 3
} |
#### File: Raphael-Bot/tools/subby_api.py
```python
import logging
import discord
import requests
import constants
SUBBY_URL = constants.Subby_api.address
SUBBY_APIKEY = constants.Subby_api.api_key
BOT_ID = int(constants.Bot.id)
SUBBY_PAYLOAD = {'apikey': SUBBY_APIKEY}
REQUEST_TIMEOUT = 15
log = logging.getLogger(__name__)
def get_balance(member_id: discord.Member.id) -> int:
""" Calling Subby API to get ramen amount.
Args:
member_id (discord.Member.id): Discord member's ID.
Raises:
TypeError: `member_id` must be an int.
Exception: Retrieving credits failed.
Exception: Subby Broke the API.
Exception: API Timed out.
Returns:
int: Member's balance.
"""
if not isinstance(member_id, int):
log.critical(f"SUBBY API, get_balance: Given value was not an interger. check your code! {member_id=}")
raise TypeError("member_id must be an int")
log.trace(f"SUBBY API, get_balance: issued for {member_id=}")
url = f"{SUBBY_URL}/economy/user/{member_id}"
try:
# This fails quite often, so there is some error checking here.
response = requests.get(url=url, params=SUBBY_PAYLOAD, timeout=REQUEST_TIMEOUT)
if response.status_code != 200:
# Let us know what the HTTP code is when it fails.
log.trace(f"SUBBY API, get_balance: request {response.status_code=}")
raise Exception(f"retreving credits failed: {response.status_code}")
log.trace(f"SUBBY API, get_balance: {response.status_code=} {response.json()=}")
balance = response.json()['buns']
# Catch if the API fails to respond.
except requests.exceptions.ConnectionError:
log.trace(f"SUBBY API, get_balance: ConnectionError")
raise Exception("Subby Broke the API, Ping him")
except requests.Timeout:
raise Exception("Subby API Timed out")
log.trace(f"SUBBY API, get_balance: {balance=}")
return balance
def add_balance(member_id: discord.Member.id, amount, edit_house:bool=False) -> int:
""" Calling Subby API to take ramen amount.
Args:
member_id (discord.Member.id): Discord member's ID.
amount ([type]): Amount to add member's account.
edit_house (bool, optional): Transfer funds to the bot. Defaults to False.
Raises:
TypeError: `member_id` must be an int.
Exception: Adding credits failed.
Exception: Subby Broke the API.
Exception: API Timed out.
Returns:
int: Member's new balance
"""
if not isinstance(member_id, int):
log.critical(f"SUBBY API, add_balance: Given value was not an interger. check your code! {member_id=}")
raise TypeError("member_id must be an int")
if amount == 0: # Pointless, do nothing.
return 0
log.trace(f"SUBBY API, add_balance: {member_id=} {amount=} {edit_house=}")
if edit_house: # For when you want see how well your house bank is doing.
subtract_balance(member_id=BOT_ID, amount=amount)
# Need balance so you can add on to whatever the user had previously.
balance = get_balance(member_id)
url = f"{SUBBY_URL}/economy/{member_id}/buns/{balance+amount}"
try:
# This fails quite often, so there is some error checking here.
response = requests.post(url=url, params=SUBBY_PAYLOAD, timeout=REQUEST_TIMEOUT)
if response.status_code != 200:
log.trace(f"SUBBY API, add_balance: request {response.status_code=}")
# Let us know what the HTTP code is when it fails.
raise Exception(f"adding credits failed: {response.status_code=} {response.json()=}")
log.trace(f"SUBBY API, add_balance: {response.json()=}")
# Catch if the API fails to respond.
except requests.exceptions.ConnectionError:
log.trace(f"SUBBY API, add_balance: ConnectionError")
raise Exception("Subby Broke the API, Ping him, credits not awarded")
except requests.Timeout:
raise Exception("Subby API Timed out")
return response.json()['buns']
def subtract_balance(member_id: discord.Member.id, amount: int, edit_house:bool=False) -> int:
""" Calling Subby API to take ramen amount.
Args:
member_id (discord.Member.id): Discord member's ID.
amount (int): Amount to subtract member's account.
edit_house (bool, optional): Transfer funds to the bot. Defaults to False.
Raises:
TypeError: `member_id` must be an int.
Exception: Subtracting credits failed.
Exception: Subby Broke the API.
Exception: API Timed out.
Returns:
int: Member's new balance
"""
if not isinstance(member_id, int):
log.critical(f"SUBBY API, subtract_balance: Given value was not an interger. check your code! {member_id=}")
raise TypeError("member_id must be an int")
log.trace(f"SUBBY API, subtract_balance: {member_id=} {amount=} {edit_house=}")
if amount == 0: # Pointless, do nothing.
return 0
if edit_house: # For when you want see how well your house bank is doing.
add_balance(member_id=BOT_ID, amount=amount)
# Need balance so you can add on to whatever the user had previously.
balance = get_balance(member_id)
url = f"{SUBBY_URL}/economy/{member_id}/buns/{balance-amount}"
try:
# This fails quite often, so there is some error checking here.
response = requests.post(url=url, params=SUBBY_PAYLOAD, timeout=REQUEST_TIMEOUT)
if response.status_code != 200:
log.trace(f"SUBBY API, subtract_balance: request {response.status_code=}")
# Let us know what the HTTP code is when it fails
raise Exception(f"subtracting credits failed: {response.status_code=}")
log.trace(f"SUBBY API, subtract_balance: {response.status_code=} {response.json()=}")
# Catch if the API fails to respond
except requests.exceptions.ConnectionError:
log.trace(f"SUBBY API, subtract_balance: ConnectionError")
raise Exception("Subby Broke the API, Ping him, credits not taken. lucky...")
except requests.Timeout:
raise Exception("Subby API Timed out")
return response.json()['buns']
def set_balance(member_id: discord.Member.id, amount: int) -> int:
""" Calling Subby API to set ramen amount.
Args:
member_id (discord.Member.id): Discord member's ID.
amount (int): Amount to set member's account.
Raises:
TypeError: `member_id` must be an int.
Exception: Set credits failed.
Exception: Subby Broke the API.
Exception: API Timed out.
Returns:
int: Member's new balance.
"""
if not isinstance(member_id, int):
log.critical(f"SUBBY API, set_balance: Given value was not an interger. check your code! {member_id=}")
raise TypeError("user must be an int")
log.trace(f"SUBBY API, set_balance: {member_id=} {amount=}")
url = f"{SUBBY_URL}/economy/{member_id}/buns/{amount}"
try:
# This fails quite often, so there is some error checking here.
response = requests.post(url=url, params=SUBBY_PAYLOAD, timeout=REQUEST_TIMEOUT)
if response.status_code != 200:
log.error(f"SUBBY API, set_balance: request {response.status_code=}")
# Let us know what the HTTP code is when it fails.
raise Exception(f"set credits failed: {response.status_code=}")
log.trace(f"SUBBY API, set_balance: {response.status_code=} {response.json()=}")
# Catch if the API fails to respond.
except requests.exceptions.ConnectionError:
log.error(f"SUBBY API, set_balance: ConnectionError")
raise Exception("Subby Broke the API, Ping him. balance not given")
except requests.Timeout:
raise Exception("Subby API Timed out")
return response.json()['buns']
def record_ledger(member_sender_id: discord.Member.id, member_receiver_id: discord.Member.id, amount: int, reason: str = "None") -> None:
""" Recording transaction history between members with SUBBY API.
Features:
If amount is negative, then sender and receiver positions will be swapped.
If amount is zero, function will do nothing.
If both sender and receiver are the same, do nothing.
Args:
member_sender_id (discord.Member.id): Discord member's ID of sender.
member_receiver_id (discord.Member.id): Discord member's ID of receiver.
amount (int): Amount transferred.
reason (str, optional): [description]. Defaults to "None".
Raises:
TypeError: Member ID must be an int.
Exception: Subby Broke the API.
Exception: API Timed out.
"""
if not isinstance(member_sender_id, int) or not isinstance(member_receiver_id, int):
log.critical("SUBBY API, record_ledger: Given value was not an interger."
"check your code! {member_sender_id=} OR {member_receiver_id=}")
raise TypeError("Member ID must be an int.")
log.trace(f"SUBBY API, record_ledger: {member_sender_id=} {member_receiver_id=} {amount=}")
if amount == 0: # If amount is zero, function will do nothing.
log.debug(f"SUBBY API, record_ledger: {amount=} was zero, do nothing")
return
if amount < 0: # If amount is negative, then sender and receiver positions will be swapped.
x = member_sender_id
member_sender_id = member_receiver_id
member_receiver_id = x
amount = abs(amount)
log.debug(f"SUBBY API, record_ledger: sender and receiver swapped places, due to amount being negative")
if member_sender_id == member_receiver_id: # If both sender and receiver are the same, do nothing.
log.info(f'SUBBY API, record_ledger: "member_sender_id" cannot be the same as "member_receiver_id". {member_sender_id=} {member_receiver_id=}')
return
url = f"{SUBBY_URL}/economy/transaction/add"
payload = SUBBY_PAYLOAD
json = {
"fromUserID": f"{member_sender_id}",
"toUserID": f"{member_receiver_id}",
"amount": f"{amount}",
"reason": f"{reason}"
}
log.trace(f"SUBBY API, record_ledger: {json=}")
try:
response = requests.post(url=url, json=json, params=payload, timeout=REQUEST_TIMEOUT)
log.debug(f"SUBBY API, record_ledger: {response.status_code=} {response.json()=}")
except requests.exceptions.ConnectionError:
log.error(f"SUBBY API, record_ledger: ConnectionError")
raise Exception("Subby Broke the API, Ping him, credits not awarded")
except requests.Timeout:
raise Exception("Subby API Timed out")
except Exception as exception:
log.error(f"SUBBY API, record_ledger: {exception=}")
pass # This API point is not critical, so no real point in doing anything with it.
def record_emoji(member_id: discord.Member.id, emoji_name: str, action: str, cost: int) -> None:
""" Recording emoji history with SUBBY API.
Args:
member_id (discord.Member.id): Discord member's ID.
emoji_name (str): name of emoji.
action (str): `purchase` or `removal`.
cost (int): cost of purchase.
Raises:
TypeError: `action` must be `purchase` or `removal`.
ValueError: `cost` cannot be negative.
Exception: Subby broke the API.
Exception: API Timed out.
"""
if action not in ['purchase', 'removal']:
log.critical(f"SUBBY API, record_emoji: Given value was not approperate. check your code! {action=}")
raise TypeError("action must be 'purchase' or 'removal'")
log.trace(f"SUBBY API, record_emoji: {member_id=} {emoji_name=} {action=} {cost=}")
if cost < 0: # Cost should never be negative.
log.debug(f"SUBBY API, record_emoji: cost amount is negative when it should never be")
raise ValueError(f"{cost=} cannot be negative")
url = f"{SUBBY_URL}/emojis/log"
payload = SUBBY_PAYLOAD
json = {
"userID": f"{member_id}",
"emojiName": f"{emoji_name}",
"action": f"{action}",
"cost": f"{cost}"
}
log.trace(f"SUBBY API, record_emoji: {json=}")
try:
response = requests.post(url=url, json=json, params=payload, timeout=REQUEST_TIMEOUT)
log.debug(f"SUBBY API, record_emoji: {response.status_code=} {response.json()=}")
except requests.exceptions.ConnectionError:
log.error(f"SUBBY API, record_emoji: ConnectionError")
raise Exception("Subby Broke the API, Ping him, credits not awarded")
except requests.Timeout:
raise Exception("Subby API Timed out")
except Exception as exception:
log.error(f"SUBBY API, record_emoji: {exception=}")
pass # This API point is not critical, so no real point in doing anything with it.
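# --- Illustrative usage sketch (not part of the original module) ---
# Assuming constants.Subby_api points at a reachable Subby API instance, a
# transfer between two members might look like this (the member IDs below are
# made up for illustration):
#
#   sender, receiver = 111111111111111111, 222222222222222222
#   subtract_balance(sender, 50)
#   add_balance(receiver, 50)
#   record_ledger(sender, receiver, 50, reason="tip")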
``` |
{
"source": "15cm/clothes-classifier",
"score": 2
} |
#### File: src/classifier/clf_model.py
```python
__author__ = '15cm'
from sklearn.ensemble import RandomForestClassifier
from sklearn.externals import joblib
from feature.sift import Sift
import os
CURPATH = os.path.split(os.path.realpath(__file__))[0]
class RandomForest:
def __init__(self):
self.rf = RandomForestClassifier()
self.model_path =os.path.join(os.path.join(CURPATH,'model'),'random_forest.pkl')
def fit(self,X,y):
self.rf.fit(X,y)
def save(self):
joblib.dump(self.rf,self.model_path)
def load(self):
self.rf = joblib.load(self.model_path)
def predict(self,kmeans,file):
pass
# if hasattr(file,'__iter__'):
#
# if image:
# X = bow.compute_bow(kmeans,image)
# elif image_list:
# descriptors_list = [sift.get_descriptors(os.path.join('test',x)) for x in image_list] # 128-dimension descriptors for every image
# X = bow.compute_bow_matrix(kmeans,descriptors_list)
# else:
# print 'image or image_list should be provided'
# exit(1)
# return self.rf.predict(X)
```
#### File: src/cluster/bag_of_words.py
```python
__author__ = '15cm'
from data.data_handler import DataHandler
from feature.superpixel import SuperPixel
from mynp import np
import os
from functools import reduce  # harmless on Python 2, required on Python 3 where reduce is no longer a builtin
CURPATH = os.path.split(os.path.realpath(__file__))[0]
class Bow:
def __init__(self,kmeans):
self.kmeans = kmeans
self.bow_path = os.path.join(CURPATH,'bow')
def train_sift(self,X_list):
bow_list = []
for X in X_list:
bow_list.append(self.compute(X))
self.bow_matrix = reduce(np.vstack,bow_list)
dh = DataHandler()
dh.load()
sample_y = np.empty((len(X_list),1))
for i in range(len(sample_y)):
sample_y[i][0] = dh.get_lables(id=i)
sample_data = np.hstack((sample_y, self.bow_matrix))
# save sample data
np.savetxt(os.path.join(self.bow_path,'bow_sift.txt'),sample_data)
def train_pixel(self,image_list):
superpixel_list = [SuperPixel(x) for x in image_list]
for sp in superpixel_list:
sp.segment()
sp.count_descriptors()
# for
def compute(self,X):
bow = [0 for x in range(self.kmeans.n_cluster)]
clusters = self.kmeans.predict(X)
for i in clusters:
bow[i] += 1
return bow
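# For example (illustrative): with n_cluster == 3 and cluster predictions [0, 2, 2, 1, 2],
# compute() returns the histogram [1, 1, 3].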
def load(self,bow_name):
self.bow_matrix = np.loadtxt(os.path.join(self.bow_path, bow_name))
```
#### File: src/cluster/kmeans_model.py
```python
__author__ = '15cm'
import os
from functools import reduce  # harmless on Python 2, required on Python 3 where reduce is no longer a builtin
from mynp import np
from feature.sift import Sift
from sklearn.cluster import KMeans
from sklearn.externals import joblib
CURPATH = os.path.split(os.path.realpath(__file__))[0]
class KmeansModel:
def __init__(self):
self.kmeans = KMeans(n_clusters=15,n_init=15)
self.model_path = os.path.join(CURPATH,'model')
def fit(self,X):
if type(X) == list:
self.kmeans.fit(reduce(np.vstack,X))
else:
self.kmeans.fit(X)
self.n_clusters = self.kmeans.n_clusters
@property
def n_cluster(self):
return self.n_clusters
def predict(self,X):
return self.kmeans.predict(X)
def save(self,model_name):
joblib.dump(self.kmeans,os.path.join(self.model_path,model_name + '.pkl'))
def load(self,model_name):
model_file = os.path.join(self.model_path,model_name + '.pkl')
if os.path.exists(model_file):
self.kmeans = joblib.load(model_file)
self.n_clusters = self.kmeans.n_clusters
return True
else:
return False
``` |
{
"source": "15EC10026/deeplabv3",
"score": 3
} |
#### File: deeplabv3/deeplab_model/deeplabv3.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from model.resnet import resnet50, resnet34
from model.aspp import ASPP, ASPP_Bottleneck
#from resnet import resnet50, resnet34
#from aspp import ASPP, ASPP_Bottleneck
class Bottleneck_custom(nn.Module):
expansion = 4
def __init__(self, in_channels, channels, stride=1, dilation=1):
super(Bottleneck_custom, self).__init__()
out_channels = self.expansion*channels
self.conv1 = nn.Conv2d(in_channels, channels, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(channels)
self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, stride=stride, padding=dilation, dilation=dilation, bias=False)
self.bn2 = nn.BatchNorm2d(channels)
self.conv3 = nn.Conv2d(channels, out_channels, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(out_channels)
if (stride != 1) or (in_channels != out_channels):
conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
bn = nn.BatchNorm2d(out_channels)
self.downsample = nn.Sequential(conv, bn)
else:
self.downsample = nn.Sequential()
def forward(self, x):
# (x has shape: (batch_size, in_channels, h, w))
out = F.relu(self.bn1(self.conv1(x))) # (shape: (batch_size, channels, h, w))
out = F.relu(self.bn2(self.conv2(out))) # (shape: (batch_size, channels, h, w) if stride == 1, (batch_size, channels, h/2, w/2) if stride == 2)
out = self.bn3(self.conv3(out)) # (shape: (batch_size, out_channels, h, w) if stride == 1, (batch_size, out_channels, h/2, w/2) if stride == 2)
out = out + self.downsample(x) # (shape: (batch_size, out_channels, h, w) if stride == 1, (batch_size, out_channels, h/2, w/2) if stride == 2)
out = F.relu(out) # (shape: (batch_size, out_channels, h, w) if stride == 1, (batch_size, out_channels, h/2, w/2) if stride == 2)
return out
def make_layer(block, in_channels, channels, num_blocks, stride=1, dilation=1):
strides = [stride] + [1]*(num_blocks - 1) # (stride == 2, num_blocks == 4 --> strides == [2, 1, 1, 1])
blocks = []
for stride in strides:
blocks.append(block(in_channels=in_channels, channels=channels, stride=stride, dilation=dilation))
in_channels = block.expansion*channels
layer = nn.Sequential(*blocks) # (*blocks: call with unpacked list entires as arguments)
return layer
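# For example (illustrative): make_layer(Bottleneck_custom, in_channels=256, channels=64,
# num_blocks=3, stride=1, dilation=2) stacks three bottleneck blocks whose 3x3 convolutions
# use dilation 2, keeping the spatial resolution while enlarging the receptive field.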
import os  # torch, nn and F are already imported above
#from model.resnet import ResNet18_OS16, ResNet34_OS16, ResNet50_OS16, ResNet101_OS16, ResNet152_OS16, ResNet18_OS8, ResNet34_OS8
#from model.aspp import ASPP, ASPP_Bottleneck
class DeepLabV3(nn.Module):
def __init__(self, num_classes): #, model_id, project_dir):
super(DeepLabV3, self).__init__()
self.num_classes = num_classes #as seen from the cityscapes website
#self.model_id = model_id
#self.project_dir = project_dir
#self.create_model_dirs()
'''
self.resnet = ResNet34_OS8() # NOTE! specify the type of ResNet here
self.aspp = ASPP(num_classes=self.num_classes) # NOTE! if you use ResNet50-152, set self.aspp = ASPP_Bottleneck(num_classes=self.num_classes) instead
'''
#self.resnet = ResNet50_OS16() # NOTE! specify the type of ResNet here
resnet = resnet34() #resnet50()
#resnet.load_state_dict(torch.load("/home/kaustavb/6867/model/resnet50-19c8e357.pth")) #needed ResNet50
resnet.load_state_dict(torch.load("/home/kaustavb/6867/model/resnet34-333f7ec4.pth")) #needed ResNet34
#self.resnet = nn.Sequential(*list(resnet.children())[:-3]) #only the convolutional features are extracted.
self.encoder = resnet
# replace last conv layer with dilated convolution
#self.layer5 = make_layer(Bottleneck_custom, in_channels=4*256, channels=512, num_blocks=3, stride=1, dilation=2) #needed ResNet50
self.layer5 = make_layer(Bottleneck_custom, in_channels=256, channels=64, num_blocks=3, stride=1, dilation=2) #needed ResNet34
self.aspp = ASPP_Bottleneck(num_classes=self.num_classes) # NOTE! if you use ResNet50-152, set self.aspp = ASPP_Bottleneck(num_classes=self.num_classes) instead
#self.conv_1x1_1 = nn.Conv2d(256, 48, kernel_size=1, bias=False)
#self.bn_conv_1x1_1 = nn.BatchNorm2d(48)
#-------------->
#ResNet34 : change value from 256 to 64
#ResNet50 : change value from 64 to 256
self.aggregate = nn.Sequential(nn.Conv2d(64, 48, kernel_size=1, bias=False),
nn.BatchNorm2d(48))
#self.conv_3x3_1 = nn.Conv2d(48+256, num_classes, kernel_size=3, stride=1, padding=1, bias=False)
#-------------->
#ResNet34 : change value from 256 to 64
#ResNet50 : change value from 64 to 256
self.last_conv = nn.Sequential(nn.Conv2d(48+64, 64, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.Dropout(0.2),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.Dropout(0.2),
nn.Conv2d(64, num_classes, kernel_size=1, stride=1))
#self.bn_conv_3x3_1 = nn.BatchNorm2d(num_classes)
def forward(self, x):
# (x has shape (batch_size, 3, h, w))
h = x.size()[2]
w = x.size()[3]
feature_map, low_level_features = self.encoder(x) # (assuming self.resnet is ResNet18_OS16 or ResNet34_OS16. If self.resnet is ResNet18_OS8 or ResNet34_OS8, it will be (batch_size, 512, h/8, w/8). If self.resnet is ResNet50-152, it will be (batch_size, 4*512, h/16, w/16))
#feature_map = self.resnet(x)
#print(low_level_features.shape)
#print(feature_map.shape)
feature_map = self.layer5(feature_map)
#print(feature_map.shape)
output = self.aspp(feature_map) # (shape: (batch_size, 256, h/4, w/4))
#print(output.shape)
#print(low_level_features.shape) # (shape: (batch_size, 256, h/4, w/4))
#print(feature_map.shape) # (shape: (batch_size, 2048, h/16, w/16))
#print(output.shape) # (shape: (batch_size, 256, h/4, w/4))
#low_level_features = F.relu(self.bn_conv_1x1_1(self.conv_1x1_1(low_level_features))) # (shape: (batch_size, 48, h/4, w/4))
low_level_features = F.relu(self.aggregate(low_level_features)) # (shape: (batch_size, 48, h/4, w/4))
#print(low_level_features.shape)
output = torch.cat([low_level_features, output], 1)
#print(output.shape)
output = self.last_conv(output) # (shape: (batch_size, 256, h/4, w/4))
#print(output.shape)
output = F.interpolate(output, size=(h, w), mode="bilinear") # (shape: (batch_size, num_classes, h, w))
output = F.softmax(output, dim=1) # performs soft-max and outputs probability values for each pixel.
#print(output.shape)
return output
``` |
{
"source": "15ers/Solve_Naively",
"score": 3
} |
#### File: epikjjh/baekjoon/1339.py
```python
n = int(input())
nums = [0]*26
for form in [input() for i in range(n)]:
for i,e in enumerate(form):
nums[ord(e)-ord('A')] += 10**(len(form)-i-1)
nums.sort(reverse=True)
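# Greedy step: each letter's accumulated place values act as its weight, so assigning
# digit 9 to the heaviest letter, 8 to the next, and so on maximizes the total sum.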
ret = 0
for i in range(10):
ret += nums[i]*(9-i)
print(ret)
```
#### File: epikjjh/baekjoon/14499.py
```python
n, m, y, x, k = map(int, input().split())
stage = [list(map(int, input().split())) for i in range(n)]
order = list(map(int, input().split()))
'''
3 3 3 0 5
102 510 025 142 132
4 4 4 5 0
5 2 1 3 4
before east west north south
'''
dice = [0]*6
# direction(y, x) : east, west, north, south
direction = ((0, 1), (0, -1), (-1, 0), (1, 0))
def switch(direction, dice):
if direction == (0, 1):
# 5->1 / 1->0 / 0->2 / 2->5
tmp = dice[:]
dice[1] = tmp[5]
dice[0] = tmp[1]
dice[2] = tmp[0]
dice[5] = tmp[2]
elif direction == (0, -1):
# 0->1 / 2->0 / 5->2 / 1->5
tmp = dice[:]
dice[1] = tmp[0]
dice[0] = tmp[2]
dice[2] = tmp[5]
dice[5] = tmp[1]
elif direction == (-1, 0):
# 0->3 / 4->0 / 5->4 / 3->5
tmp = dice[:]
dice[3] = tmp[0]
dice[0] = tmp[4]
dice[4] = tmp[5]
dice[5] = tmp[3]
# south : (1, 0)
else:
# 5->3 / 3->0 / 0->4 / 4->5
tmp = dice[:]
dice[3] = tmp[5]
dice[0] = tmp[3]
dice[4] = tmp[0]
dice[5] = tmp[4]
for i in order:
if 0 <= y + direction[i-1][0] <= n-1 and 0 <= x + direction[i-1][1] <= m-1:
y += direction[i-1][0]
x += direction[i-1][1]
switch(direction[i-1], dice)
if stage[y][x]:
dice[5] = stage[y][x]
stage[y][x] = 0
else:
stage[y][x] = dice[5]
print(dice[0])
else:
continue
```
#### File: epikjjh/baekjoon/14501.py
```python
import sys
input = sys.stdin.readline
n = int(input().rstrip())
time = [0]*n
price = [0]*n
for i in range(n):
time_elem, price_elem = map(int, input().rstrip().split())
time[i] = time_elem
price[i] = price_elem
def find_max(cur_day, cur_price):
if cur_day >= n:
return cur_price
if cur_day+time[cur_day] > n:
cur_price = find_max(cur_day+1, cur_price)
else:
cur_price = max(find_max(cur_day+1, cur_price), find_max(cur_day+time[cur_day], cur_price+price[cur_day]))
return cur_price
print(find_max(0,0))
```
#### File: epikjjh/baekjoon/2042.py
```python
import math
import sys
def init(arr: list, tree: list, node: int, start: int, end: int)-> int:
if start == end:
tree[node] = arr[start]
return tree[node]
else:
tree[node] = init(arr, tree, node*2, start, (start+end)//2) + init(arr, tree, node*2+1, (start+end)//2+1, end)
return tree[node]
def sum(tree: list, node: int, start: int, end: int, left: int, right: int)-> int:
if right < start or end < left:
return 0
elif left <= start and end <= right:
return tree[node]
else:
return sum(tree, node*2, start, (start+end)//2, left, right) + sum(tree, node*2+1, (start+end)//2+1, end, left, right)
def update(tree: list, node: int, start: int, end: int, index: int, diff: int):
if index < start or index > end:
return
tree[node] += diff
if start != end:
update(tree, node*2, start, (start+end)//2, index, diff)
update(tree, node*2+1, (start+end)//2+1, end, index, diff)
input = sys.stdin.readline
num, m, k = map(int, input().rstrip().split())
arr = [int(input().rstrip()) for i in range(num)]
height = math.ceil(math.log2(num))
tree = [0]*2**(height+1)
init(arr, tree, 1, 0, num-1)
for i in range(m+k):
a, b, c = map(int, input().rstrip().split())
if a == 1:
diff = c - (arr[b-1])
arr[b-1] = c
update(tree, 1, 0, num-1, b-1, diff)
else:
print(sum(tree, 1, 0, num-1, b-1, c-1))
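# Illustrative example (not part of the judged solution): for arr = [1, 2, 3, 4, 5],
# sum(tree, 1, 0, 4, 1, 3) returns 2 + 3 + 4 = 9, and after update(tree, 1, 0, 4, 2, 7)
# the same query returns 16 -- each operation costs O(log n).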
```
#### File: epikjjh/baekjoon/2178.py
```python
import sys
def conv(stream):
return [int(e) for e in stream]
input = sys.stdin.readline
n,m = map(int,input().split())
arr = [conv(input().split()[0]) for i in range(n)]
visit = [[0]*m for i in range(n)]
visit[0][0] = 1
direction = [(0,1),(0,-1),(1,0),(-1,0)]
queue = [[0,0]]
while queue:
y,x = queue.pop(0)
if y==n-1 and x==m-1:
print(visit[y][x])
break
for i in range(4):
n_y = y+direction[i][0]
n_x = x+direction[i][1]
if 0<=n_y<n and 0<=n_x<m and arr[n_y][n_x] and not visit[n_y][n_x]:
visit[n_y][n_x] = visit[y][x] + 1
queue.append([n_y,n_x])
```
#### File: epikjjh/baekjoon/9037.py
```python
def check(table):
n = len(table)
for i in range(n):
if table[i] != table[(i+1)%n]:
return False
return True
t = int(input())
for i in range(t):
n = int(input())
table = list(map(int,input().split()))
l = len(table)
cnt = 0
table = [e+1 if e%2==1 else e for e in table]
while not check(table):
table = [table[i]//2 + table[(i+1)%l]//2 for i in range(l)]
table = [e+1 if e%2==1 else e for e in table]
cnt += 1
print(cnt)
``` |
{
"source": "15five/django-distributedlock",
"score": 2
} |
#### File: django-distributedlock/distributedlock/models.py
```python
from django.db import models
class Lock(models.Model):
key = models.CharField(max_length=255, blank=False, unique=True)
value = models.CharField(max_length=255, blank=False)
timestamp = models.DateTimeField(null=True, blank=True)
class Meta:
verbose_name = 'Lock'
verbose_name_plural = 'Locks'
def __unicode__(self):
return self.key
``` |
{
"source": "15five/django_scim",
"score": 2
} |
#### File: src/django_scim/models.py
```python
from urllib.parse import urljoin
from django.db import models
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from . import constants, exceptions
from .settings import scim_settings
from .utils import get_base_scim_location_getter
class SCIMServiceProviderConfig(object):
"""
A reference ServiceProviderConfig. This should be overridden to
describe those authentication_schemes and features that are implemented by
your app.
"""
def __init__(self, request=None):
self.request = request
@property
def meta(self):
return {
'location': self.location,
'resourceType': 'ServiceProviderConfig',
}
@property
def location(self):
path = reverse('scim:service-provider-config')
return urljoin(get_base_scim_location_getter()(self.request), path)
def to_dict(self):
return {
'schemas': [constants.SchemaURI.SERVICE_PROVIDER_CONFIG],
'documentationUri': scim_settings.DOCUMENTATION_URI,
'patch': {
'supported': True,
},
'bulk': {
'supported': False,
'maxOperations': 1000,
'maxPayloadSize': 1048576,
},
# Django-SCIM2 does not fully support the SCIM2.0 filtering spec.
# Until it does, let's under promise and over deliver to the world.
'filter': {
'supported': False,
'maxResults': 50,
},
'changePassword': {
'supported': True,
},
'sort': {
'supported': False,
},
'etag': {
'supported': False,
},
'authenticationSchemes': scim_settings.AUTHENTICATION_SCHEMES,
'meta': self.meta,
}
class AbstractSCIMCommonAttributesMixin(models.Model):
"""
An abstract model to provide SCIM Common Attributes.
https://tools.ietf.org/html/rfc7643#section-3.1
Each SCIM resource (Users, Groups, etc.) includes the following
common attributes. With the exception of the "ServiceProviderConfig"
and "ResourceType" server discovery endpoints and their associated
resources, these attributes MUST be defined for all resources,
including any extended resource types. When accepted by a service
provider (e.g., after a SCIM create), the attributes "id" and "meta"
(and its associated sub-attributes) MUST be assigned values by the
service provider. Common attributes are considered to be part of
every base resource schema and do not use their own "schemas" URI.
For backward compatibility, some existing schema definitions MAY list
common attributes as part of the schema. The attribute
characteristics (see Section 2.2) listed here SHALL take precedence
over older definitions that may be included in existing schemas.
"""
"""
id
A unique identifier for a SCIM resource as defined by the service
provider. Each representation of the resource MUST include a
non-empty "id" value. This identifier MUST be unique across the
SCIM service provider's entire set of resources. It MUST be a
stable, non-reassignable identifier that does not change when the
same resource is returned in subsequent requests. The value of
the "id" attribute is always issued by the service provider and
MUST NOT be specified by the client. The string "bulkId" is a
reserved keyword and MUST NOT be used within any unique identifier
value. The attribute characteristics are "caseExact" as "true", a
mutability of "readOnly", and a "returned" characteristic of
"always". See Section 9 for additional considerations regarding
privacy.
"""
scim_id = models.CharField(
_('SCIM ID'),
max_length=254,
null=True,
blank=True,
default=None,
unique=True,
help_text=_('A unique identifier for a SCIM resource as defined by the service provider.'),
)
"""
externalId
A String that is an identifier for the resource as defined by the
provisioning client. The "externalId" may simplify identification
of a resource between the provisioning client and the service
provider by allowing the client to use a filter to locate the
resource with an identifier from the provisioning domain,
obviating the need to store a local mapping between the
provisioning domain's identifier of the resource and the
identifier used by the service provider. Each resource MAY
include a non-empty "externalId" value. The value of the
"externalId" attribute is always issued by the provisioning client
and MUST NOT be specified by the service provider. The service
provider MUST always interpret the externalId as scoped to the
provisioning domain. While the server does not enforce
uniqueness, it is assumed that the value's uniqueness is
controlled by the client setting the value. See Section 9 for
additional considerations regarding privacy. This attribute has
"caseExact" as "true" and a mutability of "readWrite". This
attribute is OPTIONAL.
"""
scim_external_id = models.CharField(
_('SCIM External ID'),
max_length=254,
null=True,
blank=True,
default=None,
db_index=True,
help_text=_('A string that is an identifier for the resource as defined by the provisioning client.'),
)
def set_scim_id(self, is_new):
if is_new:
self.__class__.objects.filter(id=self.id).update(scim_id=self.id)
self.scim_id = str(self.id)
def save(self, *args, **kwargs):
is_new = self.id is None
super(AbstractSCIMCommonAttributesMixin, self).save(*args, **kwargs)
self.set_scim_id(is_new)
class Meta:
abstract = True
class AbstractSCIMUserMixin(AbstractSCIMCommonAttributesMixin):
"""
An abstract model to provide the User resource schema.
# https://tools.ietf.org/html/rfc7643#section-4.1
"""
"""
userName
A service provider's unique identifier for the user, typically
used by the user to directly authenticate to the service provider.
Often displayed to the user as their unique identifier within the
system (as opposed to "id" or "externalId", which are generally
opaque and not user-friendly identifiers). Each User MUST include
a non-empty userName value. This identifier MUST be unique across
the service provider's entire set of Users. This attribute is
REQUIRED and is case insensitive.
"""
scim_username = models.CharField(
_('SCIM Username'),
max_length=254,
null=True,
blank=True,
default=None,
db_index=True,
help_text=_("A service provider's unique identifier for the user"),
)
@property
def scim_groups(self):
raise exceptions.NotImplementedError
class Meta:
abstract = True
class AbstractSCIMGroupMixin(AbstractSCIMCommonAttributesMixin):
"""
An abstract model to provide the Group resource schema.
# https://tools.ietf.org/html/rfc7643#section-4.2
"""
"""
displayName
A human-readable name for the Group. REQUIRED.
"""
scim_display_name = models.CharField(
_('SCIM Display Name'),
max_length=254,
null=True,
blank=True,
default=None,
db_index=True,
help_text=_("A human-readable name for the Group."),
)
class Meta:
abstract = True
def set_scim_display_name(self, is_new):
if is_new:
self.__class__.objects.filter(id=self.id).update(scim_display_name=self.name)
self.scim_display_name = self.name
def save(self, *args, **kwargs):
is_new = self.id is None
super(AbstractSCIMGroupMixin, self).save(*args, **kwargs)
self.set_scim_display_name(is_new)
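# Illustrative sketch (not part of this module): a concrete user model would
# typically combine AbstractSCIMUserMixin with Django's auth base class, e.g.
#
#   class User(AbstractSCIMUserMixin, AbstractBaseUser):
#       ...
#
# and expose its group memberships through the scim_groups property, which this
# abstract mixin deliberately leaves unimplemented.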
``` |
{
"source": "15five/rocket_releaser",
"score": 3
} |
#### File: rocket_releaser/rocket_releaser/shas.py
```python
import subprocess
import logging
from os import path
from typing import List
import re
logger = logging.getLogger(__name__)
def branch_exists(repo_dir: str, branch_name: str):
try:
base_args = ["git", f'--git-dir={path.join(repo_dir, ".git")}']
subprocess.check_call(
base_args + ["show-ref", "--verify", "--quiet", "refs/heads/" + branch_name]
)
return True
except subprocess.CalledProcessError:
return False
class SHAs:
def __init__(self, repo_dir: str, fetch_before: bool = True):
"""
:param fetch_before: Whether to fetch branch to make sure you have it when calling for_branch.
"""
self.fetch_before = fetch_before
self.base_args = [
"git",
f'--git-dir={path.join(repo_dir, ".git")}',
f"--work-tree={repo_dir}",
]
def get_shas(
self, from_revision: str, to_revision: str, branch: str = "master"
) -> List[str]:
"""
Get SHAs from from_revision to to_revision.
:param branch: Branch to pull commits from.
"""
if self.fetch_before:
fetch_args = self.base_args + [
"fetch",
"origin",
f"{branch}:{branch}",
"--update-head-ok",
]
try:
subprocess.check_output(fetch_args)
logger.debug(f"Pulled branch with the following args: {fetch_args}")
except subprocess.CalledProcessError:
logger.exception("subprocess call failed")
rev_list_args = self.base_args + [
"rev-list",
# https://stackoverflow.com/questions/7251477/what-are-the-differences-between-double-dot-and-triple-dot-in-git-dif
# to be honest not sure what difference is between .. and ... when on same branch
# but staying with ... to be on safe side because it includes more
# I didn't find any differences when doing manual tests
from_revision + "..." + to_revision,
"--format=%B", # raw body (unwrapped subject and body) https://git-scm.com/docs/git-rev-list
]
try:
commit_msgs = subprocess.check_output(rev_list_args).decode("utf-8")
except subprocess.CalledProcessError:
logger.exception("subprocess call failed")
commit_msgs = ""
logger.debug(f"rev-list commit messages: {commit_msgs}")
shas = self._get_shas(commit_msgs)
logger.debug(f"rev-list SHAs: {shas}")
return shas
def _get_shas(self, commit_msgs: str) -> List[str]:
"""returns shas, inlcuding cherry-picked shas"""
shas = []
for line in commit_msgs.split("\n"):
line = line.rstrip("\r")
if re.match(r"commit \w+$", line):
shas.append(line.replace("commit ", ""))
if line.startswith("(cherry picked from commit "):
shas.append(line.replace("(cherry picked from commit ", "").rstrip(")"))
return shas
```
#### File: rocket_releaser/rocket_releaser/ticket_labeler.py
```python
import logging
import re
from typing import List
import github3
import jira
from .prs import PRs
logger = logging.getLogger(__name__)
class TicketLabeler:
"""
class for labeling github/jira pr's/tickets
"""
# you can find more transition id's with the following code:
"""
import jira
import json
jira = jira.JIRA('https://yourJira.atlassian.net', basic_auth=('<EMAIL>', 'JIRA SECRET'))
issue = jira.issue('DEV-91') # replace this number with the issue you want to get available transitions for
# keep in mind issues in different states may have different transitions!
transitions=[(t['name'], t['id']) for t in jira.transitions(issue)]
d = dict(transitions)
print(json.dumps(d, indent=4 ,sort_keys=True))
"""
TRANSITION_IDS = {
"Backlog": "871",
"Blocked": "831",
"Close": "851",
"Code Review": "771",
"Deployed": "861",
"Ready to Test": "841",
"Reopen Issue": "811",
"Start Progress": "801",
"Start Testing": "791",
"Stop progress": "821",
}
TRANSITION_KEYWORDS = [
"close",
"closes",
"closed",
"fix",
"fixes",
"fixed",
"resolve",
"resolves",
"resolved",
]
JIRA_INFO_RE = re.compile(
r"""
(?P<transition>{transition_kw_options})? # optional keywords that mark Jira transitions
\s* # optional whitespace character
(?P<issue>\[*[A-Z]{{2,}}-\d+\]*) # Jira issue name. Match examples: [ENG-123], ENG-123, DIV-1234
""".format(
transition_kw_options="|".join(TRANSITION_KEYWORDS)
),
flags=re.VERBOSE | re.IGNORECASE,
)
PREVIEW_ENV_NAME: str = "preview"
STAGING_ENV_NAME: str = "staging"
PRODUCTION_ENV_NAME: str = "production"
def __init__(
self,
githubToken: str,
pull_request_dicts: List[dict],
repo_owner: str,
repo_name: str,
jira_token: str = "",
jira_username: str = "",
jira_url: str = "",
):
"""
:param githubToken: GitHub oauth githubToken
"""
self.githubToken = githubToken
self.pull_request_dicts = pull_request_dicts
self.repo_owner = repo_owner
self.repo_name = repo_name
self.jira_token = jira_token
self.jira_url = jira_url
self.gh = github3.GitHub(token=self.githubToken)
# documentation claims you need to use username/password combo but username/token works as well
if jira_token:
self.jira = jira.JIRA(jira_url, basic_auth=(jira_username, self.jira_token))
def label_tickets(self, env_name: str, vpc_name: str, dry_run=False) -> int:
"""
labels github pr's and associated jira tickets with env_name.
Note that if env_name matches self.PRODUCTION_ENV_NAME the jira issue will be closed
:param env_name: the name of the vpc to label the tickets/issues with
:return: number of jira tickets found
"""
if dry_run:
logger.info("Dry run - not actually making any changes")
jira_ticket_name_list = []
label: str = f"{vpc_name}({env_name})" if vpc_name != env_name else env_name
for pr in self.pull_request_dicts:
title = pr.get("title")
pr_num = pr.get("number")
try:
logger.info(
f"labeling pr #{pr_num} {title} at "
f"https://github.com/{self.repo_owner}/{self.repo_name}/pull/{pr_num} with {label}"
)
if not dry_run:
self.label_pr_or_issue(pr, label)
except github3.exceptions.GitHubException:
logger.exception("Error during labeling: ")
if not self.jira_token:
continue
jira_ticket_maps = []
jira_ticket_maps_title = self.get_jira_ticket_maps(pr.get("title", ""))
jira_ticket_maps_body = self.get_jira_ticket_maps(pr.get("body", ""))
for jira_ticket_map_title in jira_ticket_maps_title:
jira_ticket_maps.append(jira_ticket_map_title)
for jira_ticket_map_body in jira_ticket_maps_body:
if jira_ticket_map_body.get("transition"):
jira_ticket_maps.append(jira_ticket_map_body)
# we ignore tickets without transition in body because they may be unrelated
# ex: "this story is similar to ENG-4235" ~ we dont want to label ENG-4235
if not jira_ticket_maps:
logger.warning(f"couldnt find jira # in pr #{pr_num} {title}")
continue
# ugly hack ~ by converting to dict & back we ensure we dont have duplicate issues
# (same issue may be mentioned in title and again in body)
jira_ticket_maps = list(
{map["issue"]: map for map in jira_ticket_maps}.values()
)
for jira_ticket_map in jira_ticket_maps:
jira_ticket_name = jira_ticket_map.get("issue")
transition_kw = jira_ticket_map.get("transition")
try:
if jira_ticket_name not in jira_ticket_name_list:
jira_ticket_name_list.append(jira_ticket_name)
logger.info(
f"labeling jira ticket at {self.jira_url}/browse/{jira_ticket_name}"
f" with {label}"
)
issue = self.jira.issue(jira_ticket_name)
# a ticket may have more than one PR
# so we only transition if final PR
if transition_kw:
if env_name == self.PREVIEW_ENV_NAME:
logger.info(
"env_name matches preview - making sure if ticket is still in progress "
"its marked in review"
)
if not dry_run:
self.mark_in_review_jira_ticket(issue)
elif env_name == self.STAGING_ENV_NAME:
logger.info(
"env_name matches staging - marking jira ticket as ready to test"
)
if not dry_run:
self.mark_ready_test_jira_ticket(issue)
# we don't have enough qa currently to test everything before it goes to prod
# so we don't close the ticket when it hits prod in case it still needs testing
# elif env_name == self.PRODUCTION_ENV_NAME:
# logger.info('env_name matches production - closing jira ticket')
# if not dry_run:
# self.mark_deployed_jira_ticket(issue)
if not dry_run:
# this HAS to be last because you cant add labels to closed issue
self.label_jira_ticket(issue, jira_ticket_name, label)
except (jira.exceptions.JIRAError, ValueError):
logger.exception("error with " + str(title))
return len(jira_ticket_name_list)
@staticmethod
def get_jira_ticket_maps(search_string: str) -> List[dict]:
"""
Looks for all Jira related information in a string, including transition keywords and ticket number.
Will match the following strings:
* '[ENG-123]'
* 'ENG-123'
* 'eng-12345'
* 'closes DEV-1234`
* 'Fixes DS-123'
Note: The 'issue' value is always returned _without_ brackets.
:param search_string: str that will be searched for all Jira information.
:return: List of dictionaries, each having the keys 'issue' and 'transition'
"""
cleaned_info_list = []
for match_object in TicketLabeler.JIRA_INFO_RE.finditer(search_string):
info_dict = match_object.groupdict()
# lowercase the transition keyword if it is found
transition = info_dict.get("transition")
if transition:
transition = transition.lower()
# remove brackets and uppercase the issue name
issue = info_dict.get("issue", "").replace("[", "").replace("]", "").upper()
# edge case: 1-on-1 looks like a issue to regex, so we filter that out
# alternatively we could require brackets surrounding issue
if issue == "ON-1":
continue
cleaned_dict = {"transition": transition, "issue": issue}
cleaned_info_list.append(cleaned_dict)
return cleaned_info_list
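# For example (illustrative):
#   get_jira_ticket_maps("Fixes ENG-123 and mentions [DEV-45]")
#   -> [{'transition': 'fixes', 'issue': 'ENG-123'}, {'transition': None, 'issue': 'DEV-45'}]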
def label_pr_or_issue(self, pr: dict, label: str):
"""
:param pr: dict with number key mapping to a string
"""
pr_num = pr.get("number")
issue = self.gh.issue(self.repo_owner, self.repo_name, pr_num)
issue.add_labels(label)
def label_jira_ticket(self, issue, ticket_name, label):
if " " in label:
raise ValueError("labels can't have spaces!")
if label not in issue.fields.labels:
issue.update(fields={"labels": issue.fields.labels + [label]})
return issue
def mark_deployed_jira_ticket(self, issue: jira.Issue):
"""
Closes w/ comment then marks as deployed
"""
if issue is None:
raise TypeError("issue is None - issue should be of type jira.Issue")
if (
issue.fields.status.name != "Closed"
and issue.fields.status.name != "Deployed"
):
self.jira.transition_issue(
issue,
self.TRANSITION_IDS["Close"],
fields={"resolution": {"name": "Done"}},
comment="auto transitioned by deploy",
)
if issue.fields.status.name != "Deployed":
# Note that you can't comment when transitioning to Deployed status
self.jira.transition_issue(
issue, self.TRANSITION_IDS["Deployed"], comment=""
)
def mark_ready_test_jira_ticket(self, issue: jira.Issue):
"""
:param issue: jira Issue
"""
if issue is None:
raise TypeError("issue is None - issue should be of type jira.Issue")
if issue.fields.status.name in ("Reopened", "Open", "In Progress", "In Review"):
self.jira.transition_issue(issue, self.TRANSITION_IDS["Ready to Test"])
def mark_in_review_jira_ticket(self, issue: jira.Issue):
"""
:param issue: jira Issue
"""
if issue is None:
raise TypeError("issue is None - issue should be of type jira.Issue")
if issue.fields.status.name in ("Reopened", "Open", "In Progress"):
self.jira.transition_issue(issue, self.TRANSITION_IDS["Code Review"])
```
#### File: rocket_releaser/tests/test_changelog.py
```python
from rocket_releaser.changelog import ChangeLog
def test_release_bodies_should_recognize_releases():
pull_request_dict = {"number": 12, "body": "RELEASES\n closes [ENG-413]"}
assert ChangeLog([pull_request_dict], "org_name", "repo_name").release_bodies
def test_release_bodies_should_recognize_not_releases():
pull_request_dict = {"number": 12, "body": "bla bla bla"}
assert not ChangeLog([pull_request_dict], "org_name", "repo_name").release_bodies
def test_parse_bodies_should_only_pick_up_release_block():
pull_request_dict = {"number": 12, "body": "RELEASES\n closes [ENG-413]\n\nIGNORE"}
c = ChangeLog([pull_request_dict], "org_name", "repo_name")
c.parse_bodies()
assert "IGNORE" not in c.features or c.fixes or c.noteworthy or c.qa_notes
def test_parse_bodies_should_recognize_noteworthy():
pull_request_dict = {"number": 12, "body": "RELEASES\n closes [ENG-413]"}
c = ChangeLog([pull_request_dict], "org_name", "repo_name")
c.parse_bodies()
assert c.noteworthy
def test_parse_bodies_should_recognize_not_noteworthy():
pull_request_dict = {"number": 12, "body": "RELEASES\n bla bla bla"}
c = ChangeLog([pull_request_dict], "org_name", "repo_name")
c.parse_bodies()
assert not c.noteworthy
# this test is currently failing due to a bug in the code
# where the line is recognized as noteworthy instead of as a fix
def test_parse_bodies_should_recognize_fix():
pull_request_dict = {"number": 12, "body": "RELEASES\n fixes [ENG-413]"}
c = ChangeLog([pull_request_dict], "org_name", "repo_name")
c.parse_bodies()
assert c.fixes
def test_parse_bodies_should_recognize_no_fix():
pull_request_dict = {"number": 12, "body": "RELEASES\n closes [ENG-413]"}
c = ChangeLog([pull_request_dict], "org_name", "repo_name")
c.parse_bodies()
assert not c.fixes
def test_parse_bodies_should_recognize_features():
pull_request_dict = {"number": 12, "body": "RELEASES\n more foobar"}
c = ChangeLog([pull_request_dict], "org_name", "repo_name")
c.parse_bodies()
assert c.features
def test_parse_bodies_should_recognize_no_features():
pull_request_dict = {
"number": 12,
"body": "RELEASES\ncloses [ENG-413]\nfixes error",
}
c = ChangeLog([pull_request_dict], "org_name", "repo_name")
c.parse_bodies()
assert not c.features
def test_parse_bodies_should_recognize_qa_notes():
pull_request_dict = {"number": 12, "body": "RELEASES bla\n\nqa\n more foobar"}
c = ChangeLog([pull_request_dict], "org_name", "repo_name")
c.parse_bodies()
assert not c.noteworthy
assert not c.fixes
assert not c.features
assert c.qa_notes
def test_parse_bodies_should_recognize_no_qa_notes():
pull_request_dict = {"number": 12, "body": "RELEASES\n\nbla"}
c = ChangeLog([pull_request_dict], "org_name", "repo_name")
c.parse_bodies()
assert not c.qa_notes
def test_parse_bodies_should_not_split_qa_notes():
pull_request_dict = {"number": 12, "body": "RELEASES\n\nQA:\ndo this\n and that"}
c = ChangeLog([pull_request_dict], "org_name", "repo_name")
c.parse_bodies()
assert len(c.qa_notes) == 1
def test_parse_bodies_should_add_jira_link():
pull_request_dict = {"number": 12, "body": "RELEASES\n[DS-435]"}
c = ChangeLog(
[pull_request_dict],
"org_name",
"repo_name",
jira_url="https://company.atlassian.net",
)
c.parse_bodies()
assert "atlassian" in c.noteworthy[0]
```
#### File: rocket_releaser/tests/test_shas.py
```python
from os import path
import tempfile
from random import randint
import shutil
import subprocess
from git import Repo
from rocket_releaser.shas import branch_exists, SHAs
cherry_pick_commit_msg = """commit 3f9ba302b9d440caba0fc2ba1f19e3614e46a7f1
feature
(cherry picked from commit c8e9114beabca79e4497f9ea40499c80cebe902a)"""
# These are 20 actual commits from fifteen5 repo
commit_msgs = """commit 6bc22dacce6aa6d036ca58bcbed23aceff61609e
Merge pull request #22344 from 15five/ENG-1357-Question-templates-not-cloned-appropriately
Clone questions templates correctly [ENG-1357]
commit d59a711dbc5278d35dff1c926ece789219e4aef4
Merge pull request #22402 from 15five/dev
__2019-03-05.0 - to staging
commit 90b079b9cf0f2d28fec37387d8f45f13ec38e829
Fix my team filtering in BSRs
commit 0fa63c345191896add555d1d2fb0c56023ab4838
Merge pull request #22407 from 15five/hotfix-staging-2019-03-05.0
__2019-03-05.1 - to staging (hotfix)
commit 0640274e4d0ddabc7ccc8e2a5d70b75ae758b946
Better SCIM logging
commit 094b0de3f41c235e2a3d5ac29475c9376aae715b
Merge pull request #22414 from 15five/staging-hf
__2019-03-05.2 - to staging (hotfix)
commit <PASSWORD>
Exclude smart groups when saving UserEditForm
[ENG-1546]
"""
def commit_random_file(branch="master"):
# see https://gitpython.readthedocs.io/en/stable/tutorial.html
filename = str(randint(0, 999999999))
new_file_path = path.join(repo.working_tree_dir, filename)
open(new_file_path, "wb").close() # create new file in working tree
repo.index.add([new_file_path]) # add it to the index
# Commit the changes to deviate masters history
return repo.index.commit("Added " + filename)
tmp_dirpath = None
repo = None
first_commit = None
shas = None
def setup_module():
global repo
global first_commit
global shas
global tmp_dirpath
tmp_dirpath = tempfile.mkdtemp()
repo = Repo.init(tmp_dirpath)
repo.git.config("user.email", "<EMAIL>")
repo.git.config("user.name", "test_user")
first_commit = commit_random_file()
shas = SHAs(tmp_dirpath)
def test_get_shas_internal():
shas_list = shas._get_shas(commit_msgs)
assert len(shas_list) == 7
def test_cherry_picked_sha():
shas_list = shas._get_shas(cherry_pick_commit_msg)
assert len(shas_list) == 2
assert shas_list[0] == "3f9ba302b9d440caba0fc2ba1f19e3614e46a7f1"
assert shas_list[1] == "c8e9114beabca79e4497f9ea40499c80cebe902a"
def test_get_shas_empty_when_no_changes():
my_shas = shas.get_shas(first_commit.hexsha, first_commit.hexsha)
assert not my_shas
def test_old_commit_in_shas():
# a old commit should still be in shas even if chronologically before last deploy
# create & checkout (-b) branch
branch_name = "old_branch"
repo.git.checkout("-b", branch_name)
# create old commit
old_commit = commit_random_file(branch=branch_name)
# create newer commit (realistically this would be another merge, but doesn't matter)
# we pretend user has deployed this commit
repo.git.checkout("master")
last_deployed_commit = commit_random_file()
# merge old commit into master
repo.git.merge(branch_name)
# pretend user deployed "old" commit
my_shas = shas.get_shas(last_deployed_commit.hexsha, repo.head.commit.hexsha)
assert my_shas
assert old_commit.hexsha in my_shas
assert last_deployed_commit.hexsha not in my_shas
# ^ already been deployed, we don't want to log release notes of this commit
assert repo.head.commit.hexsha in my_shas
# ^ sanity check
def test_not_in_range():
assert not shas.get_shas("nonexistant sha", "nonexistant sha")
def test_branch_exists():
assert branch_exists(tmp_dirpath, "master")
assert not branch_exists(
tmp_dirpath, "did-you-know-that-cashews-came-from-a-fruit?"
)
def teardown_module():
repo.close()
``` |
{
"source": "15five/snowplow-python-tracker",
"score": 2
} |
#### File: snowplow-python-tracker/snowplow_tracker/subject.py
```python
from contracts import contract, new_contract
SUPPORTED_PLATFORMS = set(["pc", "tv", "mob", "cnsl", "iot", "web", "srv", "app"])
DEFAULT_PLATFORM = "pc"
new_contract("subject", lambda x: isinstance(x, Subject))
new_contract("supported_platform", lambda x: x in SUPPORTED_PLATFORMS)
class Subject(object):
"""
Class for an event subject, where we view events as of the form
(Subject) -> (Verb) -> (Object)
"""
def __init__(self):
self.standard_nv_pairs = {"p": DEFAULT_PLATFORM}
@contract
def set_platform(self, value):
"""
:param value: One of ["pc", "tv", "mob", "cnsl", "iot", "web", "srv", "app"]
:type value: supported_platform
:rtype: subject
"""
self.standard_nv_pairs["p"] = value
return self
@contract
def set_user_id(self, user_id):
"""
:param user_id: User ID
:type user_id: string
:rtype: subject
"""
self.standard_nv_pairs["uid"] = user_id
return self
@contract
def set_screen_resolution(self, width, height):
"""
:param width: Width of the screen
:param height: Height of the screen
:type width: int,>0
:type height: int,>0
:rtype: subject
"""
self.standard_nv_pairs["res"] = "".join([str(width), "x", str(height)])
return self
@contract
def set_viewport(self, width, height):
"""
:param width: Width of the viewport
:param height: Height of the viewport
:type width: int,>0
:type height: int,>0
:rtype: subject
"""
self.standard_nv_pairs["vp"] = "".join([str(width), "x", str(height)])
return self
@contract
def set_color_depth(self, depth):
"""
:param depth: Depth of the color on the screen
:type depth: int
:rtype: subject
"""
self.standard_nv_pairs["cd"] = depth
return self
@contract
def set_timezone(self, timezone):
"""
:param timezone: Timezone as a string
:type timezone: string
:rtype: subject
"""
self.standard_nv_pairs["tz"] = timezone
return self
@contract
def set_lang(self, lang):
"""
Set language.
:param lang: Language the application is set to
:type lang: string
:rtype: subject
"""
self.standard_nv_pairs["lang"] = lang
return self
@contract
def set_domain_user_id(self, duid):
"""
Set the domain user ID
:param duid: Domain user ID
:type duid: string
:rtype: subject
"""
self.standard_nv_pairs["duid"] = duid
return self
@contract
def set_ip_address(self, ip):
"""
Set the IP address
:param ip: IP address
:type ip: string
:rtype: subject
"""
self.standard_nv_pairs["ip"] = ip
return self
@contract
def set_useragent(self, ua):
"""
Set the user agent
:param ua: User agent
:type ua: string
:rtype: subject
"""
self.standard_nv_pairs["ua"] = ua
return self
@contract
def set_network_user_id(self, nuid):
"""
Set the network user ID field
This overwrites the nuid field set by the collector
:param nuid: Network user ID
:type nuid: string
:rtype: subject
"""
self.standard_nv_pairs["tnuid"] = nuid
return self
def set_custom(self, field, value):
"""
Set custom field
:param field: Field name
:param value: Value for a field
"""
self.standard_nv_pairs[field] = value
return self
def set_custom_by_name(self, field, value):
"""
Set custom field by getting field name from SNOWPLOW_REVERTED_TRANSFORM_MAP
:param field: Field name
:param value: Value for a field
"""
self.standard_nv_pairs[SNOWPLOW_REVERTED_TRANSFORM_MAP.get(field, field)] = value
return self
SNOWPLOW_REVERTED_TRANSFORM_MAP = {
"event": "e",
"user_ipaddress": "ip",
"app_id": "aid",
"platform": "p",
"txn_id": "tid",
"user_id": "uid",
"domain_userid": "duid",
"network_userid": "nuid",
"useragent": "ua",
"user_fingerprint": "fp",
"domain_sessionidx": "vid",
"domain_sessionid": "sid",
"dvce_created_tstamp": "dtm",
"true_tstamp": "ttm",
"dvce_sent_tstamp": "stm",
"name_tracker": "tna",
"v_tracker": "tv",
"v_collector": "cv",
"br_lang": "lang",
"br_features_pdf": "f_pdf",
"br_features_flash": "f_fla",
"br_features_java": "f_java",
"br_features_director": "f_dir",
"br_features_quicktime": "f_qt",
"br_features_realplayer": "f_realp",
"br_features_windowsmedia": "f_wma",
"br_features_gears": "f_gears",
"br_features_silverlight": "f_ag",
"br_cookies": "cookie",
"br_colordepth": "cd",
"os_timezone": "tz",
"page_referrer": "refr",
"page_url": "url",
"page_title": "page",
"doc_charset": "cs",
"event_id": "eid",
"contexts": "cx",
"se_category": "se_ca",
"se_action": "se_ac",
"se_label": "se_la",
"se_property": "se_pr",
"se_value": "se_va",
"unstruct_event": "ue_pr",
"tr_orderid": "tr_id",
"tr_affiliation": "tr_af",
"tr_total": "tr_tt",
"tr_tax": "tr_tx",
"tr_shipping": "tr_sh",
"tr_city": "tr_ci",
"tr_state": "tr_st",
"tr_country": "tr_co",
"ti_orderid": "ti_id",
"ti_sku": "ti_sk",
"ti_name": "ti_nm",
"ti_category": "ti_ca",
"ti_price": "ti_pr",
"ti_quantity": "ti_qu",
"pp_xoffset_min": "pp_mix",
"pp_xoffset_max": "pp_max",
"pp_yoffset_min": "pp_miy",
"pp_yoffset_max": "pp_may",
"tr_currency": "tr_cu",
"ti_currency": "ti_cu",
}
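# Illustrative usage sketch (not part of the original module): every setter
# returns self, so a Subject can be configured fluently, e.g.
#
#   subject = (Subject()
#              .set_platform("mob")
#              .set_user_id("user-0001")
#              .set_screen_resolution(1080, 1920)
#              .set_lang("en"))
#
# "user-0001" and the resolution values above are made-up example values.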
``` |
{
"source": "15jgme/Mercury-GS",
"score": 2
} |
#### File: Mercury-GS/low_level/packet.py
```python
from low_level.comms import frame_queue, comms_send
from low_level.frameformat import MessageFormat, DataType, struct, PROTOCOL_DELIMITER
from low_level.continuous import continuous_sender, register_continuous
import config
import threading
def packet_register_callback(tlm_function_ptr, tlm_rejection_function_ptr, tc_function_ptr, exception_handler_function_ptr):
""" Registers the callbacks for this module to pass data back to previous modules. """
global callback_telemetry_response
global callback_telecommand_response
global callback_exception_handler
global callback_telemetry_rejection_response
# Register telemetry response callback to pass telemetry packet up to module
callback_telemetry_response = tlm_function_ptr
# Register telemetry response callback to pass telemetry rejection packet up to module
callback_telemetry_rejection_response = tlm_rejection_function_ptr
# Register telecommand response callback to pass telecommand packet up to module
callback_telecommand_response = tc_function_ptr
# Register exception handler callback
callback_exception_handler = exception_handler_function_ptr
class PacketHandler(threading.Thread):
""" PacketHandler Class to handle incoming valid frames """
def __init__(self, queue):
""" Initialise Thread, set argument as frame_queue and start Thread"""
super().__init__()
self.args = queue
self.start()
def run(self):
""" Waits for queue to be populated with a valid frame, then pops one off, picks bitfields out of frame
and passes data up to correct module depending on data type field.
"""
while True:
# Wait for queue to contain a frame, pop one off
frame = frame_queue.get()
# Unpack bitfields
frame_header_bytes = frame[:4]
frame_data_type = frame[4]
frame_data_length_bytes = frame[5:9]
frame_data_length = struct.unpack("!L", frame_data_length_bytes)[0]
frame_data_bytes = frame[9:]
# Pass data field up to correct module depending on data type field
if frame_data_type == DataType.TELEMETRY_DATA.value:
callback_telemetry_response(frame_data_bytes)
elif frame_data_type == DataType.TELEMETRY_REQUEST_REJECTION.value:
callback_telemetry_rejection_response(frame_data_bytes)
elif frame_data_type == DataType.TELECOMMAND_RESPONSE.value:
callback_telecommand_response(frame_data_bytes)
frame_queue.task_done()
def packet_init():
""" Initialise Packet Handler class instance, which automatically starts the packet handler Thread. """
global packet_handler
packet_handler = PacketHandler(frame_queue)
def data_format(data_to_format, data_format_builder):
""" Format data passed in in the format of the struct builder also passed in,
return the formatted data.
This function fulfills requirements PLAT_COMMS_00050 and PLAT_COMMS_00130.
"""
formatted_data = data_format_builder.pack(*data_to_format)
return formatted_data
def packetize(data_to_packet, data_type, is_continuous, message_object_database, latest_message_object):
""" Format the data into the desired protocol
This function fulfills requirement PLAT_COMMS_00040.
"""
# Create class instance for the packet, this may be useful in the future
packet_class = MessageFormat(data_to_packet, len(data_to_packet), data_type)
# Create the packet builder using Struct module to build a packet binary representation of
# 5 Individual Bytes followed by an Unsigned 32 Bit Int, Big Endian.
packet_builder = struct.Struct("! 5B I")
# Build the packet
packet_packed = bytearray(packet_builder.pack(packet_class.header,
packet_class.reserved1,
packet_class.reserved2,
packet_class.reserved3,
packet_class.data_type,
packet_class.data_length))
# Add the data field on the end of the packet
# (this cannot be created using the packet builder as the data field is of variable size).
packet_packed.extend(packet_class.data)
# Scan the packet for delimiter bytes, add an extra delimiter after any delimiter found except the first one
packet = delimiter_scan_and_add(packet_packed)
# Start the Timeout timer for this message
latest_message_object.start_timer()
# Send the message
comms_send(packet)
if is_continuous is True:
try:
register_continuous(config.TC_TLM_RATE, continuous_sender, packet, message_object_database,
latest_message_object)
except ZeroDivisionError as err:
print("\n", repr(err))
print("ERROR: Rate is 0, cannot run continuously")
callback_exception_handler("ERROR: Rate is 0, cannot run continuously")
def delimiter_scan_and_add(data_to_scan):
""" Scan data passed in for any delimiters,
insert an extra delimiter after any delimiter found except the first one so that the receiver doesn't interpret
it as a start of a new frame.
This function fulfills requirement PLAT_COMMS_00045
"""
# Copy data buffer into mutable bytearray
data_editable_copy = bytearray(data_to_scan)
header_checked = False
num_added_delimiters = 0
# Iterate over bytes in data
for index, byte in enumerate(data_to_scan):
if byte == PROTOCOL_DELIMITER:
# This byte is a delimiter!
if header_checked is False:
# This is the first delimiter found, i.e. the start of frame. Do not add another delimiter after it.
header_checked = True
else:
# Add another delimiter after this
data_editable_copy.insert(index + num_added_delimiters, PROTOCOL_DELIMITER)
num_added_delimiters += 1
if index >= 9:
data_editable_copy[8] += 1 # TODO: Find a better solution, this will overflow after 255
# TODO: Also index of data length may not be 8 if there are delimiters in the header
# Copy edited bytearray back over to data buffer and return
scanned_data = bytes(data_editable_copy)
return scanned_data
```
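For reference, here is a minimal sketch of exercising `delimiter_scan_and_add` on its own. It assumes `PROTOCOL_DELIMITER` is the 0x55 start-of-frame byte used elsewhere in the project; the frame bytes are made up and only illustrate that a delimiter inside the data field is doubled (and the length byte bumped) while the leading one is left alone:
```python
from low_level.packet import delimiter_scan_and_add

# Hypothetical frame: start byte, 3 reserved bytes, data type 1, length 2, data containing 0x55.
sample = b"\x55" + b"\xde\xad\xbe" + b"\x01" + b"\x00\x00\x00\x02" + b"\x55\x01"
print(sample.hex())
print(delimiter_scan_and_add(sample).hex())  # the 0x55 in the data field appears twice in the output
```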
#### File: 15jgme/Mercury-GS/main.py
```python
import sys
from PyQt5.QtCore import QRegExp
from PyQt5.QtGui import QRegExpValidator, QIntValidator, QDoubleValidator, QValidator
from PyQt5.QtWidgets import QFileDialog, QApplication, QMainWindow
import config
from config import config_register_callback, change_timeout, OS, COMMS, RaspberryPi
from low_level.continuous import continuous_register_callback, adjust_continuous, continuous_stop
from low_level.packet import packet_register_callback, packet_init
from low_level.comms import comms_init, comms_register_callback, change_baud_rate, change_com_port
from platform_comms_app import Ui_MainWindow
from telecommand import tc_request_send, telecommand_register_callback, tc_response, tc_time_send
from telemetry import tlm_rejection_response, tlm_request_send, telemetry_register_callback, tlm_response
from test import transmit_test_frame, test_register_callback
import time
from datetime import datetime
from utils import epoch_to_sec
UINT_MAX = 4294967295
UINT_MIN = 0
S64INT_MAX = 9223372036854775807
S64INT_MIN = -9223372036854775808
# Validator for unsigned 32 bit integer
class UIntValidator(QValidator):
def __init__(self, parent):
QValidator.__init__(self, parent)
def validate(self, s, pos):
try:
if s == "":
# Backspace or delete
return QValidator.Acceptable, s, pos
if int(s) > UINT_MAX or int(s) < UINT_MIN:
return QValidator.Invalid, s, pos
except ValueError:
return QValidator.Invalid, s, pos
return QValidator.Acceptable, s, pos
# Validator for signed 64 bit integer
class SixtyFourBitIntValidator(QValidator):
def __init__(self, parent):
QValidator.__init__(self, parent)
def validate(self, s, pos):
try:
if s == "" or s == "-":
# Backspace or delete or minus
return QValidator.Acceptable, s, pos
if int(s) > S64INT_MAX or int(s) < S64INT_MIN:
return QValidator.Invalid, s, pos
except ValueError:
return QValidator.Invalid, s, pos
return QValidator.Acceptable, s, pos
class MainWindow(QMainWindow, Ui_MainWindow):
def __init__(self, *args, obj=None, **kwargs):
super(MainWindow, self).__init__()
self.setupUi(self)
# Set validators for GUI elements
self.inputTcTlmRateValue.setValidator(QIntValidator(0, 1000, self.inputTcTlmRateValue))
self.inputTcNumberValue.setValidator(UIntValidator(self.inputTcNumberValue))
self.inputTcTlmTimeoutValue.setValidator(QIntValidator(0, 1000, self.inputTcTlmTimeoutValue))
self.inputTlmAdHocChannelValue.setValidator(UIntValidator(self.inputTlmAdHocChannelValue))
self.inputTcDataValue.setValidator(QRegExpValidator(QRegExp(".{0,8}"), self.inputTcDataValue))
self.tlm_response_field = ({"channel": self.labelTlmSlot1, "value": self.labelTlmSlot1Value},
{"channel": self.labelTlmSlot2, "value": self.labelTlmSlot2Value},
{"channel": self.labelTlmSlot3, "value": self.labelTlmSlot3Value},
{"channel": self.labelTlmSlot4, "value": self.labelTlmSlot4Value},
{"channel": self.labelTlmSlot5, "value": self.labelTlmSlot5Value},
{"channel": self.labelTlmSlot6, "value": self.labelTlmSlot6Value},
{"channel": self.labelTlmSlot7, "value": self.labelTlmSlot7Value},
{"channel": self.labelTlmSlot8, "value": self.labelTlmSlot8Value},
{"channel": self.labelTlmSlot9, "value": self.labelTlmSlot9Value},
{"channel": self.labelTlmSlot10, "value": self.labelTlmSlot10Value},
{"channel": self.labelTlmSlot11, "value": self.labelTlmSlot11Value},
{"channel": self.labelTlmSlot12, "value": self.labelTlmSlot12Value},
{"channel": self.labelTlmSlot13, "value": self.labelTlmSlot13Value},
{"channel": self.labelTlmSlot14, "value": self.labelTlmSlot14Value},
{"channel": self.labelTlmSlot15, "value": self.labelTlmSlot15Value},
{"channel": self.labelTlmSlot16, "value": self.labelTlmSlot16Value},
{"channel": self.labelTlmSlot17, "value": self.labelTlmSlot17Value},
{"channel": self.labelTlmSlot18, "value": self.labelTlmSlot18Value},
{"channel": self.labelTlmSlot19, "value": self.labelTlmSlot19Value},
{"channel": self.labelTlmSlot20, "value": self.labelTlmSlot20Value},
{"channel": self.labelTlmSlot21, "value": self.labelTlmSlot21Value},
{"channel": self.labelTlmSlot22, "value": self.labelTlmSlot22Value},
{"channel": self.labelTlmSlot23, "value": self.labelTlmSlot23Value},
{"channel": self.labelTlmSlot24, "value": self.labelTlmSlot24Value},
{"channel": self.labelTlmSlot25, "value": self.labelTlmSlot25Value},
{"channel": self.labelTlmSlot26, "value": self.labelTlmSlot26Value},
{"channel": self.labelTlmSlot27, "value": self.labelTlmSlot27Value},
{"channel": self.labelTlmSlot28, "value": self.labelTlmSlot28Value},
{"channel": self.labelTlmSlot29, "value": self.labelTlmSlot29Value},
{"channel": self.labelTlmSlot30, "value": self.labelTlmSlot30Value},
{"channel": self.labelTlmSlot31, "value": self.labelTlmSlot31Value},
{"channel": self.labelTlmSlot32, "value": self.labelTlmSlot32Value},
{"channel": self.labelTlmSlot33, "value": self.labelTlmSlot33Value},
{"channel": self.labelTlmSlot34, "value": self.labelTlmSlot34Value},
{"channel": self.labelTlmSlot35, "value": self.labelTlmSlot35Value},
{"channel": self.labelTlmSlot36, "value": self.labelTlmSlot36Value},
{"channel": self.labelTlmSlot37, "value": self.labelTlmSlot37Value},
{"channel": self.labelTlmSlot38, "value": self.labelTlmSlot38Value},
{"channel": self.labelTlmSlot39, "value": self.labelTlmSlot39Value},
{"channel": self.labelTlmSlot40, "value": self.labelTlmSlot40Value})
self.tlm_response_list = list()
for tlm_slot in self.tlm_response_field:
tlm_slot["channel"].setText("")
tlm_slot["value"].setText("")
if RaspberryPi is False:
self.comboBoxComms.setEnabled(False)
self.dateEditSendThisDate.setDisplayFormat("dd/MM/yyyy")
# Init Serial Comms
comms_init("COM1", 9600)
# Register all callbacks
telemetry_register_callback(self.telemetry_response_receive, self.telemetry_rejection_response_receive,
self.telemetry_timeout, self.error_message_box)
telecommand_register_callback(self.telecommand_response_receive, self.telecommand_timeout,
self.error_message_box)
packet_register_callback(tlm_response, tlm_rejection_response, tc_response, self.error_message_box)
test_register_callback(self.test_response_receive, self.error_message_box)
comms_register_callback(self.error_message_box)
continuous_register_callback(self.error_message_box)
config_register_callback(self.error_message_box)
packet_init()
# TODO: I think there is a better way to handle events
# There is event handlers and signals, not sure what to use.
# https://www.learnpyqt.com/tutorials/signals-slots-events/
def on_click_send_pc_time(self, event):
# TC number for time sending is 0.
tc = 0
unix_time = time.time()
unix_time_seconds, milliseconds = epoch_to_sec(unix_time)
print('Clicked: Send PC Time')
print('Telecommand Number: {}'.format(tc))
print('Time: {}'.format(unix_time))
tc_time_send(tc, unix_time_seconds, milliseconds)
def on_click_send_this_time(self, event):
# TC number for time sending is 0.
tc = 0
time_from_ui = self.dateTimeEditSendThisTime.dateTime()
time_string = time_from_ui.toString(self.dateTimeEditSendThisTime.displayFormat())
date_from_ui = self.dateEditSendThisDate.dateTime()
date_string = date_from_ui.toString(self.dateEditSendThisDate.displayFormat())
# Preprocessing of date and time string is required before converting it into epoch time.
datetime_string = date_string + " " + time_string
datetime_object = datetime.strptime(datetime_string, '%d/%m/%Y %H:%M:%S.%f')
unix_time = (datetime_object - datetime(1970, 1, 1)).total_seconds()
unix_time_seconds, milliseconds = epoch_to_sec(unix_time)
print('Clicked: Send This Time')
print('Date: {}'.format(date_string))
print('Time: {}'.format(time_string))
tc_time_send(tc, unix_time_seconds, milliseconds)
def on_click_send_telecommand_request(self, event):
tc = self.inputTcNumberValue.text()
data = self.inputTcDataValue.text()
# Text value of the comboBox. see: https://doc.qt.io/qt-5/qcombobox.html#currentData-prop
datatype = self.comboBoxTcDataType.currentText()
# checkState() returns a Qt.CheckState value: Unchecked (0), PartiallyChecked (1) or Checked (2), so a fully checked box compares equal to 2.
is_continuous = self.checkBoxTcReqContinuous.checkState() == 2
print('TC: {}'.format(tc))
print('Data: {}'.format(data))
print('DataType: {}'.format(datatype))
print('Is continuous: {}'.format(is_continuous))
tc_request_send(tc, data, datatype, is_continuous)
def on_click_send_telemetry_request(self, event):
tlm_channel = self.inputTlmAdHocChannelValue.text()
is_continuous = self.checkBoxTlmReqContinuous.checkState() == 2
print('Channel: {}'.format(tlm_channel))
print('Is continuous: {}'.format(is_continuous))
tlm_request_send(tlm_channel, is_continuous)
def on_click_test_transmit(self):
delimiter = self.inputDelimiter.text()
reserved_bytes = [self.inputReservedBytes1.text(), self.inputReservedBytes2.text(),
self.inputReservedBytes3.text()]
data_type = self.inputDataType.text()
data_length = self.inputDataLength.text()
data_field = self.inputDataField.text()
transmit_test_frame(delimiter, reserved_bytes, data_type, data_length, data_field)
def test_response_receive(self, test_response):
self.outputResponse.setText(test_response)
def on_click_upload_open(self, event):
file_dialog = QFileDialog(self)
file_dialog.show()
file_path = file_dialog.getOpenFileName()[0]
file_dialog.hide()
self.inputUploadFrom.setText(file_path)
def on_click_upload_abort(self, event):
pass
def on_click_download_open(self):
file_dialog = QFileDialog(self)
file_dialog.show()
file_path = file_dialog.getOpenFileName()[0]
file_dialog.hide()
self.inputDownloadTo.setText(file_path)
def on_baud_rate_change(self):
change_baud_rate(int(self.comboBoxCommsBaudValue.currentText()))
def on_tc_tlm_rate_change(self):
rate_change = self.inputTcTlmRateValue.text()
if rate_change != "" and rate_change != "0":
adjust_continuous(int(rate_change))
else:
self.error_message_box("ERROR: Invalid Rate Value")
continuous_stop()
def on_timeout_change(self):
timeout_change = self.inputTcTlmTimeoutValue.text()
if timeout_change != "" or timeout_change != 0:
change_timeout(timeout_change)
else:
self.error_message_box("ERROR: Invalid Timeout Value")
def on_com_port_change(self):
import config
config.COM_PORT = self.inputComPort.currentText()
change_com_port(config.COM_PORT)
def on_comms_change(self):
import config
config.COMMS = self.comboBoxComms.currentText()
if config.COMMS == "SERIAL":
self.comboBoxCommsBaudValue.setEnabled(True)
self.inputComPort.setEnabled(True)
elif config.COMMS == "RF69":
self.comboBoxCommsBaudValue.setEnabled(False)
self.inputComPort.setEnabled(False)
def on_continuous_toggle(self, is_continuous):
if is_continuous is False:
continuous_stop()
def on_select_tc_datatype(self):
self.inputTcDataValue.setValidator(None)
self.inputTcDataValue.clear()
if self.comboBoxTcDataType.currentText() == "String":
self.inputTcDataValue.setValidator(QRegExpValidator(QRegExp(".{0,8}"), self.inputTcDataValue))
elif self.comboBoxTcDataType.currentText() == "Integer":
self.inputTcDataValue.setValidator(SixtyFourBitIntValidator(self.inputTcDataValue))
elif self.comboBoxTcDataType.currentText() == "Floating Point":
self.inputTcDataValue.setValidator(QDoubleValidator())
def telemetry_response_receive(self, telemetry_channel, telemetry_data):
if not any(d.get('channel') == telemetry_channel for d in self.tlm_response_list):
self.tlm_response_list.append({"channel": telemetry_channel, "value": telemetry_data})
else:
for item in self.tlm_response_list:
if item["channel"] == telemetry_channel:
item["value"] = telemetry_data
from operator import itemgetter
self.tlm_response_list = sorted(self.tlm_response_list, key=itemgetter("channel"))
for slot, telemetry_to_plot in zip(self.tlm_response_field, self.tlm_response_list):
slot["channel"].setText("TLM CH " + telemetry_to_plot["channel"])
slot["value"].setText(telemetry_to_plot["value"])
def telemetry_rejection_response_receive(self, telemetry_channel, telemetry_rejection_code):
self.labelTlmErrChannelValue.setText(telemetry_channel)
self.labelTlmErrReasonValue.setText(telemetry_rejection_code)
def telecommand_response_receive(self, telecommand_number, telecommand_data):
self.labelTcResNumberValue.setText(telecommand_number)
self.labelTcResStatus.setText(telecommand_data)
def telemetry_timeout(self):
timeout_count = int(self.labelTlmTimeoutsValue.text()) + 1
self.labelTlmTimeoutsValue.setText(str(timeout_count))
def telecommand_timeout(self):
timeout_count = int(self.labelTcResTimeoutsValue.text()) + 1
self.labelTcResTimeoutsValue.setText(str(timeout_count))
def error_message_box(self, error_text, error_timeout=5000):
self.statusbar.showMessage(error_text, error_timeout)
app = QApplication(sys.argv)
window = MainWindow()
window.show()
sys.exit(app.exec())
```
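A side note on the `checkState() == 2` comparisons above: Qt check boxes are tri-state, so `checkState()` returns `Qt.Unchecked` (0), `Qt.PartiallyChecked` (1) or `Qt.Checked` (2). Comparing against the enum value reads more clearly; a small sketch:
```python
from PyQt5.QtCore import Qt

def checkbox_is_checked(checkbox) -> bool:
    """Return True only when the box is fully checked (tri-state aware)."""
    return checkbox.checkState() == Qt.Checked
```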
#### File: Mercury-GS/ss_simulator/ss_sim.py
```python
import serial
import struct, random, math
from time import sleep
#port = "COM1"
port = "COM2"
# timeout in 1/10 seconds (roughly)
TIMEOUT = 10
# possible states
WAITX55 = 0
RESBYTES = 1
DATATYPE = 2
DATALENGTH = 3
DATAFIELD = 4
COMPLETE = 5
X55_NONE = 0
X55_GOT1 = 1
X55_GOT2 = 2
ser = serial.Serial(port, 9600, timeout=0)
def get_packet():
state = WAITX55
x55status = X55_NONE
# reserved bytes count (expect 3)
rbcount = 0
# data length bytes count (expect 4)
dlcount = 0
# used to store datatype
datatype = 0
# length specified in data length bytes
datalength = 0
# bytes received in data field
datarecvd = 0
datafield = b''
# used for timeout
timecount = 0
while state != COMPLETE:
nextbyte = ser.read(1)
if nextbyte:
# we have received a character so reset the timeout count
timecount = 0
if state == DATAFIELD:
datarecvd += 1
if x55status:
if nextbyte != b'\x55':
if state:
# Not waiting for x55 so must have invalid packet
print('\nInvalid packet length')
#print('\nStart of new frame')
#print('Reserved Bytes:', end=' ')
state = RESBYTES
x55status = X55_NONE
rbcount = 0
dlcount = 0
datarecvd = 0
datatype = 0
datalength = 0
datafield = b''
else:
x55status = X55_GOT2
if nextbyte == b'\x55' and x55status != X55_GOT2:
#print('got x55', end=' ')
x55status = X55_GOT1
else:
x55status = X55_NONE
if state == RESBYTES:
rbcount += 1
#print(rbcount,nextbyte, end=' ')
if rbcount == 3:
state = DATATYPE
elif state == DATATYPE:
#print('\nData Type:',nextbyte)
datatype = int.from_bytes(nextbyte, "big")
#print(' datatype=',datatype)
state = DATALENGTH
#print('Data Length:', end=' ')
elif state == DATALENGTH:
dlcount += 1
#print(dlcount,nextbyte, end=' ')
datalength = (datalength * 256) + int.from_bytes(nextbyte, "big")
if dlcount == 4:
state = DATAFIELD
#print('\n datalength=', datalength)
#print('Data:', end=' ')
elif state == DATAFIELD:
#print(nextbyte, end=' ')
datafield += nextbyte
if datarecvd == datalength:
#print('\n Data field=', datafield)
#state = WAITX55
state = COMPLETE
else: print('Junk received:', nextbyte)
else:
# No character available so sleep and keep a count for timeout
if timecount >= TIMEOUT:
#print ('Timeout')
state = COMPLETE
timecount = 0
else:
sleep(0.1)
timecount += 1
return (datatype, datafield)
incycle = 0
while True:
# packet is a tuple
packet = get_packet()
datatype = packet[0]
datafield = packet[1]
datalength = len(datafield)
if datatype:
print('\n', packet)
if datatype == 1: # telecommand
if datalength < 4:
print('invalid telecommand')
else:
telecmd = datafield[0:4]
telecmdstr = struct.unpack('>I', telecmd)
telecmdno = telecmdstr[0]
if datalength != 12:
print('invalid telecommand length')
response = b'\x55\xde\xad\xbe\x02\x00\x00\x00\x05' + telecmd + b'\x02'
elif telecmdno == 1:
print('telecommand 1 - respond success')
response = b'\x55\xde\xad\xbe\x02\x00\x00\x00\x05\x00\x00\x00\x01\x00'
elif telecmdno == 2:
print('telecommand 2 - respond failed')
response = b'\x55\xde\xad\xbe\x02\x00\x00\x00\x05\x00\x00\x00\x02\x01'
elif telecmdno == 3:
print('telecommand 3 - respond invalid length')
response = b'\x55\xde\xad\xbe\x02\x00\x00\x00\x05\x00\x00\x00\x03\x02'
elif telecmdno == 5:
print('telecommand 5 - respond invalid command argument')
response = b'\x55\xde\xad\xbe\x02\x00\x00\x00\x05\x00\x00\x00\x05\x04'
else:
print('telecommand', telecmdno, 'not supported')
if telecmdno == 85:
response = b'\x55\xde\xad\xbe\x02\x00\x00\x00\x06\x00\x00\x00UU\x03'
else:
response = b'\x55\xde\xad\xbe\x02\x00\x00\x00\x05' + telecmd + b'\x03'
ser.write(response)
elif datatype == 4: # telemetry
print('telemetry request')
if datalength < 4:
print('invalid telemetry channel length')
else:
telechan = datafield[0:4]
telechanstr = struct.unpack('>I', telechan)
telechanno = telechanstr[0]
print('telechan=', telechan)
if datalength != 4:
print('invalid telemetry length')
response = b'\x55\xde\xad\xbe\x07\x00\x00\x00\x05' + telechan + b'\x01'
# channels 1-4, 12 and 85 responses taken from test_frames files
elif telechanno == 1:
response = b'\x55\xde\xad\xbe\x03\x00\x00\x00\x0c\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01'
elif telechanno == 2:
response = b'\x55\xde\xad\xbe\x03\x00\x00\x00\x0c\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x02'
elif telechanno == 3:
response = b'\x55\xde\xad\xbe\x03\x00\x00\x00\x0c\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x03'
elif telechanno == 4:
print('telemetry channel 4 - respond invalid data length')
response = b'\x55\xde\xad\xbe\x07\x00\x00\x00\x05\x00\x00\x00\x04\x01'
elif telechanno == 12:
response = b'\x55\xde\xad\xbe\x03\x00\x00\x00\x0c\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\xff'
elif telechanno == 85:
response = b'\x55\xde\xad\xbe\x03\x00\x00\x00\x0f\x00\x00\x00\x55\x55\x00\x00\x00\x00\x55\x55\x55\x55\x00\xfe'
# channels 20 to 39 respond with a random number
elif telechanno > 19 and telechanno < 40:
teledata = random.randint(0,255)
teledatab = bytes(chr(teledata),'latin-1')
if teledata == 85: # case of \x55
response = b'\x55\xde\xad\xbe\x03\x00\x00\x00\x0d' + telechan + b'\x00\x00\x00\x00\x00\x00\x00UU'
else:
response = b'\x55\xde\xad\xbe\x03\x00\x00\x00\x0c' + telechan + b'\x00\x00\x00\x00\x00\x00\x00' + teledatab
# other channels respond channel not supported
else:
response = b'\x55\xde\xad\xbe\x07\x00\x00\x00\x05' + telechan + b'\x00'
print('telemetry channel', telechanno, response)
ser.write(response)
#sleep(5)
#ser.write(response)
else:
print('other data type')
# Generate telemetry data for channel 42
t = int(random.randint(60, 80) * (1 + math.sin(incycle)))
x = bytes(chr(t),'latin-1')
if t == 85: # case of \x55
response = b'\x55\xde\xad\xbe\x03\x00\x00\x00\x0d\x00\x00\x00\x2a\x00\x00\x00\x00\x00\x00\x00UU'
else:
response = b'\x55\xde\xad\xbe\x03\x00\x00\x00\x0c\x00\x00\x00\x2a\x00\x00\x00\x00\x00\x00\x00' + x
incycle += 0.01
if incycle >= 2 * math.pi:
incycle = 0
# uncomment next line to receive telemetry on channel 42
ser.write(response)
ser.close()
```
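The frames this simulator accepts follow the layout `get_packet()` parses: a 0x55 start byte, three reserved bytes, one data-type byte, a four-byte big-endian length, then the data field. A minimal client sketch (the port name and reserved-byte values are placeholders) that requests telemetry channel 3 and prints the canned reply:
```python
import struct
import serial

# Hypothetical telemetry request: data type 4 carrying a 4-byte channel number.
channel = 3
frame = b"\x55" + b"\xde\xad\xbe" + bytes([4]) + struct.pack(">I", 4) + struct.pack(">I", channel)

with serial.Serial("COM1", 9600, timeout=1) as link:
    link.write(frame)
    print(link.read(64))  # ss_sim.py answers with its fixed channel-3 telemetry frame
```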
#### File: 15jgme/Mercury-GS/utils.py
```python
def epoch_to_sec(unix_time):
unix_time_seconds = int(unix_time)
# Milliseconds takes 2 Bytes storage. So, to convert it into unsigned short, lets just take all the bits upto 16 bit.
millisec_through_sec = round((unix_time - unix_time_seconds)*1000)
milliseconds = (millisec_through_sec & 0xffff)
return unix_time_seconds, milliseconds
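# Hypothetical example (not part of the original file): the fractional part becomes
# milliseconds and is masked into an unsigned 16-bit value.
#   >>> epoch_to_sec(1612345678.987)
#   (1612345678, 987)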
``` |
{
"source": "15jjg6/university-setup",
"score": 3
} |
#### File: university-setup/scripts/config.py
```python
from datetime import datetime
from pathlib import Path
def get_week(d=None):
# datetime.today() is resolved at call time; a default argument would be evaluated only once, at import.
d = d or datetime.today()
return (int(d.strftime("%W")) + 52 - 5) % 52
CURRENT_COURSE_SYMLINK = Path('~/Notes/current-course').expanduser()
CURRENT_COURSE_ROOT = CURRENT_COURSE_SYMLINK.resolve()
# Comment out watch file until I find good use for it
# CURRENT_COURSE_WATCH_FILE = Path('/tmp/current_course').resolve()
ROOT = Path('~/Notes/bachelor-3/semester-2').expanduser()
DATE_FORMAT = '%a %d %b %Y %H:%M'
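# Illustration (not part of the original file): 15 Feb 2021 falls in calendar week 7
# (strftime "%W"), so the offset maps it to week (7 + 52 - 5) % 52 == 2 of the semester.
#   >>> get_week(datetime(2021, 2, 15))
#   2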
``` |
{
"source": "15leesan/suguru",
"score": 3
} |
#### File: 15leesan/suguru/find_starters.py
```python
from typing import List, Tuple, Dict
import cv2
import pytesseract
import numpy as np
pytesseract.pytesseract.tesseract_cmd = r"C:\Program Files\Tesseract-OCR\tesseract.exe"
Coordinate = Tuple[int, int]
grid_size: Tuple[int] = tuple()
DEBUG = False
blocks = []
def set_block(block):
global blocks
blocks = block
def split(file_path) -> Dict[Coordinate, np.ndarray]:
im: np.ndarray = cv2.imread(file_path)
cell_size = (round(im.shape[0] / grid_size[0]), round(im.shape[1] / grid_size[1]))
cells = {}
for y in range(grid_size[1]):
for x in range(grid_size[0]):
cell = im[y * cell_size[1]:(y + 1) * cell_size[1], x * cell_size[0]:(x + 1) * cell_size[0]]
# cell = im.crop((x * cell_size[0], y * cell_size[1], (x + 1) * cell_size[0], (y + 1) * cell_size[1]))
cells[(x, y)] = cell
return cells
def set_starters_grid_size(grid_x, grid_y):
global grid_size
grid_size = (grid_x, grid_y)
def show_parts(file_path, grid_x, grid_y) -> Dict[Coordinate, int]:
global grid_size
grid_size = (grid_x, grid_y)
sections = split(file_path)
starters: Dict[Coordinate, int] = {}
for y in range(grid_y):
for x in range(grid_x):
im: np.ndarray = sections[(x, y)]
# Alternatively: can be skipped if you have a Blackwhite image
gray = cv2.cvtColor(im, cv2.COLOR_RGB2GRAY)
_, img_bin = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
gray: np.ndarray = cv2.bitwise_not(img_bin)
cell_x, cell_y = gray.shape[0:2]
margin = round(cell_x / 7.5)
gray = gray[margin:-margin, margin:-margin]
if DEBUG:
cv2.imshow("a", gray)
kernel = np.ones((2, 1), np.uint8)
img = cv2.erode(gray, kernel, iterations=1)
img = cv2.dilate(img, kernel, iterations=1)
out_below = pytesseract.image_to_string(img, config='--psm 10 --oem 3 -c tessedit_char_whitelist=0123456789 -c page_separator=""').strip()
if len(out_below) > 0:
starters[(x, y)] = int(out_below)
if DEBUG:
print(f"({out_below}), {len(out_below)=}")
cv2.waitKey()
cv2.destroyWindow("a")
# cv2.imwrite("single_cell.png", im)
return starters
def resolve(starters):
original_image: np.ndarray = np.full((grid_size[1] + 2, grid_size[0] + 2, 3), 255, np.uint8)
SCALE = 48
image_pos = lambda x, y: ((x + 1) * SCALE, (y + 1) * SCALE)
original_image = cv2.resize(original_image, (original_image.shape[0] * SCALE, original_image.shape[1] * SCALE))
for x in range(grid_size[0] + 1):
cv2.line(original_image, image_pos(x, 0), image_pos(x, grid_size[1]), (0, 0, 0), 1)
for y in range(grid_size[1] + 1):
cv2.line(original_image, image_pos(0, y), image_pos(grid_size[0], y), (0, 0, 0), 1)
for y in range(grid_size[1]):
for x in range(grid_size[0]):
current = blocks[y][x]
# Up
if y == 0:
cv2.line(original_image, image_pos(x, y), image_pos(x + 1, y), (0, 0, 0), 3)
elif blocks[y - 1][x] != current:
cv2.line(original_image, image_pos(x, y), image_pos(x + 1, y), (0, 0, 0), 3)
# Down
if y == grid_size[1] - 1:
cv2.line(original_image, image_pos(x, y + 1), image_pos(x + 1, y + 1), (0, 0, 0), 3)
# Left
if x == 0:
cv2.line(original_image, image_pos(x, y), image_pos(x, y + 1), (0, 0, 0), 3)
elif blocks[y][x - 1] != current:
cv2.line(original_image, image_pos(x, y), image_pos(x, y + 1), (0, 0, 0), 3)
# Right
if x == grid_size[0] - 1:
cv2.line(original_image, image_pos(x + 1, y), image_pos(x + 1, y + 1), (0, 0, 0), 3)
selection = (0, 0)
while True:
im = original_image.copy()
cv2.rectangle(im, image_pos(selection[0], selection[1]), image_pos(selection[0] + 1, selection[1] + 1), (0, 0, 255), 3)
for pos in starters:
to_pos = image_pos(pos[0], pos[1] + 1)
to_pos = to_pos[0] + 10, to_pos[1] - 10
cv2.putText(im, str(starters[pos]), to_pos, cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2, cv2.LINE_AA)
cv2.imshow("Rewrite", im)
k = cv2.waitKey()
if k == 27 or k == 13:
# ESC pressed
cv2.destroyWindow("Rewrite")
return starters
elif k == 100:
selection = selection[0] + 1, selection[1]
elif k == 97:
selection = selection[0] - 1, selection[1]
elif k == 119:
selection = selection[0], selection[1] - 1
elif k == 115:
selection = selection[0], selection[1] + 1
elif k in [8, 32, 48]:
if selection in starters:
del starters[selection]
elif 49 <= k <= 57:
starters[selection] = k - 48
selection = min(max(selection[0], 0), grid_size[0] - 1), min(max(selection[1], 0), grid_size[1] - 1)
```
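A minimal driver sketch for the helpers above; the image path is a placeholder and the block layout reuses the 6x6 example from the solver's test data:
```python
import find_starters

blocks = [[1, 1, 2, 2, 3, 3],
          [1, 1, 1, 2, 2, 4],
          [5, 6, 7, 7, 2, 4],
          [5, 6, 6, 7, 7, 4],
          [5, 5, 6, 8, 7, 4],
          [5, 8, 8, 8, 8, 4]]

find_starters.set_block(blocks)                          # block layout used when drawing the grid
starters = find_starters.show_parts("puzzle.png", 6, 6)  # OCR pass over every cell
starters = find_starters.resolve(starters)               # interactive correction window
print(starters)                                          # {(x, y): digit, ...}
```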
#### File: 15leesan/suguru/suguru_solver.py
```python
from itertools import count
from typing import NamedTuple
from PIL import Image, ImageDraw, ImageFont
import colorsys
from functools import cache
from random import shuffle, seed
from pprint import pprint
import brute_strength_solve
from typing import Tuple, List, Set, Dict
Coordinate = Tuple[int, int]
def solve(blocks, starters):
width = len(blocks[0])
height = len(blocks)
def get_coords_in_block(index):
l = []
for y in range(height):
for x in range(width):
if blocks[y][x] == index:
l.append((x, y))
return l
def get_block_index_from_coords(x, y):
return int(blocks[y][x])
def get_other_coords_in_block(x, y):
block = get_coords_in_block(get_block_index_from_coords(x, y))
block = [coord for coord in block if coord != (x, y)]
return block
num_blocks = len({blocks[y][x] for y in range(height) for x in range(width)})
seed(hash(frozenset(starters.items())))
colors = [tuple(int(k * 255) for k in colorsys.hsv_to_rgb(i / num_blocks, 0.5, 0.8)) for i in range(num_blocks)]
shuffle(colors)
img = Image.new("RGB", size=(height, width), color=(255, 255, 255))
draw = ImageDraw.Draw(img)
for y in range(height):
for x in range(width):
img.putpixel((x, y), colors[int(blocks[y][x]) - 1])
line_scale = 24
second_scale = 2
scale = line_scale * second_scale
img = img.resize((width * line_scale, height * line_scale))
draw = ImageDraw.Draw(img)
line_pos = lambda x, y: (x * line_scale - 1, y * line_scale - 1)
# draw.line([(x * line_scale, y * line_scale) for x, y in get_block_edge_points(5)], width=1, fill=(0, 0, 0))
line_args = {"fill": (100, 100, 100), "width": 2}
for y in range(height):
for x in range(width):
current = blocks[y][x]
# Up
if y == 0:
draw.line([line_pos(x, y), line_pos(x + 1, y)], **line_args)
elif blocks[y - 1][x] != current:
draw.line([line_pos(x, y), line_pos(x + 1, y)], **line_args)
# Down
if y == height - 1:
draw.line([line_pos(x, y + 1), line_pos(x + 1, y + 1)], **line_args)
# Left
if x == 0:
draw.line([line_pos(x, y), line_pos(x, y + 1)], **line_args)
elif blocks[y][x - 1] != current:
draw.line([line_pos(x, y), line_pos(x, y + 1)], **line_args)
# Right
if x == width - 1:
draw.line([line_pos(x + 1, y), line_pos(x + 1, y + 1)], **line_args)
img = img.resize((width * scale, height * scale))
original_image = img.copy()
draw = ImageDraw.Draw(img)
font = ImageFont.truetype("arial.ttf", scale)
small_font = ImageFont.truetype("arial.ttf", scale // 10)
cell_pos = lambda x, y: ((x + 0.5) * scale, (y + 0.5) * scale)
DefaultCell = {"possible": {1, 2, 3, 4, 5, 6, 7, 8, 9}, "actual": None, "found": False, "starter": False, "guess": False, "original_possible": None}
grid: List[List[Dict]] = []
for y in range(height):
grid.append([])
for x in range(width):
grid[y].append(DefaultCell.copy())
grid[y][x]["possible"] = DefaultCell["possible"].copy()
grid[y][x]["position"] = (x, y)
for pos in starters.keys():
x, y = pos
val = starters[pos]
grid[y][x]["possible"].clear()
grid[y][x]["possible"].add(val)
grid[y][x]["found"] = True
grid[y][x]["actual"] = val
grid[y][x]["starter"] = True
def remove_at(x, y, val):
if x < 0 or x >= width or y < 0 or y >= height: return
if val in grid[y][x]["possible"]:
grid[y][x]["possible"].remove(val)
def update_at(x, y):
if x < 0 or x >= width or y < 0 or y >= height: return
if grid[y][x]["found"]:
for cell in get_coords_in_block(get_block_index_from_coords(x, y)):
if cell != (x, y):
if not grid[cell[1]][cell[0]]["found"]:
if grid[y][x]["actual"] in grid[cell[1]][cell[0]]["possible"]:
grid[cell[1]][cell[0]]["possible"].remove(grid[y][x]["actual"])
grid[y][x]["possible"] = {grid[y][x]["actual"]}
return
if len(grid[y][x]["possible"]) == 1:
grid[y][x]["found"] = True
grid[y][x]["actual"] = grid[y][x]["possible"].copy().pop()
def update_all():
for y in range(height):
for x in range(width):
update_at(x, y)
def find_other_neighbors(x, y) -> Set[Coordinate]:
coords = set()
for dx in [-1, 0, 1]:
for dy in [-1, 0, 1]:
if dx == dy == 0: continue
coords.add((x + dx, y + dy))
return coords
def find_all_neighbors(x, y) -> Set[Coordinate]:
coords = set()
for dx in [-1, 0, 1]:
for dy in [-1, 0, 1]:
coords.add((x + dx, y + dy))
return coords
def find_common_neighbors(positions: Set[Coordinate]):
commons: Set[Coordinate] = None
for coord in positions:
current_coords: Set[Coordinate] = find_all_neighbors(coord[0], coord[1])
if commons is None:
commons = current_coords
else:
commons.intersection_update(current_coords)
if commons is None:
return set()
return commons
def copy_grid():
copy = []
for y in range(len(grid)):
old_row = grid[y]
copy_row = []
for x in range(len(old_row)):
old_cell = grid[y][x]
new_cell = {k: old_cell[k].copy() if hasattr(old_cell[k], "copy") else old_cell[k] for k in old_cell}
copy_row.append(new_cell)
copy.append(copy_row)
return copy
def grids_same(old):
for y in range(len(grid)):
for x in range(len(grid[y])):
if old[y][x] != grid[y][x]:
return False
return True
def check_validity() -> bool:
for y in range(height):
for x in range(width):
if len(grid[y][x]["possible"]) == 0:
return False
if not grid[y][x]["found"]:
continue
for dx in [-1, 0, 1]:
for dy in [-1, 0, 1]:
if dx == dy == 0: continue
if not (0 <= x + dx < width and 0 <= y + dy < height): continue
if grid[y][x]["actual"] == grid[y + dy][x + dx]["actual"]:
print(f"Failure! Two digits next to each other at {x, y} and {x + dx, y + dy}: {grid[y][x]['actual']}")
return False
for other_cell in get_other_coords_in_block(x, y):
if grid[y][x]["actual"] == grid[other_cell[1]][other_cell[0]]["actual"]:
print("Failure! Two digits in the same block")
return False
return True
for i in range(1, num_blocks + 1):
cells = get_coords_in_block(i)
size = len(cells)
for cell in cells:
x, y = cell
grid[y][x]["possible"] = {k for k in grid[y][x]["possible"] if k <= size}
old_grid = copy_grid()
for i in count():
should_continue = False
update_all()
# https://pzl.org.uk/suguru.html#Algorithms
for block_index in range(1, num_blocks + 1):
coord_list = get_coords_in_block(block_index)
block_size = len(coord_list)
# Hidden single
found_actuals = {i: None for i in range(1, block_size + 1)}
for pos in coord_list:
cell = grid[pos[1]][pos[0]]
for possibility in cell["possible"]:
if found_actuals[possibility] is None:
found_actuals[possibility] = pos
elif type(found_actuals[possibility]) == tuple:
found_actuals[possibility] = True
for number in found_actuals:
value = found_actuals[number]
if type(value) == tuple:
x, y = value
grid[y][x]["found"] = True
grid[y][x]["actual"] = number
current_coords_by_possibilities: Dict[int, Set[Coordinate]] = {}
for index in range(1, 10):
current_coords_by_possibilities[index] = set()
for pos in coord_list:
cell = grid[pos[1]][pos[0]]
if cell["found"]: continue
for possibility in cell["possible"]:
current_coords_by_possibilities[possibility].add(pos)
# print(current_coords_by_possibilities)
for index in range(1, 10):
if len(current_coords_by_possibilities[index]) <= 1: continue
neighbors = find_common_neighbors(current_coords_by_possibilities[index])
external_common = [coord for coord in neighbors if coord not in coord_list]
for external_pos in external_common:
remove_at(external_pos[0], external_pos[1], index)
# print(index, external_common)
# print(index, neighbors, current_coords_by_possibilities[index])
# print(current_coords_by_possibilities)
update_all()
for y in range(height):
for x in range(width):
number_possible = len(grid[y][x]["possible"])
if number_possible == 2:
for other_cell in get_coords_in_block(get_block_index_from_coords(x, y)):
# Naked pairs
if other_cell == (x, y): continue
if grid[y][x]["possible"] == grid[other_cell[1]][other_cell[0]]["possible"]:
for removal_coord in get_coords_in_block(get_block_index_from_coords(x, y)):
if removal_coord == (x, y) or removal_coord == other_cell: continue
removal_cell = grid[removal_coord[1]][removal_coord[0]]
[removal_cell["possible"].remove(possibility) for possibility in grid[y][x]["possible"] if possibility in removal_cell["possible"]]
should_continue = True
for cell in find_other_neighbors(x, y):
# Forbidden neighbor
remove_at(cell[0], cell[1], grid[y][x]["actual"])
update_at(cell[0], cell[1])
for cell in get_coords_in_block(get_block_index_from_coords(x, y)):
# Exclusion rule
if cell != (x, y):
remove_at(cell[0], cell[1], grid[y][x]["actual"])
update_at(cell[0], cell[1])
if i > width * height:
should_continue = False
print("Did not finish!")
if not should_continue:
print(f"Took {i} iterations")
break
if grids_same(old_grid):
should_continue = False
print("Did not finish, beginning brute force")
new_board = [
[(grid[y][x]["actual"]) if grid[y][x]["found"] else (0) for x in range(width)] for y in range(height)
]
brute_strength_solve.init(blocks, (width, height))
result = brute_strength_solve.solve_suguru(new_board)
print(f"Solved {result=}")
if result:
for y in range(height):
for x in range(width):
grid[y][x]["found"] = True
grid[y][x]["actual"] = new_board[y][x]
break
# should_continue = False
for y in range(height):
for x in range(width):
if not grid[y][x]["found"]:
should_continue = True
continue
# if has_guessed
# # pprint(grid)
# for y in range(height):
# for x in range(width):
# current = grid[y][x]
# col = (100, 100, 100) if current["starter"] else (0, 0, 0)
# actual = current["actual"] if current["found"] else ""
#
# for num in range(1, 10):
# small_pos = ((x + (num / 12)) * scale, y * scale)
# present = (255, 255, 255) if num in grid[y][x]["possible"] else (0, 0, 0)
# draw.text(small_pos, str(num), font=small_font, anchor="la", fill=present)
# draw.text(cell_pos(x, y), str(actual), font=font, anchor="mm", fill=col)
# img.save("sugu.png")
# # pprint([grid[p[1]][p[0]] for p in get_coords_in_block(5)])
# input(">")
old_grid = copy_grid()
for y in range(height):
for x in range(width):
current = grid[y][x]
col = (100, 100, 100) if current["starter"] else (0, 0, 0)
actual = current["actual"] if current["found"] else ""
draw.text(cell_pos(x, y), str(actual), font=font, anchor="mm", fill=col)
img.save("sugu.png")
if __name__ == '__main__':
test_blocks = [
[1, 1, 2, 2, 3, 3],
[1, 1, 1, 2, 2, 4],
[5, 6, 7, 7, 2, 4],
[5, 6, 6, 7, 7, 4],
[5, 5, 6, 8, 7, 4],
[5, 8, 8, 8, 8, 4]
]
test_starters = { (1, 0):5, (3, 0):3, (1, 2):3, (5, 2):2, (3, 3):2 }
# test_blocks = [
# "111122",
# "334122",
# "344452",
# "364555",
# "366758",
# "668888"
# ]
#
# test_starters = {}
# test_starters[(0, 1)] = 2
# test_starters[(1, 4)] = 4
# test_starters[(1, 5)] = 1
# test_starters[(2, 0)] = 5
# test_starters[(3, 5)] = 3
# test_starters[(4, 0)] = 1
# test_starters[(5, 4)] = 5
#
#
solve(test_blocks, test_starters)
``` |
{
"source": "15menou/srocket",
"score": 3
} |
#### File: 15menou/srocket/environement.py
```python
import numpy as np
from data import Aspect
class Atm:
P_0 = 1.013 # bar (sea-level pressure, roughly 1013 hPa)
pressure_decrease_rate = 10000 # meters
rho_0 = 1.125 # kg.m^-3
rho_decrease_rate = 10000 # meters
color_0 = (124, 230, 255)
color_infinity = (0, 38, 128)
space_height = 100000 # meters
@classmethod
def pressure(cls, z):
# z in meters with respect to the ground.
return Atm.P_0 * np.exp(- z / Atm.pressure_decrease_rate) # APPROXIMATION !
@classmethod
def vol_density(cls, z):
# z in meters with respect to the ground.
return Atm.rho_0 * np.exp(- z / Atm.rho_decrease_rate) # APPROXIMATION !
@classmethod
def air_color(cls, r):
if r < Earth.radius:
return Aspect.rgb_to_hex(Atm.color_0)
elif Earth.radius <= r and r < Atm.space_height + Earth.radius:
p = (r - Earth.radius) / Atm.space_height
q = 1 - p
color = [int(q * Atm.color_0[i] + p * Atm.color_infinity[i]) for i in range(3)]
return Aspect.rgb_to_hex(tuple(color))
else:
return Aspect.rgb_to_hex(Atm.color_infinity)
class Earth:
radius = 6371000.0 # meters
g = 9.81 # on ground
class Physics:
G = 6.674e-11 # m^3.kg^-1.s^-1
```
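A quick sanity check of the exponential atmosphere model above; this sketch only exercises the two class methods and prints the modelled fall-off with altitude:
```python
from environement import Atm

for z in (0, 5_000, 10_000, 20_000):
    print(f"z = {z:>6} m   P = {Atm.pressure(z):.3f}   rho = {Atm.vol_density(z):.3f} kg/m^3")
```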
#### File: 15menou/srocket/log.py
```python
class Log:
HIDE_ALL = False
SHOW_FATAL = True
SHOW_ERROR = True
SHOW_WARNING = True
SHOW_DEBUG = True
SHOW_COMMENT = True
@classmethod
def set_level(cls, lvl):
"""
lvl is either :
- 'none'
- 'fatal',
- 'error',
- 'warning',
- 'debug',
- or 'comment'.
"""
if lvl == 'none':
Log.HIDE_ALL = True
else:
Log.HIDE_ALL = False
Log.SHOW_FATAL = True
Log.SHOW_ERROR = not(lvl == 'fatal')
Log.SHOW_WARNING = lvl in ['warning', 'debug', 'comment']
Log.SHOW_DEBUG = lvl in ['debug', 'comment']
Log.SHOW_COMMENT = lvl == 'comment'
@classmethod
def print(cls, msg):
display = not Log.HIDE_ALL
if display:
print(msg)
@classmethod
def get_msg(cls, msg, tag=''):
return '[{}] {}'.format(tag, msg)
@classmethod
def fatal(cls, msg):
if Log.SHOW_FATAL:
print(cls.get_msg(msg, 'FATAL'))
@classmethod
def error(cls, msg):
if Log.SHOW_ERROR:
print(cls.get_msg(msg, 'ERROR'))
@classmethod
def warning(cls, msg):
if Log.SHOW_WARNING:
print(cls.get_msg(msg, 'WARNING'))
@classmethod
def debug(cls, msg):
if Log.SHOW_DEBUG:
print(cls.get_msg(msg, 'DEBUG'))
@classmethod
def comment(cls, msg):
if Log.SHOW_COMMENT:
print(cls.get_msg(msg, 'COMMENT'))
```
#### File: 15menou/srocket/rocket.py
```python
from log import Log
from solid_bodies import Body
from environement import Earth
import tkinter as tk
import numpy as np
class Rocket:
def __init__(self, gui, rocket_gui):
self.name = 'first'
self.gui = gui
self.rocket_gui = rocket_gui
self.parts = dict()
self.r = Earth.radius
"""
States: [r, theta, phi, dot(r), dot(theta), dot(phi)]^T
such that:
- r : meters : radius from earth center.
- theta : radians : angular position with respect to launch pad
- phi : radians : attitude with respect to radial unitary vetor
- dot(r) : rad / s : derivative of r
- dot(theta) : rad / s : derivative of theta
- dot(phi) : rad / s : derivative of phi
"""
self.state_list = ['r', 'theta', 'phi', 'dr', 'dtheta', 'dphi']
self.states = np.zeros([6, 1])
self.set_state('r', Earth.radius)
self.init_msg()
def init_msg(self):
Log.debug('Rocket initialized such that:')
for state in self.state_list:
Log.debug('\t- {}_0: \t{} {}'.format(state,
self.get_state(state)[0],
self.state_unite(state)))
def state_unite(self, s):
i = ''
f = ''
if 'r' in s:
i = 'meter'
else:
i = 'radian'
if 'd' in s:
f = ' / second'
return i + f
def set_state(self, s, val):
# s is either 'r', 'theta', 'phi', 'dr', 'dtheta' or 'dphi'.
if s == 'r':
self.states[0] = val
elif s == 'theta':
self.states[1] = val
elif s == 'phi':
self.states[2] = val
elif s == 'dr':
self.states[3] = val
elif s == 'dtheta':
self.states[4] = val
elif s == 'dphi':
self.states[5] = val
else:
Log.print('{} is not a state.')
Log.print("s is either 'r', 'theta', 'phi', 'dr', 'dtheta' or 'dphi'.")
raise ValueError('Input is not a state.')
def get_state(self, s):
# s is either 'r', 'theta', 'phi', 'dr', 'dtheta' or 'dphi'.
if s == 'r':
return self.states[0]
elif s == 'theta':
return self.states[1]
elif s == 'phi':
return self.states[2]
elif s == 'dr':
return self.states[3]
elif s == 'dtheta':
return self.states[4]
elif s == 'dphi':
return self.states[5]
else:
Log.print('{} is not a state.')
Log.print("s is either 'r', 'theta', 'phi', 'dr', 'dtheta' or 'dphi'.")
raise ValueError('Input is not a state.')
def dyn(self):
"""
'dyn' is actually the function f such that:
dot(X) = f(X) where X is the vector of the states of the system.
"""
return self.states
def mass(self):
return sum(part.mass() for part in self.parts.values())
class RocketPart:
def __init__(self, mass):
self._mass = mass # stored under a private name so it does not shadow the mass() method below
self.gc = np.zeros([6, 1])
self.category = ''
def gravity_center(self):
"""
It has to be expressed in the rocket's frame.
"""
return self.gc
def mass(self):
return self._mass
class Engine(RocketPart):
def __init__(self, mass):
RocketPart.__init__(self, mass)
self.category = 'engine'
class Tank(RocketPart):
def __init__(self, mass):
RocketPart.__init__(self, mass)
self.category = 'tank'
```
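A minimal sketch of the state accessors above, assuming the two GUI handles can be stubbed with `None` for a quick check (the constructor only stores them):
```python
from rocket import Rocket

rkt = Rocket(gui=None, rocket_gui=None)  # GUI handles are not needed by the state accessors
rkt.set_state('dr', 12.5)                # radial velocity in m/s
print(rkt.get_state('r'), rkt.get_state('dr'))
```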
#### File: 15menou/srocket/solid_bodies.py
```python
from log import Log
class Body:
supported_characteristics = ['m', 'mass',
'J', 'inertia']
def __init__(self, options):
for key in options.keys():
if key in Body.supported_characteristics:
val = options[key]
if key in ['m', 'mass']:
self.mass = val
elif key in ['J', 'inertia']:
self.J = val
else:
Log.print('Not supported key:{}'.format(key))
``` |
{
"source": "15minutOdmora/Generator-filmov",
"score": 3
} |
#### File: 15minutOdmora/Generator-filmov/dbCommunication.py
```python
import mysql.connector
from image_scraping import *
from auth import AUTH
import json
class Connector:
def __init__(self):
self.db = mysql.connector.connect(**AUTH)
self.cur = None
def create_cursor(self):
"""
Function: Creates the cursor in the var. self.cur to operate the database.
"""
self.cur = self.db.cursor(dictionary=True)
def close_cursor(self):
"""
Method: Closes the connection on the cursor cur
"""
if self.cur is not None:
self.cur.close()
self.cur = None
else:
print("Cursor does not exist.")
raise RuntimeError("Cursor does not exist.")
def execute(self, code, param):
"""
Method: Executes the code with the given parameters
:param code: string containing code to be executed in MySql
:param param: touple containing values to be used in the string code
"""
if self.cur is not None:
if param is None:
self.cur.execute(code)
else:
self.cur.execute(code, param)
else:
print("Cursor does not exist.")
raise RuntimeError("Cursor does not exist.")
def commit(self):
"""
Method: Commits changes to the database
"""
self.db.commit()
class UserDataBase(Connector):
def add_new_user(self, username, password, email=None, phone=None):
"""
Method: Adds new user into db in the table Uporabnik
:param username: The username of the user
:param password: <PASSWORD>
:param email: Email of the user, if not given is None
:param phone: Phone number of the user, if not given is None
:return: (True/False, reason, data_dict)
"""
# Create cursor
self.create_cursor()
# Create liked and watched Json files, dump them into string
liked = json.dumps({})
watched = json.dumps({})
# Check if phone or email was given
if phone is None and email is not None:
code = "INSERT INTO User(username, password, email, liked, watched) VALUES (%s, %s, %s, %s, %s)"
param = (username, password, email, liked, watched)
elif email is None and phone is not None:
code = "INSERT INTO User(username, password, phone, liked, watched) VALUES (%s, %s, %s, %s, %S)"
param = (username, password, phone, liked, watched)
# Execute the code
self.cur.execute(code, param)
# Commit to database
self.commit()
# Close cursor
self.close_cursor()
def check_user_registration_params(self, username='', email='', phone=''):
""" Method checks if username, email, phone are already in the user table
:param username: users username
:param email: users email
:param phone: users phone
:return: True/False, working/if problem -> where
"""
print(username, email)
def check_username():
"""Function checks if username is already in the user table
:return: True/False
"""
# Create cursor
self.create_cursor()
# SQL code
code = "SELECT idUser FROM User WHERE username = %s"
param = (username,)
self.execute(code, param)
# If any user found, returns false
for user in self.cur:
self.close_cursor()
return False
self.close_cursor()
return True
def check_email():
"""Function checks if email is already in the user table
:return: True/False
"""
# Create cursor
self.create_cursor()
# SQL code
code = "SELECT idUser FROM User WHERE email = %s"
param = (email,)
self.cur.execute(code, param)
# If any email found, returns false
for user in self.cur:
self.close_cursor()
return False
self.close_cursor()
return True
def check_phone():
"""Function checks if phone is already in the user table
:return: True/False
"""
# Create cursor
self.create_cursor()
# SQL code
code = "SELECT idUser FROM User WHERE phoneNumber = %s"
param = (phone,)
self.cur.execute(code, param)
# If any email found, returns false
for user in self.cur:
self.close_cursor()
return False
self.close_cursor()
return True
# If faulty username
usr = check_username()
if not usr:
return False, 'username'
# If faulty email
if not check_email():
return False, 'Email'
# If faulty phone
if not check_phone():
return False, 'phone'
# If working
return True, 'working'
def delete_existing_user(self, id):
"""
Method: Deletes existing user with the given id.
:param id: idUser
:return: True/False if successful or not.
"""
# Create cursor
self.create_cursor()
# Delete user by id
code = "DELETE FROM User WHERE idUser = %s"
param = (id,)
self.cur.execute(code, param)
# Commit
self.commit()
# Close cursor
self.close_cursor()
def get_user_by_username(self, username):
"""
Function checks if user exists, returns True and the users data in a dict.
:return: Touple (True/False if user exists, {'userId': ,'username': ,'password': ,'email': ,'phone': })
"""
data = {}
# Create cursor
self.create_cursor()
# Search in database
code = "SELECT * FROM user WHERE username = %s"
param = (username,)
self.cur.execute(code, param)
# Should only be one username in database
for user in self.cur:
id_user = user['idUser']
password = user['password']
email = user['email']
phone = user['phoneNumber']
liked = json.loads(user['liked'])
watched = json.loads(user['watched'])
data = {'idUser': id_user,
'username': username,
'password': password,
'email': email,
'phone': phone,
'liked': liked,
'watched': watched}
self.close_cursor()
if data == {}:
return False, data
else:
return True, data
def get_user_by_id(self, id):
"""Function checks if user exists, returns True and the users data in a dict.
:return: Touple (True/False if user exists,
{'userId': ,'username': ,'email': ,'phone': , 'liked': JSON, 'watched' JSON})
"""
# Create cursor
self.create_cursor()
# SQL code
code = "SELECT * FROM user WHERE idUser = %s"
param = (id,)
self.cur.execute(code, param)
# Should only be one username in database
# Saves user in a dict
for user in self.cur:
id_user = user['idUser']
username = user['username']
password = user['password']
email = user['email']
phone = user['phoneNumber']
liked = json.loads(user['liked'])
watched = json.loads(user['watched'])
self.close_cursor()
data = {'idUser': id_user,
'username': username,
'password': password,
'email': email,
'phone': phone,
'liked': liked,
'watched': watched}
return True, data
self.close_cursor()
return False, {}
def save_watched_to_user(self, username, watched):
"""
Function saves liked and watched jsons to user in database
:param username: the id of the user
:param watched: dict of watched movies
:return: True/False if successful
"""
# Create cursor
self.create_cursor()
# Create watched json string
watched_json = json.dumps(watched)
# Search in database
code = "UPDATE user SET watched = %s WHERE username = %s"
param = (watched_json, username)
try:
self.cur.execute(code, param)
self.commit()
self.close_cursor()
return True
except:
return False
def save_liked_to_user(self, username, liked):
"""
Function saves liked and watched jsons to user in database
:param username: the id of the user
:param liked: dict of liked movies
:return: True/False if successful
"""
# Create cursor
self.create_cursor()
# Create liked json string
liked_json = json.dumps(liked)
# Search in database
code = "UPDATE user SET liked = %s WHERE username = %s"
param = (liked_json, username)
try:
self.execute(code, param)
self.commit()
self.close_cursor()
return True
except:
return False
def save_opinion_of_movie(self, username, idMovie, opinion, rate):
"""
Function saves given opinion about the movie to the user opinion table
:param username: users username
:param idMovie: id of movie
:param opinion: Opinion text written by the user
:return: True/False if successful
"""
try:
opinion_check, rating_check = self.get_all_opinions_of_user(username)
if idMovie not in rating_check.keys():
ver, user = self.get_user_by_username(username)
# Create cursor
self.create_cursor()
code = "INSERT INTO opinion(idUser, idMovie, opinion, ocena) VALUES (%s, %s, %s, %s)"
param = (user['idUser'], idMovie, opinion, rate)
# Execute the code
self.execute(code, param)
# Commit to database
self.commit()
# Close cursor
self.close_cursor()
else:
ver, user = self.get_user_by_username(username)
# Create cursor
self.create_cursor()
code = "UPDATE opinion SET opinion = %s, ocena = %s WHERE idUser = %s AND idMovie = %s"
param = (opinion, rate, user['idUser'], idMovie)
# Execute the code
self.execute(code, param)
# Commit to database
self.commit()
# Close cursor
self.close_cursor()
except:
return False
return True
def get_all_opinions_of_user(self, username):
"""
Function returns a data list of movie ids and opinions the user has saved
:param username: users username
:return: {'literally id of movie': 'opinion', 'ex. tt123456': 'I very liked this movie', ...}
"""
data = {}
data2 = {}
self.create_cursor()
code = "SELECT opinion.idMovie,opinion.opinion, opinion.ocena FROM opinion JOIN user ON opinion.idUser = user.idUser WHERE user.username = %s"
param = (username,)
# Execute the code
self.cur.execute(code, param)
for opinion in self.cur:
data[opinion['idMovie']] = opinion['opinion']
data2[opinion['idMovie']] = opinion['ocena']
self.close_cursor()
return data, data2
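# --- Hypothetical usage sketch (not part of the original module) ---
#   db = UserDataBase()
#   ok, reason = db.check_user_registration_params(username="ana", email="ana@example.com", phone="040123456")
#   if ok:
#       db.add_new_user("ana", "secret", email="ana@example.com")
#   exists, user = db.get_user_by_username("ana")
#   db.save_opinion_of_movie("ana", "tt0111161", "Great movie", 9)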
class MovieDatabase(Connector):
def search_by_keyword(self, keyword):
"""Function gets a keyword that was typed in the search box, returns all the results.
Search by keyword on main page
:param keyword: string
:return: int(number_of_matches), sorted(list[dict("movieId": , "title": , "year": , ...)]),
"""
# Lists for saving data
movies_data = []
writers_and_directors_data = []
# Add % for keyword search
keyword = '%' + keyword + '%'
# Create cursor
self.create_cursor()
# SQL code
code = "SELECT * FROM movie WHERE title LIKE %s ORDER BY (numVotes) DESC"
param = (keyword,)
self.cur.execute(code, param)
# Save all of the data for movies
for movie in self.cur:
idMovie = movie['idMovie']
title = movie['title']
isAdult = movie['isAdult']
releaseYear = movie['releaseYear']
runtimeMinutes = movie['runtimeMinutes']
rating = movie['rating']
numVotes = movie['numVotes']
movies_dict = {'idMovie': idMovie,
'title': title,
'isAdult': isAdult,
'releaseYear': releaseYear,
'runtimeMinutes': runtimeMinutes,
'rating': rating,
'numVotes': numVotes}
movies_data.append(movies_dict)
self.close_cursor()
# Saves number of matches
number_of_matches = len(movies_data)
return number_of_matches, movies_data
def random_new_movies(self):
"""
Function returns a dict containing a list of 5 random movies.
:return: dict("movies": sorted(list["movieId": , ...]))
"""
# List to save the movie data in
movies_data = []
# Create cursor
self.create_cursor()
# SQL code
code = "SELECT * FROM movie ORDER BY RAND() LIMIT 5"
self.cur.execute(code)
# Save all of the data for movies
for movie in self.cur:
idMovie = movie['idMovie']
title = movie['title']
isAdult = movie['isAdult']
releaseYear = movie['releaseYear']
runtimeMinutes = movie['runtimeMinutes']
rating = movie['rating']
numVotes = movie['numVotes']
img_url = get_google_image_link(title + " " + str(releaseYear))
movies_dict = {'idMovie': idMovie,
'title': title,
'isAdult': isAdult,
'releaseYear': releaseYear,
'runtimeMinutes': runtimeMinutes,
'rating': rating,
'numVotes': numVotes,
'img_url': img_url}
movies_data.append(movies_dict)
self.close_cursor()
return {"movies": movies_data}
def search_movie_by_id(self, id):
"""Function: Returns a movies list containing movie dicts, also has genres
:param id: idMovie
:return: list[dict('idMovie': , 'title': , ...)]
"""
def search_all_genres_for_movie(mov_id):
"""Function: Returns a genre list for movie id
:param id: idMovie
:return: list[dict('idMovie': , 'title': , ...)]
"""
# Saves all genres
genres_data = []
# Creates cursor
self.create_cursor()
# SQL code
code = "SELECT genreName FROM Genre JOIN GenresByMovie ON Genre.idGenre = GenresByMovie.idGenre " \
"JOIN Movie ON Movie.idMovie = GenresByMovie.idMovie WHERE Movie.idMovie = %s"
param = (mov_id,)
self.cur.execute(code, param)
# Saves genres
for genre in self.cur:
genres_data.append(genre['genreName'])
return genres_data
# List to save the movie data in, should only be one
movies_data = []
# Create cursor
self.create_cursor()
# SQL code
code = "SELECT * FROM movie WHERE idMovie = %s"
param = (id,)
self.cur.execute(code, param)
# Save all of the data for movies, should only be one
for movie in self.cur:
idMovie = movie['idMovie']
title = movie['title']
isAdult = movie['isAdult']
releaseYear = movie['releaseYear']
runtimeMinutes = movie['runtimeMinutes']
rating = movie['rating']
numVotes = movie['numVotes']
genre = search_all_genres_for_movie(idMovie)
img_url = get_google_image_link(title + " " + str(releaseYear))
additional_data = get_movie_details(id)
movies_dict = {'idMovie': idMovie,
'title': title,
'isAdult': isAdult,
'releaseYear': releaseYear,
'runtimeMinutes': runtimeMinutes,
'rating': rating,
'numVotes': numVotes,
'img_url':img_url,
'description': additional_data['description'],
'genre': ", ".join(genre)}
movies_data.append(movies_dict)
return movies_data
def get_movie_by_param(self, parameters, rand=False):
"""Function: Returns a movies list containing movie dicts
:parameters: dict('release_year': dict('from': , 'to': ),
'genre': str(),
'duration': dict('from': , 'to': ),
'directed_by': str(),
'number_of_votes': dict('from': , 'to': ),
'rating': dict('from': , 'to': ))
:return: list[dict("movieId": , "title": , "year": , ...)]
"""
def get_all_movie_by_idstring(id_string):
"""Function: Returns a movies list based on idMovie.
:parameters: str('idMovie1','idMovie2', ...)
:return: list[dict("movieId": , "title": , "year": , ...)]
"""
# For saving movies
data = []
# Create cursor
self.create_cursor()
# SQL code
code = "SELECT * FROM movie WHERE idMovie IN (" + id_string + ")"
            self.cur.execute(code)
# Saves all movies in a list
for movie in self.cur:
idMovie = movie['idMovie']
title = movie['title']
isAdult = movie['isAdult']
releaseYear = movie['releaseYear']
runtimeMinutes = movie['runtimeMinutes']
rating = movie['rating']
numVotes = movie['numVotes']
# Gets movie image
"""img_url = get_google_image_link(title + " " + str(releaseYear))
additional_data = get_movie_details(id)"""
movies_dict = {'idMovie': idMovie,
'title': title,
'isAdult': isAdult,
'releaseYear': releaseYear,
'runtimeMinutes': runtimeMinutes,
'rating': rating,
'numVotes': numVotes}
data.append(movies_dict)
return data
def get_all_movie_by_idstring_rand(id_string):
"""Function: Returns a 3 random movies list based on idMovie.
:parameters: str('idMovie1','idMovie2', ...)
:return: list[dict("movieId": , "title": , "year": , ...)]
"""
# For saving movies
data = []
# Create cursor
self.create_cursor()
# SQL code
code = "SELECT * FROM movie WHERE idMovie IN (" + id_string + ") ORDER BY RAND() LIMIT 3"
            self.cur.execute(code)
# Saves all movies in a list
for movie in self.cur:
idMovie = movie['idMovie']
title = movie['title']
isAdult = movie['isAdult']
releaseYear = movie['releaseYear']
runtimeMinutes = movie['runtimeMinutes']
rating = movie['rating']
numVotes = movie['numVotes']
"""img_url = get_google_image_link(title + " " + str(releaseYear))
additional_data = get_movie_details(id)"""
movies_dict = {'idMovie': idMovie,
'title': title,
'isAdult': isAdult,
'releaseYear': releaseYear,
'runtimeMinutes': runtimeMinutes,
'rating': rating,
'numVotes': numVotes}
data.append(movies_dict)
return data
def get_all_movie_ids():
"""Function: Returns idMovie for all movies in the database, is used for interjection
:parameters: None
:return: dict(idMovie1: None, ... )
"""
            # Dict to save the movie data in based on id, used for intersection / should use set()
movies_ids_tmp = {}
# Create cursor
self.create_cursor()
# SQL code
code = "SELECT idMovie FROM movie"
self.cur.execute(code)
# Saves all idMovie for movies
for movie in self.cur:
idMovie = movie['idMovie']
movies_ids_tmp[idMovie] = None
return movies_ids_tmp
def get_movieid_by_genre(genres_str):
"""Function: Returns idMovie for all movies in selected genres
:parameters: str('Romance','Horror',...)
:return: dict(idMovie1: None, ... )
"""
            # Dict to save the movie data in based on id, used for intersection / should use set()
genre_movies_ids_tmp = {}
# Create cursor
self.create_cursor()
# SQL code
code = "SELECT movie.idMovie FROM movie JOIN genresbymovie ON genresbymovie.idMovie = movie.idMovie " \
"JOIN genre ON genresbymovie.idGenre = genre.idGenre WHERE genre.genreName IN (" + genres_str + ")"
self.cur.execute(code, None)
# Saves all idMovie for movies
for movie in self.cur:
idMovie = movie['idMovie']
genre_movies_ids_tmp[idMovie] = None
return genre_movies_ids_tmp
def get_movieid_by_director(name):
"""Function: Returns idMovie for all movies directed by a one person
:parameters: Directors name
:return: dict(idMovie1: None, ... )
"""
            # Dict to save the movie data in based on id, used for intersection / should use set()
director_movies_ids_tmp = {}
# Create cursor
self.create_cursor()
# SQL code
code = "SELECT movie.* FROM movie JOIN team ON team.idMovie = movie.idMovie " \
"JOIN writersanddirectors ON team.idWritersAndDirectors = writersanddirectors.idWritersAndDirectors " \
"WHERE writersanddirectors.name = %s"
param = (name,)
self.cur.execute(code, param)
# Saves all idMovie for movies
for movie in self.cur:
idMovie = movie['idMovie']
director_movies_ids_tmp[idMovie] = None
return director_movies_ids_tmp
def get_movieid_by_all_else(code, param):
"""Function: Returns idMovie for all movies fitting from,to parameters
:parameters: code = str(SQL code), param = tuple(int('from this year'),int('to this year'),...)
:return: dict(idMovie1: None, ... )
"""
            # Dict to save the movie data in based on id, used for intersection / should use set()
other_movies_ids_tmp = {}
# Create cursor
self.create_cursor()
# Executes cursor, code is provided in params
self.cur.execute(code, param)
# Saves all idMovie for movies
for movie in self.cur:
idMovie = movie['idMovie']
other_movies_ids_tmp[idMovie] = None
return other_movies_ids_tmp
def join_all_return_ids(directors_ids, genres_ids, other_ids):
"""Function: Returns a list of idMovie for searched parameters
:parameters: directors_ids is a dictionary of idMovie for director searched
genres_ids is a dictionary of idMovie for genres searched
other_ids is a dictionary of idMovie for other parameters
:return: list(idMovie1,...)
"""
# List to save all final parsed idMovie
movie_ids = []
# Makes an intersection
common_ids = directors_ids.keys() & genres_ids.keys() & other_ids.keys()
# Saves all idMovie in a list
for id in common_ids:
movie_ids.append(id)
return movie_ids
def call(parameters, rand):
"""Function: calls stuff does stuff makes stuff we need
:parameters: parameters = dict('release_year': dict('from': , 'to': ),
'genre': str(),
'duration': dict('from': , 'to': ),
'directed_by': str(),
'number_of_votes': dict('from': , 'to': ),
'rating': dict('from': , 'to': ))
rand = True/False
:return: list[dict("movieId": , "title": , "year": , ...)]
"""
# List for saving all dicts with idMovie
join_this = []
# SQL code used for searching movies based on integer parameters
code = 'SELECT idMovie FROM movie WHERE'
# Saves from and to parameters
param = []
# Check if any integer parameters were given
i_did_it = False
# Saves idMovie for all parameters, will be used for intersection
other_ids = {}
genres_ids = {}
directors_ids = {}
# Prepares code if release year parameter was given
if 'release_year' in parameters.keys():
# Adds a condition line to the SQL code
code += ' releaseYear > %s AND releaseYear < %s AND'
# Saves parameters
param.append(parameters['release_year']['from'])
param.append(parameters['release_year']['to'])
# Integer parameters were given
i_did_it = True
# Prepares code if duration parameter was given
if 'duration' in parameters.keys():
# Adds a condition line to the SQL code
code += ' runtimeMinutes > %s AND runtimeMinutes < %s AND'
# Saves parameters
param.append(parameters['duration']['from'])
param.append(parameters['duration']['to'])
# Integer parameters were given
i_did_it = True
# Prepares code if number of votes parameter was given
if 'number_of_votes' in parameters.keys():
# Adds a condition line to the SQL code
code += ' numVotes > %s AND numVotes < %s AND'
# Saves parameters
param.append(parameters['number_of_votes']['from'])
param.append(parameters['number_of_votes']['to'])
# Integer parameters were given
i_did_it = True
# Prepares code if rating parameter was given
if 'rating' in parameters.keys():
# Adds a condition line to the SQL code
code += ' rating > %s AND rating < %s AND'
# Saves parameters
param.append(parameters['rating']['from'])
param.append(parameters['rating']['to'])
# Integer parameters were given
i_did_it = True
# If integer parameters were given, finds all idMovie for those
if i_did_it:
# Adds 1 to the end of code
# Code end now looks like: SELECT ... WHERE case1 AND case2 ... AND 1
code += ' 1'
# Saves all idMovie for integer parameters
other_ids = get_movieid_by_all_else(code, tuple(param))
# If none were found -> no movie fits the parameters given
if other_ids == {}:
return {}
            # If genre parameter is given, finds all idMovie for those
if 'genre' in parameters.keys():
# Saves string for genres, will be SQL parameter
genre_str = ''
i = 0
# Creates genre string used for code, string described in function parameters
for some_genre in parameters['genre']:
if i == 0:
genre_str += "'" + some_genre + "'"
i = 1
else:
genre_str = genre_str + ",'" + some_genre + "'"
# Saves all idMovie for given genres
genres_ids = get_movieid_by_genre(genre_str)
# If none were found -> no movie fits the parameters given
if genres_ids == {}:
return {}
            # If director parameter is given, finds all idMovie for those
if 'directed_by' in parameters.keys():
# Saves all idMovie for given genres
directors_ids = get_movieid_by_director(parameters['directed_by'])
# If none were found -> no movie fits the parameters given
if directors_ids == {}:
return {}
            # Checks if user didn't select some parameters
join_this.append(other_ids)
join_this.append(directors_ids)
join_this.append(genres_ids)
if {} in join_this:
# Saves all movie ids, runs only if user did not specify some parameters
all_ids = get_all_movie_ids()
# Fills empty dicts with idMovie for all movies
# used for intersection
if directors_ids == {}:
directors_ids = all_ids
if genres_ids == {}:
genres_ids = all_ids
if other_ids == {}:
other_ids = all_ids
# Saves joined idMovie
mov_ids = join_all_return_ids(directors_ids, genres_ids, other_ids)
# Saves string for the ids, used for SQL code
string_of_ids = ''
# Creates string for ids, used for SQL code
i = 0
for id in mov_ids:
if i == 0:
string_of_ids += "'" + id + "'"
i = 1
else:
string_of_ids = string_of_ids + ",'" + id + "'"
# If random is True, saves random movies based on parameters,
# otherwise, save all movies based on parameters.
if rand:
final_squad = get_all_movie_by_idstring_rand(string_of_ids)
else:
final_squad = get_all_movie_by_idstring(string_of_ids)
# Returns a movies dict
return final_squad
# Return the call
return call(parameters, rand)
def search_movie_by_multiple_ids(self, id_list):
"""
Function: Returns a movies list containing movie dicts. of the ids in the id_list
:param id_list: list containing movie ids
:return: list[dict('idMovie': , 'title': , ...)]
"""
# List to save the movie data in
movies_data = []
# For each id
for id in id_list:
# Create cursor
self.create_cursor()
code = "SELECT * FROM movie WHERE idMovie = %s"
param = (id,)
self.cur.execute(code, param)
# Save all of the data for movies, one at a time
for movie in self.cur:
idMovie = movie['idMovie']
title = movie['title']
isAdult = movie['isAdult']
releaseYear = movie['releaseYear']
runtimeMinutes = movie['runtimeMinutes']
rating = movie['rating']
numVotes = movie['numVotes']
img_url = get_google_image_link(title + " " + str(releaseYear))
additional_data = get_movie_details(id)
movies_dict = {'idMovie': idMovie,
'title': title,
'isAdult': isAdult,
'releaseYear': releaseYear,
'runtimeMinutes': runtimeMinutes,
'rating': rating,
'numVotes': numVotes,
'img_url': img_url,
'description': additional_data['description']}
movies_data.append(movies_dict)
return movies_data
if __name__ == "__main__":
# For testing
mdb = MovieDatabase()
udb = UserDataBase()
opinion = udb.get_all_opinions_of_user("test3")
"""print(mdb.random_new_movies())"""
"""param = {'release_year': {'from': 1990, 'to': 2020},
'genre': 'Romance',
'duration': {'from': 60, 'to': 120},
'directed_by': "0",
'number_of_votes': {'from': 200, 'to': 10000},
'rating': {'from': 8, 'to': 10}}
print(mdb.get_movie_by_param(param))"""
pass
``` |
{
"source": "15minutOdmora/Ksok-Python-tecaj-22",
"score": 4
} |
#### File: code/file_actions/file_handling.py
```python
import os
import csv
import json
def read_txt(file_path):
"""
Reads all lines from file into a list of strings.
Args:
file_path (str): Path to file
Returns:
list[str]: List of read lines
"""
with open(file_path, "r") as f:
return f.readlines()
def read_csv(file_path):
"""
Reads all lines from csv file into a list representing the header
    and a separate list holding all data.
Args:
file_path (str): Path to file
Returns:
tuple[list[str], list[list[str]]]: Header, other rows
"""
with open(file_path, "r", encoding="UTF8") as f:
reader = csv.reader(f)
header = next(reader) # Save header
rows = []
for row in reader:
rows.append(row)
return header, rows
def write_csv(file_path, header, data):
"""
Saves passed data to a csv file, writing the header and data
Args:
file_path (str): Path to file to write
header (list[str]): List with header values
data (list[list[str]]): List containing each row (as a list of string)
"""
with open(file_path, "w", newline="", encoding="UTF8") as f: # Add encoding='UTF8' if needed, newline removes emptyline between rows
writer = csv.writer(f)
writer.writerow(header)
writer.writerows(data)
def read_json(file_path):
"""
Reads the object from the json file.
Args:
file_path (str): Path to json file
Returns:
any: Object contained in file
"""
with open(file_path, "r") as f:
json_object = json.load(f)
return json_object
def write_json(file_path, data):
"""
Saves passed object into a json file.
Args:
file_path (str): Path to file
data (any): Object to save in json file
"""
with open(file_path, "w") as f:
json.dump(data, f, indent=4) #, sort_keys=True)
def update_json_dictionary(file_path, data):
"""
Updates the dictionary contained in the json file.
Args:
file_path (str): Path to json file
data (dict): Dictionary to update
"""
read_json = {}
if os.path.isfile(file_path): # Read only if file exists.
with open(file_path, "r") as f:
read_json = json.load(f)
read_json.update(data)
write_json(file_path, read_json) # Save to file
``` |
{
"source": "15minutOdmora/python-pyggui",
"score": 3
} |
#### File: pyggui/configure/asset_builder.py
```python
import os
import inspect
from typing import Dict, List, Union
from pyggui.exceptions import AssetsDirectoryNotDefinedError, AssetDoesNotExistError
class Directory:
"""
Class for building directory objects. Every directories file and sub-directory can be accessed through attributes,
if the file/directory does not exist the AssetDoesNotExistError gets raised.
Properties:
files: Returns a list of file paths of files contained in the directory.
directories (List[]): Returns a list of directory paths of sub-directories.
empty (bool): If directory is empty.
path (str): Will return the directories path.
Directory can be iterated through every one of its contents, where sub-directories come first and files second.
Printing out the object will output its structure.
TODO: Implement repr method.
"""
def __init__(self, directory_structure: Dict):
"""
Args:
directory_structure (Dict): Dictionary containing information of subdirectories and files.
"""
self.directory_structure = directory_structure
self._files = []
if "_files" in self.directory_structure:
self._files = [value["_path"] for value in self.directory_structure["_files"].values()]
self._directories = []
# Create directories list, long way without list comprehension, it's more readable
for key, value in self.directory_structure.items():
if key != "_path" and "_path" in value:
self.directories.append(value["_path"])
@property
def files(self) -> List[str]:
return self._files
@property
def directories(self) -> List[str]:
return self._directories
@property
def empty(self) -> bool:
        return not (self._files or self._directories)
def __getattr__(self, attr):
        # Only reached when normal attribute lookup fails, meaning build_directory never
        # set this attribute, so the requested asset does not exist.
        asset_path = self.directory_structure["_path"] + "\\" + attr
        message = f"The asset {asset_path} does not exist in the defined assets directory."
        raise AssetDoesNotExistError(message)
def __iter__(self):
for directory in self._directories:
yield directory
for file in self._files:
yield file
def build_directory(directory_structure: Dict) -> Directory:
"""
Function builds directory object by setting it appropriate attributes, sub-directories get also added as
attributes and recursively built.
Args:
directory_structure (Dict): Dictionary containing structure, created inside the AssetBuilder.build method.
Returns:
Directory: Object.
"""
parent_directory = Directory(directory_structure)
def build(dir_structure, directory):
setattr(directory, "path", dir_structure["_path"])
for attr in directory.directory_structure: # If sub-directory, set new sub-Directory object
if attr != "_path" and attr != "_files":
child_dir = Directory(directory_structure=dir_structure[attr]) # Make object
setattr(directory, attr, child_dir) # Set is as an attribute
build(directory.directory_structure[attr], child_dir) # Recursive build child
if "_files" in dir_structure: # If file, return file path
for attr in directory.directory_structure["_files"]:
setattr(directory, attr, dir_structure["_files"][attr]["_path"])
build(directory_structure, parent_directory)
return parent_directory
class Assets:
"""
Dummy class for raising error (when fetching attribute) when Asset directory was not defined.
"""
def __init__(self):
self.directory_structure = None
def __getattr__(self, attr):
message = "The Asset directory was not defined in the initialization of Game object. " \
"Set: assets_directory = path/to/your/assets/folder."
raise AssetsDirectoryNotDefinedError(message)
def __repr__(self):
return "Asset directory was not defined. Define it passing assets_directory to the Game object."
class AssetBuilder:
"""
Class used for building the Directory object.
"""
def __init__(self, directory: str = None):
"""
Args:
directory (str): Path to assets directory.
"""
# Check directory argument
if not directory: # If not passed grab modules parent directory
self.directory_path = None
else:
self.directory_path = os.path.normpath(directory) # Normalize path
def build(self) -> Dict:
"""
Method will build and return the correct object for using assets in game. If path was not defined the
dummy Asset object gets returned, so if access to some file is attempted an error gets returned.
"""
if not self.directory_path: # Return dummy object if path was not given
return Assets()
norm_dir_path = os.path.normpath(self.directory_path) # Normalize path
main_structure = {"_path": norm_dir_path} # Main mutable dictionary that will get returned
def traverse(structure: Dict, directory: str) -> None:
"""
Recursive function goes over directory, adding its files in the structure key = 'files' list,
recursive call for each directory found.
"""
for name, full_path in [(path, os.path.join(directory, path)) for path in os.listdir(directory)]:
# If file
if os.path.isfile(full_path): # Add each file to files key in structure
if "_files" not in structure:
structure["_files"] = {} # Empty dict
name_split = name.split(".") # Get file name and extension
_name, _extension = name_split[0], name_split[1]
structure["_files"][_name] = {"_extension": _extension, "_path": full_path}
# If directory
if os.path.isdir(full_path): # Add new structure under basename, recursive call
basename = os.path.basename(full_path)
structure[basename] = {"_path": full_path}
traverse(structure[basename], full_path)
# Call function
traverse(main_structure, norm_dir_path)
# Return directory object
return build_directory(main_structure)
```
#### File: pyggui/defaults/__welcome_page.py
```python
from pyggui.gui.page import Page
class _WelcomePage(Page):
def __init__(self, controller):
super().__init__(controller)
pass
```
#### File: pyggui/gui/grid.py
```python
from __future__ import annotations
from typing import Union, List, Tuple
import pygame
from pyggui.gui.item import StaticItem
class Cell(StaticItem):
"""
Class for representing a single rectangle in the grid that is placed in the i, j position and has i-th rows height,
j-th columns height. Items can be added to it, aligned and padded.
"""
def __init__(
self,
grid: Grid,
position_in_grid: Tuple,
position: List[int] = [0, 0],
size: Tuple[int, int] = (1, 1),
):
"""
Args:
position (List[int] = [0, 0]): Position to place item on screen (or on page).
size (Tuple[int, int] = (1, 1)): Size of item.
visible (bool): If item is currently visible.
selected (bool): If item is currently selected.
"""
super().__init__(position, size, False, False)
self.grid = grid
self.position_in_grid = position_in_grid
# Possible alignments
self.alignments = {
"left": self._left,
"right": self._right,
"top": self._top,
"bottom": self._bottom,
"centre": self._centre,
None: self._centre
}
# Possible paddings
self._padding = {
"top": 0,
"bottom": 0,
"left": 0,
"right": 0
}
@property
def padding(self):
return self._padding
@padding.setter
def padding(self, padding):
# TODO: Padding for whole cell, also to add is alignment for whole cell
pass
def _left(self, item: any) -> None:
"""
Method aligns item to the left side of cell.
"""
item.position = (self.position[0], item.position[1])
def _right(self, item: any) -> None:
"""
Method aligns item to the right side of cell.
"""
# Set right cell border to match item right side
diff = (self.width - item.width) if self.width > item.width else 0
# Set new x position
item.position = (self.position[0] + diff, item.position[1])
def _top(self, item: any) -> None:
"""
Method aligns item to the top side of cell.
"""
# Set top borders to match
item.position = (item.position[0], self.position[1])
def _bottom(self, item: any) -> None:
"""
Method aligns item to the bottom side of cell.
"""
# Set bottom cell border to match item bottom
diff = (self.height - item.height) if self.height > item.height else 0
item.position = (item.position[0], self.position[1] + diff)
def _centre(self, item: any) -> None:
"""
Method aligns item so its centre matches the cells centre.
"""
# Item centre is at cell centre
centered_x = self.position[0] + ((self.width - item.width) // 2)
centered_y = self.position[1] + ((self.height - item.height) // 2)
item.position = (centered_x, centered_y)
def __pad(self, item: any, padding: str, value: int) -> None:
"""
Method adds padding to item based on cell position and size.
Args:
item (any): Item to pad.
padding (str): Padding type (top, bottom, left, right).
value (int): Number of px to pad.
"""
# TODO: Make padding not move items if there is already enough space
if padding in self.padding.keys():
if padding == "top":
item.y += value
elif padding == "bottom":
item.y -= value
elif padding == "left":
item.x += value
elif padding == "right":
item.x -= value
def add_item(self, item: any, align: str = None, padding: str = None) -> None:
"""
Method adds item to cell, aligns and pads it base on passed values.
Args:
item (any): Item to add.
align (str): String defining alignment type. Multiple alignments are separated by a space character.
Example: alignment = "centre top" # Centre should always be first.
padding (str): String defining padding of item. Multiple alignments are separated by a comma. Value is
passed next to the alignment position as an integer value.
Example: padding = "top 5, left 3" # 5px from top 3px from bottom
"""
self.items.append(item) # Add item to item list
self.alignments["centre"](item) # Align item into centre initially so it moves it into cell
# Handle alignment
if align:
for align in align.split(" "): #
if align in self.alignments:
self.alignments[align](item) # Align item in set way
else:
self.alignments[align](item) # Default alignment for None is centre
# Handle padding
if padding:
for pad in padding.split(","): # Go over each padding
_pad = pad.strip() # Remove whitespace around
_pad = _pad.split(" ")
print(_pad, pad)
key, value = _pad[0], int(_pad[1]) # Todo add exception handling
self.__pad(item, padding=key, value=value)
def update(self):
for item in self.items:
item.update()
def draw(self, visible: bool = False):
if visible: # Only draw if grid is visible
pygame.draw.rect(
self.display,
color=(0, 0, 0),
rect=self.rect,
width=0 # Fill this one
)
pygame.draw.rect(
self.display,
color=(255, 255, 255),
rect=self.rect,
width=2
)
for item in self.items:
item.draw()
class Row:
"""
Single row in Grid, is only used for grabbing items using indexing with []. Row contains cells that are in that
row in the grid.
"""
def __init__(self, grid: Grid, data: List = None):
self.grid = grid
if data:
self._list = list(data)
else:
self._list = list()
def __len__(self):
""" List length """
return len(self._list)
def __getitem__(self, i):
""" Get a list item """
return self._list[i]
def __delitem__(self, i):
""" Delete an item """
del self._list[i]
def __setitem__(self, i, val):
""" Set item """
# optional: self._acl_check(val)
self._list[i] = val
def __repr__(self):
return "<{0} {1}>".format(self.__class__.__name__, self._list)
def __str__(self):
return str(self._list)
def insert(self, i, val):
""" Insert value at index """
# optional: self._acl_check(val)
self._list.insert(i, val)
def append(self, val):
""" Append value at end of list """
self.insert(len(self._list), val)
def make_grid_line(line: List[Union[float, int]], total_size: int, number_of_items: int) -> List[int]:
""" Used internally by Grid for constructing cell sizes for each column, row.
Function creates a list representing sizes of cells (in px) in that line (either row or column).
Line can be passed as a list of decimals (representing percentage of total size) or integers (representing sizes).
Line can also include less elements than there are rows/columns, elements then get added/removed accordingly.
Args:
line (List[Union[float, int]]): List of either integers or floats representing different size format (px or %).
total_size (int): Total size (height of all rows or width of all columns), can be either 1 (if %) or an integer
representing size in px.
number_of_items (int): Expected number of items in line.
"""
# Check number of elements matches, add/remove otherwise
element_number_difference = number_of_items - len(line)
if element_number_difference < 0: # If more were passed, remove last items
        line = line[:number_of_items]  # keep only the first number_of_items entries
elif element_number_difference > 0: # If less were passed, add number of items (equal part)
if isinstance(line[0], float): # If float, parts added must be equal to 1/total_num_parts
one_part = 1 / number_of_items
else: # Else add equal parts of total_size
one_part = int(total_size / number_of_items)
line += [one_part for _ in range(element_number_difference)]
# Create list
if isinstance(line[0], float): # If decimal -> percentage
line_sum = sum(line)
# factor = line_sum / 1
line = [part / line_sum for part in line]
size_percentages = line # [part * factor for part in line]
return [int(total_size * part) for part in size_percentages]
else: # If not -> assume int -> sizes in px
factor = total_size / sum(line)
return [int(size * factor) for size in line]
class Grid(StaticItem):
def __init__(
self,
position: List[int] = [0, 0],
rows: int = 1,
columns: int = 1,
row_sizes: Union[List[int], List[float]] = None,
column_sizes: Union[List[int], List[float]] = None,
size: Tuple[int, int] = None,
visible: bool = False,
selected: bool = False
):
"""
Args:
position (List[int] = [0, 0]): Position to place item on screen (or on page).
rows (int): An integer representing number of rows.
columns (int): An integer representing number of columns.
row_sizes (Union[List[int], List[float]]): List of heights for each row, heights can either (all together)
be integer values (representing height of each row in px) or float numbers (representing height of each
row by percentage relative to grid size)
column_sizes (Union[List[int], List[float]]): List of widths for each column, widths can either
(all together) be integer values (representing width of each row in px) or float numbers
(representing width of each row by percentage relative to grid size)
size (Tuple[int, int] = (1, 1)): Size of item.
visible (bool): If item is currently visible.
selected (bool): If item is currently selected.
Note:
Adding less elements in row_sizes or column_sizes (ex. there's 5 rows you pass a list of 4 values) will
result in the last one being added as an equal part to the total (width of grid if ints passed, or 1 if
percentages (floats) passed).
Adding more elements will just cut the additional ones off.
"""
if not size: # Fetch whole screen size if not passed
size = pygame.display.get_surface().get_size()
super().__init__(position=position, size=size, visible=visible, selected=selected)
self._list: List[Row] = []
self.number_of_rows, self.number_of_columns = rows, columns
self.row_sizes = row_sizes
self.column_sizes = column_sizes
# Make rows and columns
self.__make_row_and_column_sizes()
self.__make(rows, columns)
def __make_row_and_column_sizes(self) -> None:
"""
Method constructs heights of rows and widths of columns in px so they can be generated in the __make method.
"""
# Make rows
if self.row_sizes:
rows_sizes = make_grid_line(self.row_sizes, self.height, self.number_of_rows)
else:
equal_part = int(self.height / self.number_of_rows)
rows_sizes = [equal_part for _ in range(self.number_of_rows)]
self.row_sizes = rows_sizes
# Make columns
if self.column_sizes:
rows_sizes = make_grid_line(self.column_sizes, self.width, self.number_of_columns)
else:
equal_part = int(self.width / self.number_of_columns)
rows_sizes = [equal_part for _ in range(self.number_of_columns)]
self.column_sizes = rows_sizes
def __make(self, number_of_rows: int, number_of_columns: int) -> None:
"""
Method creates grids list which contains rows of cells.
Args:
number_of_rows (int): Number of rows.
number_of_columns (int): Number of columns.
"""
curr_x, curr_y = 0, 0
for i in range(number_of_rows):
row = Row(self)
for j in range(number_of_columns):
row.append(
Cell(
grid=self,
position_in_grid=(i, j),
position=[curr_x, curr_y],
size=(self.column_sizes[j], self.row_sizes[i]),
)
)
curr_x += self.column_sizes[j]
self._list.append(row)
curr_x = 0
curr_y += self.row_sizes[i]
@property
def rows(self):
return len(self._list)
@property
def columns(self):
return len(self._list[0])
def add_item(self,
item: any,
row: int = None,
column: int = None,
align: str = None,
padding: str = None
) -> None:
"""
Method adds item to the grid in the specified cell at position row, column. Optional alignments, and paddings
can be defined relative to the cell where the item is being added.
Args:
item (any): Item to add.
row (int): Row in grid to add the item in. Starting at 0.
column (int): Column in grid to add the item in. Starting at 0.
align (str): Representing one or more alignment types. These include: centre, top, bottom, left, right.
Centre should be defined first. Separate alignments using a space " ".
padding (str): Representing one or more paddings of each side of the cell. Multiple can be passed by
separating them with commas ",", each padding should be passed as "side px". Where sides include: top,
bottom, left, right. Px represents an integer number of pixels to pad.
Ex.: padding = "top 5, left 10"
"""
        self._list[row][column].add_item(item=item, align=align, padding=padding)
def update(self):
""" Method updates every item added to a cell in the grid. """
for row in self._list:
for cell in row:
cell.update()
def draw(self):
""" Method draws every item added to a cell in the grid. """
for row in self._list:
for cell in row:
cell.draw(visible=self.visible) # Pass if self visible
def __iter__(self):
""" For iterating over grid. TODO: Decide if iterating should yield every item not row. """
for row in self._list:
yield row
def __len__(self):
""" List length """
return len(self._list)
def __getitem__(self, i):
""" Get a list item """
return self._list[i]
def __delitem__(self, i):
""" Delete an item """
del self._list[i]
def __setitem__(self, i, val):
""" Set item """
self._list[i] = val
def __repr__(self):
return "<{0} {1}>".format(self.__class__.__name__, self._list)
def __str__(self):
return str(self._list)
```
#### File: pyggui/helpers/file_handling.py
```python
from __future__ import annotations
from typing import Callable, List, Tuple, Dict, Union
import os
import time
import json
import pygame
class ImageLoader:
@staticmethod
def load_image(image_path: str) -> pygame.surface.Surface:
"""
Method loads given path into image.
Args:
image_path (str): Path to image to load
Returns:
pygame.surface.Surface: Image loaded as a Pygame surface
"""
return pygame.image.load(image_path).convert() # .convert() optimizes speed by 5x
@staticmethod
def load_transparent_image(image_path: str) -> pygame.surface.Surface:
"""
Method loads given path into transparent image.
Args:
image_path (str): Path to image to load
Returns:
pygame.surface.Surface: Image loaded as a Pygame surface
"""
return pygame.image.load(image_path).convert_alpha() # .convert() optimizes speed by 5x
@staticmethod
def load_folder(folder_path: str) -> List[pygame.surface.Surface]:
"""
        Method loads all images from the folder into a sprite list.
Args:
folder_path (str): Path to folder to load images from
Returns:
list[pygame.surface.Surface]: List containing Pygame images loaded as surfaces
"""
image_list = []
for image_path in os.listdir(folder_path):
path = os.path.join(folder_path, image_path)
image_list.append(ImageLoader.load_image(path))
return image_list
@staticmethod
def load_transparent_folder(folder_path: str) -> List[pygame.surface.Surface]:
"""
        Method loads all images from the folder into a sprite list.
Args:
folder_path (str): Path to folder to load images from
Returns:
list[pygame.surface.Surface]: List containing Pygame images loaded as surfaces
"""
image_list = []
for image_path in os.listdir(folder_path):
path = os.path.join(folder_path, image_path)
image_list.append(ImageLoader.load_transparent_image(path))
return image_list
class DirectoryReader:
"""
Class consisting of static methods for reading directories. Used for fetching sub-directories, all files, the
directories structure, etc.
"""
@staticmethod
def get_all_directories(dir_path: str) -> List[Tuple[str, str]]:
"""
Method finds all sub-directories in the given directory.
Args:
dir_path (str): Path to directory to search from
Returns:
List[Tuple[str, str]]: List of tuples (directory name, directory path).
"""
folder_list = []
for item in os.scandir(dir_path):
if item.is_dir():
folder_list.append((item.name, os.path.abspath(item.path)))
return folder_list
@staticmethod
def get_all_files(dir_path: str) -> List[Tuple[str, str]]:
"""
Method finds all file names and its paths in the given directory.
Args:
dir_path (str): Path to directory to search from
Returns:
List[Tuple[str, str]]: List of tuples (file name, file path).
"""
file_list = []
for item in os.scandir(dir_path):
if item.is_file():
file_list.append((item.name, os.path.abspath(item.path))) # Append tuple
return file_list
@staticmethod
def get_structure(dir_path: str) -> Dict[str, Union[str, List, Dict]]:
"""
Method goes over the passed directory and creates a special structured dictionary.
        Created dictionary follows these rules:
        * For each directory create a sub-dictionary under the directory's name as the key,
        * Each file in the given directory is added to a list under the 'files' key; each file is added as a tuple where
        first value is the file's name, second value its relative path based on dir_path.
The above is then run recursively across the directories tree structure.
Example:
Passing bottom directory with its relative or absolute path
Directory:
button/
- normal.png
on_click/
- 01.png
- 02.png
on_hover/
- 01.png
Will return dictionary:
{
'path': 'button/'
'files': [('normal.png', 'button/normal.png')],
'on_click': {
'path': 'button/on_click'
'files': [
('01.png', 'button/on_click/01.png'),
('02.png', 'button/on_click/02.png')
]
},
'on_hover': {
'path': 'button/on_hover',
'files': [
('01.png', 'button/on_hover/01.png')
]
}
}
Args:
dir_path (str): Directory path to traverse and create structure from.
Returns:
Dict[str, Union[str, List, Dict]]: Structured dictionary.
"""
norm_dir_path = os.path.normpath(dir_path) # Normalize path
main_structure = {"path": norm_dir_path} # Main mutable dictionary that will get returned
def traverse(structure: Dict, directory: str) -> None:
"""
Recursive function goes over directory, adding its files in the structure key = 'files' list,
recursive call for each directory found.
"""
for name, full_path in [(path, os.path.join(directory, path)) for path in os.listdir(directory)]:
# If file
if os.path.isfile(full_path): # Add each file to files key in structure
if "files" not in structure:
structure["files"] = []
structure["files"].append((name, full_path))
# If directory
if os.path.isdir(full_path): # Add new structure under basename, recursive call
basename = os.path.basename(full_path)
structure[basename] = {"path": full_path}
traverse(structure[basename], full_path)
# Call function
traverse(main_structure, norm_dir_path)
return main_structure
class Json:
"""
Class for loading, writing and updating data in json files.
All json files must contain dictionaries as the main scope object.
"""
@staticmethod
def load(path: str) -> Union[Dict, List]:
"""
Method loads a single json file, returning its contents.
Args:
path (str): Path to Json file.
Returns:
Union[Dict, List]: Content of the Json file
"""
try:
with open(path, "r") as f:
data = json.load(f)
return data
except FileNotFoundError:
print(f"Json.load: Unable to find Json file on path:\n {path}")
@staticmethod
def update(path: str, data: Dict) -> Dict:
"""
Method will update the dictionary with data and return the updated dict.
Args:
path (str): Path to Json file.
data (Dict): Dictionary of key-value pairs to update in the json file
Returns:
Dict: Updated dictionary.
"""
# Read data
try:
with open(path, "r") as f:
read_data = json.load(f)
except FileNotFoundError:
print(f"Json.update: Unable to find Json file on path:\n {path}")
return
# Update
read_data.update(data)
# Save, at this point we know the path exists
with open(path, "w") as f:
json.dump(read_data, f, indent=4)
return read_data
@staticmethod
def save(path: str, data: Dict) -> None:
"""
Method saves data into a json file specified by passed path.
Args:
path (str): Path to Json file to save data to.
data (Dict): Dictionary to save in the json file.
"""
# Check if path contains .json
if not (".json" in path):
path += ".json"
# Write data
try:
with open(path, "w") as f:
json.dump(data, f, indent=4)
except FileNotFoundError:
print(f"Json.save: Unable to find Json file on path:\n {path}")
return
``` |
{
"source": "15r10nk/executing",
"score": 2
} |
#### File: tests/samples/import_hook.py
```python
import logging
import sys
from importlib.util import spec_from_loader
import ast
# This is based on the MacroPy import hook
# https://github.com/lihaoyi/macropy/blob/46ee500b877d5a32b17391bb8122c09b15a1826a/macropy/core/import_hooks.py
class BirdsEyeLoader:
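    """Module loader that runs the module's source through birdseye's eye.exec_string
    (optionally with deep tracing) instead of a plain exec, so the module is traced
    as it is imported."""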
def __init__(self, spec, source, deep):
self._spec = spec
self.source = source
self.deep = deep
def create_module(self, spec):
pass
def exec_module(self, module):
from birdseye.bird import eye
eye.exec_string(
source=self.source,
filename=self._spec.origin,
globs=module.__dict__,
locs=module.__dict__,
deep=self.deep,
)
def get_filename(self, fullname):
return self._spec.loader.get_filename(fullname)
def is_package(self, fullname):
return self._spec.loader.is_package(fullname)
class BirdsEyeFinder(object):
"""Loads a module and looks for tracing inside, only providing a loader
if it finds some.
"""
def _find_plain_spec(self, fullname, path, target):
"""Try to find the original module using all the
remaining meta_path finders."""
spec = None
for finder in sys.meta_path:
# when testing with pytest, it installs a finder that for
# some yet unknown reasons makes birdseye
# fail. For now it will just avoid using it and pass to
# the next one
if finder is self or 'pytest' in finder.__module__:
continue
if hasattr(finder, 'find_spec'):
spec = finder.find_spec(fullname, path, target=target)
elif hasattr(finder, 'load_module'):
spec = spec_from_loader(fullname, finder)
if spec is not None and spec.origin != 'builtin':
return spec
def find_spec(self, fullname, path, target=None):
spec = self._find_plain_spec(fullname, path, target)
if spec is None or not (hasattr(spec.loader, 'get_source') and
callable(spec.loader.get_source)): # noqa: E128
if fullname != 'org':
# stdlib pickle.py at line 94 contains a ``from
# org.python.core for Jython which is always failing,
# of course
logging.debug('Failed finding spec for %s', fullname)
return
try:
source = spec.loader.get_source(fullname)
except ImportError:
logging.debug('Loader for %s was unable to find the sources',
fullname)
return
except Exception:
logging.exception('Loader for %s raised an error', fullname)
return
if not source or 'birdseye' not in source:
return
deep, trace_stmt = should_trace(source)
if not trace_stmt:
return
loader = BirdsEyeLoader(spec, source, deep)
return spec_from_loader(fullname, loader)
def should_trace(source):
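    # Scans the module's top-level imports for birdseye's trace_module / trace_module_deep.
    # For example, "from birdseye import trace_module_deep" yields deep=True plus the import
    # node, while a module without such an import yields (False, None).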
trace_stmt = None
deep = False
for stmt in ast.parse(source).body:
if isinstance(stmt, ast.Import):
for alias in stmt.names:
if alias.name.startswith('birdseye.trace_module'):
trace_stmt = stmt
if alias.name.endswith('deep'):
deep = True
if isinstance(stmt, ast.ImportFrom) and stmt.module == 'birdseye':
for alias in stmt.names:
if alias.name.startswith('trace_module'):
trace_stmt = stmt
if alias.name.endswith('deep'):
deep = True
return deep, trace_stmt
```
#### File: tests/samples/ipython.py
```python
import inspect
import socket
import sys
from io import BytesIO, StringIO
from threading import currentThread, Thread
from uuid import uuid4
from IPython.core.display import HTML, display
from IPython.core.magic import Magics, cell_magic, magics_class
from jinja2 import Environment, PackageLoader, select_autoescape
from traitlets import Unicode, Int, Bool
from werkzeug.local import LocalProxy
from werkzeug.serving import ThreadingMixIn
from birdseye.bird import PY2, Database
from birdseye import server, eye
fake_stream = BytesIO if PY2 else StringIO
thread_proxies = {}
def stream_proxy(original):
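    # Wraps a real stream in a werkzeug LocalProxy: frames belonging to the werkzeug
    # request-handling thread (detected via ThreadingMixIn.process_request_thread on the
    # call stack) get a throwaway in-memory stream, threads registered in thread_proxies
    # get their registered stream, and everything else falls back to the original stream,
    # so server output does not leak into the notebook.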
def p():
frame = inspect.currentframe()
while frame:
if frame.f_code == ThreadingMixIn.process_request_thread.__code__:
return fake_stream()
frame = frame.f_back
return thread_proxies.get(currentThread().ident,
original)
return LocalProxy(p)
sys.stderr = stream_proxy(sys.stderr)
sys.stdout = stream_proxy(sys.stdout)
def run_server(port, bind_host, show_server_output):
if not show_server_output:
thread_proxies[currentThread().ident] = fake_stream()
try:
server.app.run(
debug=True,
port=port,
host=bind_host,
use_reloader=False,
)
except socket.error:
pass
templates_env = Environment(
loader=PackageLoader('birdseye', 'templates'),
autoescape=select_autoescape(['html', 'xml'])
)
@magics_class
class BirdsEyeMagics(Magics):
server_url = Unicode(
u'', config=True,
help='If set, a server will not be automatically started by %%eye. '
'The iframe containing birdseye output will use this value as the base '
'of its URL.'
)
port = Int(
7777, config=True,
help='Port number for the server started by %%eye.'
)
bind_host = Unicode(
'127.0.0.1', config=True,
help='Host that the server started by %%eye listens on. '
'Set to 0.0.0.0 to make it accessible anywhere.'
)
show_server_output = Bool(
False, config=True,
help='Set to True to show stdout and stderr from the server started by %%eye.'
)
db_url = Unicode(
u'', config=True,
help='The database URL that the server started by %%eye reads from. '
'Equivalent to the environment variable BIRDSEYE_DB.'
)
@cell_magic
def eye(self, _line, cell):
if not self.server_url:
server.db = Database(self.db_url)
server.Function = server.db.Function
server.Call = server.db.Call
server.Session = server.db.Session
Thread(
target=run_server,
args=(
self.port,
self.bind_host,
self.show_server_output,
),
).start()
eye.db = Database(self.db_url)
def callback(call_id):
"""
Always executes after the cell, whether or not an exception is raised
in the user code.
"""
if call_id is None: # probably means a bug
return
html = HTML(templates_env.get_template('ipython_iframe.html').render(
call_id=call_id,
url=self.server_url.rstrip('/'),
port=self.port,
container_id=uuid4().hex,
))
# noinspection PyTypeChecker
display(html)
value = eye.exec_ipython_cell(cell, callback)
# Display the value as would happen if the %eye magic wasn't there
return value
```
#### File: tests/samples/tests.py
```python
from __future__ import print_function, division
import ast
import inspect
import os
import sys
import tempfile
import time
import unittest
from executing import Source, only, PY3, NotOneValueFound, get_instructions
class TestStuff(unittest.TestCase):
# noinspection PyTrailingSemicolon
def test_semicolons(self):
# @formatter:off
tester(1); tester(2); tester(3)
tester(9
); tester(
8); tester(
99
); tester(33); tester([4,
5, 6, [
7]])
# @formatter:on
def test_decorator(self):
@empty_decorator
@decorator_with_args(tester('123'), x=int())
@tester(list(tuple([1, 2])), returns=empty_decorator)
@tester(
list(
tuple(
[3, 4])),
returns=empty_decorator)
@empty_decorator
@decorator_with_args(
str(),
x=int())
@tester(list(tuple([5, 6])), returns=empty_decorator)
@tester(list(tuple([7, 8])), returns=empty_decorator)
@empty_decorator
@decorator_with_args(tester('sdf'), x=tester('123234'))
def foo():
pass
def test_comprehensions(self):
# Comprehensions can be separated if they contain different names
str([{tester(x) for x in [1]}, {tester(y) for y in [1]}])
# or are on different lines
str([{tester(x) for x in [1]},
{tester(x) for x in [1]}])
# or are of different types
str([{tester(x) for x in [1]}, list(tester(x) for x in [1])])
# but not if everything is the same
# noinspection PyTypeChecker
# with self.assertRaises((AttributeError, NotOneValueFound)):
# str([{tester(x) for x in [1]}, {tester(x) for x in [2]}])
def test_lambda(self):
self.assertEqual(
(lambda x: (tester(x), tester(x)))(tester(3)),
(3, 3),
)
(lambda: (lambda: tester(1))())()
self.assertEqual(
(lambda: [tester(x) for x in tester([1, 2])])(),
[1, 2],
)
def test_closures_and_nested_comprehensions(self):
x = 1
# @formatter:off
str({tester(a+x): {tester(b+x): {tester(c+x) for c in tester([1, 2])} for b in tester([3, 4])} for a in tester([5, 6])})
def foo():
y = 2
str({tester(a+x): {tester(b+x): {tester(c+x) for c in tester([1, 2])} for b in tester([3, 4])} for a in tester([5, 6])})
str({tester(a+y): {tester(b+y): {tester(c+y) for c in tester([1, 2])} for b in tester([3, 4])} for a in tester([5, 6])})
str({tester(a+x+y): {tester(b+x+y): {tester(c+x+y) for c in tester([1, 2])} for b in tester([3, 4])} for a in tester([5, 6])})
def bar():
z = 3
str({tester(a+x): {tester(b+x): {tester(c+x) for c in tester([1, 2])} for b in tester([3, 4])} for a in tester([5, 6])})
str({tester(a+y): {tester(b+y): {tester(c+y) for c in tester([1, 2])} for b in tester([3, 4])} for a in tester([5, 6])})
str({tester(a+x+y): {tester(b+x+y): {tester(c+x+y) for c in tester([1, 2])} for b in tester([3, 4])} for a in tester([5, 6])})
str({tester(a+x+y+z): {tester(b+x+y+z): {tester(c+x+y+z) for c in tester([1, 2])} for b in tester([3, 4])} for a in tester([5, 6])})
bar()
foo()
# @formatter:on
def test_indirect_call(self):
dict(x=tester)['x'](tester)(3, check_func=False)
def test_compound_statements(self):
with self.assertRaises(TypeError):
try:
for _ in tester([1, 2, 3]):
while tester(0):
pass
else:
tester(4)
else:
tester(5)
raise ValueError
except tester(ValueError):
tester(9)
raise TypeError
finally:
tester(10)
# PyCharm getting confused somehow?
# noinspection PyUnreachableCode
str()
with self.assertRaises(tester(Exception)):
if tester(0):
pass
elif tester(0):
pass
elif tester(1 / 0):
pass
def test_generator(self):
def gen():
for x in [1, 2]:
yield tester(x)
gen2 = (tester(x) for x in tester([1, 2]))
assert list(gen()) == list(gen2) == [1, 2]
def test_future_import(self):
tester(4)
def test_many_calls(self):
node = None
start = time.time()
for i in range(10000):
new_node = Source.executing(inspect.currentframe()).node
if node is None:
node = new_node
else:
self.assertIs(node, new_node)
self.assertLess(time.time() - start, 1)
def test_decode_source(self):
def check(source, encoding, exception=None, matches=True):
encoded = source.encode(encoding)
if exception:
with self.assertRaises(exception):
Source.decode_source(encoded)
else:
decoded = Source.decode_source(encoded)
if matches:
self.assertEqual(decoded, source)
else:
self.assertNotEqual(decoded, source)
check(u'# coding=utf8\né', 'utf8')
check(u'# coding=gbk\né', 'gbk')
check(u'# coding=utf8\né', 'gbk', exception=UnicodeDecodeError)
check(u'# coding=gbk\né', 'utf8', matches=False)
# In Python 3 the default encoding is assumed to be UTF8
if PY3:
check(u'é', 'utf8')
check(u'é', 'gbk', exception=SyntaxError)
def test_multiline_strings(self):
tester('a')
tester('''
ab''')
tester('''
abc
def
'''
)
str([
tester(
'''
123
456
'''
),
tester(
'''
345
456786
'''
),
])
tester(
[
'''
123
456
'''
'''
345
456786
'''
,
'''
123
456
''',
'''
345
456786
'''
]
)
def test_multiple_statements_on_one_line(self):
if tester(1): tester(2)
for _ in tester([1, 2]): tester(3)
def assert_qualname(self, func, qn, check_actual_qualname=True):
qualname = Source.for_filename(__file__).code_qualname(func.__code__)
self.assertEqual(qn, qualname)
if PY3 and check_actual_qualname:
self.assertEqual(qn, func.__qualname__)
self.assertTrue(qn.endswith(func.__name__))
def test_qualname(self):
self.assert_qualname(C.f, 'C.f')
self.assert_qualname(C.D.g, 'C.D.g')
self.assert_qualname(f, 'f')
self.assert_qualname(f(), 'f.<locals>.g')
self.assert_qualname(C.D.h(), 'C.D.h.<locals>.i.<locals>.j')
self.assert_qualname(lamb, '<lambda>')
foo = lambda_maker()
self.assert_qualname(foo, 'lambda_maker.<locals>.foo')
self.assert_qualname(foo.x, 'lambda_maker.<locals>.<lambda>')
self.assert_qualname(foo(), 'lambda_maker.<locals>.foo.<locals>.<lambda>')
self.assert_qualname(foo()(), 'lambda_maker.<locals>.foo.<locals>.<lambda>', check_actual_qualname=False)
def test_extended_arg(self):
source = 'tester(6)\n%s\ntester(9)' % list(range(66000))
_, filename = tempfile.mkstemp()
code = compile(source, filename, 'exec')
with open(filename, 'w') as outfile:
outfile.write(source)
exec(code)
def test_only(self):
for n in range(5):
gen = (i for i in range(n))
if n == 1:
self.assertEqual(only(gen), 0)
else:
with self.assertRaises(NotOneValueFound):
only(gen)
def test_invalid_python(self):
path = os.path.join(os.path.dirname(__file__), 'not_code.txt', )
source = Source.for_filename(path)
self.assertIsNone(source.tree)
def test_executing_methods(self):
frame = inspect.currentframe()
executing = Source.executing(frame)
self.assertEqual(executing.code_qualname(), 'TestStuff.test_executing_methods')
if 'pypy' not in sys.version.lower():
text = 'Source.executing(frame)'
self.assertEqual(executing.text(), text)
start, end = executing.text_range()
self.assertEqual(executing.source.text[start:end], text)
def test_attr(self):
c = C()
c.x = c.y = tester
str((c.x.x, c.x.y, c.y.x, c.y.y, c.x.asd, c.y.qwe))
class TestFile(unittest.TestCase):
def test_file(self):
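        # Compiles this file's own source, walks the bytecode, and for every operator /
        # attribute / comparison instruction builds a minimal fake frame (a C() instance
        # with f_lasti, f_code, f_globals and f_lineno) to check that Source.executing
        # can map the instruction back to an AST node.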
source = Source.for_frame(inspect.currentframe())
code = compile(source.text, source.filename, 'exec')
instructions = get_instructions(code)
lineno = None
for inst in instructions:
if inst.starts_line is not None:
lineno = inst.starts_line
if not inst.opname.startswith(
('BINARY_', 'UNARY_', 'LOAD_ATTR', 'LOAD_METHOD', 'LOOKUP_METHOD', 'COMPARE_OP')):
continue
frame = C()
frame.f_lasti = inst.offset
frame.f_code = code
frame.f_globals = globals()
frame.f_lineno = lineno
print(inst.opname)
assert Source.executing(frame).node is not None
class C(object):
@staticmethod
def f():
pass
class D(object):
@staticmethod
def g():
pass
@staticmethod
def h():
def i():
def j():
pass
return j
return i()
TestFile().test_file()
def f():
def g():
pass
return g
def lambda_maker():
def assign(x):
def decorator(func):
func.x = x
return func
return decorator
@assign(lambda: 1)
def foo():
return lambda: lambda: 3
return foo
lamb = lambda: 0
class Tester(object):
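    # Test helper: every operation on the `tester` singleton (calls, attribute access,
    # indexing, arithmetic, comparisons) looks up the AST node currently being executed
    # in the caller's frame via Source.executing, re-evaluates the relevant sub-expressions
    # and asserts they match this object and the expected operands.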
def get_node(self, typ):
frame = inspect.currentframe().f_back.f_back
Source.lazycache(frame)
node = Source.executing(frame).node
assert isinstance(node, typ), (node, typ)
return node
def check(self, node, value):
frame = inspect.currentframe().f_back.f_back
result = eval(
compile(ast.Expression(node), frame.f_code.co_filename, 'eval'),
frame.f_globals,
frame.f_locals,
)
assert result == value, (result, value)
def __call__(self, arg, check_func=True, returns=None):
call = self.get_node(ast.Call)
self.check(call.args[0], arg)
if check_func:
self.check(call.func, self)
if returns is None:
return arg
return returns
def __getattr__(self, item):
node = self.get_node(ast.Attribute)
self.check(node.value, self)
assert node.attr == item
return self
def __getitem__(self, item):
node = self.get_node(ast.Subscript)
self.check(node.value, self)
self.check(node.slice.value, item)
return self
def __add__(self, other):
node = self.get_node(ast.BinOp)
self.check(node.left, self)
self.check(node.right, other)
return self
__pow__ = __mul__ = __sub__ = __add__
def __invert__(self):
node = self.get_node(ast.UnaryOp)
self.check(node.operand, self)
return self
__neg__ = __pos__ = __invert__
def __lt__(self, other):
node = self.get_node(ast.Compare)
self.check(node.left, self)
self.check(node.comparators[0], other)
return self
__ne__ = __ge__ = __lt__
tester = Tester()
assert tester([1, 2, 3]) == [1, 2, 3]
assert tester.asd is tester
assert tester[19] is tester
assert tester ** 4 is tester
assert tester * 3 is tester
assert tester - 2 is tester
assert tester + 1 is tester
assert -tester is tester
assert +tester is tester
assert ~tester is tester
assert (tester < 7) is tester
assert (tester >= 78) is tester
assert (tester != 79) is tester
# assert (5 != tester != 6) is tester
assert tester.foo(45, False) == 45
def empty_decorator(func):
return func
def decorator_with_args(*_, **__):
return empty_decorator
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "15ramky/gsa-hello-app",
"score": 3
} |
#### File: gsa-hello-app/helloworld-app/app.py
```python
from flask import Flask, request, render_template
import socket
import ipaddress
app = Flask(__name__)
# least prefix length in GCP
LEAST_PREFIX_LENGTH=29
all_ip_subnets = []
def cal_subnets(ipsubnet):
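    # Recursively splits the given network into every sub-prefix down to /29 and stores
    # each one in the module-level all_ip_subnets list. Because that list lives at module
    # scope, results accumulate across successive form submissions.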
ipsubnet = ipaddress.IPv4Network(ipsubnet)
prefix_len = ipsubnet.prefixlen
if prefix_len == LEAST_PREFIX_LENGTH:
if ipsubnet not in all_ip_subnets:
all_ip_subnets.append(ipsubnet)
else:
for each_subnet in ipaddress.ip_network(ipsubnet).subnets():
if each_subnet not in all_ip_subnets:
all_ip_subnets.append(each_subnet)
cal_subnets(each_subnet)
@app.route("/", methods=['GET'])
def index():
return render_template('index.html')
@app.route('/', methods=['POST'])
def my_form_post():
cal_subnets(request.form['text'])
result_data = "<br>"
for each in all_ip_subnets:
result_data = result_data + " SUBNET: " + each.compressed \
+ " netmask: " + each.netmask.compressed \
+ " broadcast address: " + each.broadcast_address.compressed + "<br>"
html = "<b>All Possible subnets are: </b> {result_data}<br/>"
return html.format(result_data=result_data)
if __name__ == "__main__":
app.run(debug=True, host="0.0.0.0")
``` |
{
"source": "15ramky/remove_control_chars",
"score": 3
} |
#### File: 15ramky/remove_control_chars/remove_control_chars.py
```python
import os
import anim
def parse_each(each_file):
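    # Strips terminal control characters from a captured ".data" file: ANSI escape
    # sequences (ESC up to a final letter in 'abcdhsujkm'), backspaces, and a carriage
    # return after a trailing space (treated like a backspace). The cleaned text is
    # written to "<each_file>.result".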
out_file = each_file+str(".result")
out_f = open(out_file, "w")
with open(each_file,"rb") as f:
anim.screen_anim("Processing ..."+str(each_file))
for line_p in f.readlines():
line, i, imax = '', 0, len(line_p)
while i < imax:
ac = ord(line_p[i])
if (32<=ac<127) or ac in (9,10): # printable, \t, \n
line += line_p[i]
elif ac == 27: # remove coded sequences
i += 1
while i<imax and line_p[i].lower() not in 'abcdhsujkm':
i += 1
elif ac == 8 or (ac==13 and line and line[-1] == ' '): # backspace or EOL spacing
if line:
line = line[:-1]
i += 1
out_f.write(line)
anim.screen_anim(" -- DONE\n")
# taking all the .data files in the current directory
files = [f for f in os.listdir('.') if os.path.isfile(f)]
for each_file in files:
if each_file.split('.')[-1] == "data":
parse_each(each_file)
``` |
{
"source": "15thai/Gibb_ringing",
"score": 2
} |
#### File: 15thai/Gibb_ringing/unring.py
```python
import time
import numpy as np
#from scipy.fftpack import fft,ifft,fft2,ifft2
import pyfftw
from numpy import cos,sin
from numpy.fft import fft, ifft,fft2,ifft2
from math import pi
#from pyfftw.interfaces.numpy_fft import fft
#from pyfftw.interfaces.numpy_fft import fft2
#from pyfftw.interfaces.numpy_fft import ifft
#from pyfftw.interfaces.numpy_fft import ifft2
import pdb
def unring_1d(data,nsh,minW,maxW):
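    # Removes Gibbs ringing along each row of `data` using local subvoxel shifts:
    # every row is re-sampled at 2*nsh+1 fractional shifts (applied as phase ramps
    # in k-space), the shift minimising total variation over a window of minW..maxW
    # neighbouring samples is selected per sample, and the output value is linearly
    # interpolated back onto the original grid from that shifted copy.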
n = data.shape[1]
numlines= data.shape[0]
shifts = np.zeros([2*nsh+1],dtype=np.float64)
shifts[0:nsh+1] = np.arange(nsh+1,dtype=np.float64)
shifts[nsh+1:] = -(np.arange(nsh,dtype=np.float64) +1)
phis = pi /n * shifts / nsh
us = cos(phis) + 1j* sin(phis)
sh = pyfftw.empty_aligned([2*nsh+1,n], dtype='complex128')
sh2 = pyfftw.empty_aligned([2*nsh+1,n], dtype='complex128')
#sh = np.zeros([2*nsh+1,n],dtype=np.complex128)
#sh2 = np.zeros([2*nsh+1,n],dtype=np.complex128)
    maxn = (n - 1) // 2 if (n % 2 == 1) else n // 2 - 1  # integer division so range() below gets an int
data_out = pyfftw.empty_aligned(data.shape, dtype='complex128')
line = pyfftw.empty_aligned(n, dtype='complex128')
for k in range(numlines):
line[:] = data[k,:]
sh[0,:]= fft(line)
sh[:,0]=sh[0,0]
if n%2 ==0:
            sh[:, n // 2] = 0
        es = np.ones([2 * nsh + 1], dtype=np.complex128)  # cumulative phase factors must start at 1 (empty_aligned memory is uninitialised)
for l in range(maxn):
es[1:] = es[1:] * us[1:]
L=l+1
sh[1:,L] = es[1:] *sh[0,L]
L=n-1-l
sh[1:,L] = np.conjugate(es[1:]) *sh[0,L]
for j in range(2*nsh+1):
line2=sh[j,:]
sh2[j,:]= ifft(line2)
TV1arr= np.zeros([2*nsh+1],dtype=np.double)
TV2arr= np.zeros([2*nsh+1],dtype=np.double)
for t in range(minW,maxW+1):
TV1arr[:] = TV1arr[:] + abs(sh2[:,(-t)%n].real - sh2[:, -(t+1)%n].real)
TV1arr[:] = TV1arr[:] + abs(sh2[:,(-t)%n].imag - sh2[:, -(t+1)%n].imag)
TV2arr[:] = TV2arr[:] + abs(sh2[:, t %n].real - sh2[:, (t+1)%n].real)
TV2arr[:] = TV2arr[:] + abs(sh2[:, t%n].imag - sh2[:, (t+1)%n].imag)
for l in range(n):
minidx1 = np.argmin(TV1arr)
minidx2 = np.argmin(TV2arr)
if TV1arr[minidx1] < TV2arr[minidx2]:
minidx=minidx1
else:
minidx=minidx2
TV1arr[:] = TV1arr[:] + abs(sh2[:, (l-minW+1)%n].real - sh2[:, (l-minW)%n].real)
TV1arr[:] = TV1arr[:] - abs(sh2[:, (l-maxW)%n].real - sh2[:, (l-(maxW+1))%n].real)
TV2arr[:] = TV2arr[:] + abs(sh2[:, (l+maxW+1)%n].real - sh2[:, (l+maxW+2)%n].real)
TV2arr[:] = TV2arr[:] - abs(sh2[:, (l+minW)%n].real - sh2[:, (l+minW+1)%n].real)
TV1arr[:] = TV1arr[:] + abs(sh2[:, (l-minW+1)%n].imag - sh2[:, (l-minW)%n].imag)
TV1arr[:] = TV1arr[:] - abs(sh2[:, (l-maxW)%n].imag - sh2[:, (l-(maxW+1))%n].imag)
TV2arr[:] = TV2arr[:] + abs(sh2[:, (l+maxW+1)%n].imag - sh2[:, (l+maxW+2)%n].imag)
TV2arr[:] = TV2arr[:] - abs(sh2[:, (l+minW)%n].imag - sh2[:, (l+minW+1)%n].imag)
a0r = sh2[minidx,(l-1)%n].real
a1r = sh2[minidx,l].real
a2r = sh2[minidx,(l+1)%n].real
a0i = sh2[minidx,(l-1)%n].imag
a1i = sh2[minidx,l].imag
a2i = sh2[minidx,(l+1)%n].imag
s= np.double(shifts[minidx])/nsh/2.
if s > 0:
data_out[k,l] = (a1r*(1-s) + a0r*s + 1j* (a1i*(1-s) + a0i*s))
else:
s=-s
data_out[k,l] = (a1r*(1-s) + a2r*s + 1j* (a1i*(1-s) + a2i*s))
return data_out
def unring_2d(data1,nsh,minW,maxW):
eps = 1E-10
data1_a= pyfftw.empty_aligned((data1.shape[0], data1.shape[1]), dtype='complex128')
data2_a= pyfftw.empty_aligned((data1.shape[1], data1.shape[0]), dtype='complex128')
data1_a[:]=data1
data2_a[:]=data1_a.transpose()
tmp1 = fft2(data1_a)
tmp2 = fft2(data2_a)
cks = np.arange(data1.shape[0],dtype=np.float64)
cks = ( 1 + cos(2*pi*cks/data1.shape[0]))*0.5
cjs = np.arange(data1.shape[1],dtype=np.float64)
cjs = (1 + cos(2*pi*cjs/data1.shape[1]))*0.5
cks_plus_cjs = np.tile(cks,[data1.shape[1],1]).transpose() + np.tile(cjs,[data1.shape[0],1])
cks_plus_cjs[cks_plus_cjs ==0] = eps
#pdb.set_trace()
tmp1 = (tmp1 * np.tile(cks,[data1.shape[1],1]).transpose() ) / cks_plus_cjs
tmp2 = (tmp2 * np.tile(cjs,[data1.shape[0],1]).transpose() ) / cks_plus_cjs.transpose()
data1_a[:]= ifft2(tmp1)
data2_a[:]= ifft2(tmp2)
data1b = unring_1d(data1_a,nsh,minW,maxW)
data2b = unring_1d(data2_a,nsh,minW,maxW)
tmp1[:]= fft2(data1b)
tmp2[:]= fft2(data2b)
tmp1[:] = (tmp1 + tmp2.transpose())
tmp2 = ifft2(tmp1)
return tmp2
def unring(arr, nsh=25, minW=1, maxW=3, out_dtype=None):
r"""Gibbs-ringing artifact removal based on local subvoxel-shifts.
Parameters
----------
arr : 4D array
Array of data to be unringed. The dimensions are (X, Y, Z, N), where N
are the diffusion gradient directions.
nsh : int, optional
Number of subvoxel shifts evaluated on each side (the method builds
2*nsh+1 shifted copies of every line). Default: 25.
minW : int, optional
Smallest neighbour offset used in the total-variation measure. Default: 1.
maxW : int, optional
Largest neighbour offset used in the total-variation measure. Default: 3.
out_dtype : str or dtype, optional
The dtype for the output array. Default: output has the same dtype as
the input.
Returns
-------
unrang_arr : 4D array
The unringed array, of the same size as that of the input data.
References
----------
.. [Kellner16] Kellner E, Dhital B, Kiselev VG, Reisert M (2016)
Gibbs-ringing artifact removal based on local subvoxel-shifts.
Magnetic Resonance in Medicine 76(5), p1574-1581.
"""
start_time = time.time()
if out_dtype is None:
out_dtype = arr.dtype
# We retain float64 precision, iff the input is in this precision:
if arr.dtype == np.float64:
calc_dtype = np.float64
# Otherwise, we'll calculate things in float32 (saving memory)
else:
calc_dtype = np.float32
if not arr.ndim == 4:
raise ValueError("unring can only be performed on 4D arrays.",
arr.shape)
unrang_arr = np.zeros(arr.shape, dtype=calc_dtype)
slice_data = pyfftw.empty_aligned((arr.shape[0], arr.shape[1]), dtype='complex128')
for vol in range(arr.shape[3]):
print(vol)
for k in range(arr.shape[2]):
slice_data = arr[:,:,k,vol]
result_slice = unring_2d(slice_data, nsh,minW,maxW)
unrang_arr[:,:,k,vol]=result_slice.real
print("--- %s seconds ---" % (time.time() - start_time))
return unrang_arr.astype(out_dtype)
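# --- Usage sketch (illustrative only) ---
# `unring` expects a 4D array shaped (X, Y, Z, N). A minimal smoke test with
# synthetic data (real diffusion volumes would come from a NIfTI reader, which
# is not shown here):
#   data = np.random.rand(64, 64, 10, 5).astype(np.float32)
#   corrected = unring(data, nsh=25, minW=1, maxW=3)
#   assert corrected.shape == data.shape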
``` |
{
"source": "15vrs/cmpe-327",
"score": 3
} |
#### File: cmpe-327/qa327/backend.py
```python
from qa327.models import db, User, Ticket
from werkzeug.security import generate_password_hash, check_password_hash
from datetime import date
"""
This file defines all backend logic that interacts with database and other services
"""
def get_user(email):
"""
Get a user by a given email
:param email: the email of the user
:return: a user that has the matched email address
"""
user = User.query.filter_by(email=email).first()
return user
def login_user(email, password):
"""
Check user authentication by comparing the password
:param email: the email of the user
:param password: the password input
:return: the user if login succeeds
"""
# if this returns a user, then the name already exists in database
user = get_user(email)
if not user or not check_password_hash(user.password, password):
return None
return user
def register_user(email, name, password, password2):
"""
Register the user to the database
:param email: the email of the user
:param name: the name of the user
:param password: the password of <PASSWORD>
:param password2: another password input to make sure the input is correct
:return: an error message if there is any, or None if register succeeds
"""
user = get_user(email)
if user:
return "This email has been ALREADY used"
hashed_pw = generate_password_hash(password, method='sha256')
# store the encrypted password rather than the plain password
new_user = User(email=email, name=name, password=<PASSWORD>, balance=5000)
db.session.add(new_user)
db.session.commit()
return None
def set_ticket(owner, name, quantity, price, date):
"""
Register a ticket to the database
:param owner: the email of the ticket seller
:param name: the name of the ticket
:param quantity: the quantity of tickets being sold
:param price: the price of each ticket being sold
:param date: the date the tickets expire
:return: an error message if there is any, or None if register succeeds
"""
new_ticket = Ticket(owner=owner, name=name, quantity=quantity, price=price, date=date)
db.session.add(new_ticket)
db.session.commit()
return None
def get_all_tickets():
"""
Gets all the tickets in the database that haven't expired
:return: a list of Tickets that haven't expired
"""
tik = Ticket.query.filter(Ticket.date > int(date.today().strftime('%Y%m%d'))).all()
return tik
def update_ticket(owner, name, quantity, price, date):
"""
Attempt to update a ticket in the database
:return: an error message if there is any, or None if update succeeds
"""
tik = Ticket.query.filter_by(owner=owner, name=name).first()
if not tik:
return "Ticket does not exist"
tik.quantity = quantity
tik.price = price
tik.date = date
db.session.commit()
return None
def buy_ticket(email, name, quantity):
"""
Attempt to buy a ticket in the database
:param email: the email of the ticket buyer
:param name: the name of the ticket being bought
:param quantity: the quantity of tickets being bought
:return: an error message if there is any, or None if register succeeds
"""
user = User.query.filter_by(email=email).first()
tik = Ticket.query.filter_by(name=name).first()
user.balance = user.balance - (tik.price * quantity * 1.40)
if tik.quantity == quantity:
db.session.delete(tik)
else:
tik.quantity = tik.quantity - quantity
db.session.commit()
return None
def delete_database():
"""
Deletes both the Ticket and User databases
"""
User.query.delete()
Ticket.query.delete()
db.session.commit()
def get_ticket(name):
"""
Gets the first ticket in the database
:param name: the name of the ticket
:return: The ticket or none if no ticket exist
"""
return Ticket.query.filter_by(name=name).first()
def get_balance(owner):
"""
Gets the users current balance
:param owner: the email of the owner
:return: balance of the current user or none if owner doesn't exists
"""
return User.query.filter_by(email=owner).first().balance
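# --- Usage sketch (illustrative only) ---
# These helpers assume an active Flask application context with the SQLAlchemy
# `db` bound to it; the values below are made up for the example:
#   register_user('a@b.com', 'Alice', 'Password1', 'Password1')
#   user = login_user('a@b.com', 'Password1')
#   set_ticket(user.email, 'concert', quantity=2, price=30, date=20991231)
#   tickets = get_all_tickets()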
``` |
{
"source": "15x15G/onebot_Astrologian_FFXIV",
"score": 2
} |
#### File: 15x15G/onebot_Astrologian_FFXIV/__init__.py
```python
from .luck import luck_daily
from hoshino import Service, logger
from hoshino.typing import CQEvent
from hoshino.util import escape
from hoshino.typing import CommandSession
sv = Service('Astrologian', help_='''
[占卜/zhanbu]
'''.strip())
# The on_command decorator declares the function as a command handler
@sv.on_prefix('/zhanbu','占卜','/占卜','zhanbu', only_to_me=False)
async def luck(bot, ev: CQEvent):
args: list = escape(ev.message.extract_plain_text().strip()).split()
msg=" "
if args:
if "help" in args:
msg="""
艾欧泽亚人的一天从23:00开始!
可以在"/占卜"后加 "重抽" "redraw" "r" 来重抽
插件名:Astrologian
(forked from onebot_Astrologian_FFXIV)
当前支持版本:hoshinobot""".strip()
elif ("r" in args) or ("重抽" in args) or ("redraw" in args):
msg="开拓命运吧\n"
msg+=await luck_daily(user_id=ev.user_id,redraw=True)
elif args[0] == "test" and len(args)>1 and args[1].isdigit()==True:
logger.debug("test" + ": " + args[1])
msg=await luck_daily(user_id=int(args[1]),redraw=False)
else:
msg=await luck_daily(user_id=ev.user_id,redraw=False)
await bot.send(ev, msg)
``` |
{
"source": "15ykp/Facial-PC-Capstone",
"score": 3
} |
#### File: 15ykp/Facial-PC-Capstone/test.py
```python
import pyaudio
import speech_recognition as sr
import time
def play_sound(audio):
# Set chunk size of 35000 samples per data frame
chunk = 35000
# Create an interface to PortAudio
p = pyaudio.PyAudio()
# Open a .Stream object to write the WAV file to
# 'output = True' indicates that the sound will be played rather than recorded
stream = p.open(format = 8,
channels = 1,
rate = audio.sample_rate,
output = True)
stream.write(audio.get_raw_data())
# Close and terminate the stream
stream.close()
p.terminate()
microphone = sr.Microphone(device_index=(3))
recognizer = sr.Recognizer()
print('talk now')
with microphone as s:
audio = recognizer.listen(s, 1, 10)
print('Will playback in 2 secs')
time.sleep(2)
play_sound(audio)
``` |
{
"source": "160012/EDAS",
"score": 2
} |
#### File: EDAS/function/admin.py
```python
from .models import Cadres, Teacher, Curriculum
from django.contrib import admin
@admin.register(Cadres)
class CadresAdmin(admin.ModelAdmin):
def save_model(self, request, obj, form, change):
if change:
user = request.user.username
name = self.model.objects.get(pk=obj.pk).name
# person = form.cleaned_data['person'].name
f = open('e://jiaowuxitong.txt', 'a')
# f.write(person+'职位:'+job+',被'+user+'修改'+'\r\n')
f.write('学生干部,干部:'+name+'被'+user+'修改'+'\r\n')
f.close()
else:
pass
super().save_model(request, obj, form, change)
fieldsets = (
('个人信息', {
'fields': ('c_id', 'name', 'sex', 'position', 'phone', 'QQ')
}),)
# Read-only fields
readonly_fields = ['c_id', ]
# Default ordering field
ordering = ['c_id']
# Sortable fields
sortable_by = ['c_id', 'sex']
# Fields shown on the list page
list_display = ['c_id', 'name', 'sex', 'position', 'phone', 'QQ']
# Fields that link to the detail page
list_display_links = ['c_id', 'name']
# List filters
list_filter = ['sex']
# Number of rows per page
list_per_page = 10
# Searchable fields
search_fields = ['name', 'position']
admin.site.site_title = '教务系统(极简)'
admin.site.site_header = '18级网络工程2班'
@admin.register(Curriculum)
class CurriculumAdmin(admin.ModelAdmin):
# Fields shown on the change (edit) page
fieldsets = (
('课程信息', {
'fields': ("name", "teacher", "all_time", "theoretical_time",
"practice_time", "score", "category", "method",
"assessment", "week_time", "time", "place"),
}),
)
# Sortable fields on the list page
# sortable_by = ['score', 'category', 'assessment', 'all_time', 'score', 'category', 'name']
# Fields shown on the list page
list_display = ['name', 'teacher', 'all_time', 'theoretical_time', 'practice_time',
'score', 'category', 'method', 'assessment', 'week_time', 'time', 'place']
# List filters
list_filter = ['category', 'assessment', 'method']
# Number of rows per page
list_per_page = 10
# Searchable fields
search_fields = ['name', 'teacher', 'place']
@admin.register(Teacher)
class TeacherAdmin(admin.ModelAdmin):
fieldsets = (
('个人信息', {
'fields': ('id', 'name', 'sex', 'phone', 'subject')
})
,
)
# Read-only fields
readonly_fields = ['id',]
# Default ordering field
ordering = ['id']
# Fields shown on the list page
list_display = ['id', 'name', 'subject', 'phone']
# Fields that link to the detail page
list_display_links = ['id', 'name']
# Number of rows per page
list_per_page = 10
# Searchable fields
search_fields = ['name', 'subject', 'phone']
```
#### File: EDAS/student/__init__.py
```python
from django.apps import AppConfig
import os
default_app_config = 'student.IndexConfig'
def get_current_app_name(_file):
return os.path.split(os.path.dirname(_file))[-1]
class IndexConfig(AppConfig):
name = get_current_app_name(__file__)
verbose_name = '学生列表'
```
#### File: management/commands/import_teacher.py
```python
import csv
from django.core.management import BaseCommand
from function.models import Teacher
# python manage.py import_teacher --path
class Command(BaseCommand):
help = '从一个CSV文件的内容中读取候选人列表,导入到数据库中'
def add_arguments(self, parser):
parser.add_argument('--path', type=str)
def handle(self, *args, **kwargs):
path = kwargs['path']
with open(path, 'rt', encoding='utf-8') as f:
reader = csv.reader(f, dialect='excel')
for row in reader:
teacher = Teacher.objects.create(
name=row[0],
sex=row[1],
subject=row[2],
phone=row[3],
)
```
#### File: EDAS/student/models.py
```python
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.utils.html import format_html
sex_fields = [
('男', '男'),
('女', '女'),
]
leader = ['张宁超', '于俊凯', '尚进', '修鸿博', '杨旭', '刘梦',
'王洪明', '董一燃', '张源', '黄继成', '曲泰安']
dor_fields = [
('1号楼112', '1号楼112'),
('8号楼142', '8号楼142'),
('8号楼144', '8号楼144'),
('1号楼115', '1号楼115'),
('2号楼301', '2号楼301'),
('1号楼114', '1号楼114'),
('1号楼116', '1号楼116'),
('1号楼105', '1号楼105'),
('1号楼110', '1号楼110'),
]
class Student(AbstractUser):
username = models.CharField(max_length=20, unique=True, verbose_name='学号')
name = models.CharField(max_length=20, verbose_name='姓名')
sex = models.CharField(max_length=20, choices=sex_fields, verbose_name='性别')
phone = models.CharField(max_length=20, verbose_name='手机号码')
qq = models.CharField(max_length=20, verbose_name='QQ号码')
dor = models.CharField(max_length=20, choices=dor_fields, verbose_name='寝室')
def __str__(self):
return self.username
class Meta:
verbose_name = '学生列表'
verbose_name_plural = '学生列表'
def colored_name(self):
if self.name in leader:
color_code = 'red'
else:
color_code = 'blue'
return format_html(
'<span style="color: {};">{}</span>',
color_code,
self.name,
)
colored_name.short_description = '姓名'
``` |
{
"source": "1600kabir/filetransfer",
"score": 4
} |
#### File: 1600kabir/filetransfer/cipher.py
```python
def encrypt(s, n):
return ''.join(chr((ord(char) - 97 + n) % 26 + 97) for char in s)
def decrypt(s, n):
return ''.join(chr((ord(char) - 97 - n) % 26 + 97) for char in s)
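# --- Example (illustrative) ---
# encrypt("hello", 3) -> "khoor"
# decrypt("khoor", 3) -> "hello"
# Note: the arithmetic assumes lowercase ASCII letters only.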
action = input('Would you like to encrypt or decrypt a message?: ')
num = 0
while num == 0:
if action == 'decrypt':
msg = input('enter message to be decrypted: ')
key = input('enter the shifting key: ')
print(decrypt(str(msg), int(key)))
num += 1
break
if action == 'encrypt':
msg = input('enter message to be encrypted: ')
key = input('enter the shifting key: ')
print(encrypt(str(msg), int(key)))
num += 1
break
else:
print('action failed')
break
``` |
{
"source": "1600kabir/sql-view-tracker",
"score": 3
} |
#### File: 1600kabir/sql-view-tracker/sql.py
```python
import sqlite3
import os
class DB(object):
def __init__(self, f=None, init=False):
self.file = f
if not os.path.exists(self.file):
open(self.file, 'a').close()
self.conn = sqlite3.connect(self.file)
self.c = self.conn.cursor()
if init:
self._init()
def _init(self):
self.c.execute('''CREATE TABLE views
(id integer PRIMARY KEY, num_views integer)''')
self.conn.commit()
def add_views(self, user_view):
self.c.execute('''SELECT num_views FROM views WHERE id=?''', (user_view,))
row = self.c.fetchone()
if row:
new_views = row[0] + 1
self.c.execute('''UPDATE views SET num_views=? WHERE id=?''', (new_views, user_view))
self.conn.commit()
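# --- Usage sketch (illustrative only; the file name is an assumption) ---
# db = DB('views.sqlite', init=True)
# db.c.execute('INSERT INTO views VALUES (?, ?)', (1, 0))
# db.conn.commit()
# db.add_views(1)   # bumps num_views for the row with id 1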
``` |
{
"source": "1600kabir/tradebot",
"score": 2
} |
#### File: 1600kabir/tradebot/trade.py
```python
import numpy as np
import ffn as f
import pandas as pd
date_train = ['2018-12-01', '2019-01-01', '2019-02-02', '2019-03-01', '2019-04-02', '2019-05-01', '2019-06-02', '2019-07-01', '2019-08-02', '2019-09-01', '2019-10-02', '2019-11-01']
date_train_lag = ['2018-11-30', '2018-12-31', '2019-02-01', '2019-02-28', '2019-04-01', '2019-04-30', '2019-06-01', '2019-06-30', '2019-08-01', '2019-08-30', '2019-10-01', '2019-10-31']
date_trade = ['2019-01-02', '2019-02-01', '2019-03-02', '2019-04-01', '2019-05-02', '2019-06-01', '2019-07-02', '2019-08-01', '2019-09-02', '2019-10-01', '2019-11-02', '2019-12-01']
def mu(df):
s = 0
for i in df:
s += i
return s/len(df)
def Stdev(df, mean):
n = 1/(len(df)-1)
s = 0
for i in df:
x = mean - i
s += x ** 2
return (n * s) ** 0.5  # sample standard deviation (the original returned the variance)
def lsrl(m, yint, price):
return -1 * m * price + yint
for q in range(len(date_train)-1):
data = f.get('jpm', start=date_train[q], end=date_train[q+1])
datalag = f.get('bac', start=date_train[q], end=date_train[q+1])
data['dep'] = datalag
price_data = data
data = data.pct_change()
p1 = data['jpm']
price1 = []
for i in p1:
if str(i) != 'nan':
price1.append(i)
p2 = data['dep']
price2 = []
for j in p2:
if str(j) != 'nan':
price2.append(j)
muX = mu(price1)
muY = mu(price2)
Sx = Stdev(price1, muX)
Sy = Stdev(price2, muY)
corr = np.corrcoef(price1,price2)
corr = corr[0][1]
m = corr * (Sy/Sx)
yint = muY - m * muX  # least-squares intercept: b = mean(y) - m * mean(x)
data = f.get('GOOG, FB, MSFT, jpm, bac', start=date_trade[q], end=date_trade[q+1])
price_data = data
price_change = data.pct_change()
pred = []
for i in price_change['jpm']:
pred.append(lsrl(m, yint, i))
money = 0
buy = lambda price: money - price
sell = lambda price: money + price
short = 0
for i in range(len(pred)-1):
if pred[i] > 0 and short == 0:
money = buy(price_data['bac'][i])
money = sell(price_data['bac'][i+1])
elif pred[i] > 0 and short != 0:
for i in range(short):
money = buy(price_data['bac'][i])
short = 0
elif pred[i] < 0:
money = sell(price_data['bac'][i])
short += 1
print('Profits from {} to {} are {}'.format(date_trade[q], date_trade[q+1], money))
``` |
{
"source": "1621740748/stock-pandas",
"score": 3
} |
#### File: stock-pandas/stock_pandas/common.py
```python
from functools import partial
from typing import (
Callable,
Optional,
Tuple
)
import numpy as np
from pandas import DataFrame
def to_int(name: str, larger_than: int, raw_value: str) -> int:
try:
value = int(raw_value)
except ValueError:
raise ValueError(
f'{name} must be a positive int, but got `{raw_value}`'
)
if value <= larger_than:
raise ValueError(f'{name} must be greater than {larger_than}')
return value
period_to_int = partial(to_int, 'period', 1)
times_to_int = partial(to_int, 'times', 0)
repeat_to_int = partial(to_int, 'repeat', 0)
def create_enum(choices: list, name: str, value: str) -> str:
if value in choices:
return value
choices_str = ' or '.join([f'"{choice}"' for choice in choices])
raise ValueError(
f'{name} should be either {choices_str}, but got `{value}`'
)
style_enums = partial(create_enum, [
'bullish',
'bearish'
], 'style')
column_enums = partial(create_enum, [
'open',
'high',
'low',
'close'
], 'column')
def to_direction(value: str) -> int:
if value == '1':
return 1
if value == '-1':
return - 1
raise ValueError(f'direction must be `1` or `-1`, but got `{value}`')
# A simple cache
class DirectiveCache:
def __init__(self):
self._store = {}
def set(self, key: str, value):
self._store[key] = value
def get(self, key: str, default=None):
return self._store.get(key, default)
KEY_ALIAS_MAP = '__stock_aliases_map'
KEY_COLUMNS_INFO_MAP = '__stock_columns_info_map'
KEY_DIRECTIVES_CACHE = '__stock_directives_cache'
def copy_stock_metas(source, target) -> None:
columns = target.columns
# If the new dataframe has been truncated,
# Then we need to clean the column info
# We just set the size of the info to zero to avoid complexity
need_clean = len(target) < len(source)
source_aliases_map = getattr(source, KEY_ALIAS_MAP, None)
if source_aliases_map is not None:
aliases_map = {}
for alias, column in source_aliases_map.items():
# Column `column` might be dropped in `target`
# by dataframe.drop(columns=some_columns)
# so we need to check it
# TODO: if alias is in columns, something wrong happened
# - support .iloc, loc, and other indexing and setting methods
if column in columns:
aliases_map[alias] = column
# Use `object.__setattr__` to avoid pandas UserWarning:
# > Pandas doesn't allow columns to be created via a new attribute name
object.__setattr__(target, KEY_ALIAS_MAP, aliases_map)
source_columns_info_map = getattr(source, KEY_COLUMNS_INFO_MAP, None)
if source_columns_info_map is not None:
columns_info_map = {}
for column, info in source_columns_info_map.items():
if column in columns:
# Set the size to 0,
# which indicates that the column needs to be calculated again
columns_info_map[
column
] = info.update(0) if need_clean else info
object.__setattr__(target, KEY_COLUMNS_INFO_MAP, columns_info_map)
source_stock_directives_cache = getattr(source, KEY_DIRECTIVES_CACHE, None)
if source_stock_directives_cache is not None:
object.__setattr__(
target,
KEY_DIRECTIVES_CACHE,
source_stock_directives_cache
)
def ensure_return_type(
cls,
method: str,
should_apply_constructor: bool
) -> None:
def helper(self, *args, **kwargs):
ret = getattr(super(cls, self), method)(*args, **kwargs)
if should_apply_constructor:
ret = cls(ret)
copy_stock_metas(self, ret)
return ret
helper.__doc__ = getattr(DataFrame, method).__doc__
setattr(cls, method, helper)
def create_meta_property(key, create, self):
value = getattr(self, key, None)
if value is not None:
return value
value = create()
object.__setattr__(self, key, value)
return value
def meta_property(key, create):
return property(partial(create_meta_property, key, create))
def compare_cross(
left: np.ndarray,
right: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
less = right < left
# matrix or vector of all False value
cross = np.zeros_like(less)
if len(cross) > 1:
# Find cross
cross[1:] = np.diff(less)
return cross, less
ARGS_SEPARATOR = ','
def join_args(args: list) -> str:
return ARGS_SEPARATOR.join([
str(arg) for arg in args
])
def rolling_window(
array: np.ndarray,
period: int,
# A stride for float is 8
stride: int = 8
) -> np.ndarray:
"""Gets an `period`-period rolling window for 1d array
"""
return np.lib.stride_tricks.as_strided(
array,
shape=(len(array) - period + 1, period),
strides=(stride, stride)
)
def shift_and_fill(
array: np.ndarray,
period: int,
fill=np.nan
) -> np.ndarray:
"""Adds items to the left of an array to meet the min periods
"""
return np.append(np.repeat(fill, period - 1), array)
def rolling_calc(
array: np.ndarray,
period: int,
func: Callable,
fill=np.nan,
stride: int = 8
) -> np.ndarray:
"""Creates a `period`-period rolling window and apply
`func` to the items
"""
length = len(array)
if period > length:
return np.repeat(fill, length)
unshifted = np.apply_along_axis(
func,
1,
rolling_window(array, period, stride)
)
return shift_and_fill(unshifted, period, fill)
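# --- Usage sketch (illustrative only) ---
# rolling_calc applies `func` over a sliding window and left-pads the result
# so it stays aligned with the input (the default stride of 8 assumes float64):
#   a = np.array([1., 2., 3., 4., 5.])
#   rolling_calc(a, 3, np.mean)   # -> array([nan, nan, 2., 3., 4.])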
DEFAULT_ARG_VALUE = ''
def command_full_name(
name: str,
sub: Optional[str]
) -> str:
return name if sub is None else f'{name}.{sub}'
NONE_TUPLE = (None, None)
TYPE_DIRECTIVE = 1
TYPE_COMMAND = 2
TYPE_OPERATOR = 3
TYPE_ARGUMENT = 4
TYPE_SCALAR = 5
```
#### File: stock-pandas/stock_pandas/dataframe.py
```python
from typing import (
Tuple,
Type,
Union,
List
)
from pandas import (
DataFrame,
Series,
to_datetime
)
import numpy as np
from .directive import parse
from .common import (
meta_property,
copy_stock_metas,
ensure_return_type,
KEY_ALIAS_MAP,
KEY_COLUMNS_INFO_MAP,
KEY_DIRECTIVES_CACHE,
DirectiveCache
)
class ColumnInfo:
def __init__(self, size, directive, period) -> None:
self.size = size
self.directive = directive
self.period = period
def update(self, size) -> 'ColumnInfo':
"""Creates a new ColumnInfo and update the size
"""
return ColumnInfo(
size,
self.directive,
self.period
)
class StockDataFrame(DataFrame):
"""The wrapper class for `pandas.DataFrame`
Args definitions are the same as `pandas.DataFrame`
"""
_stock_aliases_map = meta_property(
KEY_ALIAS_MAP, lambda: {}
)
_stock_columns_info_map = meta_property(
KEY_COLUMNS_INFO_MAP, lambda: {}
)
_stock_directives_cache = meta_property(
KEY_DIRECTIVES_CACHE, lambda: DirectiveCache()
)
@property
def _constructor(self) -> Type['StockDataFrame']:
"""This method overrides `DataFrame._constructor`
which ensures the return type of several DataFrame methods
"""
return StockDataFrame
def __finalize__(self, other, *args, **kwargs) -> 'StockDataFrame':
"""This method overrides `DataFrame.__finalize__`
which ensures the meta info of StockDataFrame
"""
super().__finalize__(other, *args, **kwargs)
if isinstance(other, StockDataFrame):
copy_stock_metas(other, self)
return self
def __init__(
self,
data=None,
date_column=None,
*args,
**kwargs
) -> None:
DataFrame.__init__(self, data, *args, **kwargs)
if self.columns.nlevels > 1:
# For now, I admit,
# there are a lot of works to support MultiIndex dataframes
raise ValueError(
'stock-pandas does not support dataframes with MultiIndex columns' # noqa:E501
)
if isinstance(data, StockDataFrame):
copy_stock_metas(data, self)
self._create_column = False
if date_column:
self[date_column] = to_datetime(self[date_column])
self.set_index(date_column, inplace=True)
def __getitem__(self, key) -> Union[Series, 'StockDataFrame']:
if isinstance(key, str):
key = self._map_single_key(key)
# We just return super __getitem__,
# because the result must be series
return super().__getitem__(key)
if isinstance(key, list):
key = self._map_keys(key)
# else: key of another type
result = super().__getitem__(key)
if isinstance(result, Series):
# The series has already been fulfilled by
# `self._get_or_calc_series()`
return result
result = StockDataFrame(result)
return result
def _direct_get_column(self, key: str) -> Series:
"""Gets the column directly from dataframe by key
"""
return self._get_item_cache(key)
def exec(
self,
directive_str: str,
create_column: bool = None
) -> np.ndarray:
"""Executes the given directive and
returns a numpy ndarray according to the directive.
This method is **NOT** Thread-safe.
Args:
directive (str): directive
create_column (:obj:`bool`, optional): whether we should create a
column for the calculated series.
Returns:
np.ndarray
"""
if self._is_normal_column(directive_str):
return self[directive_str].to_numpy()
# We should call self.exec() without `create_column`
# inside command formulas
explicit_create_column = isinstance(create_column, bool)
original_create_column = self._create_column
if explicit_create_column:
self._create_column = create_column
else:
# cases
# 1. called by users
# 2. or called by command formulas
create_column = self._create_column
series = self._calc(directive_str)
if explicit_create_column:
# Set back to default value, since we complete calculatiing
self._create_column = original_create_column
return series
def alias(self, as_name, src_name) -> None:
"""Defines column alias or directive alias
Args:
as_name (str): the alias name
src_name (str): the name of the original column, or directive
Returns:
None
"""
columns = self.columns
if as_name in columns:
raise ValueError(f'column "{as_name}" already exists')
if src_name not in columns:
raise ValueError(f'column "{src_name}" not exists')
self._stock_aliases_map[as_name] = src_name
def _map_keys(self, keys) -> List:
return [
self._map_single_key(key)
for key in keys
]
def _map_single_key(self, key):
if not isinstance(key, str):
# It might be an `pandas.DataFrame` indexer type,
# or an KeyError which we should let pandas raise
return key
if key in self._stock_aliases_map:
# Map alias, if the key is an alias
key = self._stock_aliases_map[key]
if self._is_normal_column(key):
# There exists a column named `key`,
# and it is a normal column
return key
# Not exists
directive = self._parse_directive(key)
# It is a valid directive
# If the column exists, then fulfill it,
# else create it
column_name, _ = self._get_or_calc_series(directive, True)
# Append the real column name to the mapped key,
# So `pandas.DataFrame.__getitem__` could index the right column
return column_name
def _parse_directive(self, directive_str: str):
return parse(directive_str, self._stock_directives_cache)
def _get_or_calc_series(
self,
directive,
create_column: bool
) -> Tuple[str, np.ndarray]:
"""Gets the series column corresponds the `directive` or
calculate by using the `directive`
Args:
directive (Directive): the parsed `Directive` instance
create_column (bool): whether we should create a column for the
calculated series
Returns:
Tuple[str, np.ndarray]: the name of the series, and the series
"""
name = str(directive)
if name in self._stock_columns_info_map:
return name, self._fulfill_series(name)
array, period = directive.run(
self,
# create the whole series
slice(None)
)
if create_column:
self._stock_columns_info_map[name] = ColumnInfo(
len(self),
directive,
period
)
self._set_new_item(name, array)
return name, array
def _set_new_item(
self,
name: str,
value: np.ndarray
) -> None:
"""Set a new column and avoid SettingWithCopyWarning by using
pandas internal APIs
"""
value = np.atleast_2d(value)
self._data.set(name, value)
self._clear_item_cache()
def _fulfill_series(self, column_name: str) -> np.ndarray:
column_info = self._stock_columns_info_map.get(column_name)
size = len(self)
array = self._direct_get_column(column_name).to_numpy()
if size == column_info.size:
# Already fulfilled
return array
neg_delta = column_info.size - size
# Sometimes, there is not enough items to calculate
calc_delta = max(
neg_delta - column_info.period + 1,
- size
)
calc_slice = slice(calc_delta, None)
fulfill_slice = slice(neg_delta, None)
partial, _ = column_info.directive.run(self, calc_slice)
if neg_delta == calc_delta:
array = partial
else:
array[fulfill_slice] = partial[fulfill_slice]
self._set_new_item(column_name, array)
column_info.size = size
return array
def _is_normal_column(self, column_name) -> bool:
return column_name in self.columns and \
column_name not in self._stock_columns_info_map
def _calc(self, directive_str: str) -> np.ndarray:
directive = self._parse_directive(directive_str)
_, series = self._get_or_calc_series(directive, self._create_column)
return series
METHODS_TO_ENSURE_RETURN_TYPE = [
('append', False),
('astype', True)
]
for method, should_apply_constructor in METHODS_TO_ENSURE_RETURN_TYPE:
ensure_return_type(StockDataFrame, method, should_apply_constructor)
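# --- Usage sketch (illustrative only; the column names and the directive syntax
# shown below are assumptions about the data and the directive parser) ---
#   stock = StockDataFrame(df, date_column='date')
#   stock.alias('Close', 'close')       # alias an existing column
#   closes = stock.exec('close')        # plain column -> numpy ndarray
#   ma5 = stock.exec('ma:5,close')      # directive column (syntax assumed)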
```
#### File: stock_pandas/directive/__init__.py
```python
from .parser import Parser
from .factory import create_by_node
from .types import Directive
def parse(directive_str: str, cache) -> Directive:
directive_str = directive_str.strip()
cached = cache.get(directive_str)
if cached:
return cached
ast = Parser(directive_str).parse()
directive, _ = create_by_node(ast, directive_str, cache)
cache.set(directive_str, directive)
return directive # type: ignore
``` |
{
"source": "16231108/comp3",
"score": 2
} |
#### File: finrl/autotrain/feature.py
```python
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn import preprocessing
matplotlib.use("Agg")
import datetime
import torch
from finrl.config import config
from finrl.marketdata.yahoodownloader import YahooDownloader
from finrl.preprocessing.preprocessors import FeatureEngineer
from finrl.preprocessing.data import data_split
from finrl.env.env_stocktrading import StockTradingEnv
from finrl.env.lxc_env_stocktrading import lxcStockTradingEnv
from finrl.model.models import DRLAgent
from finrl.trade.backtest import backtest_stats as BackTestStats
from stable_baselines3 import A2C
def train_one(data_path):
"""
fetch the raw data, run feature engineering and write out the train/trade splits
"""
print("==============Start Fetching Data===========")
df = pd.read_csv(data_path)
#names=["date","open","high","low","close","volume","tic","day",]
#df = pd.read_csv("./" + config.DATA_SAVE_DIR + "/" + "20210315-07h382" + ".csv",index_col=0)
print('GPU is :',torch.cuda.is_available())
#df = pd.read_csv("./" + config.DATA_SAVE_DIR + "/" + "20210315-08h17" + ".csv", index_col=0)
#print(df)
print("==============Start Feature Engineering===========")
fe = FeatureEngineer(
use_technical_indicator=True,
tech_indicator_list=config.TECHNICAL_INDICATORS_LIST,
use_turbulence=True,
user_defined_feature=False,
)
processed = fe.preprocess_data(df)
# Training & Trading data split
train = data_split(processed, config.START_DATE, config.START_TRADE_DATE)
trade = data_split(processed, config.START_TRADE_DATE, config.END_DATE)
train.to_csv('/result_train',index=False)
trade.to_csv('/result_trade',index=False)
```
#### File: finrl/marketdata/lxcUrl.py
```python
import requests
import datetime
import pandas as _pd
import base64
import time
from tqdm import tqdm
import json
app_key = "81118a71-6e2d-4117-a03e-71c1e405faef"
app_secrect = "26b193b3-e8da-4eed-a613-147388f17acd"
token = 'F536FCFA8EF34D2D923060768394E3212021030809454981118A71'
def getEveryDay(begin_date,end_date):
# inclusive of both the start and end dates
date_list = []
begin_date = datetime.datetime.strptime(begin_date, "%Y-%m-%d")
end_date = datetime.datetime.strptime(end_date,"%Y-%m-%d")
while begin_date <= end_date:
date_str = begin_date.strftime("%Y-%m-%d")
date_list.append(date_str)
begin_date += datetime.timedelta(days=1)
return date_list
def getToken(app_key,app_secrect):
global token
bytesString = (app_key+':'+app_secrect).encode(encoding="utf-8")
url = 'https://sandbox.hscloud.cn/oauth2/oauth2/token';
header = {'Content-Type': 'application/x-www-form-urlencoded',
'Authorization': 'Basic '+str(base64.b64encode(bytesString),encoding="utf-8")}
field = {'grant_type' : 'client_credentials'}
r = requests.post(url,data=field,headers=header)
if r.json().get('access_token') :
token = r.json().get('access_token')
print("获取公共令牌:"+str(token))
return
else :
print("获取公共令牌失败")
exit
def postOpenApi(url,params):
global token
header = {'Content-Type': 'application/x-www-form-urlencoded',
'Authorization': 'Bearer '+token}
r = requests.post(url,data=params,headers=header)
temp = r.json().get('data')
#print(temp[0]['high_price']=="")
#print("result = "+str(r.json().get('data')))
return temp
def hsDownloadData(en_prod_code,begin_date,end_date):
dataList = getEveryDay(begin_date,end_date)
url = "https://sandbox.hscloud.cn/gildataastock/v1/astock/quotes/daily_quote"
Date =[]
Open = []
High = []
Low = []
Close = []
Adj_Close = []
Volume = []
for oneDay in tqdm(dataList):
#params = 'en_prod_code=600000.SH&trading_date=2016-12-30&unit=0'
params = "en_prod_code="+en_prod_code+"&trading_date="+oneDay
#print(params)
temp = postOpenApi(url, params)
if(temp[0]['high_price'] != ""):  # data present, i.e. the market was open that day
Date.append(temp[0]['trading_date'])
Open.append(temp[0]['open_price'])
High.append(temp[0]['high_price'])
Low.append(temp[0]['low_price'])
Close.append(temp[0]['close_price'])  # TODO: may need revision later
Adj_Close.append(temp[0]['avg_price'])
Volume.append(temp[0]['business_amount'])
time.sleep(2)
Frame = {"Open": Open,
"High": High,
"Low": Low,
"Close": Close,
"Adj Close": Adj_Close,
"Volume": Volume
}
quotes =_pd.DataFrame.from_dict(Frame)
quotes.index = _pd.to_datetime(Date)
quotes.sort_index(inplace=True)
quotes.index.name = "Date"
return quotes
def jsonToDate(jsonDate):
pass
if __name__ == '__main__':
# getToken(app_key,app_secrect)
data = hsDownloadData('000002.SZ','2020-12-21','2021-01-01')
print(len(data))
data = hsDownloadData('000001.SZ', '2020-12-21', '2021-01-01')
print(len(data))
print(type(data))
```
#### File: 16231108/comp3/pipeline.py
```python
import kfp
from kfp import dsl
from kfp.dsl import ContainerOp
from kfp.dsl import InputArgumentPath
def fetch_data():
return ContainerOp(
name='fetch-data',
image='star16231108/baseline:1.1',
command=['python'],
arguments=["main.py",'fetch'],
file_outputs={'out': '/result'},
)
def feature_engineering(data):
return ContainerOp(
name='feature-engineering',
image='star16231108/baseline:1.1',
command=['python'],
arguments=['main.py','feature',InputArgumentPath(data)],
file_outputs={'train_out':'/result_train',
'trade_out':'/result_trade'}
)
def train_model(train_df,trade_df):
return ContainerOp(
name='train-model',
image='star16231108/baseline:1.1',
command=['python'],
arguments=['main.py','train_model',
InputArgumentPath(train_df),
InputArgumentPath(trade_df)],
file_outputs={'trained_model':'/model.pkl',
'e_trade_gym':'/e_trade.pkl'})
def tradeing(m_path,t_path):
return ContainerOp(
name='tradeing',
image='star16231108/baseline:1.1',
command=['python'],
arguments=['main.py','trade',
InputArgumentPath(m_path),
InputArgumentPath(t_path)],
file_outputs={'df_account_value':'/df_account_value'}
)
def result_backtest(a_value):
return ContainerOp(
name='Get Backtest all Results',
image='star16231108/baseline:1.1',
command=['python'],
arguments=['main.py','backtest',
InputArgumentPath(a_value)],
file_outputs={'perf_stats_all': '/perf_stats_all'}
)
@dsl.pipeline(
name='FinRL-Library-2',
)
def sequential_pipeline():
data=fetch_data()
feature=feature_engineering(data.outputs['out'])
model=train_model(feature.outputs['train_out'],
feature.outputs['trade_out'])
trade=tradeing(model.outputs['trained_model'],model.outputs['e_trade_gym'])
backtest=result_backtest(trade.outputs['df_account_value'])
if __name__ == '__main__':
kfp.compiler.Compiler().compile(sequential_pipeline, __file__ + '.yaml')
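# --- Usage note (illustrative only) ---
# Running `python pipeline.py` writes `pipeline.py.yaml`, which can be uploaded
# through the Kubeflow Pipelines UI, or the pipeline can be submitted directly
# with the kfp client (cluster connection details are assumptions):
#   kfp.Client().create_run_from_pipeline_func(sequential_pipeline, arguments={})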
``` |
{
"source": "16231108/stk_comp",
"score": 3
} |
#### File: finrl/marketdata/all_data_download.py
```python
import baostock as bs
import pandas as pd
def download_data(date):
bs.login()
# 获取指定日期的指数、股票数据
stock_rs = bs.query_all_stock(date)
stock_df = stock_rs.get_data()
stock_df.to_csv("D:\\all_stock.csv", encoding="gbk", index=False)
data_df = pd.DataFrame()
for code in stock_df["code"]:
print("Downloading :" + code)
k_rs = bs.query_history_k_data_plus(code, "date,code,open,high,low,close", date, date)
data_df = data_df.append(k_rs.get_data())
bs.logout()
data_df.to_csv("D:\\demo_assignDayData.csv", encoding="gbk", index=False)
print(data_df)
if __name__ == '__main__':
# Fetch the daily K-line data of all stocks for the given date
download_data("2019-02-25")
```
#### File: finrl/marketdata/yahoodownloader.py
```python
import pandas as pd
import baostock as bs
import yfinance as yf
from .lxcUrl import hsDownloadData
class YahooDownloader:
"""Provides methods for retrieving daily stock data from
Yahoo Finance API
Attributes
----------
start_date : str
start date of the data (modified from config.py)
end_date : str
end date of the data (modified from config.py)
ticker_list : list
a list of stock tickers (modified from config.py)
Methods
-------
fetch_data()
Fetches data from yahoo API
"""
def __init__(self, start_date: str, end_date: str, ticker_list: list):
self.start_date = start_date
self.end_date = end_date
self.ticker_list = ticker_list
def lxcDownload(self,tic):
df = pd.read_csv("./"+"lxcData" + "/" + str(tic) + ".csv", index_col=0)
date = df['date']
df = df.drop("date",axis=1)
print(df)
df.index = pd.to_datetime(date)
df.sort_index(inplace=True)
df.index.name = "date"
return df
def fetch_data(self) -> pd.DataFrame:
"""Fetches data from Yahoo API
Parameters
----------
Returns
-------
`pd.DataFrame`
7 columns: A date, open, high, low, close, volume and tick symbol
for the specified stock ticker
"""
# Download and save the data in a pandas DataFrame:
data_df = pd.DataFrame()
print('lxc:',len(self.ticker_list))
lxc_temp = 1
for tic in self.ticker_list:
#print('download ',lxc_temp,'个数据')
lxc_temp = lxc_temp+1
temp_df = yf.download(tic, start=self.start_date, end=self.end_date)
#temp_df = hsDownloadData(en_prod_code =tic, begin_date=self.start_date, end_date=self.end_date)
#print('type temp_df is:', type(temp_df))
#print('temp_df is:',temp_df)
temp_df["tic"] = tic
data_df = data_df.append(temp_df)
# reset the index, we want to use numbers as index instead of dates
data_df = data_df.reset_index()
try:
# convert the column names to standardized names
data_df.columns = [
"date",
"open",
"high",
"low",
"close",
"adjcp",
"volume",
"tic",
]
# use adjusted close price instead of close price
data_df["close"] = data_df["adjcp"]
# drop the adjusted close price column
data_df = data_df.drop("adjcp", 1)
except NotImplementedError:
print("the features are not supported currently")
# create day of the week column (monday = 0)
data_df["day"] = data_df["date"].dt.dayofweek
# convert date to standard string format, easy to filter
data_df["date"] = data_df.date.apply(lambda x: x.strftime("%Y-%m-%d"))
# drop missing data
data_df = data_df.dropna()
data_df = data_df.reset_index(drop=True)
print("Shape of DataFrame: ", data_df.shape)
# print("Display DataFrame: ", data_df.head())
data_df = data_df.sort_values(by=['date','tic']).reset_index(drop=True)
return data_df
def lxc_fetch_data(self) -> pd.DataFrame:
"""Fetches data from Yahoo API
Parameters
----------
Returns
-------
`pd.DataFrame`
7 columns: A date, open, high, low, close, volume and tick symbol
for the specified stock ticker
"""
# Download and save the data in a pandas DataFrame:
# Log into the baostock system
def round_amount(vol):
data = round(float(vol),2)
return data
lg = bs.login()
# Show the login response
print('login respond error_code:' + lg.error_code)
print('login respond error_msg:' + lg.error_msg)
# 获取行业分类数据
rs = bs.query_stock_industry()
# rs = bs.query_stock_basic(code_name="浦发银行")
print('query_stock_industry error_code:' + rs.error_code)
print('query_stock_industry respond error_msg:' + rs.error_msg)
# 打印结果集
lxc_list = []
data_df = pd.DataFrame()
while (rs.error_code == '0') & rs.next():
# 获取一条记录,将记录合并在一起
temp = rs.get_row_data()
lxc_temp = temp
if (temp[3] == "食品饮料"):
lxc_list.append(temp[1])
temp_df = bs.query_history_k_data_plus(temp[1], "date,open,high,low,close,volume", self.start_date, self.end_date).get_data()
if(len(temp_df)<1):
continue
temp_df["tic"] = str(temp[1])
temp_df["open"] = temp_df["open"].apply(round_amount)
temp_df["high"] = temp_df["high"].apply(round_amount)
temp_df["low"] = temp_df["low"].apply(round_amount)
temp_df["close"] = temp_df["close"].apply(round_amount)
temp_df["volume"] = temp_df["volume"].apply(round_amount)
data_df = data_df.append(temp_df)
date = data_df["date"]
data_df = data_df.drop("date",axis = 1)
data_df.index = pd.to_datetime(date)
data_df.index.name="date"
print("data_df is:",data_df)
data_df = data_df.reset_index()
try:
# convert the column names to standardized names
data_df.columns = [
"date",
"open",
"high",
"low",
"close",
"volume",
"tic",
]
# use adjusted close price instead of close price
#data_df["close"] = data_df["adjcp"]
# drop the adjusted close price column
#data_df = data_df.drop("adjcp", 1)
except NotImplementedError:
print("the features are not supported currently")
# create day of the week column (monday = 0)
data_df["day"] = data_df["date"].dt.dayofweek
# convert date to standard string format, easy to filter
data_df["date"] = data_df.date.apply(lambda x: x.strftime("%Y-%m-%d"))
# drop missing data
data_df = data_df.dropna()
data_df = data_df.reset_index(drop=True)
print("Shape of DataFrame: ", data_df.shape)
# print("Display DataFrame: ", data_df.head())
data_df = data_df.sort_values(by=['date','tic']).reset_index(drop=True)
return data_df
def select_equal_rows_stock(self, df):
df_check = df.tic.value_counts()
df_check = pd.DataFrame(df_check).reset_index()
df_check.columns = ["tic", "counts"]
mean_df = df_check.counts.mean()
equal_list = list(df.tic.value_counts() >= mean_df)
names = df.tic.value_counts().index
select_stocks_list = list(names[equal_list])
df = df[df.tic.isin(select_stocks_list)]
return df
``` |
{
"source": "162/catalyst",
"score": 2
} |
#### File: dl/callbacks/checkpoint.py
```python
from typing import Dict
import os
from catalyst.dl.core import Callback, RunnerState
from catalyst.dl import utils
class CheckpointCallback(Callback):
"""
Checkpoint callback to save/restore your model/criterion/optimizer/metrics.
"""
def __init__(
self, save_n_best: int = 3, resume: str = None, resume_dir: str = None
):
"""
Args:
save_n_best: number of best checkpoint to keep
resume: path to checkpoint to load and initialize runner state
"""
self.save_n_best = save_n_best
self.resume = resume
self.resume_dir = resume_dir
self.top_best_metrics = []
self._keys_from_state = ["resume", "resume_dir"]
@staticmethod
def load_checkpoint(*, filename, state: RunnerState):
if os.path.isfile(filename):
print(f"=> loading checkpoint {filename}")
checkpoint = utils.load_checkpoint(filename)
state.epoch = checkpoint["epoch"]
utils.unpack_checkpoint(
checkpoint,
model=state.model,
criterion=state.criterion,
optimizer=state.optimizer,
scheduler=state.scheduler
)
print(
f"loaded checkpoint {filename} (epoch {checkpoint['epoch']})")
else:
raise Exception(f"no checkpoint found at {filename}")
def save_checkpoint(
self,
logdir: str,
checkpoint: Dict,
is_best: bool,
save_n_best: int = 5,
main_metric: str = "loss",
minimize_metric: bool = True
):
suffix = f"{checkpoint['stage']}.{checkpoint['epoch']}"
filepath = utils.save_checkpoint(
logdir=f"{logdir}/checkpoints/",
checkpoint=checkpoint,
suffix=suffix,
is_best=is_best,
is_last=True
)
checkpoint_metric = checkpoint["valid_metrics"][main_metric]
self.top_best_metrics.append((filepath, checkpoint_metric))
self.top_best_metrics = sorted(
self.top_best_metrics,
key=lambda x: x[1],
reverse=not minimize_metric
)
if len(self.top_best_metrics) > save_n_best:
last_item = self.top_best_metrics.pop(-1)
last_filepath = last_item[0]
os.remove(last_filepath)
def pack_checkpoint(self, **kwargs):
return utils.pack_checkpoint(**kwargs)
def on_stage_start(self, state: RunnerState):
for key in self._keys_from_state:
value = getattr(state, key, None)
if value is not None:
setattr(self, key, value)
if self.resume_dir is not None:
self.resume = str(self.resume_dir) + "/" + str(self.resume)
if self.resume is not None:
self.load_checkpoint(filename=self.resume, state=state)
def on_epoch_end(self, state: RunnerState):
if state.stage.startswith("infer"):
return
checkpoint = self.pack_checkpoint(
model=state.model,
criterion=state.criterion,
optimizer=state.optimizer,
scheduler=state.scheduler,
epoch_metrics=dict(state.metrics.epoch_values),
valid_metrics=dict(state.metrics.valid_values),
stage=state.stage,
epoch=state.epoch,
checkpoint_data=state.checkpoint_data
)
self.save_checkpoint(
logdir=state.logdir,
checkpoint=checkpoint,
is_best=state.metrics.is_best,
save_n_best=self.save_n_best,
main_metric=state.main_metric,
minimize_metric=state.minimize_metric
)
def on_stage_end(self, state: RunnerState):
print("Top best models:")
top_best_metrics_str = "\n".join(
[
"{filepath}\t{metric:3.4f}".format(
filepath=filepath, metric=metric
) for filepath, metric in self.top_best_metrics
]
)
print(top_best_metrics_str)
class IterationCheckpointCallback(Callback):
"""
Iteration checkpoint callback to save your model/criterion/optimizer
"""
def __init__(
self,
save_n_last: int = 3,
num_iters: int = 100,
stage_restart: bool = True
):
"""
Args:
save_n_last: number of last checkpoint to keep
num_iters: save the checkpoint every `num_iters`
stage_restart: restart counter every stage or not
"""
self.save_n_last = save_n_last
self.num_iters = num_iters
self.stage_restart = stage_restart
self._iteration_counter = 0
self.last_checkpoints = []
def save_checkpoint(
self,
logdir,
checkpoint,
save_n_last
):
suffix = f"{checkpoint['stage']}." \
f"epoch.{checkpoint['epoch']}." \
f"iter.{self._iteration_counter}"
filepath = utils.save_checkpoint(
logdir=f"{logdir}/checkpoints/",
checkpoint=checkpoint,
suffix=suffix,
is_best=False,
is_last=False
)
self.last_checkpoints.append(filepath)
if len(self.last_checkpoints) > save_n_last:
top_filepath = self.last_checkpoints.pop(0)
os.remove(top_filepath)
print(f"\nSaved checkpoint at {filepath}")
def pack_checkpoint(self, **kwargs):
return utils.pack_checkpoint(**kwargs)
def on_stage_start(self, state):
if self.stage_restart:
self._iteration_counter = 0
def on_batch_end(self, state):
self._iteration_counter += 1
if self._iteration_counter % self.num_iters == 0:
checkpoint = self.pack_checkpoint(
model=state.model,
criterion=state.criterion,
optimizer=state.optimizer,
scheduler=state.scheduler,
epoch_metrics=None,
valid_metrics=None,
stage=state.stage,
epoch=state.epoch
)
self.save_checkpoint(
logdir=state.logdir,
checkpoint=checkpoint,
save_n_last=self.save_n_last
)
__all__ = ["CheckpointCallback", "IterationCheckpointCallback"]
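# --- Usage sketch (illustrative only) ---
# Both callbacks are passed to a runner together with the rest of the
# callbacks; the surrounding runner/experiment setup is omitted here:
#   callbacks = [
#       CheckpointCallback(save_n_best=5),
#       IterationCheckpointCallback(save_n_last=2, num_iters=500),
#   ]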
```
#### File: callbacks/metrics/accuracy.py
```python
from typing import List
from catalyst.dl.core import MultiMetricCallback
from catalyst.dl.utils import criterion
class AccuracyCallback(MultiMetricCallback):
"""
Accuracy metric callback.
"""
def __init__(
self,
input_key: str = "targets",
output_key: str = "logits",
prefix: str = "accuracy",
accuracy_args: List[int] = None,
):
"""
Args:
input_key: input key to use for accuracy calculation;
specifies our `y_true`.
output_key: output key to use for accuracy calculation;
specifies our `y_pred`.
accuracy_args: specifies which accuracy@K to log.
[1] - accuracy
[1, 3] - accuracy at 1 and 3
[1, 3, 5] - accuracy at 1, 3 and 5
"""
super().__init__(
prefix=prefix,
metric_fn=criterion.accuracy,
list_args=accuracy_args or [1],
input_key=input_key,
output_key=output_key
)
class MapKCallback(MultiMetricCallback):
"""
mAP@k metric callback.
"""
def __init__(
self,
input_key: str = "targets",
output_key: str = "logits",
prefix: str = "map",
map_args: List[int] = None,
):
"""
Args:
input_key: input key to use for
calculation mean average accuracy at k;
specifies our `y_true`.
output_key: output key to use for
calculation mean average accuracy at k;
specifies our `y_pred`.
map_args: specifies which map@K to log.
[1] - map@1
[1, 3] - map@1 and map@3
[1, 3, 5] - map@1, map@3 and map@5
"""
super().__init__(
prefix=prefix,
metric_fn=criterion.mean_average_accuracy,
list_args=map_args or [1],
input_key=input_key,
output_key=output_key
)
__all__ = ["AccuracyCallback", "MapKCallback"]
```
#### File: dl/experiment/supervised.py
```python
from typing import List
from catalyst.dl.core import Callback
from .base import BaseExperiment
from catalyst.dl.callbacks import \
CriterionCallback, OptimizerCallback, SchedulerCallback, \
CheckpointCallback
class SupervisedExperiment(BaseExperiment):
def get_callbacks(self, stage: str) -> "List[Callback]":
callbacks = self._callbacks
if not stage.startswith("infer"):
default_callbacks = [
(self._criterion, CriterionCallback),
(self._optimizer, OptimizerCallback),
(self._scheduler, SchedulerCallback),
("_default_saver", CheckpointCallback),
]
for key, value in default_callbacks:
is_already_present = any(
isinstance(x, value) for x in callbacks)
if key is not None and not is_already_present:
callbacks.append(value())
return callbacks
__all__ = ["SupervisedExperiment"]
```
#### File: dl/scripts/trace.py
```python
import argparse
from argparse import ArgumentParser
from pathlib import Path
from typing import Dict
import safitty
import torch
from catalyst.dl.utils.scripts import import_experiment_and_runner
from catalyst.dl.core import Experiment
from catalyst import utils
from catalyst.dl.utils.trace import trace_model
def trace_model_from_checkpoint(logdir, method_name):
config_path = logdir / "configs/_config.json"
checkpoint_path = logdir / "checkpoints/best.pth"
print("Load config")
config: Dict[str, dict] = safitty.load(config_path)
# Get expdir name
config_expdir = Path(config["args"]["expdir"])
# We will use copy of expdir from logs for reproducibility
expdir_from_logs = Path(logdir) / "code" / config_expdir.name
print("Import experiment and runner from logdir")
ExperimentType, RunnerType = \
import_experiment_and_runner(expdir_from_logs)
experiment: Experiment = ExperimentType(config)
print("Load model state from checkpoints/best.pth")
model = experiment.get_model(next(iter(experiment.stages)))
checkpoint = utils.load_checkpoint(checkpoint_path)
utils.unpack_checkpoint(checkpoint, model=model)
print("Tracing")
traced = trace_model(model, experiment, RunnerType, method_name)
print("Done")
return traced
def build_args(parser: ArgumentParser):
parser.add_argument("logdir", type=Path)
parser.add_argument(
"--method", "-m",
default="forward",
help="Model method to trace")
return parser
def parse_args():
parser = argparse.ArgumentParser()
build_args(parser)
args = parser.parse_args()
return args
def main(args, _):
logdir = args.logdir
method_name = args.method
traced = trace_model_from_checkpoint(logdir, method_name)
torch.jit.save(traced, str(logdir / "traced.pth"))
if __name__ == "__main__":
main(parse_args(), None)
```
#### File: rl/db/mongo.py
```python
import datetime
import pymongo
from catalyst.rl import utils
from catalyst.rl.core import DBSpec
class MongoDB(DBSpec):
def __init__(self, port=12000, prefix=None, sync_epoch=False):
self._server = pymongo.MongoClient(host="127.0.0.1", port=port)
self._prefix = "" if prefix is None else prefix
self._shared_db = self._server["shared"]
self._agent_db = self._server[f"agent_{self._prefix}"]
self._trajectory_collection = self._shared_db["trajectories"]
self._weights_collection = self._agent_db["weights"]
self._flag_collection = self._agent_db["flag"]
self._last_datetime = datetime.datetime.min
self._epoch = 0
self._sync_epoch = sync_epoch
@property
def num_trajectories(self) -> int:
num_trajectories = self._trajectory_collection.count() - 1
return num_trajectories
def set_sample_flag(self, sample: bool):
self._flag_collection.replace_one(
{"prefix": "sample_flag"},
{
"sample_flag": sample,
"prefix": "sample_flag"
},
upsert=True
)
def get_sample_flag(self) -> bool:
flag_obj = self._flag_collection.find_one(
{"prefix": {"$eq": "sample_flag"}}
)
flag = int(flag_obj.get("sample_flag") or -1) == int(1)
return flag
def push_trajectory(self, trajectory):
trajectory = utils.structed2dict_trajectory(trajectory)
trajectory = utils.pack(trajectory)
self._trajectory_collection.insert_one({
"trajectory": trajectory,
"date": datetime.datetime.utcnow(),
"epoch": self._epoch
})
def get_trajectory(self, index=None):
assert index is None
trajectory_obj = self._trajectory_collection.find_one(
{"date": {"$gt": self._last_datetime}}
)
if trajectory_obj is not None:
self._last_datetime = trajectory_obj["date"]
trajectory, trajectory_epoch = \
utils.unpack(
trajectory_obj["trajectory"]), trajectory_obj["epoch"]
if self._sync_epoch and self._epoch != trajectory_epoch:
trajectory = None
else:
trajectory = utils.dict2structed_trajectory(trajectory)
else:
trajectory = None
return trajectory
def clean_trajectories(self):
self._trajectory_collection.drop()
def dump_weights(self, weights, prefix, epoch):
self._epoch = epoch
weights = utils.pack(weights)
self._weights_collection.replace_one(
{"prefix": prefix},
{
"weights": weights,
"prefix": prefix,
"epoch": self._epoch
},
upsert=True
)
def load_weights(self, prefix):
weights_obj = self._weights_collection.find_one({"prefix": prefix})
weights = weights_obj.get("weights")
if weights is None:
return None
self._epoch = weights_obj["epoch"]
weights = utils.unpack(weights)
return weights
def clean_weights(self, prefix):
self._weights_collection.delete_one({"prefix": prefix})
__all__ = ["MongoDB"]
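# --- Usage sketch (illustrative only; assumes a MongoDB server on 127.0.0.1:12000
# and that utils.pack can serialize the weights object) ---
#   db = MongoDB(port=12000, prefix="ddpg")
#   db.dump_weights({"w": [0.1, 0.2]}, prefix="actor", epoch=0)
#   weights = db.load_weights("actor")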
```
#### File: rl/db/redis.py
```python
from redis import StrictRedis
from catalyst.rl import utils
from catalyst.rl.core import DBSpec
class RedisDB(DBSpec):
def __init__(self, port=12000, prefix=None, sync_epoch=False):
self._server = StrictRedis(port=port)
self._prefix = "" if prefix is None else prefix
self._index = 0
self._epoch = 0
self._sync_epoch = sync_epoch
@property
def num_trajectories(self) -> int:
num_trajectories = self._server.llen("trajectories") - 1
return num_trajectories
def set_sample_flag(self, sample: bool):
self._server.set("sample_flag", int(sample))
def get_sample_flag(self) -> bool:
flag = int(self._server.get("sample_flag") or -1) == int(1)
return flag
def push_trajectory(self, trajectory):
trajectory = utils.structed2dict_trajectory(trajectory)
trajectory = {
"trajectory": trajectory,
"epoch": self._epoch
}
trajectory = utils.pack(trajectory)
self._server.rpush("trajectories", trajectory)
def get_trajectory(self, index=None):
index = index if index is not None else self._index
trajectory = self._server.lindex("trajectories", index)
if trajectory is not None:
self._index = index + 1
trajectory = utils.unpack(trajectory)
trajectory, trajectory_epoch = \
trajectory["trajectory"], trajectory["epoch"]
if self._sync_epoch and self._epoch != trajectory_epoch:
trajectory = None
else:
trajectory = utils.dict2structed_trajectory(trajectory)
return trajectory
def clean_trajectories(self):
self._server.delete("trajectories")
self._index = 0
def dump_weights(self, weights, prefix, epoch):
self._epoch = epoch
weights = {
"weights": weights,
"epoch": self._epoch
}
weights = utils.pack(weights)
self._server.set(f"{self._prefix}_{prefix}_weights", weights)
def load_weights(self, prefix):
weights = self._server.get(f"{self._prefix}_{prefix}_weights")
if weights is None:
return None
weights = utils.unpack(weights)
self._epoch = weights.get("epoch")
return weights["weights"]
def clean_weights(self, prefix):
self._server.delete(f"{self._prefix}_{prefix}_weights")
__all__ = ["RedisDB"]
```
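RedisDB above keeps trajectories in a plain Redis list ("trajectories") and reads them back by index; the sketch below reproduces that rpush/lindex queue pattern with a bare StrictRedis client. The port, the key name and the msgpack payload are assumptions for illustration — catalyst's own utils.pack may serialize differently.
```python
# Minimal sketch of the list-based trajectory queue used by RedisDB above.
# Assumes a Redis server on port 12000; the msgpack payload is a stand-in
# for whatever catalyst's utils.pack actually produces.
import msgpack
from redis import StrictRedis

server = StrictRedis(port=12000)

# producer side: append one packed trajectory entry
entry = {"trajectory": {"reward": [1.0, 0.5]}, "epoch": 0}
server.rpush("trajectories", msgpack.packb(entry, use_bin_type=True))

# consumer side: read entries by index, as RedisDB.get_trajectory does
raw = server.lindex("trajectories", 0)
if raw is not None:
    decoded = msgpack.unpackb(raw, raw=False)
    print(decoded["epoch"], decoded["trajectory"])
```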
#### File: rl/scripts/dump_redis.py
```python
import argparse
import pickle
from tqdm import tqdm
from redis import StrictRedis
def build_args(parser):
parser.add_argument(
"--port",
type=int,
default=12000)
parser.add_argument(
"--out-pkl",
type=str,
required=True)
parser.add_argument(
"--chunk-size",
type=int,
default=10000)
parser.add_argument(
"--start-from",
type=int,
default=0)
return parser
def parse_args():
parser = argparse.ArgumentParser()
build_args(parser)
args = parser.parse_args()
return args
def main(args, _=None):
redis = StrictRedis(port=args.port)
redis_len = redis.llen("trajectories") - 1
episodes = []
for i in tqdm(range(args.start_from, redis_len)):
episode = redis.lindex("trajectories", i)
episodes.append(episode)
if i > args.start_from \
and (i - args.start_from) % args.chunk_size == 0:
with open(args.out_pkl.format(suffix=i), "wb") as fout:
pickle.dump(episodes, fout)
episodes = []
with open(args.out_pkl.format(suffix=i), "wb") as fout:
pickle.dump(episodes, fout)
if __name__ == "__main__":
args = parse_args()
main(args)
```
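dump_redis.py writes the raw list entries out in chunks, formatting --out-pkl with a suffix; the hedged snippet below shows one way to read those chunks back, assuming the script was run with --out-pkl "trajectories_{suffix}.pkl".
```python
# Hedged example: loading chunks written by dump_redis.py, assuming it was
# invoked with --out-pkl "trajectories_{suffix}.pkl".
import pickle
from glob import glob

episodes = []
for path in sorted(glob("trajectories_*.pkl")):
    with open(path, "rb") as fin:
        episodes.extend(pickle.load(fin))
print("loaded", len(episodes), "raw (still packed) trajectory entries")
```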
#### File: rl/scripts/run_trainer.py
```python
import os
import argparse
from catalyst.utils.scripts import import_module
from catalyst.utils import parse_args_uargs, dump_config, set_global_seed
from catalyst.rl.registry import OFFPOLICY_ALGORITHMS, ONPOLICY_ALGORITHMS, \
ENVIRONMENTS, DATABASES
from catalyst.rl.offpolicy.trainer import Trainer as OffpolicyTrainer
from catalyst.rl.onpolicy.trainer import Trainer as OnpolicyTrainer
from catalyst.rl.scripts.misc import OFFPOLICY_ALGORITHMS_NAMES, \
ONPOLICY_ALGORITHMS_NAMES
def build_args(parser):
parser.add_argument(
"--config",
"--configs",
"-C",
nargs="+",
help="path to config/configs",
metavar="CONFIG_PATH",
dest="configs",
required=True
)
parser.add_argument("--expdir", type=str, default=None)
parser.add_argument("--logdir", type=str, default=None)
# parser.add_argument("--resume", type=str, default=None)
parser.add_argument("--seed", type=int, default=42)
return parser
def parse_args():
parser = argparse.ArgumentParser()
build_args(parser)
args, unknown_args = parser.parse_known_args()
return args, unknown_args
def main(args, unknown_args):
args, config = parse_args_uargs(args, unknown_args)
set_global_seed(args.seed)
if args.logdir is not None:
os.makedirs(args.logdir, exist_ok=True)
dump_config(config, args.logdir, args.configs)
if args.expdir is not None:
module = import_module(expdir=args.expdir) # noqa: F841
env = ENVIRONMENTS.get_from_params(**config["environment"])
algorithm_name = config["algorithm"].pop("algorithm")
if algorithm_name in OFFPOLICY_ALGORITHMS_NAMES:
ALGORITHMS = OFFPOLICY_ALGORITHMS
trainer_fn = OffpolicyTrainer
sync_epoch = False
weights_sync_mode = "critic" if env.discrete_actions else "actor"
elif algorithm_name in ONPOLICY_ALGORITHMS_NAMES:
ALGORITHMS = ONPOLICY_ALGORITHMS
trainer_fn = OnpolicyTrainer
sync_epoch = True
weights_sync_mode = "actor"
else:
# @TODO: add registry for algorithms, trainers, samplers
raise NotImplementedError()
db_server = DATABASES.get_from_params(
**config.get("db", {}), sync_epoch=sync_epoch
)
algorithm_fn = ALGORITHMS.get(algorithm_name)
algorithm = algorithm_fn.prepare_for_trainer(env_spec=env, config=config)
# if args.resume is not None:
# algorithm.load_checkpoint(filepath=args.resume)
trainer = trainer_fn(
algorithm=algorithm,
env_spec=env,
db_server=db_server,
logdir=args.logdir,
weights_sync_mode=weights_sync_mode,
**config["trainer"],
)
trainer.run()
if __name__ == "__main__":
args, unknown_args = parse_args()
main(args, unknown_args)
```
#### File: atari/src/actor.py
```python
from typing import Dict
from gym.spaces import Box, Discrete
import torch
import torch.nn as nn
from catalyst.contrib.modules import Flatten
from catalyst.rl.agent.head import PolicyHead # , StateNet
from catalyst.rl.core import ActorSpec, EnvironmentSpec
from catalyst.utils.initialization import create_optimal_inner_init
class ConvActor(ActorSpec):
def __init__(
self,
# state_net: StateNet,
head_net: PolicyHead,
):
super().__init__()
# self.state_net = state_net
self.observation_net = nn.Sequential(
nn.Conv2d(4, 64, kernel_size=4, stride=4),
nn.Dropout2d(p=0.1),
nn.LeakyReLU(),
nn.Conv2d(64, 64, kernel_size=4, stride=4, groups=4),
nn.Dropout2d(p=0.1),
nn.LeakyReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1, groups=4),
# Flatten()
)
self.observation_net.apply(create_optimal_inner_init(nn.LeakyReLU))
self.aggregation_net = nn.Sequential(
Flatten(),
nn.Linear(576, 512),
nn.LayerNorm(512),
nn.Dropout(p=0.1),
nn.LeakyReLU(),
)
self.aggregation_net.apply(create_optimal_inner_init(nn.LeakyReLU))
self.head_net = head_net
@property
def policy_type(self) -> str:
return self.head_net.policy_type
def forward(self, state: torch.Tensor, logprob=False, deterministic=False):
x = state
if len(x.shape) < 3:
x = x.unsqueeze(1)
x = x / 255.
batch_size, history_len, *feature_size = x.shape
x = x.view(-1, history_len, *feature_size).squeeze_(2)
# x = x.permute([0, 3, 1, 2])
x = self.observation_net(x)
# x = x.view(batch_size, history_len, -1)
x = self.aggregation_net(x)
x = self.head_net(x, logprob, deterministic)
return x
@classmethod
def get_from_params(
cls,
# state_net_params: Dict,
policy_head_params: Dict,
env_spec: EnvironmentSpec,
):
# @TODO: any better solution?
action_space = env_spec.action_space
if isinstance(action_space, Box):
policy_head_params["out_features"] = action_space.shape[0]
elif isinstance(action_space, Discrete):
policy_head_params["out_features"] = action_space.n
else:
raise NotImplementedError()
# state_net = StateNet.get_from_params(**state_net_params)
head_net = PolicyHead(**policy_head_params)
net = cls(
# state_net=state_net,
head_net=head_net
)
return net
``` |
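ConvActor.forward above rescales the stacked uint8 frames and feeds the history axis into the conv stack as channels; the standalone torch snippet below traces the same preprocessing on a dummy batch. The 84x84 Atari frame size is an assumption, and this only mirrors the shape handling, not the catalyst modules.
```python
# Shape walkthrough mirroring ConvActor.forward's preprocessing,
# assuming 84x84 Atari frames stacked with history_len=4.
import torch

state = torch.randint(0, 256, (8, 4, 84, 84), dtype=torch.uint8)  # (batch, history, H, W)
x = state.float() / 255.                                          # scale pixels to [0, 1]
batch_size, history_len, *feature_size = x.shape
x = x.view(-1, history_len, *feature_size).squeeze_(2)            # no-op squeeze for this shape
print(x.shape)  # torch.Size([8, 4, 84, 84]) -> input to Conv2d(4, 64, kernel_size=4, stride=4)
```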
{
"source": "1633743096/-",
"score": 2
} |
#### File: loveword/middleware/huya_parse.py
```python
import requests
import json
import re
"""
Target APP: Huya Live (虎牙直播)
Target URL: the video share link from the APP / web client
Crawling approach:
1. Obtain the video URL from the in-app share feature.
2. Requesting that URL does not expose the video details; the real video data endpoint is:
https://liveapi.huya.com/moment/getMomentContent?videoId=XXXXXXX
"""
class HuYa(object):
def __init__(self, url):
self.url = url
self.session = requests.Session()
def get_video(self):
try:
# Parse the URL and extract the video id
pattern = re.compile("/(\d+).html", re.S)
vid = re.findall(pattern, self.url)[0]
headers = {
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/85.0.4183.102 Safari/537.36",
"referer": "https://v.huya.com/"
}
params = {
"videoId": str(vid)
}
api = "https://liveapi.huya.com/moment/getMomentContent"
result = self.session.get(url=api, params=params, headers=headers, timeout=10)
if result.status_code == 200:
try:
res = result.json()
url = res["data"]["moment"]["videoInfo"]["definitions"][0]["url"]
cover = res["data"]["moment"]["videoInfo"]["videoCover"]
title = res["data"]["moment"]["videoInfo"]["videoTitle"]
info = {
"summary": title,
"cover": cover,
"url": url
}
return json.dumps(info, ensure_ascii=False)
except Exception as e:
return json.dumps({"info": "暂无相关数据,请检查相关数据:" + str(e)}, ensure_ascii=False)
else:
return json.dumps({"info": "暂无相关数据,请检查相关数据:"}, ensure_ascii=False)
except Exception as e:
return json.dumps({"info": "暂无相关数据,请检查相关数据:" + str(e)}, ensure_ascii=False)
```
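Usage of the class above is a single call: build HuYa with a shared link and call get_video(), which returns a JSON string. The URL below is a made-up placeholder.
```python
# Hedged usage example; the video id in the URL is a placeholder.
if __name__ == "__main__":
    share_url = "https://v.huya.com/play/123456789.html"
    print(HuYa(share_url).get_video())
```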
#### File: loveword/middleware/meipai_parse.py
```python
import base64
import requests
import json
import re
import execjs
"""
# 方法一:
class MeiPai(object):
def __init__(self, url):
self.url = url
self.session = requests.Session()
with open("static/loveword/js/meipai_encrypt.js", "r", encoding="utf-8") as f:
resource = f.read()
self.ctx = execjs.compile(resource)
def get_video(self):
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.125 Safari/537.36",
"Upgrade-Insecure-Requests": "1",
"Host": "www.meipai.com",
"Referer": "http://www.meipai.com/"
}
pattern = re.compile('data-video="(.*?)"', re.S)
pattern2 = re.compile('<meta name="description" content="(.*?)"', re.S)
try:
response = self.session.get(url=self.url, headers=headers, timeout=10)
if response.status_code == 200:
video_bs64 = re.findall(pattern, response.text)[0]
title = re.findall(pattern2, response.text)[0]
video_url = self.ctx.call("getmp4", video_bs64)
info = {
"title": title,
"video": "https:"+video_url
}
return json.dumps(info, ensure_ascii=False)
except Exception as e:
return json.dumps({"info": "暂无相关数据,请检查相关数据:" + str(e)}, ensure_ascii=False)
"""
# Method 2
class MeiPai(object):
def __init__(self, url):
self.url = url
self.session = requests.Session()
def getHex(self, a):
hex_1 = a[:4][::-1]
str_1 = a[4:]
return str_1, hex_1
def getDec(self, a):
b = str(int(a, 16))
c = list(b[:2])
d = list(b[2:])
return c, d
def substr(self, a, b):
k = int(b[0])
c = a[:k]
d = a[k:k + int(b[1])]
temp = a[int(b[0]):].replace(d, "")
result = c + temp
return result
def getPos(self, a, b):
b[0] = len(a) - int(b[0]) - int(b[1])
return b
def get_video(self):
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.125 Safari/537.36",
"Upgrade-Insecure-Requests": "1",
"Host": "www.meipai.com",
"Referer": "http://www.meipai.com/"
}
pattern = re.compile('data-video="(.*?)"', re.S)
pattern2 = re.compile('<meta name="description" content="(.*?)"', re.S)
try:
response = self.session.get(url=self.url, headers=headers, timeout=10)
if response.status_code == 200:
video_bs64 = re.findall(pattern, response.text)[0]
title = re.findall(pattern2, response.text)[0]
str1, hex1 = self.getHex(video_bs64)
pre, tail = self.getDec(hex1)
d = self.substr(str1, pre)
kk = self.substr(d, self.getPos(d, tail))
a = base64.b64decode(kk)
info = {
"title": title,
"video": "https:"+a.decode(encoding='utf-8')
}
return json.dumps(info, ensure_ascii=False)
except Exception as e:
return json.dumps({"info": "暂无相关数据,请检查相关数据:" + str(e)}, ensure_ascii=False)
```
#### File: loveword/middleware/pipifunny.py
```python
import re
import json
import requests
"""
Target APP: PiPi Funny (皮皮搞笑)
Target URL: the video share link from the APP
Crawling approach:
1. Obtain the video URL from the in-app share feature.
2. The page source does not contain the video details; the real data is fetched with a POST request to
http://share.ippzone.com/ppapi/share/fetch_content
"""
class PiPiFunny(object):
def __init__(self, url):
self.url = url
self.session = requests.Session()
def parse(self):
try:
# Parse the URL and extract the video id
pattern = re.compile(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+',
re.S)
deal_url = re.findall(pattern, self.url)[0]
# From the cleaned URL, extract the ids (mid / pid)
mid = re.findall("mid=(\d+)", deal_url, re.S)[0]
pid = re.findall("pid=(\d+)", deal_url, re.S)[0]
headers = {
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/85.0.4183.102 Safari/537.36",
"Origin": "http://share.ippzone.com",
"Referer": deal_url,
"Content-Type": "text/plain;charset=UTF-8"
}
data = {
"mid": int(mid),
"pid": int(pid),
"type": "post"
}
result = self.session.post(url="https://h5.ippzone.com/ppapi/share/fetch_content", data=json.dumps(data),
headers=headers, timeout=10)
if result.status_code == 200:
try:
# Get the real video address
doc = result.json()
url = doc['data']['post']['videos'][str(doc['data']['post']['imgs'][0]['id'])]['url']
description = doc['data']['post']['content']
info = {
"title": description,
"url": url
}
return json.dumps(info, ensure_ascii=False)
except Exception as e:
return json.dumps({"info": "暂无相关数据,请检查相关数据:" + str(e)}, ensure_ascii=False)
else:
return json.dumps({"info": "暂无相关数据,请检查相关数据:"}, ensure_ascii=False)
except Exception as e:
return json.dumps({"info": "暂无相关数据,请检查相关数据:" + str(e)}, ensure_ascii=False)
```
#### File: loveword/middleware/xhs_parse.py
```python
import re
import json
import html
import requests
"""
Target APP: Xiaohongshu (小红书)
Target URL: the video share link from the APP
Crawling approach:
1. Obtain the share URL from the in-app share feature, e.g. http://xhslink.com/xvxMJ
2. The URL redirects to the real page address, which simplifies to https://www.xiaohongshu.com/discovery/item/5f77dbcf000000000100491c...
"""
class XiaoHongShu(object):
def __init__(self, url):
self.url = url
self.session = requests.Session()
def get_video(self):
headers = {
"Host": "xhslink.com",
"Upgrade-Insecure-Requests": "1",
"Pragma": "no-cache",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/79.0.3945.88 Safari/537.36"
}
source_headers = {
"cookie": "xhsTrackerId=6e8cc536-0d57-4226-c27c-831a6e51c4cc; xhsuid=6KOIxzWIclOk5WsI; "
"Hm_lvt_d0ae755ac51e3c5ff9b1596b0c09c826=1606207238; "
"xhsTracker=url=noteDetail&xhsshare=CopyLink; extra_exp_ids=gif_exp1,ques_exp1; "
"timestamp2=20201229ef45ffd4004e2dcc00c97dec; "
"timestamp2.sig=a95ob3HUIi0pV4z3n8kQHuJ2sk3HjHT-XdYVwbgEHbs; xhs_spses.5dde=*; "
"xhs_spid.5dde=05e7787428e31fd4.1593488621.11.1609225136.1607129499.6465ec57-2e5f-4f43-aaf1"
"-161a7fd7a7e6",
"Upgrade-Insecure-Requests": "1",
"Pragma": "no-cache",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/79.0.3945.88 Safari/537.36"
}
try:
# Process the share text
# and extract the video link
pattern = re.compile(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+',
re.S)
deal_url = re.findall(pattern, self.url)[0]
response = self.session.get(url=deal_url, headers=headers, allow_redirects=False, timeout=10)
# Get the simplified URL after the redirect
base_url = response.headers.get("Location")
result = self.session.get(url=base_url, headers=source_headers, timeout=10)
pattern_video = re.compile('<video .*? src="(.*?)".*?></video>', re.S)
pattern_desc = re.compile('"description": "(.*?)",', re.S)
if result.status_code == 200:
try:
res = result.text
url = re.findall(pattern_video, res)[0]
description = re.findall(pattern_desc, res)[0]
info = {
"description": description,
"url": html.unescape(url)
}
return json.dumps(info, ensure_ascii=False)
except Exception as e:
return json.dumps({"info": "暂无相关数据,请检查相关数据:" + str(e)}, ensure_ascii=False)
else:
return json.dumps({"info": "暂无相关数据,请检查相关数据:"}, ensure_ascii=False)
except Exception as e:
return json.dumps({"info": "暂无相关数据,请检查相关数据:" + str(e)}, ensure_ascii=False)
``` |
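All four parsers in this repository share the same contract: the constructor takes the APP share link and one method returns a JSON string with the title/cover and the direct video URL (PiPiFunny calls it parse(), the others get_video()). Below is a hedged sketch of driving them uniformly; the module names are inferred from the file headers above and the share links are placeholders, except the xhslink example taken from the docstring.
```python
# Hedged sketch: calling the parsers above through one loop.
# Module names follow the file paths above; most links are placeholders.
from huya_parse import HuYa
from meipai_parse import MeiPai
from pipifunny import PiPiFunny
from xhs_parse import XiaoHongShu

parsers = [
    (HuYa("https://v.huya.com/play/123456789.html"), "get_video"),
    (MeiPai("http://www.meipai.com/media/987654321"), "get_video"),
    (PiPiFunny("http://share.ippzone.com/pp/post?mid=100&pid=200"), "parse"),
    (XiaoHongShu("http://xhslink.com/xvxMJ"), "get_video"),
]
for parser, method in parsers:
    print(getattr(parser, method)())
```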
{
"source": "16342613/ARC",
"score": 3
} |
#### File: ARC/src/manual_solve.py
```python
import os, sys
import json
import numpy as np
import re
import copy
"""
Name: <NAME>
ID: 16342613
Github:
I have included manual implementations of 5 different problems. The copy library was used to create a deep copy of the
input grid.
"""
### YOUR CODE HERE: write at least three functions which solve
### specific tasks by transforming the input x and returning the
### result. Name them according to the task ID as in the three
### examples below. Delete the three examples. The tasks you choose
### must be in the data/training directory, not data/evaluation.
# This is an exception that will allow us to break from nested loops
class BreakLoop(Exception): pass
def solve_0a938d79(inputGrid):
"""
- Transformation Process:
The input matrix has 2 coloured cells on an edge/s of the matrix. In the output matrix, these cells are extended
up to the opposite edge, forming a procedural pattern up to the end of the matrix.
- Analysis:
All training and testing grids were solved correctly.
"""
# Note: This was the first one that I did, and I can admit that this can be programmed more efficiently. I wanted
# to get back to this one and fix the inefficiency but I ran out of time. There is too much repetition in this
# function and that could be removed easily!
# The grids are laid out using (y,x,colour) as the coordinates.
seeds = []
# Create a deep copy of the input grid
output = copy.deepcopy(inputGrid)
# Store the size of the input grid
gridXSize = len(inputGrid[0])
gridYSize = len(inputGrid)
coloursOnYAxis = True
# Loop through the edges of the grid and find the 'seeds' which are the coloured cells
try:
for xIndex in range(gridXSize):
# Represents the opposite edges on the x axis
possibleColourIndexes = [0, gridYSize - 1]
# Find the seeds on each edge (if they exist on both edges)
for yIndex in range(len(possibleColourIndexes)):
colourValue = inputGrid[yIndex][xIndex]
# If we find a seed
if colourValue != 0:
seeds.append([0, xIndex, colourValue])
# Find the remaining seed
for yIndex2 in possibleColourIndexes:
for xIndex2 in range(gridXSize):
colourValue = inputGrid[yIndex2][xIndex2]
if colourValue != 0 and xIndex2 != xIndex:
seeds.append([gridYSize - 1, xIndex2, colourValue])
# Once we find both seeds, specify which axis the coloured bars will be (perpendicular
# to the x axis here), and break from the nested loop
coloursOnYAxis = True
raise BreakLoop
for yIndex in range(gridYSize):
# Represents the opposite edges on the y axis
possibleColourIndexes = [0, gridXSize - 1]
# Find the seeds on each edge (if they exist on both edges)
colourValue = inputGrid[yIndex][0]
for xIndex in possibleColourIndexes:
# If we find a seed
if colourValue != 0:
seeds.append([yIndex, xIndex, colourValue])
# Find the remaining seed
for xIndex2 in possibleColourIndexes:
for yIndex2 in range(gridYSize):
colourValue = inputGrid[yIndex2][xIndex2]
if colourValue != 0 and yIndex2 != yIndex:
seeds.append([yIndex2, gridYSize - 1, colourValue])
# Once we find both seeds, specify which axis the coloured bars will be (perpendicular
# to the y axis here), and break from the nested loop
coloursOnYAxis = False
raise BreakLoop
except BreakLoop:
pass
# Find out how far to extend the lines
gridConstraint = gridXSize
if coloursOnYAxis is False:
seeds[0] = [seeds[0][1], seeds[0][0], seeds[0][2]]
seeds[1] = [seeds[1][1], seeds[1][0], seeds[1][2]]
gridConstraint = gridYSize
# The gap between the lines
barGap = seeds[0][1] - seeds[1][1]
# The details of the current line we are painting
currentPaintAxisValueDetails = [0, 0, 0]
# Find out which line to draw first
if barGap < 0:
currentPaintAxisValueDetails = [seeds[0][1], seeds[0][2], seeds[1][2]]
elif barGap > 0:
currentPaintAxisValueDetails = [seeds[1][1], seeds[1][2], seeds[0][2]]
# A count of how many lines we have drawn so far
count = 0
while currentPaintAxisValueDetails[0] < gridConstraint:
if coloursOnYAxis is True:
# Draw the line for the first colour
if count % 2 == 0:
output[:, currentPaintAxisValueDetails[0]] = currentPaintAxisValueDetails[1]
# Draw the line for the second colour
else:
output[:, currentPaintAxisValueDetails[0]] = currentPaintAxisValueDetails[2]
else:
# Draw the line for the first colour
if count % 2 == 0:
output[currentPaintAxisValueDetails[0], :] = currentPaintAxisValueDetails[1]
else:
# Draw the line for the second colour
output[currentPaintAxisValueDetails[0], :] = currentPaintAxisValueDetails[2]
# Set up the 'painter' to draw the next line, taking the gap between the lines into account
currentPaintAxisValueDetails[0] += abs(barGap)
count += 1
return output
def solve_5c0a986e(inputGrid):
"""
- Transformation Process:
The input grid has a 2x2 red square and a 2x2 blue square. A bottom-right facing diagonal line trails off from
the red square, while a top-left facing diagonal line trails off from the blue square. Adding these trails
gives us the output matrix.
- Analysis:
All training and testing grids were solved correctly.
"""
# Create a deep copy of the input grid
output = copy.deepcopy(inputGrid)
# Store the size of the input grid
gridXSize = len(inputGrid[0])
gridYSize = len(inputGrid)
# The important corner of the blue and red squares, and the colours of the diagonal lines which emanate from them
blueSeed = []
redSeed = []
colours = [1, 2]
try:
# Loop through the grid
for yIndex in range(gridYSize):
for xIndex in range(gridXSize):
# Finding the top left blue block in the 2x2 blue square
if inputGrid[yIndex][xIndex] == 1 and blueSeed == []:
blueSeed = [yIndex, xIndex]
# Finding the bottom right red block in the 2x2 red square
if inputGrid[yIndex][xIndex] == 2 and redSeed == []:
redSeed = [yIndex + 1, xIndex + 1]
# If we have found both the red and blue squares, we can stop our search
if redSeed != [] and blueSeed != []:
raise BreakLoop
except BreakLoop:
pass
nextIndexes = [[point - 1 for point in blueSeed], [point + 1 for point in redSeed]]
# Repeat this for both squares
for i in range(2):
# Repeat until the diagonal line hits an edge
while (nextIndexes[i][0] < gridYSize) and (nextIndexes[i][0] >= 0) and \
(nextIndexes[i][1] < gridXSize) and (nextIndexes[i][1] >= 0):
output[nextIndexes[i][0]][nextIndexes[i][1]] = colours[i]
if i == 0:
nextIndexes[0] = [point - 1 for point in nextIndexes[0]]
if i == 1:
nextIndexes[1] = [point + 1 for point in nextIndexes[1]]
return output
def solve_363442ee(inputGrid):
"""
- Transformation Process:
The input grid is separated by a grey line. The top 3x3 square to the left of the grey line has a specific
pattern of colours, and we store this pattern. To the right of the grey line, there are a number of blue cells.
Each blue cell represents a centre point on which the pattern should be applied, therefore we paste the pattern
onto the blue cells such that a blue cell is in the middle of the 3x3 pattern. This gives us the output matrix.
- Analysis:
All training and testing grids were solved correctly.
"""
# Create a deep copy of the input grid
output = copy.deepcopy(inputGrid)
# Store the pattern and the input grid size
pattern = inputGrid[0:3, 0:3]
gridXSize = len(inputGrid[0])
gridYSize = len(inputGrid)
# Locate the grey line which separates the input grid
seperatorXIndex = 0
for xIndex in range(gridXSize):
if inputGrid[0][xIndex] == 5:
seperatorXIndex = xIndex
break
# Only keep the right hand side of the grey line
gridXSize = gridXSize - seperatorXIndex
# Paste the pattern on top of the blue cells, centred on the blue cells
for yIndex in range(gridYSize):
for xIndex in range(gridXSize):
if inputGrid[yIndex][xIndex + seperatorXIndex] == 1:
output[(yIndex - 1):(yIndex + 2), (xIndex + seperatorXIndex - 1):(xIndex + seperatorXIndex + 2)] = pattern
return output
def solve_868de0fa(inputGrid):
"""
- Transformation Process:
In the input grid, there are a set of 'hollow' squares. In the output, these same 'hollow' squares are filled
with either a red or orange colour. If the number of cells in one side of the square is even, then the square
is filled with a red colour. If the number of cells in one side of the square is odd, then the square is filled
with an orange colour.
- Analysis:
All training and testing grids were solved correctly.
"""
# Create a deep copy of the input grid
output = copy.deepcopy(inputGrid)
# Store the size of the input grid
gridXSize = len(inputGrid[0])
gridYSize = len(inputGrid)
discoveredPerimeters = [] # This stores the locations of the cells (y,x) that we already have worked on
possibleColours = [2, 7] # The possible colour codes of the filled squares
# Loop through the grid
for yIndex in range(gridYSize):
for xIndex in range(gridXSize):
# If search through the grid by going from left to right, then going down a row, and repeating this process,
# the first not previously encountered blue cell is always going to be the top left corner of an
# undiscovered 'hollow' square
if (inputGrid[yIndex][xIndex] == 1) and ([yIndex, xIndex] not in discoveredPerimeters):
topLeft = [yIndex, xIndex]
currentCoordinate = [yIndex, xIndex]
# If we go diagonally downwards from this top left blue cell, the next blue cell that we encounter
# is the bottom right corner of the 'hollow' square. An index error is thrown if we search past the
# bounds of the grid, and this occurs when the 'hollow' square is up against an edge of the grid
try:
while inputGrid[currentCoordinate[0] + 1][currentCoordinate[1] + 1] != 1:
currentCoordinate[0] += 1
currentCoordinate[1] += 1
except IndexError:
pass
bottomRight = [currentCoordinate[0] + 1, currentCoordinate[1] + 1]
# Store the 'hollow' square itself, and the space within this square that is going to be filled
# with colour
zone = inputGrid[topLeft[0]:(bottomRight[0] + 1), topLeft[1]:(bottomRight[1] + 1)]
internalZone = inputGrid[(topLeft[0] + 1):bottomRight[0], (topLeft[1] + 1):bottomRight[1]]
# Fill the 'hollow' square with the correct colour
output[(topLeft[0] + 1):bottomRight[0], (topLeft[1] + 1):bottomRight[1]] = \
internalZone + possibleColours[len(zone) % 2]
# Add every cell in this filled square to the list of cells that we have worked on, so we can
# ignore these cells in future searches
for zoneYIndex in range(len(zone)):
for zoneXIndex in range(len(zone[0])):
discoveredPerimeters.append([zoneYIndex + topLeft[0], zoneXIndex + topLeft[1]])
return output
def solve_ac0a08a4(inputGrid):
"""
- Transformation Process:
We store every unique colour in the input, and create an output matrix that has sides x times the length
of the input matrix, where x is the number of unique colours observed. The coloured zones still remain in
the same relative positions in the output, but the number of coloured cells is scaled according to the
number of unique colours in order to accommodate the larger output matrix. So basically this just
upscales the grid, and the scaling factor is directly proportional to the number of unique colours.
- Analysis:
All training and testing grids were solved correctly.
"""
# Create a deep copy of the input grid
output = copy.deepcopy(inputGrid)
# Store the initial size of the grid
inputGridXSize = len(inputGrid[0])
inputGridYSize = len(inputGrid)
seeds = [] # The seed format is (y position, x position, colour)
# Loop through the input grid to find the unique colours
for yIndex in range(inputGridYSize):
for xIndex in range(inputGridXSize):
if inputGrid[yIndex][xIndex] != 0:
seeds.append([yIndex, xIndex, inputGrid[yIndex][xIndex]])
# Initialise the output grid. This output grid is numberOfUniqueColours times larger than the input grid
output = np.zeros([inputGridXSize * len(seeds), inputGridYSize * len(seeds)])
# Upscale the coloured cells
for seed in seeds:
output[(seed[0] * len(seeds)):(seed[0] * len(seeds) + len(seeds)),
(seed[1] * len(seeds)):(seed[1] * len(seeds) + len(seeds))] = seed[2]
return output
def main():
# Find all the functions defined in this file whose names are
# like solve_abcd1234(), and run them.
# regex to match solve_* functions and extract task IDs
p = r"solve_([a-f0-9]{8})"
tasks_solvers = []
# globals() gives a dict containing all global names (variables
# and functions), as name: value pairs.
for name in globals():
m = re.match(p, name)
if m:
# if the name fits the pattern eg solve_abcd1234
ID = m.group(1) # just the task ID
solve_fn = globals()[name] # the fn itself
tasks_solvers.append((ID, solve_fn))
for ID, solve_fn in tasks_solvers:
# for each task, read the data and call test()
directory = os.path.join("..", "data", "training")
json_filename = os.path.join(directory, ID + ".json")
data = read_ARC_JSON(json_filename)
test(ID, solve_fn, data)
def read_ARC_JSON(filepath):
"""Given a filepath, read in the ARC task data which is in JSON
format. Extract the train/test input/output pairs of
grids. Convert each grid to np.array and return train_input,
train_output, test_input, test_output."""
# Open the JSON file and load it
data = json.load(open(filepath))
# Extract the train/test input/output grids. Each grid will be a
# list of lists of ints. We convert to Numpy.
train_input = [np.array(data['train'][i]['input']) for i in range(len(data['train']))]
train_output = [np.array(data['train'][i]['output']) for i in range(len(data['train']))]
test_input = [np.array(data['test'][i]['input']) for i in range(len(data['test']))]
test_output = [np.array(data['test'][i]['output']) for i in range(len(data['test']))]
return (train_input, train_output, test_input, test_output)
def test(taskID, solve, data):
"""Given a task ID, call the given solve() function on every
example in the task data."""
print(taskID)
train_input, train_output, test_input, test_output = data
print("Training grids")
for x, y in zip(train_input, train_output):
yhat = solve(x)
show_result(x, y, yhat)
print("Test grids")
for x, y in zip(test_input, test_output):
yhat = solve(x)
show_result(x, y, yhat)
def show_result(x, y, yhat):
print("Input")
print(x)
print("Correct output")
print(y)
print("Our output")
print(yhat)
print("Correct?")
# if yhat has the right shape, then (y == yhat) is a bool array
# and we test whether it is True everywhere. if yhat has the wrong
# shape, then y == yhat is just a single bool.
print(np.all(y == yhat))
if __name__ == "__main__": main()
``` |
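As a quick sanity check of solve_ac0a08a4's upscaling rule, here is a worked miniature case; run it inside manual_solve.py (or with the function imported). The grid values are invented, and the expected output follows directly from the function's logic: two unique colours give a scale factor of two.
```python
# Worked example for solve_ac0a08a4: two coloured cells -> scale factor 2.
import numpy as np

x = np.array([[3, 0],
              [0, 6]])
print(solve_ac0a08a4(x))
# [[3. 3. 0. 0.]
#  [3. 3. 0. 0.]
#  [0. 0. 6. 6.]
#  [0. 0. 6. 6.]]
```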
{
"source": "163a-email/capriccio",
"score": 2
} |
#### File: 163a-email/capriccio/capriccio.py
```python
import pyglet
import threading
import time
import datetime
import sys, getopt
import random
from mingus.midi import fluidsynth
from os.path import isfile
class Alarm(object):
'''
A simple alarm clock written in Python.
Example:
import capriccio
t = datetime.datetime.now() + datetime.timedelta(seconds=10)
a = Alarm(t, "alarm.mp3")
# The above plays alarm.mp3, or you could use Alarm(t, 0, True) to let capriccio generate note sequences in real time given an instrument number.
# Make sure a is retained, or else when the main thread ends, this alarm will also be released.
a.destroy() # Stop the alarm clock.
'''
def __init__(self, timestamp, tune, is_random = False):
'''
Initialize an alarm clock.
Args:
timestamp (datetime): The timestamp on which the alarm will sound.
tune (str): The filename of the alarm audio file, or if is_random==True, the instrument number.
is_random (bool): if sound generator should be used instead of an audio file.
'''
self.should_stop = False  # set to True only by destroy(); starting False lets the fade-in loop in __play_sound__ run
now = datetime.datetime.now()
delay = (timestamp - now).total_seconds()
if delay > 0:
print "Scheduling an alarm clock at %s, which is in %.1f seconds." % (timestamp, delay)
else:
print "Scheduling an alarm clock at %s, which is %.1f seconds earlier than current time %s. This alarm is not set." % (timestamp, delay, now)
return
if is_random == False:
self.alarm_thread = threading.Timer(delay, self.__play_sound__, (tune,))
else:
self.alarm_thread = threading.Timer(delay, self.__generate_sound__, (int(tune),))
self.alarm_thread.start()
def destroy(self):
'''
Stop the alarm clock, whether or not it has actually occurred
'''
self.should_stop = True
if hasattr(self, 'p'):
self.p.delete()
if hasattr(self, 'sg'):
self.sg.stop()
self.alarm_thread.cancel()
self.alarm_thread.join()
def __generate_sound__(self, instrument):
'''
Play generated note sequences on a given instrument.
Args:
instrument (int): The instrument used by SoundGen. (Not identical to GM 1 Sound Set)
'''
self.sg = SoundGen(instrument)
self.sg.play()
def __play_sound__(self, tune):
'''
Play the audio file continuously, until the alarm is cancelled by calling destroy()
Args:
tune (str): The filename of the alarm audio file.
'''
music = pyglet.media.load(tune)
sg = pyglet.media.SourceGroup(music.audio_format, None)
sg.loop = True
sg.queue(music)
self.p = pyglet.media.Player()
self.p.queue(sg)
self.p.play()
v=float(0)
while v<1 and self.should_stop == False:
v+=0.05
self.p.volume = v;
time.sleep(0.2)
class SoundGen(object):
'''
Synthesize note sequences with mingus and fluidsynth.
'''
# L = low, H = high
kNoteNone = int(-1)
kNoteLB = int(59)
kNoteC = int(60)
kNoteCs = kNoteDf = int(61)
kNoteD = int(62)
kNoteDs = kNoteEf = int(63)
kNoteE = int(64)
kNoteF = int(65)
kNoteFs = kNoteGf = int(66)
kNoteG = int(67)
kNoteGs = kNoteAf = int(68)
kNoteA = int(69)
kNoteAs = kNoteBf = int(70)
kNoteB = int(71)
kNoteHC = int(72)
kNoteHCs = kNoteHDf = int(73)
kNoteHD = int(74)
kNoteHDs = kNoteHEf = int(75)
kNoteHE = int(76)
kSameNoteMultiplier = 3
OneBeatLength = 0.5 # 1 beat is _ seconds
def __init__(self, instrument = 0):
self.should_stop = False
if instrument == 1: # Pipe: Pan Flute
self.__set_instrument__(75, 0.5)
elif instrument == 2: # Brass: French Horn
self.__set_instrument__(60, 0.3)
elif instrument == 3: # Synth Lead: Lead 8 (bass + lead)
self.__set_instrument__(87, 0.2)
elif instrument == 4: # Synth Effects: FX 3 (crystal)
self.__set_instrument__(98, 0.3)
elif instrument == 5: # Percussive: Steel Drums
self.__set_instrument__(114, 0.2)
elif instrument == 6: # Sound Effects: Bird Tweet (Calm, but probably not a good wake-up alarm)
self.__set_instrument__(123, 0.5)
elif instrument == 7: # Sound Effects: Gunshot (ANNOYING~)
self.__set_instrument__(127, 0.2)
elif instrument == 8: # Ensemble: String Ensemble 2
self.__set_instrument__(49, 0.4)
elif instrument == 9: # Pipe: Piccolo
self.__set_instrument__(72, 0.4)
else: # Default: Piano: Electric Grand Piano
self.__set_instrument__(2, 0.3)
def __set_instrument__(self, instrument, beat_length):
self.instrument = instrument
self.OneBeatLength = beat_length
def play(self):
fluidsynth.init('/usr/share/sounds/sf2/FluidR3_GM.sf2','alsa')
fluidsynth.set_instrument(0, self.instrument) # Use channel 0
self.previous = int(SoundGen.kNoteNone) # Previously played note
self.shift = random.randint(-10, 5) # Allow the key to be shifted
beat_tracker = int(0) # 4/4 time.
while self.should_stop == False:
v = random.randint(65,75)
if beat_tracker % 8 == 0:
# First beat, strong
v = random.randint(85,95)
elif (beat_tracker - 4) % 8 == 0:
# Third beat, semi-strong
v = random.randint(75,85)
elif beat_tracker % 2 == 1:
# Off-beat, very soft
v = random.randint(55,65)
# Random note length
possible_lengths = [4] + [2] * 10 + [1] * 4 # 4 is 2 beats, 2 is 1 beat, 1 is half-beat
if beat_tracker % 2 == 1: # avoid non-half-beat if currently in half-beat
possible_lengths += [1] * 20 # Add weight to half-beat
length = random.choice(possible_lengths)
beat_tracker+=length
if self.previous != SoundGen.kNoteNone:
fluidsynth.stop_Note(self.previous+self.shift, 0)
self.previous = SoundGen.__next_note__(self.previous)
fluidsynth.play_Note(self.previous+self.shift,0,v);
time.sleep(length * self.OneBeatLength)
def stop(self):
self.should_stop = True;
if self.previous != SoundGen.kNoteNone: # Won't actually kill SoundGen just yet, but at least will stop the sound instantly.
fluidsynth.stop_Note(self.previous+self.shift, 0)
@staticmethod
def __next_note__(previous):
# I know, tons of magic numbers and so difficult to read. Will fix.
if (previous == SoundGen.kNoteNone):
choices = [SoundGen.kNoteC, SoundGen.kNoteD, SoundGen.kNoteE, SoundGen.kNoteF, SoundGen.kNoteG, SoundGen.kNoteA, SoundGen.kNoteB, SoundGen.kNoteC]
else:
if (previous == SoundGen.kNoteLB):
choices = [SoundGen.kNoteC] * 10 + [SoundGen.kNoteD] + [SoundGen.kNoteG]
elif (previous == SoundGen.kNoteC):
choices = [SoundGen.kNoteC, SoundGen.kNoteD, SoundGen.kNoteE, SoundGen.kNoteF, SoundGen.kNoteG] + [SoundGen.kNoteE] * 2 + [SoundGen.kNoteG] * 3
elif (previous == SoundGen.kNoteD):
choices = [SoundGen.kNoteC, SoundGen.kNoteE, SoundGen.kNoteF, SoundGen.kNoteG, SoundGen.kNoteA] * SoundGen.kSameNoteMultiplier + [SoundGen.kNoteC, SoundGen.kNoteG] * 2 + [SoundGen.kNoteD]
elif (previous == SoundGen.kNoteE):
choices = [SoundGen.kNoteC, SoundGen.kNoteD, SoundGen.kNoteF, SoundGen.kNoteG, SoundGen.kNoteHC] * SoundGen.kSameNoteMultiplier + [SoundGen.kNoteC] * 2 + [SoundGen.kNoteG] * 2 + [SoundGen.kNoteE]
elif (previous == SoundGen.kNoteF):
choices = [SoundGen.kNoteC, SoundGen.kNoteD, SoundGen.kNoteE, SoundGen.kNoteG, SoundGen.kNoteA, SoundGen.kNoteHC] * SoundGen.kSameNoteMultiplier + [SoundGen.kNoteF]
elif (previous == SoundGen.kNoteG):
choices = [SoundGen.kNoteC, SoundGen.kNoteD, SoundGen.kNoteE, SoundGen.kNoteF, SoundGen.kNoteA, SoundGen.kNoteB, SoundGen.kNoteHC, SoundGen.kNoteHD, SoundGen.kNoteHE] * SoundGen.kSameNoteMultiplier + [SoundGen.kNoteC] * 2 + [SoundGen.kNoteE] * 2 + [SoundGen.kNoteG]
elif (previous == SoundGen.kNoteA):
choices = [SoundGen.kNoteE, SoundGen.kNoteF, SoundGen.kNoteG, SoundGen.kNoteB, SoundGen.kNoteHC, SoundGen.kNoteHD, SoundGen.kNoteHE] * SoundGen.kSameNoteMultiplier + [SoundGen.kNoteHC] * 2 + [SoundGen.kNoteA]
elif (previous == SoundGen.kNoteB):
choices = [SoundGen.kNoteE, SoundGen.kNoteF, SoundGen.kNoteG, SoundGen.kNoteA, SoundGen.kNoteHC, SoundGen.kNoteHD] * SoundGen.kSameNoteMultiplier + [SoundGen.kNoteG] * 2 + [SoundGen.kNoteHC] * 2 + [SoundGen.kNoteB]
elif (previous == SoundGen.kNoteHC):
choices = [SoundGen.kNoteE, SoundGen.kNoteF, SoundGen.kNoteG, SoundGen.kNoteA, SoundGen.kNoteB, SoundGen.kNoteHD, SoundGen.kNoteHE] * SoundGen.kSameNoteMultiplier + [SoundGen.kNoteG] * 3 + [SoundGen.kNoteE] * 3 + [SoundGen.kNoteHC]
elif (previous == SoundGen.kNoteHD):
choices = [SoundGen.kNoteG, SoundGen.kNoteB, SoundGen.kNoteHC, SoundGen.kNoteHE] * SoundGen.kSameNoteMultiplier + [SoundGen.kNoteB] * 2 + [SoundGen.kNoteG] * 2 + [SoundGen.kNoteHD]
elif (previous == SoundGen.kNoteHE):
choices = [SoundGen.kNoteG, SoundGen.kNoteHC] * SoundGen.kSameNoteMultiplier * 3 + [SoundGen.kNoteHC] * 2 + [SoundGen.kNoteG] * 2 + [SoundGen.kNoteHE]
return random.choice(choices)
def main(argv):
try:
opts, args = getopt.getopt(argv,"hd:t:i:",["delay=","tune=","instrument="])
except getopt.GetoptError:
print_usage()
sys.exit(1)
for opt, arg in opts:
if opt == "-h":
print_usage()
sys.exit(0)
elif opt in ("-d", "--delay"):
try:
float(arg)
except ValueError:
print "Illegal delay value. Expecting a positive float value, got %s" % arg
sys.exit(3)
if float(arg)<0:
print "Illegal delay value. Expecting a positive float value, got %s" % arg
sys.exit(4)
d = datetime.datetime.now()+datetime.timedelta(seconds=float(arg))
elif opt in ("-t", "--tune"):
if not isfile(arg):
print "Tune file %s does not exist." % arg
sys.exit(5)
t = arg
elif opt in ("-i", "--instrument"):
i = arg
else:
assert False, "Unhandled option"
if "d" not in locals():
print_usage()
sys.exit(6)
if "t" not in locals() and "i" not in locals():
print_usage()
sys.exit(6)
if "t" in locals():
a = Alarm(d,t)
else:
a = Alarm(d,i,True)
try:
while True:
time.sleep(0.05)
except KeyboardInterrupt:
a.destroy()
def print_usage():
print "capriccio.py -d <delay in seconds> -t <audio filename>\ncaprioccio.py -d <delay in seconds> -i <instrument id>"
if __name__ == "__main__":
main(sys.argv[1:])
``` |
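__next_note__ above is effectively a first-order Markov chain whose transition weights are encoded with list arithmetic; the Python 3 sketch below shows the same idea as an explicit weight table, roughly the cleanup the in-code comment hints at. The numeric weights are illustrative, not a faithful transcription of every branch.
```python
# Illustrative Python 3 sketch: the note chooser as an explicit
# weighted transition table (weights are examples only).
import random

C, D, E, F, G, A, B, HC = 60, 62, 64, 65, 67, 69, 71, 72

TRANSITIONS = {
    None: {C: 1, D: 1, E: 1, F: 1, G: 1, A: 1, B: 1},
    C:    {C: 1, D: 1, E: 3, F: 1, G: 4},
    G:    {C: 5, D: 3, E: 5, F: 3, A: 3, B: 3, HC: 3, G: 1},
    # ...remaining rows would mirror the branches of __next_note__
}

def next_note(previous):
    weights = TRANSITIONS.get(previous, TRANSITIONS[None])
    notes = list(weights)
    return random.choices(notes, weights=[weights[n] for n in notes], k=1)[0]

print(next_note(None), next_note(C))
```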
{
"source": "163ui/ykdl",
"score": 3
} |
#### File: ykdl/ykdl/embedextractor.py
```python
from importlib import import_module
from logging import getLogger
from .common import alias
class EmbedExtractor():
"""
This class helps a video-embed site hand off a video that is actually
hosted on another site.
All we need to know is the source site name and the video ID;
that's enough.
With the site name and VID, a developer can easily find the real URL.
Embed sites carry no video info of their own, so they do not need stream_info.
"""
def __init__(self):
self.video_info = None
self.logger = getLogger(self.name)
def prepare(self):
"""
This hook does the real work of extracting the site name and VID
(and sometimes the title).
Subclasses MUST override it!
"""
pass
def prepare_playlist(self):
"""
This hook does the real work of extracting the site name and VID
(and sometimes the title) for every entry in a playlist.
Subclasses that support playlists MUST override it!
"""
pass
def parser(self, url):
if isinstance(url, str) and url.startswith('http'):
self.url = url
self.video_info = None
self.prepare()
if not self.video_info:
raise NotImplementedError(self.url + " is not supported")
site, vid = self.video_info
if site in alias.keys():
site = alias[site]
s = import_module('.'.join(['ykdl','extractors',site])).site
return s.parser(vid)
def parser_list(self, url):
if isinstance(url, str) and url.startswith('http'):
self.url = url
self.video_info_list = []
self.prepare_playlist()
if not self.video_info_list:
raise NotImplementedError('Playlist is not supported for ' + self.name + ' with url: ' + self.url)
info_list = []
for v in self.video_info_list:
site, vid = v
if site in alias.keys():
site = alias[site]
s = import_module('.'.join(['ykdl','extractors',site])).site
yield s.parser(vid)
def __getattr__(self, attr):
return None
```
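Concretely, a subclass of EmbedExtractor only has to fill self.video_info with a (site, vid) pair inside prepare(); parser() then imports ykdl.extractors.<site> and delegates to it. The minimal sketch below illustrates that contract, assuming EmbedExtractor from the module above is in scope — the host regex and the 'le' site name are illustrative assumptions, not a real ykdl extractor.
```python
# Minimal sketch of the EmbedExtractor contract described above.
# The regex and the 'le' site name are illustrative assumptions; this is
# not an actual ykdl extractor.
from ykdl.util.html import get_content
from ykdl.util.match import match1


class SomeEmbedSite(EmbedExtractor):
    name = "SomeEmbedSite"

    def prepare(self):
        html = get_content(self.url)
        vid = match1(html, 'data-vid="([^"]+)"')
        if vid:
            # (site, vid): parser() imports ykdl.extractors.le and
            # hands it the vid to resolve the real streams
            self.video_info = ("le", vid)
```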
#### File: extractors/le/finance.py
```python
import json
import time
import datetime
import platform
from ykdl.util.html import get_content, url_info
from ykdl.util.match import match1, matchall
from ykdl.extractor import VideoExtractor
from ykdl.videoinfo import VideoInfo
class LeLiveFi(VideoExtractor):
name = u"Le Live Finance(乐视财经)"
supported_stream_types = ['flv_1080p3m', 'flv_1080p', 'flv_1300', 'flv_1000', 'flv_720p', 'flv_350']
stream_2_profile = {'flv_1080p3m': u'1080p' ,'flv_1080p': u'1080p' , 'flv_1300': u'超清', 'flv_1000': u'高清' , 'flv_720p': u'标清', 'flv_350': u'流畅' }
stream_2_id = {'flv_1080p3m': 'BD', 'flv_1080p': 'BD' , 'flv_1300': 'TD', 'flv_1000': 'HD' , 'flv_720p': 'SD', 'flv_350': 'LD' }
stream_ids = ['BD', 'TD', 'HD', 'SD', 'LD']
def prepare(self):
info = VideoInfo(self.name, True)
html = get_content(self.url)
self.vid = match1(html, 'liveId\s*:\s*"(\d+)"') or match1(self.url, 'd=(\d+)', 'live/(\d+)')
live_data = json.loads(get_content('http://player.pc.le.com/player/startup_by_pid/1001/{}?host=live.le.com'.format(self.vid)))
assert 'status' in live_data and live_data['status'] == 2, "Live show is finished, playback is not supported!"
info.title = live_data['title']
stream_data = live_data['rows']
for s in stream_data:
stream_id = self.stream_2_id[s['rateType']]
stream_profile = self.stream_2_profile[s['rateType']]
if not stream_id in info.stream_types:
info.stream_types.append(stream_id)
streamUrl = s['streamUrl'] + '&format=1&expect=2&termid=1&platid=10&playid=1&sign=live_web&splatid=1001&vkit=20161017&station={}'.format( self.vid)
data = json.loads(get_content(streamUrl))
src = data['location']
info.streams[stream_id] = {'container': 'm3u8', 'video_profile': stream_profile, 'size' : float('inf'), 'src' : [src]}
info.stream_types = sorted(info.stream_types, key = self.stream_ids.index)
return info
site = LeLiveFi()
```
#### File: ykdl/extractors/mgtv.py
```python
from ykdl.util.html import default_proxy_handler, get_content
from ykdl.util.match import match1, matchall
from ykdl.extractor import VideoExtractor
from ykdl.videoinfo import VideoInfo
from ykdl.compact import install_opener, build_opener, HTTPCookieProcessor
import json
import sys
import base64
import uuid
import time
py3 = sys.version_info[0] == 3
if py3:
maketrans = bytes.maketrans
bytearray2str = bytearray.decode
else:
from string import maketrans
bytearray2str = str
encode_translation = maketrans(b'+/=', b'_~-')
def generate_did_tk2():
did = str(uuid.uuid4())
s = 'pno=1000|ver=0.3.0001|did={}|clit={}'.format(did, int(time.time()))
if not isinstance(s, bytes):
s = s.encode()
e = bytearray(base64.b64encode(s).translate(encode_translation))
e.reverse()
return did, bytearray2str(e)
class Hunantv(VideoExtractor):
name = u"芒果TV (HunanTV)"
supported_stream_profile = [ u'蓝光', u'超清', u'高清', u'标清' ]
supported_stream_types = [ 'BD', 'TD', 'HD', 'SD' ]
profile_2_types = { u'蓝光': 'BD', u'超清': 'TD', u'高清': 'HD', u'标清': 'SD' }
def prepare(self):
handlers = [HTTPCookieProcessor()]
if default_proxy_handler:
handlers += default_proxy_handler
install_opener(build_opener(*handlers))
info = VideoInfo(self.name)
if self.url and not self.vid:
self.vid = match1(self.url, 'https?://www.mgtv.com/b/\d+/(\d+).html')
if self.vid is None:
html = get_content(self.url)
self.vid = match1(html, 'vid=(\d+)', 'vid=\"(\d+)', 'vid: (\d+)')
did, tk2 = generate_did_tk2()
api_info_url = 'https://pcweb.api.mgtv.com/player/video?video_id={}&did={}&tk2={}'.format(self.vid, did, tk2)
meta = json.loads(get_content(api_info_url))
assert meta['code'] == 200, '[failed] code: {}, msg: {}'.format(meta['code'], meta['msg'])
assert meta['data'], '[Failed] Video info not found.'
pm2 = meta['data']['atc']['pm2']
info.title = meta['data']['info']['title']
api_source_url = 'https://pcweb.api.mgtv.com/player/getSource?video_id={}&did={}&pm2={}&tk2={}'.format(self.vid, did, pm2, tk2)
meta = json.loads(get_content(api_source_url))
assert meta['code'] == 200, '[failed] code: {}, msg: {}'.format(meta['code'], meta['msg'])
assert meta['data'], '[Failed] Video source not found.'
data = meta['data']
domain = data['stream_domain'][0]
for lstream in data['stream']:
if lstream['url']:
url = json.loads(get_content(domain + lstream['url']))['info']
info.streams[self.profile_2_types[lstream['name']]] = {'container': 'm3u8', 'video_profile': lstream['name'], 'src' : [url]}
info.stream_types.append(self.profile_2_types[lstream['name']])
info.stream_types= sorted(info.stream_types, key = self.supported_stream_types.index)
return info
def prepare_list(self):
html = get_content(self.url, headers={})
return matchall(html, ['"a-pic-play" href="([^"]+)"'])
site = Hunantv()
``` |
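generate_did_tk2 above base64-encodes the "pno|ver|did|clit" string, swaps '+/=' for '_~-' and reverses the result; the Python 3 round-trip below undoes that, just to make the obfuscation explicit (run it alongside the module above so generate_did_tk2 is in scope).
```python
# Python 3 round-trip for the tk2 obfuscation produced by generate_did_tk2.
import base64

def decode_tk2(tk2):
    restored = tk2.encode()[::-1].translate(bytes.maketrans(b'_~-', b'+/='))
    return base64.b64decode(restored).decode()

did, tk2 = generate_did_tk2()
print(decode_tk2(tk2))  # pno=1000|ver=0.3.0001|did=<uuid>|clit=<unix time>
```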
{
"source": "164140757/wild",
"score": 2
} |
#### File: AMOS/data/dcm2nii.py
```python
import os
from functools import partial
from glob import glob
from multiprocessing import Pool
from tqdm import tqdm
data_root = r"F:\MIA\AMOS-CT-MR\raw\test"
out_dir = r"F:\MIA\AMOS-CT-MR\processed\test"
def hasSubdir(root):
list_dir = [os.path.isdir(os.path.join(root, d)) for d in os.listdir(root)]
return any(list_dir)
def dcm2niix(_dir, out_dir):
check_id = os.path.split(_dir)[-1]
out_path = os.path.join(out_dir, check_id)
os.makedirs(out_path, exist_ok=True)
cmd='dcm2niix -f %f_%k_%j -z y -o \"{}\" \"{}\"'.format(out_path, _dir)
res=os.popen(cmd)
output_str=res.read()
return output_str
if __name__ == '__main__':
data_roots=glob(data_root+'/*/')
total_dir=[]
totolen=0
for data_root in data_roots:
dir_list=[]
for root, subdirs, _ in os.walk(data_root):
for subdir in subdirs:
dir_list.append(os.path.join(root, subdir))
totolen += len(dir_list)
total_dir.extend(dir_list)
# clean sub-roots
total_dir = [x for x in total_dir if not hasSubdir(x)]
print(f'Total number of cases: {len(total_dir)}')
with Pool(3) as p:
# dcm2niix needs both the case dir and out_dir, so bind out_dir with partial;
# imap (instead of map) lets tqdm actually report progress
list(tqdm(p.imap(partial(dcm2niix, out_dir=out_dir), total_dir), total=len(total_dir)))
```
#### File: AMOS/data/nrrd2nii.py
```python
from functools import partial
import re
from typing import OrderedDict
import nrrd
import numpy as np
import argparse
import os
import nibabel as nib
import nrrd
import numpy as np
import copy
def alterLabelValueBySegName(inNrrd):
dict_label_map = {}
img = inNrrd[0]
headers = inNrrd[1]
headers_out = copy.deepcopy(headers)
img_out = np.zeros(img.shape)
for header in headers:
if re.match('Segment[0-9]+_Name$',header) is not None:
idx = header.split('_')[0].split('Segment')[-1]
# print(f'index: {idx}')
gt_labelVal = int(headers[f'Segment{idx}_Name'].split('_')[-1])
pre = int(headers[f'Segment{idx}_LabelValue'])
# print(f'pre: {pre}')
# print(f'to: {gt_labelVal}')
dict_label_map[int(headers[f'Segment{idx}_LabelValue'])] = gt_labelVal
# print(f'map: {dict_label_map}')
headers_out[f'Segment{idx}_LabelValue'] = gt_labelVal
headers_out[f'Segment{idx}_Name'] = f'Segment_{gt_labelVal}'
headers_out[f'Segment{idx}_ID'] = f'Segment_{gt_labelVal}'
# print(dict_label_map)
for pre, to in dict_label_map.items():
img_out[img==pre] = to
return img_out, headers_out
def paddingToOrigin(inNrrd):
img = inNrrd[0]
return img
def _space2ras(space):
'''Find the diagonal transform required to transform space to RAS'''
positive= space.split('-')
xfrm=[ ]
if positive[0][0].lower() == 'l': # 'left'
xfrm.append(-1)
else:
xfrm.append(1)
if positive[1][0].lower() == 'p': # 'posterior'
xfrm.append(-1)
else:
xfrm.append(1)
if positive[2][0].lower() == 'i': # 'inferior'
xfrm.append(-1)
else:
xfrm.append(1)
# return 4x4 diagonal matrix
xfrm.append(1)
return np.diag(xfrm)
def nifti_write(inImg, prefix= None):
if prefix:
prefix= os.path.abspath(prefix)
else:
prefix= os.path.abspath(inImg).split('.')[0]
try:
img= nrrd.read(inImg)
# hdr= img[1]
# data= img[0]
# change headers context by ID
data, hdr=alterLabelValueBySegName(img)
SPACE_UNITS = 2
TIME_UNITS = 0
SPACE2RAS = _space2ras(hdr['space'])
translation= hdr['space origin']
if hdr['dimension']==4:
axis_elements= hdr['kinds']
for i in range(4):
if axis_elements[i] == 'list' or axis_elements[i] == 'vector':
grad_axis= i
break
volume_axes= [0,1,2,3]
volume_axes.remove(grad_axis)
rotation= hdr['space directions'][volume_axes,:3]
xfrm_nhdr= np.matrix(np.vstack((np.hstack((rotation.T, np.reshape(translation,(3,1)))),[0,0,0,1])))
# put the gradients along last axis
if grad_axis!=3:
data= np.moveaxis(data, grad_axis, 3)
try:
# DWMRI
# write .bval and .bvec
f_val= open(prefix+'.bval', 'w')
f_vec= open(prefix+'.bvec', 'w')
b_max = float(hdr['DWMRI_b-value'])
mf= np.matrix(np.vstack((np.hstack((hdr['measurement frame'],
[[0],[0],[0]])),[0,0,0,1])))
for ind in range(hdr['sizes'][grad_axis]):
bvec = [float(num) for num in hdr[f'DWMRI_gradient_{ind:04}'].split()]
L_2= np.linalg.norm(bvec[:3])
bval= round(L_2 ** 2 * b_max)
bvec.append(1)
# bvecINijk= RAS2IJK @ SPACE2RAS @ mf @ np.matrix(bvec).T
# simplified below
bvecINijk= xfrm_nhdr.T @ mf @ np.matrix(bvec).T
L_2= np.linalg.norm(bvecINijk[:3])
if L_2:
bvec_norm= bvecINijk[:3]/L_2
else:
bvec_norm= [0, 0, 0]
f_val.write(str(bval)+' ')
f_vec.write((' ').join(str(x) for x in np.array(bvec_norm).flatten())+'\n')
f_val.close()
f_vec.close()
except:
# fMRI
pass
TIME_UNITS= 8
else:
rotation= hdr['space directions']
xfrm_nhdr= np.matrix(np.vstack((np.hstack((rotation.T, np.reshape(translation,(3,1)))),[0,0,0,1])))
xfrm_nifti= SPACE2RAS @ xfrm_nhdr
# RAS2IJK= xfrm_nifti.I
# automatically sets dim, data_type, pixdim, affine
img_nifti= nib.nifti1.Nifti1Image(data, affine= xfrm_nifti)
hdr_nifti= img_nifti.header
# now set xyzt_units, sform_code= qform_code= 2 (aligned)
# https://nifti.nimh.nih.gov/nifti-1/documentation/nifti1fields/nifti1fields_pages/xyzt_units.html
# simplification assuming 'mm' and 'sec'
hdr_nifti.set_xyzt_units(xyz= SPACE_UNITS, t= TIME_UNITS)
hdr_nifti['qform_code'] = 2
hdr_nifti['sform_code']= 2
# append seg info
nib.save(img_nifti, prefix+'.nii.gz')
except:
print(f'{inImg} is error. ')
def main():
parser = argparse.ArgumentParser(description='NRRD to NIFTI conversion tool')
parser.add_argument('-i', '--input', type=str, required=True, help='input nrrd/nhdr file')
parser.add_argument('-p', '--prefix', type=str,
help='output prefix for .nii.gz, .bval, and .bvec files (default: input prefix)')
args = parser.parse_args()
nifti_write(args.input, args.prefix)
def pipeline(img, outdir):
file_ = os.path.split(img)[-1].split('_pred.nii.gz')[0] + '_pred'
dr = img.split(os.sep)[-2]
path = os.path.join(outdir, dr)
os.makedirs(path, exist_ok=True)
prefix = os.path.join(path, file_)
nifti_write(img, prefix)
# import vtk
# def readnrrd(filename):
# """Read image in nrrd format."""
# reader = vtk.vtkNrrdReader()
# reader.SetFileName(filename)
# reader.Update()
# info = reader.GetInformation()
# return reader.GetOutput(), info
# def writenifti(image,filename, info):
# """Write nifti file."""
# writer = vtk.vtkNIFTIImageWriter()
# writer.SetInputData(image)
# writer.SetFileName(filename)
# writer.SetInformation(info)
# writer.Write()
# m, info = readnrrd('/media/neubias/b0c7dd3a-8b12-435e-8303-2c331d05b365/DATA/Henry_data/mri.nrrd')
# writenifti(m, '/media/neubias/b0c7dd3a-8b12-435e-8303-2c331d05b365/DATA/Henry_data/mri_prueba2.nii', info)
if __name__ == '__main__':
from glob import glob
from multiprocessing import Pool
from tqdm import tqdm
data_root = '/mnts2d/med_data1/haotian/AMOS/first_round/valid/ps_slicer_nrrd'
out_root = '/mnts2d/med_data1/haotian/AMOS/first_round/valid/ps_'
os.makedirs(out_root,exist_ok=True)
data_list = glob(data_root+'/*/*.nrrd')
data = '/home/baihaotian/programs/wild/data/AMOS/63aac6c0425d136aca3c83458da27279_1.2.840.113704.1.111.1036.1547534440.1_1.2.840.113704.1.111.1036.1547534533.19_pred.nii.gz.seg.nrrd'
nii_data = '/mnts2d/med_data1/haotian/AMOS/first_round/valid/firstround_select/labels/0/63aac6c0425d136aca3c83458da27279_1.2.840.113704.1.111.1036.1547534440.1_1.2.840.113704.1.111.1036.1547534533.19_pred.nii.gz'
# headers = nib.load(nii_data).header
in_ = nrrd.read(data)
# print(np.unique(in_[0]))
# pipeline(data,
# '/home/baihaotian/programs/wild/data/')
headers = in_[1]
# print(headers['Segment1_Name'])
# for header in headers:
# if re.match('Segment[0-9]+_ID$',header) is not None:
# idx = header.split('_')[0].split('Segment')[-1]
# print(f'header: {header}, num: {idx}')
# print(headers[header])
# img = alterLabelValueBySegName(in_)[0]
# print(np.unique(img))
# headers = alterLabelValueBySegName(in_)[1]
# print('---------------------------')
for i in headers:
print(f'{i}, {headers[i]}')
# for data in data_list:
# pipeline(data, out_root)
# with Pool(6) as p:
# r = list(tqdm(p.map(partial(pipeline, outdir=out_root), data_list), total=len(data_list)))
``` |
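_space2ras in the file above just builds a diagonal sign flip from the NRRD space string; two worked cases with standard space names make the convention concrete (run with the module's definitions in scope).
```python
# Worked cases for _space2ras: LPS needs two sign flips, RAS none.
print(_space2ras('left-posterior-superior'))
# [[-1  0  0  0]
#  [ 0 -1  0  0]
#  [ 0  0  1  0]
#  [ 0  0  0  1]]
print(_space2ras('right-anterior-superior'))  # 4x4 identity: already RAS
```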
{
"source": "164747/alpaca-trade-api-python",
"score": 2
} |
#### File: alpaca-trade-api-python/alpaca_trade_api/stream.py
```python
import asyncio
import json
import logging
import typing
import msgpack
import websockets
from pydantic import BaseModel, PrivateAttr
from websockets.legacy.client import WebSocketClientProtocol
from alpaca_trade_api.models import private_stream as prs
from alpaca_trade_api.models import public_stream as pus
logger = logging.getLogger(__name__)
_HandlerType = typing.Callable[[object], typing.Awaitable]
class StreamBase(BaseModel):
key_id: str
secret_key: str
endpoint: str
_ws: typing.Optional[WebSocketClientProtocol] = PrivateAttr(default=None)
_handler: typing.Optional[_HandlerType] = PrivateAttr(default=None)
def set_handler(self, handler: _HandlerType):
self._handler = handler
async def __connect(self):
if self._ws is None:
self._ws = await websockets.connect(self.endpoint)
await self.auth()
async def send(self, d: dict) -> typing.Union[dict, typing.List[dict]]:
raise NotImplementedError
async def auth(self):
raise NotImplementedError
async def __close(self):
if self._ws is not None:
await self._ws.close()
self._ws = None
async def pre_action(self):
raise NotImplementedError
async def consume(self):
raise NotImplementedError
async def run_forever(self):
retries = 0
assert self._handler is not None
while True:
try:
if self._ws is None:
await self.__connect()
await self.pre_action()
await self.consume()
except websockets.WebSocketException:
await self.__close()
retries += 1
if retries > 3:
raise
if retries > 1:
await asyncio.sleep(3)
finally:
await asyncio.sleep(0.01)
class PublicStream(StreamBase):
symbols: typing.List[str] = []
async def send(self, d: dict) -> typing.Union[dict, typing.List[dict]]:
assert self._ws is not None
await self._ws.send(json.dumps(d))
# await self._ws.send(msgpack.packb(d))
logger.debug(f'{self.endpoint} <-- {d}')
b = await self._ws.recv()
if isinstance(b, str):
d = json.loads(b)
else:
d = msgpack.unpackb(b)
logger.debug(f'REC {self.endpoint} --> {d}')
return d
async def auth(self):
d = dict(action='auth', key=self.key_id, secret=self.secret_key)
msg = await self.send(d)
if msg[0]['T'] == 'error':
raise ValueError(msg[0].get('msg', 'auth failed'))
if msg[0]['T'] != 'success' or msg[0]['msg'] != 'connected':
raise ValueError('failed to authenticate')
@staticmethod
def __cast(d: dict) -> typing.Union[None, pus.Quote, pus.Bar, pus.Trade]:
t = d['T']
if t == 'b':
return pus.Bar(**d)
elif t == 'q':
return pus.Quote(**d)
elif t == 't':
return pus.Trade(**d)
return None
async def pre_action(self):
assert len(self.symbols) > 0
d = dict(action='subscribe', trades=self.symbols, bars=self.symbols, quotes=self.symbols)
await self.send(d)
async def consume(self):
while True:
r = await self._ws.recv()
if isinstance(r, str):
dl = json.loads(r)
else:
dl = msgpack.unpackb(r)
for d in dl:
o = self.__cast(d)
if o is not None:
await self._handler(o)
class PrivateStream(StreamBase):
def __cast(self, d: dict):
s, o = d['stream'], d['data']
if s == 'trade_updates':
return prs.TradeBase(**o)
raise NotImplementedError
async def send(self, d: dict) -> typing.Union[dict, typing.List[dict]]:
assert self._ws is not None
logger.debug(f'SEND {self.endpoint} <-- {d}')
s = json.dumps(d)
await self._ws.send(s)
r = await self._ws.recv()
logger.debug(f'REC {self.endpoint} --> {r}')
return json.loads(r)
async def auth(self):
d = {
"action": "authenticate",
"data": {
"key_id": self.key_id,
"secret_key": self.secret_key
}
}
d = await self.send(d)
if not d['data']['status'] == 'authorized':
raise websockets.WebSocketException('Not Authorized')
async def pre_action(self):
d = {
"action": "listen",
"data": {
"streams": ["trade_updates"]
}
}
await self.send(d)
async def consume(self):
while True:
r = await self._ws.recv()
d = json.loads(r)
o = self.__cast(d)
await self._handler(o)
``` |
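A hedged usage sketch for PublicStream above: construct it with credentials, an endpoint and symbols, register an async handler, and drive run_forever() from an event loop. The endpoint URL and keys are assumptions — substitute your own market-data endpoint and credentials.
```python
# Hedged usage sketch for PublicStream; endpoint and keys are placeholders.
import asyncio

async def on_message(msg):
    # msg is one of the pus.Trade / pus.Quote / pus.Bar models
    print(type(msg).__name__, msg)

stream = PublicStream(
    key_id="YOUR_KEY_ID",
    secret_key="YOUR_SECRET_KEY",
    endpoint="wss://stream.data.alpaca.markets/v2/iex",  # assumed data endpoint
    symbols=["AAPL", "MSFT"],
)
stream.set_handler(on_message)
asyncio.run(stream.run_forever())
```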
{
"source": "1658484908/Million-text-analysis",
"score": 3
} |
#### File: nengyuan/chezhiwang/middlewares.py
```python
from scrapy import signals
from chezhiwang import items
import scrapy
import random
from scrapy import log
import time
class ChezhiwangSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class ChezhiwangDownloaderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
import random
from scrapy.downloadermiddlewares.httpproxy import HttpProxyMiddleware # proxy-IP middleware base class (fixed import)
from scrapy.downloadermiddlewares.useragent import UserAgentMiddleware # user-agent middleware base class (fixed import)
class IPPOOLS(HttpProxyMiddleware):
def __init__(self,ip=''):
'''Initialize with an optional proxy IP.'''
self.ip=ip
def process_request(self, request, spider):
'''Pick a proxy IP at random and attach it to the request.'''
ip=random.choice(self.ip_pools) # randomly pick one proxy IP
print ('当前使用的IP是'+ip['ip'])
try:
request.meta["proxy"]="http://"+ip['ip']
except (Exception):
#print (e)
pass
ip_pools=[
{'ip':'192.168.127.12:9797'},
{'ip': '172.16.31.10:37901'}
#{'ip':'172.16.17.32:53281'}
#https
# {'ip': '172.16.31.10:8000'},
# {'ip':'172.16.31.10:37901'},
# {'ip': '172.16.31.10:44693'}
#{'ip':'172.16.58.3:808'}
]
class UAPOOLS(UserAgentMiddleware):
def __init__(self,user_agent=''):
self.user_agent=user_agent
def process_request(self, request, spider):
'''Pick a user agent at random and attach it to the request.'''
ua=random.choice(self.user_agent_pools)
print ('当前使用的user-agent是'+ua)
try:
request.headers.setdefault('User-Agent',ua)
except (Exception):
pass
user_agent_pools=[
'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3',
'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.75 Safari/537.36',
]
```
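
These IPPOOLS and UAPOOLS classes only take effect once they are registered in the project's Scrapy settings. The original settings.py is not included in this dump, so the snippet below is only a sketch of what that registration typically looks like; the priority numbers are assumptions.

```python
# Hypothetical excerpt from chezhiwang/settings.py (not part of the original repo dump).
# Priorities are placeholder values; lower numbers run closer to the engine.
DOWNLOADER_MIDDLEWARES = {
    # hand user-agent rotation over to UAPOOLS instead of the built-in middleware
    'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
    'chezhiwang.middlewares.UAPOOLS': 400,
    'chezhiwang.middlewares.IPPOOLS': 543,
}
```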
#### File: chezhiwang/spiders/zhongche.py
```python
from scrapy.spider import BaseSpider
from chezhiwang.arrangurl import *
from chezhiwang import items
from chezhiwang import myre
class DmozSpider(BaseSpider):
name = "zhongche"
allowed_domains = ["chinanews.cn"]
# start_urls = [
# "http://www.chinanews.cn/jk/kong/news/2008/08-01/1332750.shtml"
# ]
start_urls=arrangeurl('yule','health')
def parse(self, response):
#self.log("title:%s"%response.css('div.title0').extract())
title=response.css('div.title0').extract()
title=str(title)
if title=='[]':#left_bt
title = response.css('div.left_bt').extract()
title=myre.cleancode(str(title))
text=response.css('div.left_zw').extract()
if text==[]:
text = response.css('div.font16Style').extract()
text=str(text)
if text=='':
text = response.css('div.left_zw').extract()
text = myre.cleancode(text)
type='yule'
myitem=items.ChezhiwangItem()
myitem['title']=title
myitem['text_content']=text
myitem['type']=type
# yield {
# 'title': title,
# 'text_content': text,
# 'type':type
# }
yield myitem
```
#### File: Million-text-analysis/getnewsurl/getdate.py
```python
import datetime
# tempurl is the address pattern of the China News Network scroll-news pages:
# http://www.chinanews.com/scroll-news/<type>/<year>/<monthday>/news.shtml
# example: http://www.chinanews.com/scroll-news/it/2010/1201/news.shtml
def get_data_list(start,end,type):
url_date_list=[]
date_start=datetime.datetime.strptime(start,'%Y-%m-%d')
date_end=datetime.datetime.strptime(end,'%Y-%m-%d')
while date_start<=date_end:
temptime=date_start.strftime('/%Y/%m%d')
tempurl='http://www.chinanews.com/scroll-news/'+type+'/'+temptime+'/news.shtml'
url_date_list.append(tempurl)
date_start=date_start+datetime.timedelta(1)
return url_date_list
#url_list=get_data_list('2008-08-01','2018-11-19')
``` |
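
As a quick sanity check of the URL pattern, here is how get_data_list expands a short date range; the dates and channel are chosen only for illustration. Note the doubled slash, which comes from temptime already starting with '/'.

```python
# Illustrative call, not from the original repo: three days of the 'it' channel.
urls = get_data_list('2010-12-01', '2010-12-03', 'it')
for u in urls:
    print(u)
# http://www.chinanews.com/scroll-news/it//2010/1201/news.shtml
# http://www.chinanews.com/scroll-news/it//2010/1202/news.shtml
# http://www.chinanews.com/scroll-news/it//2010/1203/news.shtml
```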
{
"source": "16647615268/python",
"score": 3
} |
#### File: python/Leetcode/Distance_Binary.py
```python
class Solution(object):
def binaryGap(self, N):
num = 0 # initialize the counter
A = [i for i in range(32) if (N >> i) & 1]
# A collects the positions of the bits of N that are set to 1, found by right-shifting over 32 bit positions
if len(A) < 2: # fewer than two set bits: nothing to measure
return 0
for i in range(len(A) - 1): # adjacent positions differing by 1 mean the set bits are next to each other
if A[i + 1] - A[i] == 1:
num +=1
return num+1 # the count is one short of the run length, so add 1 back
a=Solution()
print(a.binaryGap(15))
```
#### File: python/Leetcode/money_conbin_num.py
```python
def sovle(n):
nums=[1,5,10,20,50,100]
data = [0] * (n+1)
data[0] = 1
print(data)
for num in nums:
for i in range(n+1):
if i-num>=0:
data[i] += data[i-num]
print(i,num,data)
return data[-1]
nums = int(input())
print(sovle(nums))
```
#### File: python/Voice broadcast number/baidu_aip_voice.py
```python
from aip import AipSpeech
""" 你的 APPID AK SK """
APP_ID = '17084747'
API_KEY = '<KEY>'
SECRET_KEY = '<KEY>'
sourse="12321.42"
numbers_list=['零','一','二','三','四','五','六','七','八','九']
units_list=["拾","佰","仟","万","亿"]
others_list=["已收到","点"]
client = AipSpeech(APP_ID, API_KEY, SECRET_KEY)
def create_sound_basic(basic):
result = client.synthesis(basic, 'zh', 1, {'vol': 5,'per':0})
# on success the synthesis call returns raw audio bytes; on error it returns a dict (see Baidu's error codes)
if not isinstance(result, dict):
with open(basic+'.wav', 'wb') as f:
f.write(result)
# create the basic sound clips
for numbers in numbers_list:
create_sound_basic(numbers)
for numbers in units_list:
create_sound_basic(numbers)
for numbers in others_list:
create_sound_basic(numbers)
```
#### File: python/Voice broadcast number/replace_name.py
```python
import os
import shutil
path = "/home/liyang/Speech_synthesis/syn_basic"
def translate_zh(zh):
zh_en_dict = {'零':"zero", '一':"one", '二':"two", '三':"three", '四':"four", '五':"five", '六':"six", '七':"seven", '八':"eight", '九':"nine",
"拾":"shi", "佰":"bai", "仟":"qian", "万":"wan", "亿":"yi",
"已收到":"received", "元":"yuan", "角":"jiao", "分":"fen"}
zh_en = zh_en_dict[zh]
return zh_en
name_list = os.listdir(path)
for name in name_list:
if name[-4:] == ".wav":
re_name = ""
try:
if "_end" not in name:
if "已" in name:
re_name = translate_zh(name[:-4])
re_out = re_name + ".wav"
elif "过" in name:
re_name = "Over number"
re_out = re_name + ".wav"
else:
for na in name[:-4]:
en = (translate_zh(na))
re_name = re_name + en
re_out = re_name + ".wav"
elif "_end" in name:
for na in name[:-8]:
en = (translate_zh(na))
re_name = re_name + en
re_out = re_name + "_end.wav"
print(re_out)
shutil.copyfile(name,re_out)
except:
continue
```
#### File: python/Voice broadcast number/syn_syn_en.py
```python
from pydub import AudioSegment
from flask import Flask, request
app = Flask(__name__)
@app.route('/', methods=["POST"])
def regist():
data = request.data
dic = eval(data)
source = dic["number"]
numbers_list = ['零', '一', '二', '三', '四', '五', '六', '七', '八', '九']
units_list = ["拾", "佰", "仟", "万", "亿"]
others_list = ["已收到", "元", "角", "分"]
sourse_int = str(int(eval(source)))
try:
if int(sourse_int) >= 100000: # Over one hundred thousand
song_out = AudioSegment.from_wav("您的交易数额过大.wav")
else:
out_zhong = strip_number(sourse_int)
song_out = mix_dub(out_zhong)
song_out.export("sound_out.wav", format("wav"))
except:
print("您输入的数值有误")
return "sound_out.wav"
if __name__ == '__main__':
app.run(host='0.0.0.0', port=7778)
def basic_num(basic_int):
# input a str number
if len(str(basic_int)) == 4:
out_int = ""
else:
out_int = "零"
for i in range(len(str(basic_int))):
if int(basic_int[i]) != 0:
if i != len(str(basic_int)) - 1:
out_int = out_int+str(numbers_list[int(basic_int[i])]) + str(units_list[len(str(basic_int)) - i - 2])
elif i == len(basic_int) - 1:
out_int = out_int+str(numbers_list[int(basic_int[i])])
else:
if i != len(str(basic_int)) - 1:
out_int = out_int+str(numbers_list[int(basic_int[i])])
out_int = out_int.strip("零") # delete leading and trailing zeros
for j in range(4):
out_int = out_int.replace("零零","零") # collapse repeated zeros
if "佰" not in out_int:
out_int = out_int.replace("一拾", "拾")
out_zhong = out_int
return out_int
def strip_number(sourse_int):
if (len(str(sourse_int)) <= 8) & (len(str(sourse_int)) > 4):
monst = str(int(sourse_int) // 10000)
basic = str(int(sourse_int) % 10000)
monst_zh = basic_num(monst)
basic_zh = basic_num(basic)
if "仟" not in basic_zh:
out_zhong = monst_zh +"万"+"零"+basic_zh
else:
out_zhong = monst_zh + "万" + basic_zh
elif len(str(sourse_int)) <= 4:
basic = sourse_int
basic_zh = basic_num(basic)
out_zhong = basic_zh
elif len(str(sourse_int)) > 8:
more = str(int(sourse_int) // 100000000)
monst = str(int(int(sourse_int) - int(more) * 100000000) // 10000)
basic = str(int(sourse_int) % 10000)
more_zh = basic_num(more)
monst_zh = basic_num(monst)
if "仟" not in monst_zh:
out_zhong = more_zh+"亿"+"零"+monst_zh + "万"
elif "仟" in monst_zh:
out_zhong = more_zh+"亿"+monst_zh + "万"
basic_zh = basic_num(basic)
if "仟" not in basic_zh:
out_zhong = out_zhong + "零" + basic_zh
elif "仟" in basic_zh:
out_zhong = out_zhong + basic_zh
out_zhong =out_zhong + others_list[1]
#have dot
sourse_float = eval(sourse)-eval(sourse_int)
if sourse_float != 0:
two_dot = ""
dot_sourse_float = len(str(sourse_float))
sourse_dot = int((round(sourse_float,2))*100)
if len(str(sourse_dot)) == 1:
sourse_dot = "0" + str(sourse_dot)
for dot in range(len(str(sourse_dot))):
two_dot = two_dot + numbers_list[int(str(sourse_dot)[dot])] + others_list[2+int(dot)]
two_dot = two_dot.replace("零角","").replace("零分","")
out_zhong = out_zhong + two_dot
out_zhong = out_zhong.replace("零万","") #delet_00000
return out_zhong
def mix_dub(out_zhong):
#mix all sound
song_out = AudioSegment.from_wav(str(translate_zh(str(others_list[0])) + ".wav"))
len_zhong = int(len(out_zhong))
if len_zhong > 2:
for sim in range(len_zhong-2): #if not end
if (out_zhong[sim] in numbers_list) & (out_zhong[sim] != "零") & (out_zhong[sim] != "元"):
sim_two = out_zhong[sim] +out_zhong[sim+1]
two_sound_path = str(translate_zh(str(sim_two))) + ".wav"
two_sound = AudioSegment.from_wav(two_sound_path)
song_two = two_sound[30:-30]
song_out = song_out.append(song_two,crossfade=40)
elif (out_zhong[sim] == "零"):
sim_two = out_zhong[sim]
two_sound_path = str(translate_zh(str(sim_two))) + ".wav"
two_sound = AudioSegment.from_wav(two_sound_path)
song_two = two_sound[20:-20]
song_out = song_out.append(song_two, crossfade=10)
elif (out_zhong[sim] == "元"):
if (out_zhong[sim-1]) in units_list:
sim_two = out_zhong[sim]
two_sound_path = str(translate_zh(str(sim_two))) + ".wav"
two_sound = AudioSegment.from_wav(two_sound_path)
song_two = two_sound[20:-20]
song_out = song_out.append(song_two, crossfade=10)
if out_zhong[-2] in units_list: #if simple end
end_path = str(translate_zh(str((out_zhong[-1:])))) + "_end.wav"
else:
end_path = str(translate_zh(str((out_zhong[-2:])))) + "_end.wav"
else:
end_path = str(translate_zh(str((out_zhong[-2:])))) + "_end.wav"
end_sound = AudioSegment.from_wav(end_path)
end_song = end_sound[30:-30]
song_out = song_out.append(end_song, crossfade=10)
return song_out
# translate Chinese number words into English file-name tokens
def translate_zh(zh):
zh_en_dict = {'零':"zero", '一':"one", '二':"two", '三':"three", '四':"four", '五':"five", '六':"six", '七':"seven", '八':"eight", '九':"nine",
"拾":"shi", "佰":"bai", "仟":"qian", "万":"wan", "亿":"yi",
"已收到":"received", "元":"yuan", "角":"jiao", "分":"fen"}
if "已" not in zh:
zh_en = ""
for z in zh:
zh_en = zh_en + zh_en_dict[z]
else: #已到账.wav translate
zh_en = zh_en_dict[zh]
return zh_en
``` |
{
"source": "1665169869/bilibiliAPI",
"score": 3
} |
#### File: bilibiliAPI/user/info.py
```python
import requests
def spaceInfo(mid, SESSDATA=""):
API = "http://api.bilibili.com/x/space/acc/info"
# Reference: https://github.com/SocialSisterYi/bilibili-API-collect/blob/master/user/info.md#%E7%94%A8%E6%88%B7%E8%AF%A6%E7%BB%86%E4%BF%A1%E6%81%AF1-%E7%94%A8%E4%BA%8E%E7%A9%BA%E9%97%B4
# Detailed user info, variant 1 (used for the space page)
params = {
"mid": mid
}
headers = {
"cookies": "SESSDATA="+SESSDATA
}
return requests.get(API, params=params, headers=headers)
def userDetailsCards(mid, SESSDATA="", photo=False):
API = "http://api.bilibili.com/x/web-interface/card"
# Reference: https://github.com/SocialSisterYi/bilibili-API-collect/blob/master/user/info.md#%E7%94%A8%E6%88%B7%E8%AF%A6%E7%BB%86%E4%BF%A1%E6%81%AF2-%E7%94%A8%E4%BA%8E%E5%90%8D%E7%89%87
# Detailed user info, variant 2 (used for the profile card)
params = {
"mid": mid,
"photo": photo
}
headers = {
"cookies": "SESSDATA="+SESSDATA
}
return requests.get(API, params=params, headers=headers)
def spaceMyInfo(SESSDATA):
# Detailed info of the logged-in user
# https://github.com/SocialSisterYi/bilibili-API-collect/blob/master/user/info.md#%E6%9C%AC%E7%94%A8%E6%88%B7%E8%AF%A6%E7%BB%86%E4%BF%A1%E6%81%AF
API = "http://api.bilibili.com/x/space/myinfo"
headers = {
"cookies": "SESSDATA="+SESSDATA
}
return requests.get(API, headers=headers)
``` |
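
All three helpers return the raw requests.Response, leaving JSON decoding to the caller. A minimal usage sketch (the mid value is made up, and the response layout follows the linked bilibili-API-collect docs):

```python
# Hypothetical caller; 123456 is a placeholder mid, SESSDATA left empty for public data.
resp = spaceInfo(123456)
resp.raise_for_status()
payload = resp.json()
print(payload.get("code"), payload.get("data", {}).get("name"))
```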
{
"source": "1665695549/phython-alien",
"score": 3
} |
#### File: phython-alien/ship/game_stats.py
```python
class GameStats():
"""跟踪游戏的统计信息"""
def __init__(self,ai_settings):
"""初始化统计信息"""
self.ai_settings=ai_settings
self.reset_states()
#游戏刚启动时处于活动状态
self.game_active=False
#在任何情况下都不应该重置最高分得分
self.high_score=0
def reset_states(self):
"""初始化在游戏运行期间可能变化的统计信息"""
self.ships_left = self.ai_settings.ship_limit
self.score=0
self.level=1
```
#### File: phython-alien/ship/hello_world.py
```python
print("Hello Python world")
def prep_high_score(self):
"""将最高得分转换为渲染的图像"""
high_score=int(round(self.stats.high_score,-1)
#high_score_str = "{:,}".format(high_score)
self.high_score_image=self.font.render(str(self.stats.high_score),True,self.text_color,self.ai_settings.bg_color)
#将最高得分放在屏幕顶部中央
self.high_score_rect=self.high_score_image.get_rect()
self.high_score_rect.centerx=self.screen_rect.centerx
self.high_score_rect.top=self.score_rect.top
```
#### File: phython-alien/ship/settings.py
```python
class Settings():
"""存储《外星人入侵》的所有设置的类"""
def __init__(self):
"""初始化游戏的设置"""
#屏幕设置
self.screen_width=1200
self.screen_height=600
self.bg_color=(230,230,230)
#飞船的设置
self.ship_limit=3
#子弹设置
self.bullet_wigth=3
self.bullet_height=15
self.bullet_color=(60,60,60)
self.bullets_allowed=3
#外星人设置
self.fleet_drop_speed=10
#以什么样的速度加快游戏节奏
self.speedup_scale=1.1
#外星人点数的提高速度
self.score_scale=1.5
self.initialize_dynamic_settings()
def initialize_dynamic_settings(self):
"""初始化随游戏进行而变化的设置"""
#初始化飞船、子弹、外星人的初始速度
self.ship_speed_factor=1.5
self.bullet_speed_factor=3
self.alien_speed_factor=1
#fleet_direction为1表示向右移,为-1表示向左移
self.fleet_direction=1
#记分
self.alien_points=50
def increase_speed(self):
"""提高速度设置"""
self.ship_speed_factor *=self.speedup_scale
self.bullet_speed_factor *=self.speedup_scale
self.alien_speed_factor *=self.speedup_scale
self.alien_points=int(self.alien_points*self.score_scale)
```
#### File: phython-alien/ship/ship.py
```python
import pygame
from pygame.sprite import Sprite
class Ship(Sprite):
def __init__(self,ai_settings,screen):
"""初始化飞船并设置其初始化位置"""
super(Ship,self).__init__()
self.screen=screen
self.ai_settings=ai_settings
#加载飞船图像并获取其外接矩形
self.image=pygame.image.load('image/ship.bmp')
self.rect=self.image.get_rect()
#获取屏幕矩形
self.screen_rect=screen.get_rect()
#将每搜新飞船放在屏幕底部中央
self.rect.centerx=self.screen_rect.centerx
self.rect.bottom=self.screen_rect.bottom
#在飞船的属性center中存储小数值
self.center=float(self.rect.centerx)
#移动标志
self.moving_right=False
self.moving_left=False
def update(self):
"""根据移动标志调整飞船的位置"""
#更新飞船的center值,而不是rect
if self.moving_right and self.rect.right < self.screen_rect.right:
self.center +=self.ai_settings.ship_speed_factor
if self.moving_left and self.rect.left > 0:
self.center -=self.ai_settings.ship_speed_factor
#根据self,crnter更新rect对象
self.rect.centerx=self.center
def blitme(self):
"""在指定位置绘制飞船"""
self.screen.blit(self.image,self.rect)
def center_ship(self):
"""让飞船在屏幕上居中"""
self.center = self.screen_rect.centerx
``` |
{
"source": "1665695549/pythonDataVisual",
"score": 4
} |
#### File: pythonDataVisual/dataVisualization/random_walk.py
```python
from random import choice
class RandomWalk():
"""creak a random-walk class"""
def __init__(self,num_points=5000):
"""init the attribute of random-walk"""
self.num_points=num_points
#all random-walk start at (0,0)
self.x_values=[0]
self.y_values=[0]
def fill_walk(self):
"""calculating all poits in random-walk"""
#do random-walk until the list reach the specified length
while len(self.x_values)<self.num_points:
#determine the direction and the distance of advance
x_direction=choice([1,-1])
x_distance=choice([0,1,2,3,4])
x_step=x_direction*x_distance
y_direction=choice([1,-1])
y_distance=choice([0,1,2,3,4])
y_step=y_direction*y_distance
#reject moves that go nowhere
if x_step==0 and y_step==0:
continue
#calculate the x_value and y_value of next point
next_x=self.x_values[-1]+x_step
next_y=self.y_values[-1]+y_step
self.x_values.append(next_x)
self.y_values.append(next_y)
``` |
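
RandomWalk only generates coordinates; plotting is left to the caller. A minimal driver with matplotlib might look like this (styling values are arbitrary, and the import assumes the class is saved as random_walk.py as the file path above suggests):

```python
import matplotlib.pyplot as plt

from random_walk import RandomWalk  # module name taken from the file path above

rw = RandomWalk(num_points=5000)
rw.fill_walk()
# color the points by the order in which they were visited
plt.scatter(rw.x_values, rw.y_values, c=range(rw.num_points),
            cmap=plt.cm.Blues, edgecolors='none', s=5)
plt.show()
```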
{
"source": "1667/baiduapi",
"score": 3
} |
#### File: 1667/baiduapi/testbaiduapi.py
```python
import base64
import urllib
import urllib2
import sys
import ssl
import json
import time
import cv2
import numpy as np
import screeninfo
from PIL import Image, ImageDraw, ImageFont
import threading
nowTime = lambda:int(round(time.time() * 1000))
token = ''
def cv2ImgAddText(img, text, left, top, textColor=(0, 255, 0), textSize=20):
if (isinstance(img, np.ndarray)):
img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
draw = ImageDraw.Draw(img)
fontText = ImageFont.truetype(
"simsunttc/simsun.ttc", textSize, encoding="utf-8")
draw.text((left, top), text, textColor, font=fontText)
return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)
class ImageRThread(threading.Thread):
_instance_lock = threading.Lock()
def __new__(cls,*args,**kwargs):
if not hasattr(ImageRThread,"_instance"):
with ImageRThread._instance_lock:
if not hasattr(ImageRThread,"_instance"):
ImageRThread._instance = object.__new__(cls)
print("init thread ")
ImageRThread.running = True
ImageRThread.dataframe = np.array([1])
return ImageRThread._instance
def setcallback(self,callbacks):
self.datacallback = callbacks
def setdataframe(self,dataframe):
self.dataframe = dataframe
def __del__(self):
pass
def getimageresult(self):
# request_url = "https://aip.baidubce.com/rest/2.0/image-classify/v1/body_analysis"
request_url = "https://aip.baidubce.com/rest/2.0/image-classify/v1/body_attr"
# global frame
if len(self.dataframe) != 1:
image = cv2.imencode('.jpg',self.dataframe)[1]
img = base64.b64encode(image)
# print("data spece",nowTime()-times)
params = {"image":img}
params = urllib.urlencode(params)
access_token = token
request_url = request_url + "?access_token=" + access_token
request = urllib2.Request(url=request_url, data=params)
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
start = nowTime()
response = urllib2.urlopen(request)
print(nowTime()-start)
content = response.read()
print content
return content
def run(self):
while self.running:
contenst = self.getimageresult()
if self.datacallback:
self.datacallback(self.dataframe,contenst)
class cvcap(object):
def __init__(self):
# super().__init__()
self.cap = cv2.VideoCapture(-1)
self.cap.set(cv2.CAP_PROP_FRAME_WIDTH,320)
self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT,240)
self.content = None
self.lastinfos = None
self.font = cv2.FONT_HERSHEY_SIMPLEX
def contentcallback(self,dats,content):
self.content = content
def getimage(self):
_, frame = self.cap.read()
imagedraw = frame.copy()
if _:
# cv2.imshow('img',frame)
if self.content:
jsondata = json.loads(self.content)
infos = jsondata.get('person_info',None)
if infos == None:
infos = self.lastinfos
else:
self.lastinfos = infos
index = 0
for info in infos:
index += 20
loaction = info['location']
# print loaction
lt = (int(loaction['left']),int(loaction['top']))
cv2.rectangle(imagedraw, lt, (int(loaction['left']+loaction['width']),int(loaction['top']+loaction['height'])), (index+20,255,index),2)
if type(info['attributes']) == dict:
if info['attributes']['gender']['score'] > 0.8:
# cv2.putText(imagedraw,'男',(lt[0],lt[1]+8), self.font, 0.4,(255,0,0),1,cv2.LINE_AA)
imagedraw = cv2ImgAddText(imagedraw,info['attributes']['gender']['name'],lt[0]+2,lt[1]+2,(255,0,0),15)
if info['attributes']['age']['score'] > 0.8:
# cv2.putText(imagedraw,'男',(lt[0],lt[1]+8), self.font, 0.4,(255,0,0),1,cv2.LINE_AA)
imagedraw = cv2ImgAddText(imagedraw,'age: '+info['attributes']['age']['name'],lt[0]+2,lt[1]+16,(255,0,0),15)
return (frame,imagedraw)
else:
return np.array([1])
if __name__ == "__main__":
window_name = 'projector'
screen = screeninfo.get_monitors()[0]
width, height = screen.width, screen.height
cv2.namedWindow(window_name, cv2.WND_PROP_FULLSCREEN)
cv2.moveWindow(window_name, screen.x - 1, screen.y - 1)
cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
cv2.WINDOW_FULLSCREEN)
cvc = cvcap()
keeprun = True
imgT = ImageRThread()
imgT.setDaemon(True)
imgT.setcallback(cvc.contentcallback)
imgT.start()
while keeprun:
# global frame
frame,draw = cvc.getimage()
if cv2.waitKey(33) & 0xFF == ord('q'):
print "I'm done"
break
if len(frame) != 1:
cv2.imshow(window_name,draw)
imgT.setdataframe(frame)
# getimageresult(frame)
else:
print('error')
``` |
{
"source": "1667/PythonRobotics",
"score": 3
} |
#### File: Localization/mykf/kf.py
```python
import numpy as np
import math
###
# Kalman filter
# Step 1: prediction
# x' = F x + B u      propagate the state with the control input
# P' = F P F^T + Q    propagate the covariance
# Step 2: measurement update
# y = z - H x'        innovation: difference between measurement and prediction (H maps state to measurement space)
# S = H P' H^T + R    innovation covariance; R is the measurement noise covariance
# K = P' H^T S^-1     Kalman gain, i.e. the weight given to the innovation y
# x = x' + K y        updated state, blending prediction, measurement and noise
# P = (I - K H) P'    updated covariance, carried into the next cycle
###
class KalmanFilter(object):
def __init__(self,x_in,F_in,P_in,Q_in,H_in,R_in):
self.x_ = x_in # state vector
self.F_ = F_in # state transition matrix: how the state propagates from one step to the next
self.P_ = P_in # state covariance matrix: uncertainty of the state, set from the application and updated every cycle
self.Q_ = Q_in # process noise: disturbances that cannot be modelled, often a (near-)identity/diagonal matrix
self.H_ = H_in # measurement matrix: maps the state into measurement space
self.R_ = R_in # measurement noise covariance matrix
self.DT = 0.1
def set_F(self,F_in):
self.F_ = F_in
def set_P(self,P_in):
self.P_ = P_in
def set_Q(self,Q_in):
self.Q_ = Q_in
def Prediction(self,u_in):
self.B_ = np.array([[self.DT * math.cos(self.x_[2, 0]), 0],
[self.DT * math.sin(self.x_[2, 0]), 0],
[0.0, self.DT],
[1.0, 0.0]])
self.x_ = self.F_ @ self.x_ + self.B_@u_in
self.P_ = self.F_ @ self.P_ @ self.F_.T+self.Q_
def MeasurementUpdate(self,z_in):
y = z_in - self.H_ @ self.x_
S = self.H_ @ self.P_ @ self.H_.T+self.R_
K = self.P_ @ self.H_.T @ np.linalg.inv(S)
self.x_ = self.x_ + K@y
self.P_ = (np.eye(self.x_.shape[0])[email protected]_)@self.P_
```
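
The class leaves the model matrices to the caller, while Prediction() hard-codes a 4-state [x, y, yaw, v] model driven by u = [v, yaw_rate]. Below is a small sketch of wiring it up for a position-only measurement; the noise magnitudes are placeholder assumptions, not values from the original project.

```python
import numpy as np

# State [x, y, yaw, v]; F drops the old v because v re-enters through B @ u in Prediction().
F = np.array([[1.0, 0.0, 0.0, 0.0],
              [0.0, 1.0, 0.0, 0.0],
              [0.0, 0.0, 1.0, 0.0],
              [0.0, 0.0, 0.0, 0.0]])
H = np.array([[1.0, 0.0, 0.0, 0.0],   # only x and y are observed
              [0.0, 1.0, 0.0, 0.0]])
x0 = np.zeros((4, 1))
P0 = np.eye(4)
Q = np.diag([0.1, 0.1, np.deg2rad(1.0), 1.0]) ** 2   # placeholder process noise
R = np.diag([1.0, 1.0]) ** 2                         # placeholder measurement noise

kf = KalmanFilter(x0, F, P0, Q, H, R)
u = np.array([[1.0], [0.1]])    # v = 1 m/s, yaw rate = 0.1 rad/s
z = np.array([[0.11], [0.02]])  # one noisy position observation
kf.Prediction(u)
kf.MeasurementUpdate(z)
print(kf.x_.ravel())
```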
#### File: PathPlanning/AStar/mystar.py
```python
import matplotlib.pyplot as plt
import math
"""
A_star
Cost function: f(n) = g(n) + h(n)
g(n) is the cost from the start to the current node, h(n) is the estimated cost from the current node to the goal.
Two lists are maintained: the open list holds nodes still to be evaluated, the closed list holds nodes already searched.
Algorithm:
1. Put the start node into the open list.
2. Repeat the following steps:
i. Find the node with the smallest F in the open list and make it the current node.
ii. Remove the current node from the open list and add it to the closed list.
iii. For every neighbour of the current node:
1. If the neighbour is not traversable or is already in the closed list, skip it and continue with the next node.
2. If the neighbour is not in the open list, add it, set its parent to the current node, and store its G and F values.
3. If the neighbour is already in the open list, check whether reaching it via the current node gives a smaller G than the stored one; if so, set its parent to the current node and recompute its G and F (H does not change).
iv. Termination:
the goal node being added to the open list as a candidate means a path has been found;
an empty open list means no nodes are left to expand and the goal was never reached, so the search has failed.
3. Walk back from the goal node along the parent links and collect all nodes; the collected nodes are the path.
"""
show_animation = True
class Node:
def __init__(self,x,y,cost,pind):
self.x = x
self.y = y
self.cost = cost # g(n): cost from the start node to this node
self.pind = pind # index of the parent node
def __str__(self):
return str(self.x)+","+str(self.y)+","+str(self.cost)+","+str(self.pind)
def calc_obstacle_map(ox,oy,reso,vr):
# reso 为分辨率
# vr 机器半径
print("[INFO] generating obstacle map")
minx = round(min(ox))
miny = round(min(oy))
maxx = round(max(ox))
maxy = round(max(oy))
print("[INFO] minx:",minx)
print("[INFO] miny:",miny)
print("[INFO] maxx:",maxx)
print("[INFO] maxy:",maxy)
xwidth = round(maxx-minx)
ywidth = round(maxy-miny)
print("[INFO] xwidth:",xwidth)
print("[INFO] ywidth:",ywidth)
obmap = [[False for i in range(xwidth+1)] for i in range(ywidth+1)]
# print("[INFO] obmap:",len(obmap[0]))
# 根据具体环境,对障碍物作膨胀
for ix in range(xwidth+1):
x = ix + minx
for iy in range(ywidth+1):
y = iy + miny
for iox,ioy in zip(ox,oy):
d = math.hypot(iox-x,ioy-y)
if d <= vr/reso:
obmap[ix][iy] = True
break
# print([int(tmplist) for tmplist in obmap])
obmap_t = [[0 for i in range(xwidth+1)] for i in range(ywidth+1)]
for ix in range(xwidth+1):
x = ix + minx
for iy in range(ywidth+1):
y = iy + miny
for iox,ioy in zip(ox,oy):
d = math.hypot(iox-x,ioy-y)
if d <= vr/reso:
obmap_t[ix][iy] = 1
break
print(len(obmap_t[0]))
return obmap,minx,miny,maxx,maxy,xwidth,ywidth
def verify_node(node,obmap,minx,miny,maxx,maxy):
if node.x < minx or node.y < miny or node.x > maxx or node.y > maxy:
return False
if obmap[node.x][node.y]:
return False
return True
def calc_index(node,xwidth,xmin,ymin):
return (node.y-ymin)*xwidth + (node.x-xmin) # compute the grid index
def get_motion_model():
motion = [[1,0,1],
[0,1,1],
[-1,0,1],
[0,-1,1],
[-1,-1,math.sqrt(2)],
[-1,1,math.sqrt(2)],
[1,-1,math.sqrt(2)],
[1,1,math.sqrt(2)]]
return motion
def calc_heuristic(n1,n2):
# heuristic: weighted Euclidean distance
w = 1.0
d = w*math.sqrt((n1.x-n2.x)**2 + (n1.y-n2.y)**2)
return d
def calc_final_path(ngoal,closedset,reso):
rx,ry = [ngoal.x*reso],[ngoal.y*reso]
pind = ngoal.pind
while pind != -1:
n = closedset[pind]
rx.append(n.x*reso)
ry.append(n.y*reso)
pind = n.pind
return rx,ry
def is_around_obs(node,obmap,minx,miny,maxx,maxy):
for i in range(-2,3):
for j in range(-2,3):
node_near = Node(node.x+i, node.y+j,
node.cost+i,None)
if not verify_node(node_near,obmap,minx,miny,maxx,maxy):
return True
return False
def a_star_planning(sx,sy,gx,gy,ox,oy,reso,rr):
nstart = Node(round(sx/reso),round(sy/reso),0.0,-1)
ngoal = Node(round(gx/reso),round(gy/reso),0.0,-1)
ox = [iox/reso for iox in ox]
oy = [ioy/reso for ioy in oy]
obmap,minx,miny,maxx,maxy,xw,yw = calc_obstacle_map(ox,oy,reso,rr)
motion = get_motion_model()
print("[INFO] motion: {0}".format(motion))
openset,closeset = dict(),dict()
openset[calc_index(nstart,xw,minx,miny)] = nstart
while True:
# find the node with the smallest F in the open list and make it the current node
c_id = min(openset,key=lambda o:openset[o].cost+calc_heuristic(ngoal,openset[o]))
current = openset[c_id]
# for node in openset.values():
# print(node.cost)
# print("=====")
# if show_animation:
# plt.plot(current.x*reso,current.y*reso,"xc")
# if len(closeset.keys()) % 10 == 0:
# plt.pause(0.001)
# plt.show()
# break
if current.x == ngoal.x and current.y == ngoal.y:
print("find goall")
ngoal.pind = current.pind
ngoal.cost = current.cost
break
# remove the current node from the open list and add it to the closed list
del openset[c_id]
closeset[c_id] = current
for i in range(len(motion)):
node = Node(current.x+motion[i][0], current.y+motion[i][1],
current.cost+motion[i][2],c_id)
# if is_around_obs(node,obmap,minx,miny,maxx,maxy):
# continue
n_id = calc_index(node,xw,minx,miny)
if n_id in closeset:
continue
if not verify_node(node,obmap,minx,miny,maxx,maxy):
continue
if n_id not in openset:
openset[n_id] = node
else:
# if the neighbour is already in the open list, check whether reaching it via the current
# node gives a smaller G than the stored one; if so, re-parent it and update its G and F
if openset[n_id].cost > node.cost:
openset[n_id] = node
# the code below could also be used to update the node, but its logic is less clear
# tcost = current.cost + calc_heuristic(current,node)
# if tcost >= node.cost:
# continue
# node.cost = tcost
# openset[n_id] = node
rx,ry = calc_final_path(ngoal,closeset,reso)
return rx,ry
def main():
sx = 10.0 # [m]
sy = 10.0 # [m]
gx = 50.0 # [m]
gy = 50.0 # [m]
grid_size = 2.0 # [m]
robot_radius = 2.0 # [m]
plt.ion()
ox, oy = [], []
for i in range(60):
ox.append(i)
oy.append(0.0)
for i in range(60):
ox.append(60.0)
oy.append(i)
for i in range(61):
ox.append(i)
oy.append(60.0)
for i in range(61):
ox.append(0.0)
oy.append(i)
for i in range(40):
ox.append(20.0)
oy.append(i)
for i in range(40):
ox.append(40.0)
oy.append(60.0-i)
if show_animation: # pragma: no cover
plt.plot(ox, oy, ".k")
plt.plot(sx, sy, "og")
plt.plot(gx, gy, "xb")
plt.grid(True)
plt.axis("equal")
rx,ry = a_star_planning(sx, sy, gx, gy,ox,oy,grid_size,robot_radius)
# plt.show()
# plt.ioff()
for x,y in zip(rx,ry):
plt.plot(x,y,'o')
# print(x,y)
plt.pause(0.1)
print('len ',len(rx))
plt.ioff()
plt.show()
if __name__ == "__main__":
main()
``` |
{
"source": "1667/yolov3-pytorch",
"score": 2
} |
#### File: 1667/yolov3-pytorch/util.py
```python
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import cv2
def unique(tensor):
tensor_np = tensor.cpu().numpy()
unique_np = np.unique(tensor_np)
unique_tensor = torch.from_numpy(unique_np)
tensor_res = tensor.new(unique_tensor.shape)
tensor_res.copy_(unique_tensor)
return tensor_res
def predict_transform(prediction, inp_dim, anchors, num_classes,CUDA = True):
"""
Multi-scale prediction on a feature map; every grid cell has three anchors of different scales. predict_transform() takes the feature map
predicted at one scale and decodes, for each anchor, the attributes (x, y, w, h, s, s_cls1, s_cls2, ...), where x, y, w, h are expressed
in the coordinate system of the network input image, s is the objectness confidence of the box, and s_cls1, s_cls2, ... are the per-class
probabilities. The incoming feature map (the prediction variable) has shape (batch_size, num_anchors*bbox_attrs, grid_size, grid_size),
stored like a batch of colour images BxCxHxW (see the variables inside predict_transform()). The result is reshaped into a tensor of shape
(batch_size, grid_size*grid_size*num_anchors, 5 + num_classes), giving for every box its (x, y, w, h) in the input-image (416x416)
coordinate system, its objectness score and its class scores.
"""
batch_size = prediction.size(0)
stride = inp_dim // prediction.size(2) # 下采样倍数,也就是缩放了多少
grid_size = inp_dim // stride # 当前的图像大小
bbox_attrs = 5+num_classes
num_anchors = len(anchors)
# 输入的尺寸是 grid_size*grid_size*num_anchors*(4+1+num_classes)比如:13*13*3*(4+1+80)
# 4 是边框坐标 1 是边框置信度 3 是先验框的个数
if CUDA:
prediction = prediction.cuda()
# -----------------------------------
# 维度调整
prediction = prediction.view(batch_size,bbox_attrs*num_anchors,grid_size*grid_size) # torch.Size([1, 255, 169])
# print("pre1",prediction.size())
prediction = prediction.transpose(1,2).contiguous()# torch.Size([1, 169, 255])
# print("pre2",prediction.size())
# 将 anchor 按行排列,即一行对应一个anchor属性,
prediction = prediction.view(batch_size,grid_size*grid_size*num_anchors,bbox_attrs) # torch.Size([1, 507, 85])
# print("pre3",prediction.size())
# -----------------------------------
anchors = [(a[0]/stride,a[1]/stride) for a in anchors] # 先验框也缩放到对应大小
# print(prediction[:,:,4])
# -----------------------------------
# 对 centerx centery 和置信度取sigmoid
prediction[:,:,0] = torch.sigmoid(prediction[:,:,0])
prediction[:,:,1] = torch.sigmoid(prediction[:,:,1])
prediction[:,:,4] = torch.sigmoid(prediction[:,:,4])
# -----------------------------------
# -----------------------------------
#调整中心坐标 分别加上对应网格在整个图像的起始坐标,如 第一个是(0,0) 最后一个是(12,12)
grid = np.arange(grid_size)
a,b = np.meshgrid(grid,grid)
x_offset = torch.FloatTensor(a).view(-1,1)
y_offset = torch.FloatTensor(b).view(-1,1)
if CUDA:
x_offset = x_offset.cuda()
y_offset = y_offset.cuda()
x_y_offset = torch.cat((x_offset,y_offset),1).repeat(1,num_anchors).view(-1,2).unsqueeze(0)
prediction[:,:,:2] += x_y_offset
# print("xy center ",prediction[:,:,:2])
# -----------------------------------
# -----------------------------------
# 求先验框在当前特征图上的宽高
anchors = torch.FloatTensor(anchors)
if CUDA:
anchors = anchors.cuda()
# print("anchors ",anchors)
anchors = anchors.repeat(grid_size*grid_size,1).unsqueeze(0)
# print("anchors size2 ",anchors.size())
# 根据公式bw=pw×e^tw及bh=ph×e^th,求边框在当前特征图的尺寸
prediction[:,:,2:4] = torch.exp(prediction[:,:,2:4])*anchors
# print("anchors ===",prediction[:,:,2:4])
# -----------------------------------
# -----------------------------------
# 求每个分类的得分
prediction[:,:,5:5+num_classes] = torch.sigmoid(prediction[:,:,5:5+num_classes])
# 调整预测边框的大小和目标边框尺寸一致,为了和目标边框大小作比较,求代价函数
# 最终把所有的坐标映射到输入的图片上
prediction[:,:,:4] *= stride
return prediction
def bbox_iou(box1,box2):
b1_x1,b1_y1,b1_x2,b1_y2 = box1[:,0],box1[:,1],box1[:,2],box1[:,3]
b2_x1,b2_y1,b2_x2,b2_y2 = box2[:,0],box2[:,1],box2[:,2],box2[:,3]
inter_rect_x1 = torch.max(b1_x1,b2_x1)
inter_rect_y1 = torch.max(b1_y1,b2_y1)
inter_rect_x2 = torch.min(b1_x2,b2_x2)
inter_rect_y2 = torch.min(b1_y2,b2_y2)
inter_area = torch.clamp(inter_rect_x2-inter_rect_x1+1,min = 0)*torch.clamp(inter_rect_y2-inter_rect_y1+1,min=0)
b1_area = (b1_x2 - b1_x1+1)*(b1_y2-b1_y1+1)
b2_area = (b2_x2 - b2_x1+1)*(b2_y2-b2_y1+1)
iou = inter_area/(b1_area+b2_area-inter_area)
return iou
def write_results(prediction,confidence,num_classes,nms_conf = 0.4):
"""
NMS (non-maximum suppression):
1. First filter out predictions with low objectness confidence.
2. Reshape so that each prediction only keeps the highest class score and its class index.
3. Collect all classes present in the image and iterate over them class by class.
4. Sort by objectness confidence and compute the IoU between predictions.
5. Drop predictions with high overlap, repeating until every prediction of that class has been processed.
"""
# 将置信度小于阀值的边界框设为零
conf_mask = (prediction[:,:,4] > confidence).float()
# print("conf mask1",conf_mask,conf_mask.size())
# unsqueeze就是 拓展维度
conf_mask = conf_mask.unsqueeze(2)
# print("conf mask2",conf_mask,conf_mask.size())
prediction = prediction*conf_mask
# 由(center,w,h)变为 (left,top,right,bottom)
box_corner = prediction.new(prediction.shape)
box_corner[:,:,0] = (prediction[:,:,0] - prediction[:,:,2]/2)
box_corner[:,:,1] = (prediction[:,:,1] - prediction[:,:,3]/2)
box_corner[:,:,2] = (prediction[:,:,0] + prediction[:,:,2]/2)
box_corner[:,:,3] = (prediction[:,:,1] + prediction[:,:,3]/2)
prediction[:,:,:4] = box_corner[:,:,:4]
batch_size = prediction.size(0)
write = False
# 遍历batch中的每个图片的检测结果
for ind in range(batch_size):
# 每个图片都有10627个结果
image_pred = prediction[ind]
# 取最大预测值,并把85保存为7(增加了分数和index,去掉了其他项)
# 最大值 和最大值索引
max_conf,max_conf_score = torch.max(image_pred[:,5:5+num_classes],1)
max_conf = max_conf.float().unsqueeze(1)
max_conf_score = max_conf_score.float().unsqueeze(1)
seq = (image_pred[:,:5],max_conf,max_conf_score)
image_pred = torch.cat(seq,1)
# print("imagepred size1 ",image_pred.size())
# 去掉置信度被设为零的行,其实是取出不为零的行
non_zero_ind = (torch.nonzero(image_pred[:,4]))
# print(non_zero_ind.size())
try:
image_pred_ = image_pred[non_zero_ind.squeeze(),:].view(-1,7)
except:
continue
# print("imagepred_ size2 ",image_pred_)
if image_pred_.shape[0] == 0:
continue
img_classes = unique(image_pred_[:,-1]) # 取出 类型ID
# print("img class",img_classes)
for cls in img_classes:
#取出属于这个类别的所有预测
cls_mask = image_pred_*(image_pred_[:,-1] == cls).float().unsqueeze(1)
class_mask_ind = torch.nonzero(cls_mask[:,-2]).squeeze()
image_pred_class = image_pred_[class_mask_ind].view(-1,7)
# 按置信度进行排序,而不是得分
conf_sort_index = torch.sort(image_pred_class[:,4],descending = True)[1]
image_pred_class = image_pred_class[conf_sort_index]
# print("pred class",cls,image_pred_class)
idx = image_pred_class.size(0)
for i in range(idx):
try:
ious = bbox_iou(image_pred_class[i].unsqueeze(0),image_pred_class[i+1:])
except ValueError:
break
except IndexError:
break
# 去掉重合率较高的预测,所以对于有多个物体的类别,只要没有重合,就不会被过滤。
iou_mask = (ious < nms_conf).float().unsqueeze(1)
# print("iou ",cls,i,iou_mask)
image_pred_class[i+1:] *= iou_mask
non_zero_ind = torch.nonzero(image_pred_class[:,4]).squeeze()
# print("iou non z",non_zero_ind)
image_pred_class = image_pred_class[non_zero_ind].view(-1,7)
# 把预测结果和图片在batch中的id对应起来
# new(image_pred_class.size(0),1),这个是为了处理同一个类别有多个物体的情况,要把所有预测都和图片对应
batch_ind = image_pred_class.new(image_pred_class.size(0),1).fill_(ind)
# print("batch ind",batch_ind)
seq = batch_ind,image_pred_class
if not write:
output = torch.cat(seq,1)
write = True
else:
out = torch.cat(seq,1)
output = torch.cat((output,out))
# print(output)
try:
return output
except:
return 0
def load_classes(namefile):
fp = open(namefile,"r")
names = fp.read().split("\n")[:-1]
return names
# 按比例缩放图片大小,并用128填充满图片
def letterbox_image(img,inp_dim):
img_w,img_h = img.shape[1],img.shape[0]
w,h = inp_dim
new_w = int(img_w*min(w/img_w,h/img_h))
new_h = int(img_h*min(w/img_w,h/img_h))
resized_image = cv2.resize(img,(new_w,new_h),interpolation = cv2.INTER_CUBIC)
canvas = np.full((inp_dim[1],inp_dim[0],3),128)
canvas[(h-new_h)//2:(h-new_h)//2 + new_h,(w-new_w)//2:(w-new_w)//2+new_w,:] = resized_image
return canvas
def prep_image(img,inp_dim):
img = letterbox_image(img,(inp_dim,inp_dim))
img = img[:,:,::-1].transpose((2,0,1)).copy()
img = torch.from_numpy(img).float().div(255.0).unsqueeze(0)
return img
``` |
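
As a quick worked check of bbox_iou, here are two hand-made boxes in corner format (x1, y1, x2, y2); the numbers are purely illustrative and follow the +1 edge-inclusive convention used above.

```python
import torch

box_a = torch.tensor([[0.0, 0.0, 10.0, 10.0]])
box_b = torch.tensor([[5.0, 5.0, 15.0, 15.0]])
# intersection is a 6x6 patch: (10-5+1) * (10-5+1) = 36
# areas are 11*11 = 121 each, so IoU = 36 / (121 + 121 - 36) ~= 0.1748
print(bbox_iou(box_a, box_b))   # tensor([0.1748])
```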
{
"source": "166MMX/hiro-python-library",
"score": 2
} |
#### File: hiro/abc/common.py
```python
import io
from abc import ABC
from typing import Generator, Any, TypeVar, Iterable, Iterator, Optional
from ijson import items as ijson_items
from requests import Response
class AbcRest(ABC):
__slots__ = ()
_T = TypeVar('_T')
def debug_iter(iterable: Iterable[_T], file_name: str) -> Generator[_T, None, None]:
with open(file_name, 'wb') as f:
for i in iterable:
f.write(i)
yield i
class AbcData(ABC):
__slots__ = ()
@staticmethod
def items_generator(response: Response) -> Generator[Any, None, None]:
with response:
res_iter = response.iter_content(chunk_size=None)
# res_iter = debug_iter(res_iter, 'http_response_bytes.json')
readable = ReadableIterator(res_iter)
item_iter = ijson_items(readable, prefix='items.item')
yield from item_iter
class AbcModel(ABC):
__slots__ = ()
class ReadableIterator(io.RawIOBase):
# https://github.com/j-planet/Kaggle/blob/master/ValuedShoppers/IterStreamer.py
def __init__(self, iterator: Iterator[bytes]) -> None:
super().__init__()
self.iterator = iterator
self.partial_chunk = b''
def readable(self) -> bool:
return True
def readinto(self, buffer: bytearray) -> Optional[int]:
buffer_size = len(buffer)
chunk = bytearray(self.partial_chunk)
while len(chunk) < buffer_size:
try:
b = next(self.iterator)
chunk += b
except StopIteration:
stopped = True
break
else:
stopped = False
self.partial_chunk = chunk[buffer_size:]
buffer[::] = chunk[:buffer_size]
read = len(buffer)
if read == 0 and stopped:
self.close()
return read
```
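
ReadableIterator adapts a chunked byte iterator to the file-like interface ijson expects, which is how items_generator streams a Response. A self-contained sketch with made-up JSON chunks:

```python
from ijson import items as ijson_items

# Pretend these chunks arrived from a streaming HTTP response body.
chunks = iter([b'{"items": [{"id": 1}, ', b'{"id": 2}]}'])
readable = ReadableIterator(chunks)
for item in ijson_items(readable, prefix='items.item'):
    print(item)   # {'id': 1} then {'id': 2}
```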
#### File: hiro/abc/graph.py
```python
from abc import abstractmethod
from datetime import datetime
from typing import TYPE_CHECKING, Any, Dict, Mapping, Union, overload, Literal, Optional, Iterable, Generator
from requests import Response
from arago.hiro.model.graph.attribute import ATTRIBUTE_T
from arago.hiro.model.graph.edge import EdgeId, Edge
from arago.hiro.model.graph.history import HistoryFormat, HistoryDiff, HistoryEntry
from arago.hiro.model.graph.vertex import VertexId, Vertex, VERTEX_T, VERTEX_T_co, VERTEX_TYPE_T, VERTEX_XID_T_co, \
VERTEX_ID_T, VERTEX_XID_T
from arago.hiro.model.storage import BLOB_VERTEX_T_co, TIME_SERIES_VERTEX_T_co
from arago.ogit import OgitEntity
from arago.ogit import OgitVerb
from .common import AbcRest, AbcData, AbcModel
if TYPE_CHECKING:
from arago.hiro.client.rest_base_client import HiroRestBaseClient
# noinspection PyUnusedLocal
class AbcGraphEdgeRest(AbcRest):
__slots__ = ()
@abstractmethod
def __init__(self, client: 'HiroRestBaseClient') -> None:
...
@abstractmethod
def create(self, edge_type: str, req_data: Mapping[str, Any]) -> Response:
...
@abstractmethod
def delete(self, edge_id: str) -> Response:
...
# noinspection PyUnusedLocal
class AbcGraphEdgeData(AbcData):
__slots__ = ()
@abstractmethod
def __init__(self, client: 'HiroRestBaseClient') -> None:
...
# https://pod1159.saasarago.com/_api/index.html#!/%5BGraph%5D_Verb/post_connect_type
# create add connection
# usage: hiro [<general options>] edge create [<specific options>]
# <from/out-vertex> <edge-type> <to/in-vertex>
@abstractmethod
def create(self, out_vertex_id: str, edge_type: str, in_vertex_id: str) -> Dict[str, Any]:
...
# https://pod1159.saasarago.com/_api/index.html#!/%5BGraph%5D_Verb/delete_id
# delete remove connection
# usage: hiro [<general options>] edge delete [<specific options>]
# <from/out-vertex> <edge-type> <to/in-vertex>
@abstractmethod
def delete(self, edge_id: str) -> Dict[str, Any]:
...
# noinspection PyUnusedLocal
class AbcGraphEdgeModel(AbcModel):
__slots__ = ()
@abstractmethod
def __init__(self, client: 'HiroRestBaseClient') -> None:
...
@abstractmethod
def create(
self,
out_vertex_id: Union[Vertex, VertexId, str],
edge_type: Union[OgitVerb, str],
in_vertex_id: Union[Vertex, VertexId, str]
) -> Edge:
...
@abstractmethod
def delete(
self,
edge_id: Union[Edge, EdgeId, str]
) -> Edge:
...
# noinspection PyUnusedLocal
class AbcGraphVertexRest(AbcRest):
__slots__ = ()
@abstractmethod
def __init__(self, client: 'HiroRestBaseClient') -> None:
...
@abstractmethod
def create(
self,
vertex_type: str,
req_data: Mapping[str, Any],
headers: Optional[Mapping[str, str]] = None
) -> Response:
...
@abstractmethod
def get(
self,
vertex_id: str,
params: Optional[Mapping[str, str]] = None,
headers: Optional[Mapping[str, str]] = None
) -> Response:
...
@abstractmethod
def update(
self,
vertex_id: str,
req_data: Mapping[str, Any],
headers: Optional[Mapping[str, str]] = None
) -> Response:
...
@abstractmethod
def delete(
self,
vertex_id: str,
headers: Optional[Mapping[str, str]] = None
) -> Response:
...
@abstractmethod
def history(
self,
vertex_id: str,
params: Optional[Mapping[str, str]] = None,
headers: Optional[Mapping[str, str]] = None
) -> Response:
...
# noinspection PyUnusedLocal
class AbcGraphVertexData(AbcData):
__slots__ = ()
@abstractmethod
def __init__(self, client: 'HiroRestBaseClient') -> None:
...
# https://pod1159.saasarago.com/_api/index.html#!/%5BGraph%5D_Entity/post_new_type
# create create new vertices
# usage: hiro [<general options>] vertex create [<specific options>] <files
# containing vertex definitions (JSON)>
# Options
# -t,--type <vertexType> vertex type to create (only used for files not
# containing ogit/_type)
@abstractmethod
def create(
self,
vertex_type: str,
req_data: Optional[Mapping[str, Any]] = None,
headers: Optional[Mapping[str, str]] = None
) -> Dict[str, Any]:
...
# https://pod1159.saasarago.com/_api/index.html#!/%5BGraph%5D_Entity/get_id
# TODO impl --show-list-meta
# usage: hiro [<general options>] vertex get [<specific options>] <vertices
# to retrieve>
# Options
# --show-list-meta Expand meta data for attributes containing a
# list value. This will suppress the behavior
# that lists with only one element are shown
# as scalar value. (default: do not expand
# meta data)
# --use-xids Any given IDs are taken as external IDs
@abstractmethod
def get(
self,
vertex_id: str,
fields: Optional[Iterable[str]] = None,
params: Optional[Mapping[str, str]] = None,
headers: Optional[Mapping[str, str]] = None
) -> Dict[str, Any]:
...
# https://pod1159.saasarago.com/_api/index.html#!/%5BGraph%5D_Entity/post_id
# update update one or more vertices from JSON input
# usage: hiro vertex update [<options>]
# [<update JSON files>]>
# Options
# -f,--file <fileName> file that contains update JSON (requires -i/--id)
# -i,--id <vertexId> vertex ID to update (requires -f/--file)
# put update existing vertex/creates missing. requires ogit/_xid in data
# usage: hiro vertex put [<options>] <files
# containing vertex definitions (JSON) with ogit/_xid>
# Options
# -t,--type <vertexType> vertex type to create (only used for files not
# containing ogit/_type)
@abstractmethod
def update(
self,
vertex_id: str,
req_data: Optional[Mapping[str, Any]] = None,
headers: Optional[Mapping[str, str]] = None
) -> Dict[str, Any]:
...
# https://pod1159.saasarago.com/_api/index.html#!/%5BGraph%5D_Entity/delete_id
# delete delete specific vertices
# usage: hiro [<general options>] vertex delete [<specific options>]
# <vertices to delete>
@abstractmethod
def delete(
self,
vertex_id: str,
headers: Optional[Mapping[str, str]] = None
) -> Dict[str, Any]:
...
# usage: hiro [<general options>] vertex history [<specific options>]
# <vertex IDs to retrieve history for>
# Options
# --use-xids specified ID(s) is(are) taken as external ID(s)
# --detail-level <arg> "element": (full) vertex content for each
# history version. "full": vertex content plus
# history meta data. "diff": show only changes
# from previous version. Default: element.
# --show-list-meta Expand meta data for attributes containing a
# list value. This will suppress the behavior
# that lists with only one element are shown
# as scalar value. (default: do not expand
# meta data)
# --version <arg> retrieve a specific incarnation (based on
# ogit/_v attribute) of the vertex. This
# option will disable most of the other
# restricting options.
# --from <from-ts> only history entries with timestamps greater
# or equal the specified one will be returned.
# Value must be specified in msecs after
# epoch.
# --to <to-ts> only history entries with timestamps smaller
# or equal the specified one will be returned.
# Value must be specified in msecs after
# epoch.
# --offset <arg> skip first <offset> results of history
# --limit <arg> retrieve at most the specified number of
# history entries
@abstractmethod
def history(
self,
vertex_id: str,
start: Optional[int] = None,
end: Optional[int] = None,
offset: Optional[int] = None,
limit: Optional[int] = None,
res_format: Optional[str] = None,
version: Optional[int] = None,
params: Optional[Mapping[str, str]] = None,
headers: Optional[Mapping[str, str]] = None
) -> Generator[Dict[str, Any], None, None]:
...
# set-attribute (bulk) update of a single vertex attribute
# usage: hiro [<general options>] vertex set-attribute [<specific options>]
# <vertices to update>
# Options
# -a,--attribute <attrName> attribute name to set
# -h,--help show usage
# --threads <arg> number of parallel executions
# -v,--value <value> value to set attribute <attrName> to
# search search for vertices
# put-edges create edges from input file using ogit/_xid
# del-edges create edges from input file using ogit/_xid
# noinspection PyUnusedLocal
class AbcGraphVertexModel(AbcModel):
__slots__ = ()
@abstractmethod
def __init__(self, client: 'HiroRestBaseClient') -> None:
...
@overload
@abstractmethod
def create(
self,
vertex: VERTEX_T
) -> VERTEX_T_co:
...
@overload
@abstractmethod
def create(
self,
vertex_type: VERTEX_TYPE_T,
vertex: Optional[VERTEX_T] = None
) -> VERTEX_T_co:
...
@overload
@abstractmethod
def create(
self,
vertex_type: Union[Literal['ogit/Attachment'], Literal[OgitEntity.OGIT_ATTACHMENT]],
vertex: Optional[VERTEX_T] = None
) -> BLOB_VERTEX_T_co:
...
@overload
@abstractmethod
def create(
self,
vertex_type: Union[Literal['ogit/Timeseries'], Literal[OgitEntity.OGIT_TIME_SERIES]],
vertex: Optional[VERTEX_T] = None
) -> TIME_SERIES_VERTEX_T_co:
...
@abstractmethod
def create(self, *args, **kwargs) -> VERTEX_T_co:
...
@overload
@abstractmethod
def get(
self,
vertex: VERTEX_T,
fields: Optional[Iterable[ATTRIBUTE_T]] = None
) -> VERTEX_T_co:
...
@overload
@abstractmethod
def get(
self,
vertex_id: VERTEX_ID_T,
fields: Optional[Iterable[ATTRIBUTE_T]] = None
) -> VERTEX_T_co:
...
@overload
@abstractmethod
def get(
self,
vertex_xid: VERTEX_XID_T,
fields: Optional[Iterable[ATTRIBUTE_T]] = None
) -> VERTEX_T_co:
...
@abstractmethod
def get(self, *args, **kwargs) -> VERTEX_T_co:
...
@overload
@abstractmethod
def update(
self,
vertex: VERTEX_T
) -> VERTEX_T_co:
...
@overload
@abstractmethod
def update(
self,
vertex_id: VERTEX_ID_T,
vertex: VERTEX_T
) -> VERTEX_T_co:
...
@overload
@abstractmethod
def update(
self,
vertex_xid: VERTEX_XID_T,
vertex: VERTEX_T
) -> VERTEX_T_co:
...
@overload
@abstractmethod
def update(
self,
vertex: VERTEX_T,
source_vertex: VERTEX_T
) -> VERTEX_T_co:
...
@abstractmethod
def update(self, *args, **kwargs) -> VERTEX_T_co:
...
@overload
@abstractmethod
def delete(
self,
vertex: VERTEX_T
) -> VERTEX_T_co:
...
@overload
@abstractmethod
def delete(
self,
vertex_id: VERTEX_ID_T
) -> VERTEX_T_co:
...
@overload
@abstractmethod
def delete(
self,
vertex_xid: VERTEX_XID_T
) -> VERTEX_T_co:
...
@abstractmethod
def delete(self, *args, **kwargs) -> VERTEX_T_co:
...
@overload
@abstractmethod
def history(
self,
vertex: VERTEX_T,
res_format: Literal[HistoryFormat.ELEMENT],
start: Optional[datetime] = None,
end: Optional[datetime] = None,
offset: Optional[int] = None,
limit: Optional[int] = None,
version: Optional[int] = None
) -> Generator[VERTEX_T_co, None, None]:
...
@overload
@abstractmethod
def history(
self,
vertex_id: VERTEX_ID_T,
res_format: Literal[HistoryFormat.ELEMENT],
start: Optional[datetime] = None,
end: Optional[datetime] = None,
offset: Optional[int] = None,
limit: Optional[int] = None,
version: Optional[int] = None
) -> Generator[VERTEX_T_co, None, None]:
...
@overload
@abstractmethod
def history(
self,
vertex_id: VERTEX_XID_T_co,
res_format: Literal[HistoryFormat.ELEMENT],
start: Optional[datetime] = None,
end: Optional[datetime] = None,
offset: Optional[int] = None,
limit: Optional[int] = None,
version: Optional[int] = None
) -> Generator[VERTEX_T_co, None, None]:
...
@overload
@abstractmethod
def history(
self,
vertex: VERTEX_T,
res_format: Literal[HistoryFormat.DIFF],
start: Optional[datetime] = None,
end: Optional[datetime] = None,
offset: Optional[int] = None,
limit: Optional[int] = None,
version: Optional[int] = None
) -> Generator[HistoryDiff, None, None]:
...
@overload
@abstractmethod
def history(
self,
vertex_id: VERTEX_ID_T,
res_format: Literal[HistoryFormat.DIFF],
start: Optional[datetime] = None,
end: Optional[datetime] = None,
offset: Optional[int] = None,
limit: Optional[int] = None,
version: Optional[int] = None
) -> Generator[HistoryDiff, None, None]:
...
@overload
@abstractmethod
def history(
self,
vertex_id: VERTEX_XID_T_co,
res_format: Literal[HistoryFormat.DIFF],
start: Optional[datetime] = None,
end: Optional[datetime] = None,
offset: Optional[int] = None,
limit: Optional[int] = None,
version: Optional[int] = None
) -> Generator[HistoryDiff, None, None]:
...
@overload
@abstractmethod
def history(
self,
vertex: VERTEX_T,
res_format: Literal[HistoryFormat.FULL],
start: Optional[datetime] = None,
end: Optional[datetime] = None,
offset: Optional[int] = None,
limit: Optional[int] = None,
version: Optional[int] = None
) -> Generator[HistoryEntry, None, None]:
...
@overload
@abstractmethod
def history(
self,
vertex_id: VERTEX_ID_T,
res_format: Literal[HistoryFormat.FULL],
start: Optional[datetime] = None,
end: Optional[datetime] = None,
offset: Optional[int] = None,
limit: Optional[int] = None,
version: Optional[int] = None
) -> Generator[HistoryEntry, None, None]:
...
@overload
@abstractmethod
def history(
self,
vertex_id: VERTEX_XID_T_co,
res_format: Literal[HistoryFormat.FULL],
start: Optional[datetime] = None,
end: Optional[datetime] = None,
offset: Optional[int] = None,
limit: Optional[int] = None,
version: Optional[int] = None
) -> Generator[HistoryEntry, None, None]:
...
@abstractmethod
def history(self, *args, **kwargs) -> Generator[Union[HistoryEntry, HistoryDiff, Vertex], None, None]:
...
# noinspection PyUnusedLocal
class AbcGraphRest(AbcRest):
__slots__ = ()
@abstractmethod
def __init__(self, client: 'HiroRestBaseClient') -> None:
...
@property
@abstractmethod
def edge(self) -> AbcGraphEdgeRest:
...
@property
@abstractmethod
def vertex(self) -> AbcGraphVertexRest:
...
# noinspection PyUnusedLocal
class AbcGraphData(AbcData):
__slots__ = ()
@abstractmethod
def __init__(self, client: 'HiroRestBaseClient') -> None:
...
@property
@abstractmethod
def edge(self) -> AbcGraphEdgeData:
...
@property
@abstractmethod
def vertex(self) -> AbcGraphVertexData:
...
# noinspection PyUnusedLocal
class AbcGraphModel(AbcModel):
__slots__ = ()
@abstractmethod
def __init__(self, client: 'HiroRestBaseClient') -> None:
...
@property
@abstractmethod
def edge(self) -> AbcGraphEdgeModel:
...
@property
@abstractmethod
def vertex(self) -> AbcGraphVertexModel:
...
```
#### File: hiro/abc/health.py
```python
from abc import abstractmethod
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional
from requests import Response
from .common import AbcRest, AbcData, AbcModel
if TYPE_CHECKING:
from arago.hiro.client.rest_base_client import HiroRestBaseClient
# https://tools.ietf.org/html/draft-inadarei-api-health-check-05
# noinspection PyUnusedLocal
class AbcHealthRest(AbcRest):
__slots__ = ()
@abstractmethod
def __init__(self, client: 'HiroRestBaseClient') -> None:
...
@abstractmethod
def check(self, headers: Optional[Mapping[str, str]]) -> Response:
...
# noinspection PyUnusedLocal
class AbcHealthData(AbcData):
__slots__ = ()
@abstractmethod
def __init__(self, client: 'HiroRestBaseClient') -> None:
...
@abstractmethod
def check(self) -> Dict[str, Any]:
...
# noinspection PyUnusedLocal
class AbcHealthModel(AbcModel):
__slots__ = ()
@abstractmethod
def __init__(self, client: 'HiroRestBaseClient') -> None:
...
@abstractmethod
def check(self) -> Dict[str, Any]:
...
```
#### File: backend/five/probe.py
```python
from typing import TYPE_CHECKING, Any, Dict, Final, Mapping, Optional
from requests import Response
from arago.hiro.abc.probe import AbcProbeRest, AbcProbeData, AbcProbeModel
from arago.hiro.client.exception import HiroClientError
from arago.hiro.model.probe import Version
if TYPE_CHECKING:
from arago.hiro.client.rest_base_client import HiroRestBaseClient
class Hiro5ProbeRest(AbcProbeRest):
__base_client: Final['HiroRestBaseClient']
PATH_INFO: Final[str] = '/info'
def __init__(self, client: 'HiroRestBaseClient') -> None:
super().__init__(client)
fork = client.fork()
fork.authenticator.exclude_path(literal=Hiro5ProbeRest.PATH_INFO)
self.__base_client = fork
def probe(self, headers: Optional[Mapping[str, str]] = None) -> Response:
return self.__base_client.request(
'GET', Hiro5ProbeRest.PATH_INFO, headers=headers
)
class Hiro5ProbeData(AbcProbeData):
__rest_client: Final[Hiro5ProbeRest]
def __init__(self, client: 'HiroRestBaseClient') -> None:
super().__init__(client)
self.__rest_client = Hiro5ProbeRest(client)
def probe(self) -> Optional[Dict[str, Any]]:
try:
response = self.__rest_client.probe(
headers={'Accept': 'application/json'}
)
except HiroClientError:
return None
with response:
res_data = response.json()
return res_data
class Hiro5ProbeModel(AbcProbeModel):
__data_client: Final[Hiro5ProbeData]
def __init__(self, client: 'HiroRestBaseClient') -> None:
super().__init__(client)
self.__data_client = Hiro5ProbeData(client)
def probe(self) -> Optional[Version]:
res_data = self.__data_client.probe()
if res_data is None or 'version' not in res_data:
return None
version: str = res_data['version']
if version.startswith('v1.5'):
return Version.HIRO_5
return None
```
#### File: backend/seven/app_admin.py
```python
from typing import TYPE_CHECKING, Final
from urllib.parse import quote
from arago.hiro.model.graph.vertex import Vertex
from arago.hiro.utils.cast_b import to_vertex
if TYPE_CHECKING:
from arago.hiro.client.rest_base_client import HiroRestBaseClient
class Hiro7AppAdminModel:
_base_client: Final['HiroRestBaseClient']
def __init__(self, client: 'HiroRestBaseClient') -> None:
super().__init__()
# TODO Bug https://itautopilot.zendesk.com/agent/tickets/7933
# path = client.root.model.meta.version()['app-admin'].endpoint
# if path.endswith('/'):
# path = path[:-1]
path = '/api/app-admin/1.2'
fork = client.fork(path)
self._base_client = fork
def deactivate(self, app_id: str) -> dict:
# DELETE /$id
# https://docs.hiro.arago.co/hiro/6.2.0/user/hiro-graph-api/app-rest-api.html#_id_delete
# https://docs.hiro.arago.co/hiro/6.2.0/user/hiro-graph-api/graph-applications.html#deactivate
# TODO result strange json vs Vertex
# {'ogit/Auth/Application/status': 'inactive'}
uri = '/%s' % quote(app_id, '')
with self._base_client.request(
'DELETE', uri, headers={'Accept': 'application/json'}
) as response:
res_data = response.json()
# vertex = to_vertex(res_data, self.__base_client)
# return vertex[OgitAttribute.OGIT_AUTH_APPLICATION_STATUS] == 'inactive'
return res_data
class Hiro7GraphAppAdminModel(Hiro7AppAdminModel):
def __init__(self, client: 'HiroRestBaseClient') -> None:
super().__init__(client)
def create(self, name: str, description: str) -> Vertex:
# returns ogit/Auth/Application Vertex
# POST /$type
# https://docs.hiro.arago.co/hiro/6.2.0/user/hiro-graph-api/app-rest-api.html#_type_post
# https://docs.hiro.arago.co/hiro/6.2.0/user/hiro-graph-api/graph-applications.html#create
uri = '/graph'
req_data = {
'ogit/name': name,
'ogit/description': description,
}
with self._base_client.request(
'POST', uri, headers={'Accept': 'application/json'}, json=req_data
) as response:
res_data = response.json()
vertex = to_vertex(res_data, self._base_client)
return vertex
def activate(self, app_id: str) -> dict:
# PATCH /$id
# https://docs.hiro.arago.co/hiro/6.2.0/user/hiro-graph-api/app-rest-api.html#_id_patch
# https://docs.hiro.arago.co/hiro/6.2.0/user/hiro-graph-api/graph-applications.html#activate
uri = '/%s' % quote(app_id, '')
req_data = {}
with self._base_client.request(
'PATCH', uri, headers={'Accept': 'application/json'}, json=req_data
) as response:
res_data = response.json()
# TODO define model
return res_data
```
#### File: backend/seven/auth.py
```python
from typing import TYPE_CHECKING, Any, Dict, Final, Mapping, Optional
from requests import Response
from arago.hiro.abc.auth import AbcAuthRest, AbcAuthData, AbcAuthModel
from arago.hiro.model.auth import SessionCredentials, AccessToken, ClientCredentials, PasswordAccessToken
if TYPE_CHECKING:
from arago.hiro.client.rest_base_client import HiroRestBaseClient
class Hiro7AuthRest(AbcAuthRest):
__base_client: Final['HiroRestBaseClient']
def __init__(self, client: 'HiroRestBaseClient') -> None:
super().__init__(client)
path = client.root.model.meta.version()['auth'].endpoint
fork = client.fork(path)
fork.authenticator.exclude_path(literal=f'{path}/app')
self.__base_client = fork
def password(self, req_data: Mapping[str, Any], headers: Optional[Mapping[str, str]] = None) -> Response:
uri = '/app'
return self.__base_client.request(
'POST', uri, json=req_data, headers=headers
)
def revoke(self, req_data: Mapping[str, Any], headers: Optional[Mapping[str, str]] = None) -> Response:
uri = '/revoke'
return self.__base_client.request(
'POST', uri, json=req_data, headers=headers
)
class Hiro7AuthData(AbcAuthData):
__rest_client: Final[Hiro7AuthRest]
def __init__(self, client: 'HiroRestBaseClient') -> None:
super().__init__(client)
self.__rest_client = Hiro7AuthRest(client)
def password(self, client_id: str, client_secret: str, username: str, password: str) -> Dict[str, Any]:
req_data = {
'client_id': client_id,
'client_secret': client_secret,
'username': username,
'password': password,
}
with self.__rest_client.password(
req_data, headers={'Accept': 'application/json'}
) as response:
res_data = response.json()
return res_data
def revoke(self, client_id: str) -> Dict[str, Any]:
req_data = {
'client_id': client_id,
}
with self.__rest_client.revoke(
req_data, headers={'Accept': 'application/json'}
) as response:
res_data = response.json()
return res_data
class Hiro7AuthModel(AbcAuthModel):
__data_client: Final[Hiro7AuthData]
def __init__(self, client: 'HiroRestBaseClient') -> None:
super().__init__(client)
self.__data_client = Hiro7AuthData(client)
def password(self, credentials: SessionCredentials) -> AccessToken:
res_data = self.__data_client.password(
credentials.client.id,
credentials.client.secret,
credentials.account.username,
credentials.account.password,
)
token = PasswordAccessToken.from_data(res_data)
return token
def revoke(self, client_cred: ClientCredentials) -> None:
res_data = self.__data_client.revoke(client_cred.id)
if len(res_data) != 0:
raise RuntimeError(f'Assert error: Unexpected result "{res_data}"')
```
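A hedged sketch of driving the password grant above end to end; every credential value is a placeholder and `client` is an assumed existing `HiroRestBaseClient`:
```python
# Sketch only: placeholder credentials, assumed pre-configured client.
from arago.hiro.model.auth import SessionCredentials, ClientCredentials, AccountCredentials

credentials = SessionCredentials(
    ClientCredentials('my-client-id', 'my-client-secret'),
    AccountCredentials('alice@example.com', 'not-a-real-password'),
)
auth = Hiro7AuthModel(client)
token = auth.password(credentials)  # POSTs client_id/client_secret/username/password to <auth>/app
auth.revoke(credentials.client)     # expects an empty JSON object back, otherwise raises RuntimeError
```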
#### File: backend/seven/storage.py
```python
from codecs import iterencode
from contextlib import contextmanager
from datetime import datetime
from functools import cached_property
from typing import Final, Generator, Dict, Any, Mapping, Optional, ContextManager, IO, TYPE_CHECKING, Iterable, Union, \
Iterator
from urllib.parse import quote
from requests.models import Response
from arago.extension import json
from arago.hiro.abc.common import AbcData
from arago.hiro.abc.storage import AbcStorageBlobRest, AbcStorageBlobData, AbcStorageBlobModel, data_to_model, \
model_to_data
from arago.hiro.abc.storage import AbcStorageLogRest, AbcStorageLogData, AbcStorageLogModel
from arago.hiro.abc.storage import AbcStorageRest, AbcStorageData, AbcStorageModel
from arago.hiro.abc.storage import AbcStorageTimeSeriesRest, AbcStorageTimeSeriesData, AbcStorageTimeSeriesModel
from arago.hiro.model.storage import TimeSeriesValue, BlobVertex, TimeSeriesVertex, \
TIME_SERIES_ID_T, BLOB_ID_T, TimeSeriesId, BlobId
from arago.hiro.utils.datetime import datetime_to_timestamp_ms
if TYPE_CHECKING:
from arago.hiro.client.rest_base_client import HiroRestBaseClient
class Hiro7StorageBlobRest(AbcStorageBlobRest):
__base_client: Final['HiroRestBaseClient']
def __init__(self, client: 'HiroRestBaseClient') -> None:
super().__init__(client)
path = client.root.model.meta.version()['graph'].endpoint
fork = client.fork(path)
self.__base_client = fork
def get(
self,
blob_id: str,
params: Optional[Mapping[str, str]] = None,
headers: Optional[Mapping[str, str]] = None
) -> Response:
"""
https://requests.readthedocs.io/en/master/user/quickstart/#binary-response-content
https://requests.readthedocs.io/en/master/user/quickstart/#raw-response-content
https://requests.readthedocs.io/en/master/user/advanced/#streaming-requests
https://requests.readthedocs.io/en/master/user/advanced/#blocking-or-non-blocking
"""
uri = '/%s/content' % (
quote(blob_id, safe=''),
)
e_headers = {'Accept': 'application/octet-stream'}
if headers:
e_headers.update(headers)
return self.__base_client.request(
'GET', uri, params=params, headers=e_headers, stream=True
)
# TODO test content_type support
def set(
self,
blob_id: str,
content: Union[bytes, bytearray, Iterable[bytes], IO[bytes]],
headers: Optional[Mapping[str, str]] = None
) -> Response:
"""
https://requests.readthedocs.io/en/master/user/advanced/#body-content-workflow
https://requests.readthedocs.io/en/master/user/advanced/#streaming-uploads
https://requests.readthedocs.io/en/master/user/advanced/#chunk-encoded-requests
https://requests.readthedocs.io/en/master/user/advanced/#post-multiple-multipart-encoded-files
https://requests.readthedocs.io/en/master/user/advanced/#blocking-or-non-blocking
"""
uri = '/%s/content' % (
quote(blob_id, safe=''),
)
e_headers = {'Accept': 'application/json'}
if headers:
e_headers.update(headers)
return self.__base_client.request(
'POST', uri, headers=e_headers, data=content
)
class Hiro7StorageBlobData(AbcStorageBlobData):
__rest_client: Final[Hiro7StorageBlobRest]
def __init__(self, client: 'HiroRestBaseClient') -> None:
super().__init__(client)
self.__rest_client = Hiro7StorageBlobRest(client)
# TODO test support of content_id
# TODO test support of include_deleted
@contextmanager
def get(
self,
blob_id: str,
content_id: Optional[str] = None,
include_deleted: Optional[bool] = False
) -> ContextManager[Generator[bytes, None, None]]:
params = {}
if content_id is not None:
params['contentId'] = content_id
if include_deleted:
params['includeDeleted'] = include_deleted
with self.__rest_client.get(blob_id, params) as response:
yield response.iter_content(chunk_size=None)
def set(
self,
blob_id: str,
content: Union[bytes, bytearray, Iterable[bytes], IO[bytes]],
content_type: Optional[str] = None
) -> None:
if content_type is not None:
headers = {'Content-Type': content_type}
else:
headers = None
self.__rest_client.set(blob_id, content, headers)
class Hiro7StorageBlobModel(AbcStorageBlobModel):
__data_client: Final[Hiro7StorageBlobData]
def __init__(self, client: 'HiroRestBaseClient') -> None:
super().__init__(client)
self.__data_client = Hiro7StorageBlobData(client)
def get(
self,
blob_id: Union[BlobVertex, BLOB_ID_T],
content_id: Optional[str] = None,
include_deleted: Optional[bool] = False
) -> ContextManager[Generator[bytes, None, None]]:
e_blob_id: str
if isinstance(blob_id, BlobVertex):
e_blob_id = str(blob_id.id)
elif isinstance(blob_id, BlobId):
e_blob_id = str(blob_id)
elif isinstance(blob_id, str):
e_blob_id = blob_id
else:
raise TypeError(type(blob_id))
return self.__data_client.get(e_blob_id, content_id, include_deleted)
def set(
self,
blob_id: Union[BlobVertex, BLOB_ID_T],
content: Union[bytes, bytearray, Iterable[bytes], IO[bytes]],
content_type: Optional[str] = None
) -> None:
e_blob_id: str
if isinstance(blob_id, BlobVertex):
e_blob_id = str(blob_id.id)
elif isinstance(blob_id, BlobId):
e_blob_id = str(blob_id)
elif isinstance(blob_id, str):
e_blob_id = blob_id
else:
raise TypeError(type(blob_id))
self.__data_client.set(e_blob_id, content, content_type)
class Hiro7StorageLogRest(AbcStorageLogRest):
def __init__(self, client: 'HiroRestBaseClient') -> None:
super().__init__(client)
class Hiro7StorageLogData(AbcStorageLogData):
def __init__(self, client: 'HiroRestBaseClient') -> None:
super().__init__(client)
class Hiro7StorageLogModel(AbcStorageLogModel):
def __init__(self, client: 'HiroRestBaseClient') -> None:
super().__init__(client)
class Hiro7StorageTimeSeriesRest(AbcStorageTimeSeriesRest):
__base_client: Final['HiroRestBaseClient']
def __init__(self, client: 'HiroRestBaseClient') -> None:
super().__init__(client)
path = client.root.model.meta.version()['graph'].endpoint
fork = client.fork(path)
self.__base_client = fork
def add(
self,
ts_id: str,
content: Union[bytes, bytearray, Iterable[bytes], IO[bytes]],
headers: Optional[Mapping[str, str]] = None
) -> Response:
uri = '/%s/values' % (
quote(ts_id, safe=''),
)
# To stream and upload, simply provide a file-like object for your body
return self.__base_client.request(
'POST', uri, headers=headers, data=content
)
def get(
self,
ts_id: str,
params: Optional[Mapping[str, Any]] = None,
headers: Optional[Mapping[str, str]] = None
) -> Response:
uri = '/%s/values' % (
quote(ts_id, safe=''),
)
return self.__base_client.request(
'GET', uri, params=params, headers=headers, stream=True
)
class Hiro7StorageTimeSeriesData(AbcStorageTimeSeriesData):
__rest_client: Final[Hiro7StorageTimeSeriesRest]
def __init__(self, client: 'HiroRestBaseClient') -> None:
super().__init__(client)
self.__rest_client = Hiro7StorageTimeSeriesRest(client)
def add(
self,
ts_id: str,
values: Iterator[Mapping[str, Any]]
) -> None:
json_generator = json.GeneratorAwareJSONEncoder().iterencode(values)
payload_generator = iterencode(json_generator, 'utf8')
headers = {'Content-Type': 'application/json'}
self.__rest_client.add(ts_id, payload_generator, headers)
def get(
self,
ts_id: str,
start: Optional[int] = None,
end: Optional[int] = None
) -> Generator[Dict[str, Any], None, None]:
"""
:param ts_id: time series vertex id
        :param start: unix timestamp in milliseconds
        :param end: unix timestamp in milliseconds
"""
params = {
'from': '%011i' % (0 if start is None else start),
}
if end is not None:
params['to'] = '%011i' % end
headers = {'Accept': 'application/json'}
response = self.__rest_client.get(ts_id, params, headers)
items = AbcData.items_generator(response)
yield from items
class Hiro7StorageTimeSeriesModel(AbcStorageTimeSeriesModel):
__data_client: Final[Hiro7StorageTimeSeriesData]
def __init__(self, client: 'HiroRestBaseClient') -> None:
super().__init__(client)
self.__data_client = Hiro7StorageTimeSeriesData(client)
def add(
self,
ts_id: Union[TimeSeriesVertex, TIME_SERIES_ID_T],
values: Iterator[TimeSeriesValue]
) -> None:
e_ts_id: str
if isinstance(ts_id, TimeSeriesVertex):
e_ts_id = ts_id.id
elif isinstance(ts_id, TimeSeriesId):
e_ts_id = str(ts_id)
elif isinstance(ts_id, str):
e_ts_id = ts_id
else:
raise TypeError(type(ts_id))
transformer = model_to_data(values)
self.__data_client.add(e_ts_id, transformer)
def get(
self,
ts_id: Union[TimeSeriesVertex, TIME_SERIES_ID_T],
start: Optional[datetime] = None,
end: Optional[datetime] = None
) -> Generator[TimeSeriesValue, None, None]:
e_ts_id: str
if isinstance(ts_id, TimeSeriesVertex):
e_ts_id = ts_id.id
elif isinstance(ts_id, TimeSeriesId):
e_ts_id = str(ts_id)
elif isinstance(ts_id, str):
e_ts_id = ts_id
else:
raise TypeError(type(ts_id))
items = self.__data_client.get(
e_ts_id,
datetime_to_timestamp_ms(start),
datetime_to_timestamp_ms(end)
)
transformer = data_to_model(items)
yield from transformer
class Hiro7StorageRest(AbcStorageRest):
__client: Final['HiroRestBaseClient']
def __init__(self, client: 'HiroRestBaseClient') -> None:
super().__init__(client)
self.__client = client
@cached_property
def blob(self) -> Hiro7StorageBlobRest:
return Hiro7StorageBlobRest(self.__client)
@cached_property
def log(self) -> AbcStorageLogRest:
raise NotImplementedError()
@cached_property
def ts(self) -> Hiro7StorageTimeSeriesRest:
return Hiro7StorageTimeSeriesRest(self.__client)
class Hiro7StorageData(AbcStorageData):
__client: Final['HiroRestBaseClient']
def __init__(self, client: 'HiroRestBaseClient') -> None:
super().__init__(client)
self.__client = client
@cached_property
def blob(self) -> Hiro7StorageBlobData:
return Hiro7StorageBlobData(self.__client)
@cached_property
def log(self) -> AbcStorageLogData:
raise NotImplementedError()
@cached_property
def ts(self) -> Hiro7StorageTimeSeriesData:
return Hiro7StorageTimeSeriesData(self.__client)
class Hiro7StorageModel(AbcStorageModel):
__client: Final['HiroRestBaseClient']
def __init__(self, client: 'HiroRestBaseClient') -> None:
super().__init__(client)
self.__client = client
@cached_property
def blob(self) -> Hiro7StorageBlobModel:
return Hiro7StorageBlobModel(self.__client)
@cached_property
def log(self) -> AbcStorageLogModel:
raise NotImplementedError()
@cached_property
def ts(self) -> Hiro7StorageTimeSeriesModel:
return Hiro7StorageTimeSeriesModel(self.__client)
```
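A hedged sketch of the streaming blob API above; `client` and the blob id are placeholders, and the model layer also accepts `BlobVertex` or `BlobId` values:
```python
# Sketch only: assumed client and placeholder blob id.
storage = Hiro7StorageModel(client)

# Download: the data layer yields raw chunks from response.iter_content().
with open('attachment.bin', 'wb') as fh, storage.blob.get('some-blob-id') as chunks:
    for chunk in chunks:
        fh.write(chunk)

# Upload: bytes, an iterable of bytes or a file-like object is accepted.
with open('attachment.bin', 'rb') as fh:
    storage.blob.set('some-blob-id', fh, content_type='application/octet-stream')
```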
#### File: hiro/client/client.py
```python
from functools import cached_property, lru_cache
from typing import TypeVar, Optional
import requests
from requests.auth import AuthBase
from arago.extension.requests import HiroPasswordAuth
from arago.hiro.client.model_client import HiroRestClient, HiroDataClient, HiroModelClient
from arago.hiro.client.rest_base_client import HiroRestBaseClient
from arago.hiro.model.auth import ClientCredentials, AccountCredentials, SessionCredentials
from arago.hiro.model.graph.attribute import SystemAttribute
from arago.hiro.model.graph.vertex import VERTEX_XID_T_co, VERTEX_ID_T_co, VERTEX_T_co, \
resolve_vertex_id, resolve_vertex_xid
from arago.hiro.utils.user_agent import build_user_agent
_AUTH_BASE_T_co = TypeVar('_AUTH_BASE_T_co', bound=AuthBase, covariant=True)
class HiroClient(HiroRestBaseClient):
def __init__(self, parent: Optional['HiroRestBaseClient'] = None) -> None:
super().__init__(parent)
if parent is None:
self.root = self
def configure(self, endpoint: str, auth: _AUTH_BASE_T_co) -> None:
self.endpoint = endpoint
self.base_url = endpoint
s = requests.Session()
# https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control
s.headers.update({
'User-Agent': build_user_agent('HiroClient'),
'Cache-Control': 'no-store',
})
s.auth = auth
self.session, self.authenticator = s, auth
@staticmethod
def create_stringly(
endpoint: str,
client_id: str,
client_secret: str,
username: str,
password: str
) -> 'HiroClient':
credentials = SessionCredentials(
ClientCredentials(client_id, client_secret),
AccountCredentials(username, password)
)
client = HiroClient()
auth = HiroPasswordAuth(client, credentials)
client.configure(endpoint, auth)
return client
@cached_property
def rest(self) -> HiroRestClient:
return HiroRestClient(self)
@cached_property
def data(self) -> HiroDataClient:
return HiroDataClient(self)
@cached_property
def model(self) -> HiroModelClient:
return HiroModelClient(self)
def resolve_vertex_id(
self,
vertex: VERTEX_T_co,
vertex_id: VERTEX_ID_T_co,
vertex_xid: VERTEX_XID_T_co
) -> VERTEX_ID_T_co:
e_vertex_id = resolve_vertex_id(vertex, vertex_id)
if e_vertex_id:
return e_vertex_id
e_vertex_xid = resolve_vertex_xid(vertex, vertex_xid)
if e_vertex_xid:
e_vertex_id = self.resolve_xid(e_vertex_xid)
if e_vertex_id:
return e_vertex_id
raise RuntimeError()
@lru_cache(maxsize=None, typed=True)
def resolve_xid(self, vertex_xid: VERTEX_XID_T_co) -> VERTEX_ID_T_co:
gen = self.model.search.external_id(vertex_xid, fields={
SystemAttribute.OGIT__ID
})
vertex = next(gen)
try:
next(gen)
raise RuntimeError(f'''External ID '{vertex_xid}' is ambiguous and is associated with multiple vertices''')
except StopIteration:
return vertex.id
```
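A hedged end-to-end sketch of building the client facade above; the endpoint and credentials are placeholders:
```python
# Sketch only: every value below is a placeholder.
client = HiroClient.create_stringly(
    endpoint='https://pod.example.invalid',
    client_id='my-client-id',
    client_secret='my-client-secret',
    username='alice@example.com',
    password='not-a-real-password',
)
# Three facade layers, from raw HTTP to typed models:
responses = client.rest  # methods return requests.Response objects
dicts = client.data      # methods return plain dicts / generators of dicts
models = client.model    # methods return typed model objects (Vertex, AccessToken, ...)
```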
#### File: hiro/client/exception.py
```python
from typing import Mapping, Any, List
class HiroClientError(Exception):
def __init__(self, *args: object) -> None:
super().__init__(*args)
class OntologyValidatorError(HiroClientError):
message: str
warnings: List[str]
errors: List[str]
def __init__(self, data: Mapping[str, Any]) -> None:
super().__init__()
error = data['error']
self.message = error['message']
result = error['result']
self.warnings = result['warnings']
self.errors = result['errors']
@staticmethod
def is_validator_error(data: Mapping[str, Any]) -> bool:
# {
# 'error': {
# 'message': 'validation failed',
# 'result': {
# 'errors': [
# 'attribute ogit/description is invalid'
# ],
# 'warnings': [
# ]
# }
# }
# }
if 'error' not in data:
return False
error = data['error']
if 'message' not in error or 'result' not in error:
return False
message = error['message']
result = error['result']
if message != 'validation failed' or 'errors' not in result or 'warnings' not in result:
return False
warnings = result['warnings']
errors = result['errors']
if not isinstance(warnings, list) or not isinstance(errors, list):
return False
return True
class HiroServerError(Exception):
def __init__(self, *args: object) -> None:
super().__init__(*args)
```
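A small sketch of the classifier above; the payload mirrors the example documented in the comments:
```python
# The payload below is the documented example shape, not a live server response.
payload = {
    'error': {
        'message': 'validation failed',
        'result': {
            'errors': ['attribute ogit/description is invalid'],
            'warnings': [],
        },
    }
}
if OntologyValidatorError.is_validator_error(payload):
    err = OntologyValidatorError(payload)
    print(err.message, err.errors, err.warnings)
```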
#### File: hiro/utils/cast_b.py
```python
from typing import Mapping, Any, Optional
from arago.hiro.model.graph.attribute import ATTRIBUTE_T
from arago.hiro.model.graph.dict import GraphDict
from arago.hiro.model.graph.vertex import HIRO_BASE_CLIENT_T_co, VERTEX_T_co, Vertex
from arago.hiro.model.storage import BlobVertex, TimeSeriesVertex
from arago.ogit import OgitAttribute, OgitEntity
def to_vertex(
data: Mapping[ATTRIBUTE_T, Any],
client: Optional[HIRO_BASE_CLIENT_T_co] = None
) -> VERTEX_T_co:
vertex_type = GraphDict(data).get(OgitAttribute.OGIT__TYPE)
e_vertex_type = vertex_type
if e_vertex_type is OgitEntity.OGIT_ATTACHMENT:
return BlobVertex(data, client=client, draft=False)
elif e_vertex_type is OgitEntity.OGIT_DATA_LOG:
raise NotImplementedError()
elif e_vertex_type is OgitEntity.OGIT_TIME_SERIES:
return TimeSeriesVertex(data, client=client, draft=False)
else:
return Vertex(data, client=client, draft=False)
```
#### File: hiro/utils/datetime.py
```python
from datetime import datetime, timezone
from typing import Optional
def timestamp_ms_to_datetime(value: Optional[int] = None) -> Optional[datetime]:
if value is None:
return None
return datetime.fromtimestamp(value / 1e3, tz=timezone.utc)
def datetime_to_timestamp_ms(value: Optional[datetime] = None) -> Optional[int]:
if value is None:
return None
if value.tzinfo is None:
raise ValueError('tzinfo is required')
return int(value.timestamp() * 1e3)
```
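A quick round-trip check of the two helpers above; naive datetimes are rejected by design:
```python
from datetime import datetime, timezone
from arago.hiro.utils.datetime import datetime_to_timestamp_ms, timestamp_ms_to_datetime

dt = datetime(2021, 1, 1, tzinfo=timezone.utc)
ms = datetime_to_timestamp_ms(dt)  # 1609459200000
assert timestamp_ms_to_datetime(ms) == dt
# datetime_to_timestamp_ms(datetime(2021, 1, 1)) raises ValueError('tzinfo is required')
```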
#### File: hiro-python-library/tests/conftest.py
```python
import base64
from typing import Dict, Generator
import pytest
from arago.hiro.client.client import HiroClient
from arago.hiro.client.model_client import HiroRestClient, HiroDataClient, HiroModelClient
from arago.hiro.model.auth import SessionCredentials, ClientCredentials, AccountCredentials
@pytest.fixture(scope='module')
def data() -> Dict[str, str]:
# noinspection SpellCheckingInspection
return {
'endpoint': 'https://pod1159.saasarago.com',
'client_id': '',
'client_secret': ''
''
''
'',
'username': '',
'password': r''''''
}
@pytest.fixture(scope='module')
def client(data: Dict[str, str]) -> HiroClient:
return HiroClient.create_stringly(**data)
@pytest.fixture(scope='module')
def rest_client(client: HiroClient) -> Generator[HiroRestClient, None, None]:
yield HiroRestClient(client)
@pytest.fixture(scope='module')
def data_client(client: HiroClient) -> Generator[HiroDataClient, None, None]:
yield HiroDataClient(client)
@pytest.fixture(scope='module')
def model_client(client: HiroClient) -> Generator[HiroModelClient, None, None]:
yield HiroModelClient(client)
@pytest.fixture(scope='module')
def user_agent(client: HiroClient) -> str:
return client.session.headers['User-Agent']
@pytest.fixture(scope='module')
def credentials(data: Dict[str, str]) -> SessionCredentials:
return SessionCredentials(
ClientCredentials(data['client_id'], data['client_secret']),
AccountCredentials(data['username'], data['password'])
)
@pytest.fixture(scope='module')
def png_img() -> bytes:
# https://www.flaticon.com/free-icon/small-bookmark_84510
# https://pngcrush.com/
# https://onlinepngtools.com/convert-png-to-base64
# noinspection SpellCheckingInspection
return base64.b64decode(
'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQ''CAQAAAC1+jfqAAAAcElEQVQoz2P4z/Cf'
'geEkwwos8CRYDkys+M+ACSGiI1MBAyMD''Ix4FDGwMS4CQDYcCBk6GVQx2QLiKgROL'
'AgYehvUM5mCeOZDFg6aAQYBhM4MR3HYj''IE8AWcEuhu0MOig+0AGK7EIoOMagjuFJ'
'dUh0AwBCS9yY0MHerQAAAABJRU5ErkJg''gg=='
)
``` |
{
"source": "166MMX/ogit-python-library",
"score": 2
} |
#### File: arago/ontology/__init__.py
```python
import re
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from datetime import datetime
from enum import unique, Enum, auto
from typing import Optional, FrozenSet, Type, Mapping, Set, Tuple, Final
from arago.ontology.dc import DCTermsValid
from arago.ontology.qname import QName
from arago.ontology.utils import to_constant
NAMESPACE_BASE_URI = 'http://www.purl.org/'
@dataclass(frozen=True)
class OntologyNamespace:
prefix: str
parent: Optional['OntologyNamespace'] = field(default=None, init=False, hash=False, compare=False)
def __post_init__(self):
object.__setattr__(self, '_Namespace__hot', True)
def finalize(self, namespaces: Mapping[str, 'OntologyNamespace']):
if not hasattr(self, '_Namespace__hot'):
return
prefix = self.prefix
if '.' in prefix:
i = prefix.rindex('.')
parent_prefix = prefix[:i]
if parent_prefix:
constant = to_constant(parent_prefix).replace('.', '_')
parent = namespaces[constant]
object.__setattr__(self, 'parent', parent)
object.__delattr__(self, '_Namespace__hot')
@property
def uri(self) -> str:
return self.prefix.replace('.', '/')
@property
def full_uri(self) -> str:
return NAMESPACE_BASE_URI + self.uri + '/'
@dataclass(frozen=True)
class Named(ABC):
name: QName # @rdf:about
class NamedEnum(Enum):
value: Named
@unique
class Cardinality(Enum):
MANY_TO_MANY = auto()
def __repr__(self):
# https://docs.python.org/3/library/enum.html#omitting-values
return '<%s.%s>' % (self.__class__.__name__, self.name)
@dataclass(frozen=True)
class OntologyVerb(Named):
label: Optional[str] = field(default=None, hash=False, compare=False) # rdfs:label
description: Optional[str] = field(default=None, hash=False, compare=False) # dcterms:description
valid: Optional[str] = field(default=None, hash=False, compare=False) # dcterms:valid
created_at: Optional[datetime] = field(default=None, hash=False, compare=False) # dcterms:created
modified_at: Optional[datetime] = field(default=None, hash=False, compare=False) # dcterms:modified
valid_from: Optional[datetime] = field(default=None, hash=False, compare=False)
valid_until: Optional[datetime] = field(default=None, hash=False, compare=False)
hide: bool = field(default=False, hash=False, compare=False) # ogit:hide
created_by: Optional[str] = field(default=None, hash=False, compare=False) # dcterms:creator
deleted_by: Optional[str] = field(default=None, hash=False, compare=False) # ogit:deleter
admin_contact: Optional[str] = field(default=None, hash=False, compare=False) # ogit:admin-contact
tech_contact: Optional[str] = field(default=None, hash=False, compare=False) # ogit:tech-contact
cardinality: Optional[Cardinality] = field(default=None, hash=False, compare=False) # ogit:cardinality
@dataclass(frozen=True)
class TurtleVerb:
about: str # @rdf:about
label: Optional[str] = field(default=None, hash=False, compare=False) # rdfs:label
description: Optional[str] = field(default=None, hash=False, compare=False) # dcterms:description
valid: Optional[str] = field(default=None, hash=False, compare=False) # dcterms:valid
created_at: Optional[str] = field(default=None, hash=False, compare=False) # dcterms:created
modified_at: Optional[str] = field(default=None, hash=False, compare=False) # dcterms:modified
hide: bool = field(default=False, hash=False, compare=False) # ogit:hide
created_by: Optional[str] = field(default=None, hash=False, compare=False) # dcterms:creator
deleted_by: Optional[str] = field(default=None, hash=False, compare=False) # ogit:deleter
admin_contact: Optional[str] = field(default=None, hash=False, compare=False) # ogit:admin-contact
tech_contact: Optional[str] = field(default=None, hash=False, compare=False) # ogit:tech-contact
cardinality: Optional[Cardinality] = field(default=None, hash=False, compare=False) # ogit:cardinality
def as_model(self) -> OntologyVerb:
name = QName(self.about)
created_at = datetime.fromisoformat(self.created_at) if self.created_at else self.created_at
modified_at = datetime.fromisoformat(self.modified_at) if self.modified_at else self.modified_at
dc_terms_valid = DCTermsValid.parse(self.valid) if self.valid is not None else None
valid_from = dc_terms_valid.start if dc_terms_valid is not None else None
valid_until = dc_terms_valid.end if dc_terms_valid is not None else None
return OntologyVerb(
name=name,
label=self.label,
description=self.description,
valid=self.valid,
created_at=created_at,
modified_at=modified_at,
valid_from=valid_from,
valid_until=valid_until,
hide=self.hide,
created_by=self.created_by,
deleted_by=self.deleted_by,
admin_contact=self.admin_contact,
tech_contact=self.tech_contact,
cardinality=self.cardinality,
)
@unique
class ValidatorType(Enum):
FIXED = auto()
REGEX = auto()
def __repr__(self):
# https://docs.python.org/3/library/enum.html#omitting-values
return '<%s.%s>' % (self.__class__.__name__, self.name)
class Validator(ABC):
__slots__ = ()
@abstractmethod
def __call__(self, value: str) -> bool:
...
@property
@abstractmethod
def type(self) -> ValidatorType:
...
class FixedValidator(Validator):
__values: Final[FrozenSet[str]]
__pattern: Final[re.Pattern] = re.compile(r'\s*,\s*')
__slots__ = '__values'
def __init__(self, values: str) -> None:
super().__init__()
values = values.strip()
lst = FixedValidator.__pattern.split(values)
self.__values = frozenset(lst)
def __call__(self, value: str) -> bool:
return value in self.__values
    @property
    def type(self) -> ValidatorType:
        return ValidatorType.FIXED
class RegExpValidator(Validator):
__pattern: Final[re.Pattern]
__slots__ = '__pattern'
def __init__(self, expression: str) -> None:
super().__init__()
self.__pattern = re.compile(expression)
def __call__(self, value: str) -> bool:
return self.__pattern.fullmatch(value) is not None
    @property
    def type(self) -> ValidatorType:
        return ValidatorType.REGEX
@dataclass(frozen=True)
class Attribute(ABC):
name: QName # @rdf:about
@dataclass(frozen=True)
class OntologyAttribute(Attribute, Named):
label: Optional[str] = field(default=None, hash=False, compare=False) # rdfs:label
description: Optional[str] = field(default=None, hash=False, compare=False) # dcterms:description
valid: Optional[str] = field(default=None, hash=False, compare=False) # dcterms:valid
validation_type: Optional[ValidatorType] = field(default=None, hash=False, compare=False) # ogit:validation-type
validation_parameter: Optional[str] = field(default=None, hash=False, compare=False) # ogit:validation-parameter
validation: Optional[Validator] = field(default=None, hash=False, compare=False)
created_at: Optional[datetime] = field(default=None, hash=False, compare=False) # dcterms:created
modified_at: Optional[datetime] = field(default=None, hash=False, compare=False) # dcterms:modified
valid_from: Optional[datetime] = field(default=None, hash=False, compare=False)
valid_until: Optional[datetime] = field(default=None, hash=False, compare=False)
hide: bool = field(default=False, hash=False, compare=False) # ogit:hide
created_by: Optional[str] = field(default=None, hash=False, compare=False) # dcterms:creator
deleted_by: Optional[str] = field(default=None, hash=False, compare=False) # ogit:deleter
admin_contact: Optional[str] = field(default=None, hash=False, compare=False) # ogit:admin-contact
tech_contact: Optional[str] = field(default=None, hash=False, compare=False) # ogit:tech-contact
def __repr__(self) -> str:
return f'''{self.__class__.__name__}('{self.name.full_name}')'''
def _create_validator(
        validation_type: Optional[ValidatorType],
validation_parameter: Optional[str],
) -> Optional[Validator]:
if validation_type is None and validation_parameter is None:
return None
elif validation_type is None or validation_parameter is None:
        raise RuntimeError('Either both or neither must be set: validation_type and validation_parameter')
elif validation_type is ValidatorType.FIXED:
return FixedValidator(validation_parameter)
elif validation_type is ValidatorType.REGEX:
return RegExpValidator(validation_parameter)
else:
raise RuntimeError(f"Unknown validation_type: '{validation_type}'")
@dataclass(frozen=True)
class TurtleAttribute:
about: str # @rdf:about
label: Optional[str] = field(default=None, hash=False, compare=False) # rdfs:label
description: Optional[str] = field(default=None, hash=False, compare=False) # dcterms:description
valid: Optional[str] = field(default=None, hash=False, compare=False) # dcterms:valid
validation_type: Optional[ValidatorType] = field(default=None, hash=False, compare=False) # ogit:validation-type
validation_parameter: Optional[str] = field(default=None, hash=False, compare=False) # ogit:validation-parameter
created_at: Optional[str] = field(default=None, hash=False, compare=False) # dcterms:created
modified_at: Optional[str] = field(default=None, hash=False, compare=False) # dcterms:modified
hide: bool = field(default=False, hash=False, compare=False) # ogit:hide
created_by: Optional[str] = field(default=None, hash=False, compare=False) # dcterms:creator
deleted_by: Optional[str] = field(default=None, hash=False, compare=False) # ogit:deleter
admin_contact: Optional[str] = field(default=None, hash=False, compare=False) # ogit:admin-contact
tech_contact: Optional[str] = field(default=None, hash=False, compare=False) # ogit:tech-contact
def as_model(self) -> OntologyAttribute:
name = QName(self.about)
validation = _create_validator(self.validation_type, self.validation_parameter)
created_at = datetime.fromisoformat(self.created_at) if self.created_at else self.created_at
modified_at = datetime.fromisoformat(self.modified_at) if self.modified_at else self.modified_at
dc_terms_valid = DCTermsValid.parse(self.valid) if self.valid is not None else None
valid_from = dc_terms_valid.start if dc_terms_valid is not None else None
valid_until = dc_terms_valid.end if dc_terms_valid is not None else None
return OntologyAttribute(
name=name,
label=self.label,
description=self.description,
valid=self.valid,
validation_type=self.validation_type,
validation_parameter=self.validation_parameter,
validation=validation,
created_at=created_at,
modified_at=modified_at,
valid_from=valid_from,
valid_until=valid_until,
hide=self.hide,
created_by=self.created_by,
deleted_by=self.deleted_by,
admin_contact=self.admin_contact,
tech_contact=self.tech_contact,
)
@unique
class Scope(Enum):
NTO = auto()
SGO = auto()
def __repr__(self):
# https://docs.python.org/3/library/enum.html#omitting-values
return '<%s.%s>' % (self.__class__.__name__, self.name)
@dataclass(frozen=True)
class AllowedConnection:
verb: OntologyVerb
entity: 'OntologyEntity'
@dataclass(frozen=True)
class OntologyEntity(Named):
label: str = field(
hash=False, compare=False) # rdfs:label
description: str = field(
hash=False, compare=False) # dcterms:description
scope: Scope = field(
hash=False, compare=False) # ogit:scope # immutable!
valid: Optional[str] = field(
default=None, repr=False, hash=False, compare=False) # dcterms:valid
created_at: Optional[datetime] = field(
default=None, hash=False, compare=False) # dcterms:created
modified_at: Optional[datetime] = field(
default=None, hash=False, compare=False) # dcterms:modified
valid_from: Optional[datetime] = field(
default=None, hash=False, compare=False)
valid_until: Optional[datetime] = field(
default=None, hash=False, compare=False)
hide: bool = field(
default=False, hash=False, compare=False) # ogit:hide
created_by: Optional[str] = field(
default=None, hash=False, compare=False) # dcterms:creator
deleted_by: Optional[str] = field(
default=None, hash=False, compare=False) # ogit:deleter
admin_contact: Optional[str] = field(
default=None, hash=False, compare=False) # ogit:admin-contact
tech_contact: Optional[str] = field(
default=None, hash=False, compare=False) # ogit:tech-contact
parent: Optional['OntologyEntity'] = field(
default=None, init=False, repr=False, hash=False, compare=False) # ogit:parent
required_attributes: FrozenSet[OntologyAttribute] = field(
default_factory=frozenset, init=False, repr=False, hash=False, compare=False)
optional_attributes: FrozenSet[OntologyAttribute] = field(
default_factory=frozenset, init=False, repr=False, hash=False, compare=False)
indexed_attributes: FrozenSet[OntologyAttribute] = field(
default_factory=frozenset, init=False, repr=False, hash=False, compare=False)
# https://github.com/arago/OGIT/blob/master/ogit.ttl#L135
allowed_connections: FrozenSet[AllowedConnection] = field(
default_factory=frozenset, init=False, repr=False, hash=False, compare=False)
def __post_init__(self):
object.__setattr__(self, '_Entity__hot', True)
def finalize(
self,
turtle: 'TurtleEntity',
attributes: Type[Enum],
verbs: Type[Enum],
entities: Mapping[str, 'OntologyEntity']
):
if not hasattr(self, '_Entity__hot'):
return
if turtle.parent is not None:
k = QName(turtle.parent).constant
parent = entities[k]
object.__setattr__(self, 'parent', parent)
if turtle.required_attributes is not None:
required_attributes = frozenset({
attributes[a].value for a in turtle.required_attributes
})
object.__setattr__(self, 'required_attributes', required_attributes)
if turtle.optional_attributes is not None:
optional_attributes = frozenset({
attributes[a].value for a in turtle.optional_attributes
})
object.__setattr__(self, 'optional_attributes', optional_attributes)
if turtle.indexed_attributes is not None:
indexed_attributes = frozenset({
attributes[a].value for a in turtle.indexed_attributes
})
object.__setattr__(self, 'indexed_attributes', indexed_attributes)
if turtle.allowed_connections is not None:
allowed_connections = frozenset({
AllowedConnection(verbs[v].value, entities[QName(e).constant]) for v, e in turtle.allowed_connections
})
object.__setattr__(self, 'allowed_connections', allowed_connections)
object.__delattr__(self, '_Entity__hot')
def __eq__(self, o: object) -> bool:
if isinstance(o, OntologyEntity):
return self.name == o.name
if isinstance(o, NamedEnum):
return self.name == o.value.name
return False
def __repr__(self) -> str:
return f'''{self.__class__.__name__}('{self.name.full_name}')'''
@dataclass(frozen=True)
class TurtleEntity:
about: str # @rdf:about
scope: Optional[Scope] = field(
default=None, hash=False, compare=False) # ogit:scope
label: Optional[str] = field(
default=None, hash=False, compare=False) # rdfs:label
description: Optional[str] = field(
default=None, hash=False, compare=False) # dcterms:description
valid: Optional[str] = field(
default=None, hash=False, compare=False) # dcterms:valid
created_at: Optional[str] = field(
default=None, hash=False, compare=False) # dcterms:created
modified_at: Optional[str] = field(
default=None, hash=False, compare=False) # dcterms:modified
hide: bool = field(
default=False, hash=False, compare=False) # ogit:hide
created_by: Optional[str] = field(
default=None, hash=False, compare=False) # dcterms:creator
deleted_by: Optional[str] = field(
default=None, hash=False, compare=False) # ogit:deleter
admin_contact: Optional[str] = field(
default=None, hash=False, compare=False) # ogit:admin-contact
tech_contact: Optional[str] = field(
default=None, hash=False, compare=False) # ogit:tech-contact
parent: Optional[str] = field(
default=None, hash=False, compare=False) # ogit:parent
required_attributes: Set[str] = field(
default_factory=set, hash=False, compare=False) # ogit:mandatory-attributes
optional_attributes: Set[str] = field(
default_factory=set, hash=False, compare=False) # ogit:optional-attributes
indexed_attributes: Set[str] = field(
default_factory=set, hash=False, compare=False) # ogit:indexed-attributes
allowed_connections: Set[Tuple[str, str]] = field(
default_factory=set, hash=False, compare=False) # ogit:allowed
# namespace_enum: TypedEnum[OntologyNamespace]
def as_model(self) -> OntologyEntity:
name = QName(self.about)
created_at = datetime.fromisoformat(self.created_at) if self.created_at else self.created_at
modified_at = datetime.fromisoformat(self.modified_at) if self.modified_at else self.modified_at
dc_terms_valid = DCTermsValid.parse(self.valid) if self.valid is not None else None
valid_from = dc_terms_valid.start if dc_terms_valid is not None else None
valid_until = dc_terms_valid.end if dc_terms_valid is not None else None
return OntologyEntity(
name=name,
label=self.label,
description=self.description,
scope=self.scope,
valid=self.valid,
created_at=created_at,
modified_at=modified_at,
valid_from=valid_from,
valid_until=valid_until,
hide=self.hide,
created_by=self.created_by,
deleted_by=self.deleted_by,
admin_contact=self.admin_contact,
tech_contact=self.tech_contact,
)
``` |
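A hedged sketch of the two validator implementations defined above, assuming both classes are importable from this package:
```python
# Sketch only: assumes FixedValidator / RegExpValidator are importable from arago.ontology.
fixed = FixedValidator('red, green , blue')  # comma-separated, surrounding whitespace tolerated
assert fixed('green') and not fixed('yellow')
assert fixed.type is ValidatorType.FIXED

regex = RegExpValidator(r'[A-Z]{2}-\d{4}')
assert regex('AB-1234') and not regex('ab-1234')
```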
{
"source": "166MMX/python-hiro-clients",
"score": 2
} |
#### File: tests/unit/test_client.py
```python
class TestClient:
USERNAME: str = ''
PASSWORD: str = ''
CLIENT_ID: str = ''
CLIENT_SECRET: str = ''
URL: str = 'https://[server]:8443/api/graph/7.2'
AUTH_URL: str = 'https://[server]:8443/api/auth/6'
def test_simple_query(self):
pass
# hiro_client: HiroGraph = HiroGraph(
# username=self.USERNAME,
# password=self.PASSWORD,
# client_id=self.CLIENT_ID,
# client_secret=self.CLIENT_SECRET,
# endpoint=self.URL,
# auth_endpoint=self.AUTH_URL
# )
#
# query_result: dict = hiro_client.query('ogit\\/_type:"ogit/MARS/Machine"', limit=1, meta=True)
#
# print(query_result)
#
# assert isinstance(query_result, dict)
def test_batch_command(self):
pass
# hiro_batch_client = HiroGraphBatch(
# username=self.USERNAME,
# password=<PASSWORD>,
# client_id=self.CLIENT_ID,
# client_secret=self.CLIENT_SECRET,
# endpoint=self.URL,
# auth_endpoint=self.AUTH_URL
# )
#
# commands: list = [
# {
# "handle_vertices": {
# "ogit/_xid": "haas1000:connector1:machine1"
# }
# },
# {
# "handle_vertices": {
# "ogit/_xid": "haas1000:connector2:machine2"
# }
# }
# ]
#
# query_results: list = hiro_batch_client.multi_command(commands)
#
# print(query_results)
#
# assert isinstance(query_results, list)
def test_batch_command_callback(self):
pass
# batch_runner: RunBatch = RunBatch(
# endpoint=self.URL,
# auth_endpoint=self.AUTH_URL,
# username=self.USERNAME,
# password=<PASSWORD>,
# client_id=self.CLIENT_ID,
# client_secret=self.CLIENT_SECRET
# )
#
# commands: list = [
# {
# "handle_vertices": {
# "ogit/_xid": "haas1000:connector1:machine1"
# }
# },
# {
# "handle_vertices": {
# "ogit/_xid": "haas1000:connector2:machine2"
# }
# }
# ]
#
# batch_runner.run(commands)
``` |
{
"source": "167179/oopt-gnpy",
"score": 2
} |
#### File: oopt-gnpy/tests/test_disjunction.py
```python
from pathlib import Path
import pytest
from gnpy.core.equipment import trx_mode_params
from gnpy.core.network import build_network
from gnpy.core.exceptions import ServiceError
from gnpy.core.utils import automatic_nch, lin2db
from gnpy.core.elements import Roadm
from gnpy.topology.request import (compute_path_dsjctn, isdisjoint, find_reversed_path, PathRequest,
correct_json_route_list)
from gnpy.topology.spectrum_assignment import build_oms_list
from gnpy.tools.json_io import requests_from_json, load_requests, load_network, load_equipment, disjunctions_from_json
NETWORK_FILE_NAME = Path(__file__).parent.parent / 'tests/data/testTopology_expected.json'
SERVICE_FILE_NAME = Path(__file__).parent.parent / 'tests/data/testTopology_testservices.json'
RESULT_FILE_NAME = Path(__file__).parent.parent / 'tests/data/testTopology_testresults.json'
EQPT_LIBRARY_NAME = Path(__file__).parent.parent / 'tests/data/eqpt_config.json'
@pytest.fixture()
def serv(test_setup):
""" common setup for service list
"""
network, equipment = test_setup
data = load_requests(SERVICE_FILE_NAME, equipment, bidir=False, network=network, network_filename=NETWORK_FILE_NAME)
rqs = requests_from_json(data, equipment)
rqs = correct_json_route_list(network, rqs)
dsjn = disjunctions_from_json(data)
return network, equipment, rqs, dsjn
@pytest.fixture()
def test_setup():
""" common setup for tests: builds network, equipment and oms only once
"""
equipment = load_equipment(EQPT_LIBRARY_NAME)
network = load_network(NETWORK_FILE_NAME, equipment)
# Build the network once using the default power defined in SI in eqpt config
    # power density: db2lin(power_dbm) / power_dbm * nb channels as defined by
# spacing, f_min and f_max
p_db = equipment['SI']['default'].power_dbm
p_total_db = p_db + lin2db(automatic_nch(equipment['SI']['default'].f_min,
equipment['SI']['default'].f_max, equipment['SI']['default'].spacing))
build_network(network, equipment, p_db, p_total_db)
build_oms_list(network, equipment)
return network, equipment
def test_disjunction(serv):
""" service_file contains sevaral combination of disjunction constraint. The test checks
that computed paths with disjunction constraint are effectively disjoint
"""
network, equipment, rqs, dsjn = serv
pths = compute_path_dsjctn(network, equipment, rqs, dsjn)
print(dsjn)
dsjn_list = [d.disjunctions_req for d in dsjn]
# assumes only pairs in dsjn list
test = True
for e in dsjn_list:
rqs_id_list = [r.request_id for r in rqs]
p1 = pths[rqs_id_list.index(e[0])][1:-1]
p2 = pths[rqs_id_list.index(e[1])][1:-1]
if isdisjoint(p1, p2) + isdisjoint(p1, find_reversed_path(p2)) > 0:
test = False
print(f'Computed path (roadms):{[e.uid for e in p1 if isinstance(e, Roadm)]}\n')
print(f'Computed path (roadms):{[e.uid for e in p2 if isinstance(e, Roadm)]}\n')
break
print(dsjn_list)
assert test
def test_does_not_loop_back(serv):
""" check that computed paths do not loop back ie each element appears only once
"""
network, equipment, rqs, dsjn = serv
pths = compute_path_dsjctn(network, equipment, rqs, dsjn)
test = True
for p in pths:
        for el in list(p):  # iterate over a copy so removing elements from p is safe
p.remove(el)
a = [e for e in p if e.uid == el.uid]
if a:
test = False
break
assert test
# TODO : test that identical requests are correctly aggregated
# and reproduce disjunction vector as well as route constraints
# check that requests with different parameters are not aggregated
# check that the total aggregated bandwidth is the same after aggregation
#
def create_rq(equipment, srce, dest, bdir, nd_list, ls_list):
""" create the usual request list according to parameters
"""
requests_list = []
params = {}
params['request_id'] = 'test_request'
params['source'] = srce
params['bidir'] = bdir
params['destination'] = dest
params['trx_type'] = 'Voyager'
params['trx_mode'] = 'mode 1'
params['format'] = params['trx_mode']
params['spacing'] = 50000000000.0
params['nodes_list'] = nd_list
params['loose_list'] = ls_list
trx_params = trx_mode_params(equipment, params['trx_type'], params['trx_mode'], True)
params.update(trx_params)
params['power'] = 1.0
f_min = params['f_min']
f_max_from_si = params['f_max']
params['nb_channel'] = automatic_nch(f_min, f_max_from_si, params['spacing'])
params['path_bandwidth'] = 100000000000.0
requests_list.append(PathRequest(**params))
return requests_list
@pytest.mark.parametrize('srce, dest, result, pth, nd_list, ls_list', [
['a', 'trx h', 'fail', 'no_path', [], []],
['trx a', 'h', 'fail', 'no_path', [], []],
['trx a', 'trx h', 'pass', 'found_path', [], []],
['trx a', 'trx h', 'pass', 'found_path', ['roadm b', 'roadm a'], ['LOOSE', 'LOOSE']],
['trx a', 'trx h', 'pass', 'no_path', ['roadm b', 'roadm a'], ['STRICT', 'STRICT']],
['trx a', 'trx h', 'pass', 'found_path', ['roadm b', 'roadm c'], ['STRICT', 'STRICT']],
['trx a', 'trx h', 'fail', 'no_path', ['Lorient_KMA', 'roadm c'], ['STRICT', 'STRICT']],
['trx a', 'trx h', 'pass', 'no_path', ['roadm Lorient_KMA', 'roadm c'], ['LOOSE', 'STRICT']],
['trx a', 'trx h', 'pass', 'found_path', ['roadm c', 'roadm c'], ['LOOSE', 'LOOSE']],
['trx a', 'trx h', 'pass', 'found_path', ['roadm c', 'roadm c'], ['STRICT', 'STRICT']],
['trx a', 'trx h', 'pass', 'found_path', ['roadm c', 'roadm g'], ['STRICT', 'STRICT']],
['trx a', 'trx h', 'pass', 'found_path', ['trx a', 'roadm g'], ['STRICT', 'STRICT']],
['trx a', 'trx h', 'pass', 'found_path', ['trx h'], ['STRICT']],
['trx a', 'trx h', 'pass', 'found_path', ['roadm a'], ['STRICT']]])
def test_include_constraints(test_setup, srce, dest, result, pth, nd_list, ls_list):
""" check that all combinations of constraints are correctly handled:
- STRICT/LOOSE
- correct names/incorrect names -> pass/fail
- possible include/impossible include
if incorrect name -> fail
else:
constraint |one or more STRICT | all LOOSE
----------------------------------------------------------------------------------
>1 path from s to d | can be applied | found_path | found_path
| cannot be applied | no_path | found_path
----------------------------------------------------------------------------------
0 | | computation stops
"""
network, equipment = test_setup
dsjn = []
bdir = False
rqs = create_rq(equipment, srce, dest, bdir, nd_list, ls_list)
print(rqs)
if result == 'fail':
with pytest.raises(ServiceError):
rqs = correct_json_route_list(network, rqs)
else:
rqs = correct_json_route_list(network, rqs)
pths = compute_path_dsjctn(network, equipment, rqs, dsjn)
# if loose, one path can be returned
if pths[0]:
assert pth == 'found_path'
else:
assert pth == 'no_path'
``` |
{
"source": "1689335/kerwin.github.io",
"score": 3
} |
#### File: _site/algorithm/convert.py
```python
import os
def convert(src):
for root, ds, files in os.walk(src):
for f in files:
name = os.path.basename(f)
            print(name)
if not name.endswith(".cpp"):
continue
idx, desc, postfix = name.split('.')
with open(os.path.join("code", desc + ".md"), "w") as wfd:
wfd.write("---\n")
wfd.write("sort: " + str(idx))
wfd.write("\n---")
wfd.write("\n\n```\n")
                with open(os.path.join(root, f), "r") as rfd:
content = rfd.readlines()
for line in content:
wfd.write(line)
wfd.write("\n```\n")
if __name__ == "__main__":
if len(os.sys.argv) == 1:
src = "../../leetcode/src/"
else:
src = os.sys.argv[1]
convert(src)
``` |
{
"source": "1695652161/Spider_Armies",
"score": 2
} |
#### File: AidenFilter/scrapy_redis-0.7.1/connection.py
```python
import sys
import six
from scrapy.utils.misc import load_object
from . import defaults
# Shortcut map 'setting name' -> 'parameter name'.
SETTINGS_PARAMS_MAP = {
'REDIS_URL': 'url',
'REDIS_HOST': 'host',
'REDIS_PORT': 'port',
'REDIS_DB': 'db',
'REDIS_ENCODING': 'encoding',
}
if sys.version_info > (3,):
SETTINGS_PARAMS_MAP['REDIS_DECODE_RESPONSES'] = 'decode_responses'
def get_redis_from_settings(settings):
"""从给定的 Scrapy 设置对象返回一个 redis 客户端实例.
该函数使用“get_client”来实例化客户端并使用
``defaults.REDIS_PARAMS`` 全局作为参数的默认值。你
可以使用“REDIS_PARAMS”设置覆盖它们。
Parameters
----------
settings : Settings
A scrapy settings object. See the supported settings below.
Returns
-------
server
Redis client instance.
Other Parameters
----------------
REDIS_URL : str, optional
Server connection URL.
REDIS_HOST : str, optional
Server host.
REDIS_PORT : str, optional
Server port.
REDIS_DB : int, optional
Server database
REDIS_ENCODING : str, optional
Data encoding.
REDIS_PARAMS : dict, optional
Additional client parameters.
Python 3 Only
----------------
REDIS_DECODE_RESPONSES : bool, optional
Sets the `decode_responses` kwarg in Redis cls ctor
"""
params = defaults.REDIS_PARAMS.copy()
params.update(settings.getdict('REDIS_PARAMS'))
    # XXX: Deprecate REDIS_* settings.
for source, dest in SETTINGS_PARAMS_MAP.items():
val = settings.get(source)
if val:
params[dest] = val
    # Allow ``redis_cls`` to be given as a path to a class.
if isinstance(params.get('redis_cls'), six.string_types):
params['redis_cls'] = load_object(params['redis_cls'])
return get_redis(**params)
# Backwards compatible alias.
from_settings = get_redis_from_settings
def get_redis(**kwargs):
"""返回一个 redis 客户端实例.
Parameters
----------
redis_cls : class, optional
Defaults to ``redis.StrictRedis``.
url : str, optional
If given, ``redis_cls.from_url`` is used to instantiate the class.
**kwargs
        Extra parameters to be passed to the ``redis_cls`` class.
Returns
-------
server
Redis client instance.
"""
redis_cls = kwargs.pop('redis_cls', defaults.REDIS_CLS)
url = kwargs.pop('url', None)
if url:
return redis_cls.from_url(url, **kwargs)
else:
return redis_cls(**kwargs)
```
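A hedged usage sketch of the settings-based factory above; the host, port and socket timeout values are placeholders:
```python
# Sketch only: placeholder connection settings.
from scrapy.settings import Settings

settings = Settings({
    'REDIS_HOST': 'localhost',
    'REDIS_PORT': 6379,
    'REDIS_PARAMS': {'socket_timeout': 5},
})
server = get_redis_from_settings(settings)  # an instance of defaults.REDIS_CLS (redis.StrictRedis by default)
server.ping()
```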
#### File: tutorial/tutorial/middlewares.py
```python
from scrapy import signals
import logging
# useful for handling different item types with a single interface
import time
from scrapy.http.response import Response
from scrapy.http.request import Request
from scrapy.downloadermiddlewares.retry import RetryMiddleware
from scrapy.utils.response import response_status_message
import base64
proxy_pool = ["http://127.0.0.1:1080"] * 10
def get_li(proxy=None):
return [
{
"proxy": {"http": "http://127.0.0.1:1080", "https": "http://127.0.0.1:1080"},
"cookies": [{'name': 'incap_ses_xxx_2559415', 'value': 'xxx'}, {'name': 'JSESSIONID', 'value': 'xxx'}, {'name': 'SERVER_ID', 'value': 'xxx'}],
"auth": "<PASSWORD>"
},
{
"proxy": {"http": "http://127.0.0.1:1080", "https": "http://127.0.0.1:1080"},
"cookies": [{'name': 'incap_ses_xxx_2559415', 'value': 'xxx'}, {'name': 'JSESSIONID', 'value': 'xxx'}, {'name': 'SERVER_ID', 'value': 'xxx'}],
"auth": "<PASSWORD>"
},
] * 5
class TutorialDownloaderMiddleware:
li = get_li()
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
        request.meta['proxy'] = proxy_pool.pop()  # module-level pool defined above
try:
print(str(self.li.pop()))
return None
except IndexError as e:
print(e)
self.li = get_li()
print(str(self.li.pop()))
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
# class TutorialProxyMiddleware(object):
# # overwrite process request
# def Tutorialprocess_request(self, request, spider):
# # Set the location of the proxy
# request.meta['proxy'] = "http://YOUR_PROXY_IP:PORT"
# # # Use the following lines if your proxy requires authentication
# # proxy_user_pass = "<PASSWORD>:PASSWORD"
# # # setup basic authentication for the proxy
# # encoded_user_pass = base64.b64encode(proxy_user_pass)
# # request.headers['Proxy-Authorization'] = 'Basic ' + encoded_user_pass
class TutorialRetryMiddleware(RetryMiddleware):
logger = logging.getLogger(__name__)
def process_response(self, request, response, spider):
if request.meta.get('dont_retry', False):
return response
if response.status in self.retry_http_codes:
reason = response_status_message(response.status)
self.logger.warning(f'Return exception status code {response.status} , try again...')
return self._retry(request, reason, spider) or response
return response
def process_exception(self, request, exception, spider):
if isinstance(exception, self.EXCEPTIONS_TO_RETRY) \
and not request.meta.get('dont_retry', False):
self.logger.warning('Connection exception, retry')
return self._retry(request, exception, spider)
```
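A hedged sketch of how these middlewares might be enabled in Scrapy settings; the retry values are illustrative, not taken from this project:
```python
# Sketch only: priorities match the spider below; RETRY_* values are illustrative.
DOWNLOADER_MIDDLEWARES = {
    'tutorial.middlewares.TutorialDownloaderMiddleware': 543,
    'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,  # disable the stock retry middleware
    'tutorial.middlewares.TutorialRetryMiddleware': 550,
}
RETRY_TIMES = 3
RETRY_HTTP_CODES = [500, 502, 503, 504, 408, 429]
```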
#### File: tutorial/spiders/quotes.py
```python
from typing import Counter
import scrapy
from tutorial.items import QuoteItem
from scrapy.http.request import Request
class QuotesSpider(scrapy.Spider):
# 爬虫名称, 唯一的
name = 'quotes'
# 请求url非该域名则过滤
# allowed_domains = ['quotes.toscrape.com']
# is_open_count = True
# count = 0
# MAX = 5
custom_settings = {
        "CONCURRENT_REQUESTS": 4,
        "DOWNLOAD_DELAY": 0.5,
        "DOWNLOADER_MIDDLEWARES": {
            'tutorial.middlewares.TutorialDownloaderMiddleware': 543,
            'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,
            'tutorial.middlewares.TutorialRetryMiddleware': 550,
        },
    }
    start_urls = [
        'http://httpbin.org/ip#1/',
        'http://httpbin.org/ip#2/',
        'http://httpbin.org/ip#3/',
        'http://httpbin.org/ip#4/',
        'http://httpbin.org/ip#5/',
        'http://httpbin.org/ip#6/',
        'http://httpbin.org/ip#7/',
        'http://httpbin.org/ip#8/',
        'http://httpbin.org/ip#9/',
        'http://httpbin.org/ip#10/',
        'http://httpbin.org/ip#11/',
        'http://httpbin.org/ip#12/',
        'http://httpbin.org/ip#13/',
        'http://httpbin.org/ip#14/',
        'http://httpbin.org/ip#15/',
        'http://httpbin.org/ip#16/',
        'http://httpbin.org/ip#17/',
        'http://httpbin.org/ip#18/',
        'http://httpbin.org/ip#19/',
        'http://httpbin.org/ip#20/',
        'http://httpbin.org/ip#21/',
    ]
    # start_urls = [
    #     'http://quotes.toscrape.com/page/1/',
    #     'http://quotes.toscrape.com/page/2/',
    #     'http://quotes.toscrape.com/page/3/',
    #     'http://quotes.toscrape.com/page/4/',
    #     'http://quotes.toscrape.com/page/5/',
    #     'http://quotes.toscrape.com/page/6/',
    #     'http://quotes.toscrape.com/page/7/',
    #     'http://quotes.toscrape.com/page/8/',
    #     'http://quotes.toscrape.com/page/9/',
    #     'http://quotes.toscrape.com/page/10/',
    #     'http://quotes.toscrape.com/page/11/',
    #     'http://quotes.toscrape.com/page/12/',
    #     'http://quotes.toscrape.com/page/13/',
    #     'http://quotes.toscrape.com/page/14/',
    #     'http://quotes.toscrape.com/page/15/',
    #     'http://quotes.toscrape.com/page/16/',
    #     'http://quotes.toscrape.com/page/17/',
    #     'https://www.correos.cl/',
    #     'https://www.correos.cl/',
    #     'https://www.correos.cl/',
# ]
def parse(self, response):
item = QuoteItem()
item['url'] = response.url
item['data'] = response.body.decode()
# print(response.body.decode())
return item
``` |
{
"source": "1696012928/RoomAI",
"score": 2
} |
#### File: dqn/sevenking/sevenkingplayer.py
```python
import models.dqn.dqnalgorithm
import roomai
import roomai.sevenking
import roomai.common
import tensorflow as tf
import numpy as np
import shutil
def remove_path(path):
shutil.rmtree(path)
class SevenKingModel_ThreePlayers(models.dqn.dqnalgorithm.DqnPlayer):
def __init__(self, model_address = None, params = dict()):
self.num_point = 15
self.num_suit = 5 ## small king and three king
self.info_dim = 8
self.action_dim = 4
self.learning_rate = 0.001
if "learning_rate" in params:
self.learning_rate = params["learning_rate"]
self.weight_decay = 0.004
if "weight_decay" in params:
self.weight_decay = params["weight_decay"]
self.gamma = 0.9
if "gamma" in params:
self.gamma = params["gamma"]
self.model_address = model_address
self.graph = tf.Graph()
with self.graph.as_default() as graph:
self.info_feats = tf.placeholder(tf.float32, [None, self.num_point, self.num_suit, self.info_dim])
self.action_feats = tf.placeholder(tf.float32, [None, self.num_point, self.num_suit, self.action_dim])
self.reward_plus_gamma_q = tf.placeholder(tf.float32, [None])
############################################## info feat ###############################################
info_conv1_weight = tf.get_variable('info_conv1w', shape=[3, 3, self.info_dim, 16],
initializer=tf.contrib.layers.xavier_initializer())
info_conv1_bias = tf.get_variable('info_conv1b', shape=[16],
initializer=tf.contrib.layers.xavier_initializer())
info_conv1 = tf.nn.conv2d(self.info_feats, filter=info_conv1_weight, strides=[1, 1, 1, 1], padding='SAME')
info_h_conv1 = tf.nn.relu(info_conv1 + info_conv1_bias)
info_h_conv2 = tf.nn.max_pool(info_h_conv1, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
info_conv2_weight = tf.get_variable('info_conv2w', shape=[3, 3, 16, 32],
initializer=tf.contrib.layers.xavier_initializer())
info_conv2_bias = tf.get_variable('info_conv2b', shape=[32],
initializer=tf.contrib.layers.xavier_initializer())
info_conv2 = tf.nn.conv2d(info_h_conv2, filter=info_conv2_weight, strides=[1, 1, 1, 1], padding='SAME')
info_h_conv3 = tf.nn.relu(info_conv2 + info_conv2_bias)
info_h_conv3 = tf.nn.max_pool(info_h_conv3, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
info_h_conv3_flat = tf.reshape(info_h_conv3, [-1, 256])
info_vector_weight = self.__variable_with_weight_decay__(name = 'info_conv_vector_weight', shape = [info_h_conv3_flat.get_shape()[1].value, 512],wd = self.weight_decay)
info_vector_bias = tf.get_variable('info_conv_vector_bias', shape=[512], initializer = tf.contrib.layers.xavier_initializer())
info_vector_feat = tf.matmul(info_h_conv3_flat, info_vector_weight) + info_vector_bias
################################################# action feat ############################################
action_conv1_weight = tf.get_variable('action_conv1w', shape=[3, 3, self.action_dim, 16],
initializer=tf.contrib.layers.xavier_initializer())
action_conv1_bias = tf.get_variable('action_conv1b', shape=[16],
initializer=tf.contrib.layers.xavier_initializer())
action_conv1 = tf.nn.conv2d(self.action_feats, filter=action_conv1_weight, strides=[1, 1, 1, 1],
padding='SAME')
action_h_conv1 = tf.nn.relu(action_conv1 + action_conv1_bias)
action_h_conv2 = tf.nn.max_pool(action_h_conv1, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
action_conv2_weight = tf.get_variable('action_conv2w', shape=[3, 3, 16, 32],
initializer=tf.contrib.layers.xavier_initializer())
action_conv2_bias = tf.get_variable('action_conv2b', shape=[32],
initializer=tf.contrib.layers.xavier_initializer())
action_conv2 = tf.nn.conv2d(action_h_conv2, filter=action_conv2_weight, strides=[1, 1, 1, 1],
padding='SAME')
action_h_conv3 = tf.nn.relu(action_conv2 + action_conv2_bias)
action_h_conv3 = tf.nn.max_pool(action_h_conv3, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
action_h_conv3_flat = tf.reshape(action_h_conv3, [-1, 256])
action_vector_weight = self.__variable_with_weight_decay__('action_conv_vector_weight',
shape=[action_h_conv3_flat.get_shape()[1].value,
512],
wd=self.weight_decay)
action_vector_bias = tf.get_variable('action_conv_vector_bias', shape=[512],
initializer=tf.contrib.layers.xavier_initializer())
action_vector_feat = tf.matmul(action_h_conv3_flat, action_vector_weight) + action_vector_bias
### DNN
dnn_x = tf.nn.relu(tf.concat([info_vector_feat, action_vector_feat], axis=1))
dnn_weight = self.__variable_with_weight_decay__('dnn_weight',shape=[dnn_x.get_shape()[1].value,256],wd = self.weight_decay)
dnn_weight_bias = tf.get_variable('dnn_bias', shape=[256], initializer=tf.contrib.layers.xavier_initializer())
dnn_x1 = tf.nn.relu(tf.matmul(dnn_x, dnn_weight) + dnn_weight_bias)
dnn_weight1 = self.__variable_with_weight_decay__('dnn_weight1',
shape=[dnn_x1.get_shape()[1].value, 1],
wd=self.weight_decay)
dnn_x2 = tf.matmul(dnn_x1, dnn_weight1)
self.q = tf.reduce_mean(dnn_x2,axis = 1)
self.loss = tf.reduce_mean((self.q - self.reward_plus_gamma_q) * (self.q - self.reward_plus_gamma_q))
self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
self.train_op = self.optimizer.minimize(self.loss)
self.init = tf.global_variables_initializer()
self.sess = tf.Session()
self.sess.run(self.init)
self.saver = tf.train.Saver(tf.global_variables())
def __variable_on_cpu__(self,name, shape, initializer):
"""Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor
"""
with tf.device('/cpu:0'):
dtype = tf.float32
var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
return var
def __variable_with_weight_decay__(self,name, shape, wd, stddev = 0.01,):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
Returns:
Variable Tensor
"""
dtype = tf.float32
var = self.__variable_on_cpu__(
name,
shape,
tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
if wd is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
def gen_action_feat(self, info, action):
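# Encode one action as a (num_point, num_suit, action_dim) count tensor: every card in
# the action increments its point/suit cell, using channel 0 while public_state.stage == 0
# and channel 1 otherwise.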
action_feat = np.zeros((self.num_point, self.num_suit, self.action_dim))
for card in action.cards:
if info.public_state.stage == 0:
action_feat[card.point_rank, card.suit_rank, 0] += 1
else:
action_feat[card.point_rank, card.suit_rank, 1] += 1
return action_feat
def gen_info_feat(self, info):
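# Encode the observation as a (num_point, num_suit, info_dim) count tensor. Channels 0-3
# are used in stage 0 and channels 4-7 in stage 1: own hand cards, then the cards played
# so far by the current, previous and next player (read from public_state.action_history).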
logger = roomai.get_logger()
hand_cards = info.person_state.hand_cards
info_feat = np.zeros((self.num_point, self.num_suit, self.info_dim))
current_id = info.person_state.id
previous_id = (current_id + 3 - 1) % 3
next_id = (current_id + 1) % 3
if info.public_state.stage == 0:
for card in hand_cards:
info_feat[card.point_rank, card.suit_rank, 0] += 1
for person_action in info.public_state.action_history:
person_id = person_action[0]
action = person_action[1]
for card in action.cards:
if person_id == current_id:
info_feat[card.point_rank, card.suit_rank, 1] += 1
elif person_id == previous_id:
info_feat[card.point_rank, card.suit_rank, 2] += 1
elif person_id == next_id:
info_feat[card.point_rank, card.suit_rank, 3] += 1
elif person_id == 3:
logger.debug("SevenKingModel finds the chance player-action pair in public_state.action_history")
else:
for card in hand_cards:
info_feat[card.point_rank, card.suit_rank, 4] += 1
for person_action in info.public_state.action_history:
person_id = person_action[0]
action = person_action[1]
for card in action.cards:
if person_id == current_id:
info_feat[card.point_rank, card.suit_rank, 5] += 1
elif person_id == previous_id:
info_feat[card.point_rank, card.suit_rank, 6] += 1
elif person_id == next_id:
info_feat[card.point_rank, card.suit_rank, 7] += 1
elif person_id == 3:
logger.debug("SevenKingModel finds the chance player-action pair in public_state.action_history")
return info_feat
def terminal_info_feat(self):
info_feat = np.zeros((self.num_point, self.num_suit, self.info_dim))
return info_feat
def terminal_action_feat(self):
action_feat = np.zeros((self.num_point, self.num_suit, self.action_dim))
return action_feat
def update_model(self, experiences):
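# Standard DQN update: for every experience the target is
# reward + gamma * max_a' Q(next_info, a') over the next state's available actions,
# and the network is trained to minimise the squared error between Q(info, action)
# and that target.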
logger = roomai.get_logger()
reward_plus_gamma_q = []
info_feats = []
action_feats = []
logger = roomai.get_logger()
for experience in experiences:
next_action_feats = [action_feat for action_feat in experience.next_available_action_feats]
next_info_feats = [experience.next_info_feat for i in range(len(experience.next_available_action_feats))]
q = self.sess.run(self.q, feed_dict = { self.info_feats:next_info_feats,
self.action_feats:next_action_feats})
reward_plus_gamma_q.append(experience.reward + self.gamma * np.max(q))
info_feats.append(experience.info_feat)
action_feats.append(experience.action_feat)
_, loss,q = self.sess.run((self.train_op,self.loss, self.q), feed_dict = { self.info_feats:info_feats,
self.action_feats:action_feats,
self.reward_plus_gamma_q:reward_plus_gamma_q})
#logger.debug ("reward_plus_gamma_q = %s"%(reward_plus_gamma_q.__str__()))
#logger.debug ("loss = %f"%(loss))
#logger.debug ("q = %s"%(q.__str__()))
################################ player functions ###################
def receive_info(self,info):
self.info = info
def take_action(self):
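# Greedy policy: score every currently available action with the Q-network and play
# the one with the highest predicted Q-value.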
info = self.info
action_feats = []
action_lists = list(info.person_state.available_actions.values())
for action in action_lists:
action_feats.append(self.gen_action_feat(info, action))
info_feat = self.gen_info_feat(info)
info_feats = []
info_feats = [info_feat for i in range(len(action_lists))]
q = self.sess.run(self.q, feed_dict={self.info_feats: info_feats, self.action_feats: action_feats})
idx = np.argmax(q)
return action_lists[idx]
def reset(self):
pass
```
#### File: roomai/fivecardstud/FiveCardStudEnv.py
```python
import roomai.common
import copy
import logging
import random
import sys
from functools import cmp_to_key
from roomai.fivecardstud import FiveCardStudPokerCard
from roomai.fivecardstud import FiveCardStudPublicState
from roomai.fivecardstud import FiveCardStudPersonState
from roomai.fivecardstud import FiveCardStudPrivateState
from roomai.fivecardstud import FiveCardStudAction
class FiveCardStudEnv(roomai.common.AbstractEnv):
'''
FiveCardStud game enviroment
'''
#@override
def init(self, params = dict()):
'''
Initialize FiveCardStud game enviroment with the params. The params are as follows:
1) num_normal_players denotes how many players join in this game, default 3
2) chips denotes the initialization chips of players, default [500,500,500]
3) floor_bet denotes the minimal bet, default 10
4) backward_enable denotes whether the environment will stores all history information. If you need call the backward function, please set it to bet True. default False
An example of params is {"num_normal_players":3,"chips":[500,500,500]}
:param params: initialization param
:return: infos, public_state, person_states, private_state
'''
self.logger = roomai.get_logger()
self.__params__ = dict()
if "num_normal_players" in params:
self.__params__["num_normal_players"] = params["num_normal_players"]
else:
self.__params__["num_normal_players"] = 3
if "chips" in params:
self.__params__["chips"] = params["chips"]
else:
self.__params__["chips"] = [500 for i in range(self.__params__["num_normal_players"])]
if "floor_bet" in params:
self.__params__["floor_bet"] = params["floor_bet"]
else:
self.__params__["floor_bet"] = 10
if "backward_enable" in params:
self.__params__["backward_enable"] = params["backward_enable"]
else:
self.__params__["backward_enable"] = False
allcards = []
for i in range(13):
for j in range(4):
allcards.append(FiveCardStudPokerCard(i, j))
random.shuffle(allcards)
FiveCardStudEnv.__valid_initialization_params__(self)
self.public_state = FiveCardStudPublicState()
self.private_state = FiveCardStudPrivateState()
self.person_states = [FiveCardStudPersonState() for i in range(self.__params__["num_normal_players"]+1)]
self.public_state_history = []
self.private_state_history = []
self.person_states_history = []
## private_state
self.private_state.all_hand_cards = allcards[0: 5 * self.__params__["num_normal_players"]]
## public_state
self.public_state.num_normal_players = self.__params__["num_normal_players"]
self.public_state.chips = self.__params__["chips"]
self.public_state.second_hand_cards = self.private_state.all_hand_cards[1*self.__params__["num_normal_players"]: 2 * self.__params__["num_normal_players"]]
self.public_state.floor_bet = self.__params__["floor_bet"]
self.public_state.upper_bet = min(self.public_state.chips)
#print "public_state.upper_bet", self.public_state.upper_bet,"chips", self.public_state.chips
self.public_state.bets = [self.public_state.floor_bet for i in range(self.__params__["num_normal_players"])]
self.public_state.chips = [self.public_state.chips[i] - self.public_state.floor_bet for i in range(self.__params__["num_normal_players"])]
self.public_state.max_bet_sofar = self.public_state.floor_bet
self.public_state.is_quit = [False for i in range(self.__params__["num_normal_players"])]
self.public_state.num_quit = 0
self.public_state.is_needed_to_action = [True for i in range(self.__params__["num_normal_players"])]
self.public_state.num_needed_to_action = self.__params__["num_normal_players"]
self.public_state.is_raise = [False for i in range(self.__params__["num_normal_players"])]
self.public_state.num_raise = 0
self.public_state.round = 1
self.public_state.__turn__ = FiveCardStudEnv.__choose_player_at_begining_of_round__(self.public_state)
self.public_state.__is_terminal__ = False
self.public_state.__scores__ = None
## person_state
for i in range(self.__params__["num_normal_players"]):
self.person_states[i].__id__ = i
self.person_states[i].first_hand_card = self.private_state.all_hand_cards[i]
self.person_states[i].second_hand_card = self.private_state.all_hand_cards[self.__params__["num_normal_players"]+i]
self.person_states[self.__params__["num_normal_players"]].__id__ = self.__params__["num_normal_players"]
turn = self.public_state.turn
self.person_states[turn].__available_actions__ = FiveCardStudEnv.available_actions(self.public_state, self.person_states[turn])
self.__gen_state_history_list__()
infos = self.__gen_infos__()
return infos, self.public_state, self.person_states, self.private_state
#@override
def forward(self, action):
'''
The environment steps foward with the action
:param action:
:return:
'''
turn = self.public_state.turn
if not FiveCardStudEnv.is_action_valid(action,self.public_state, self.person_states[turn]):
self.logger.critical("action=%s is invalid" % (action.key()))
raise ValueError("action=%s is invalid" % (action.key()))
pu = self.public_state
pe = self.person_states
pr = self.private_state
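# Apply the chosen action, then either (1) compute the final scores if the hand is over,
# (2) deal the next street and reset the betting round, or (3) pass the turn to the next
# player who still needs to act.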
pu.__action_history__.append((self.public_state.turn,action))
pe[pu.turn].__available_actions__ = dict()
if action.option == FiveCardStudAction.Fold:
self.action_fold(action)
elif action.option == FiveCardStudAction.Check:
self.action_check(action)
elif action.option == FiveCardStudAction.Call:
self.action_call(action)
elif action.option == FiveCardStudAction.Raise:
self.action_raise(action)
elif action.option == FiveCardStudAction.Showhand:
self.action_showhand(action)
elif action.option == FiveCardStudAction.Bet:
self.action_bet(action)
else:
raise Exception("action.option(%s) not in [Fold, Check_, Call, Raise, Showhand, Bet]" % (action.option))
##pu.previous_id = pu.turn
#pu.previous_action = action
pu.previous_round = pu.round
# computing_score
if FiveCardStudEnv.__is_compute_scores__(self.public_state):
num_normal_players = pu.num_normal_players
pu.hand_cards = []
pu.first_hand_cards = pr.all_hand_cards[0: 1 * num_normal_players]
pu.second_hand_cards = pr.all_hand_cards[1 * num_normal_players: 2 * num_normal_players]
pu.third_hand_cards = pr.all_hand_cards[2 * num_normal_players: 3 * num_normal_players]
pu.fourth_hand_cards = pr.all_hand_cards[3 * num_normal_players: 4 * num_normal_players]
pu.fifth_hand_cards = pr.all_hand_cards[4 * num_normal_players: 5 * num_normal_players]
pu.round = 4
pu.__is_terminal__ = True
pu.__scores__ = self.__compute_scores__(pu)
for i in range(num_normal_players):
pu.chips[i] += pu.bets[i] + pu.scores[i]
for i in range(num_normal_players):
pe[i].first_hand_card = pr.all_hand_cards[0 * num_normal_players + i]
pe[i].second_hand_card = pr.all_hand_cards[1 * num_normal_players + i]
pe[i].third_hand_card = pr.all_hand_cards[2 * num_normal_players + i]
pe[i].fourth_hand_card = pr.all_hand_cards[3 * num_normal_players + i]
pe[i].fifth_hand_card = pr.all_hand_cards[4 * num_normal_players + i]
pu.__turn__ = 0
# enter into the next stage
elif FiveCardStudEnv.is_nextround(self.public_state):
num_normal_players = self.public_state.num_normal_players
add_cards = []
if pu.round == 1:
pu.third_hand_cards = pr.all_hand_cards[2 * num_normal_players: 3 * num_normal_players]
for i in range(num_normal_players):
pe[i].third_hand_card = pr.all_hand_cards[2 * num_normal_players + i]
if pu.round == 2:
pu.fourth_hand_cards = pr.all_hand_cards[3 * num_normal_players: 4 * num_normal_players]
for i in range(num_normal_players):
pe[i].fourth_hand_card = pr.all_hand_cards[3 * num_normal_players + i]
if pu.round == 3:
pu.fifth_hand_cards = pr.all_hand_cards[4 * num_normal_players: 5 * num_normal_players]
for i in range(num_normal_players):
pe[i].fifth_hand_card = pr.all_hand_cards[4 * num_normal_players + i]
pu.round = pu.round + 1
pu.__turn__ = FiveCardStudEnv.__choose_player_at_begining_of_round__(pu)
pu.num_needed_to_action = 0
for i in range(self.__params__["num_normal_players"]):
if pu.is_quit[i] == False and pu.bets[i] < pu.upper_bet:
pu.is_needed_to_action[i] = True
pu.num_needed_to_action += 1
pu.is_raise[i] = False
pu.num_raise = 0
pe[pu.turn].__available_actions__ = FiveCardStudEnv.available_actions(pu, pe[pu.turn])
else:
pu.__turn__ = self.__next_player__(pu)
pe[pu.turn].__available_actions__ = FiveCardStudEnv.available_actions(pu, pe[pu.turn])
self.__gen_state_history_list__()
infos = self.__gen_infos__()
return infos, self.public_state, self.person_states, self.private_state
#@override
@classmethod
def compete(cls, env, players):
'''
:param env: the fivecardstud game environment
:param players: the list of players. The n-1 player is AI bot and the last player is the chance player
:return: scores
'''
num_normal_players = len(players) - 1
total_scores = [0 for i in range(num_normal_players)]
total_count = 1000
for count in range(total_count):
chips = [(100 +int(random.random()*500)) for i in range(num_normal_players)]
floor_bet = 10
infos, public, persons, private = env.init({"num_normal_players":num_normal_players,"chips":chips, "floor_bet":10})
for i in range(len(players)):
players[i].receive_info(infos[i])
while public.is_terminal == False:
turn = public.turn
action = players[turn].take_action()
infos, public, persons, private = env.forward(action)
for i in range(len(players)):
players[i].receive_info(infos[i])
for i in range(num_normal_players):
players[i].reset()
total_scores[i] += public.scores[i]
if (count + 1)%500 == 0:
tmp_scores = [0 for i in range(len(total_scores))]
for i in range(len(total_scores)):
tmp_scores[i] = total_scores[i] / (count+1)
roomai.get_logger().info("FiveCardStud completes %d competitions, scores=%s"%(count+1, ",".join([str(i) for i in tmp_scores])))
for i in range(num_normal_players):
total_scores[i] /= total_count * 1.0
return total_scores;
def action_fold(self, action):
pu = self.public_state
pu.is_quit[pu.turn] = True
pu.num_quit += 1
pu.is_needed_to_action[pu.turn] = False
pu.num_needed_to_action -= 1
def action_check(self, action):
pu = self.public_state
pu.is_needed_to_action[pu.turn] = False
pu.num_needed_to_action -= 1
def action_call(self, action):
pu = self.public_state
pu.chips[pu.turn] -= action.price
pu.bets[pu.turn] += action.price
pu.is_needed_to_action[pu.turn] = False
pu.num_needed_to_action -= 1
def action_bet(self, action):
pu = self.public_state
pu.chips[pu.turn] -= action.price
pu.bets[pu.turn] += action.price
pu.max_bet_sofar = pu.bets[pu.turn]
pu.is_needed_to_action[pu.turn] = False
pu.num_needed_to_action -= 1
p = (pu.turn + 1)%pu.num_normal_players
while p != pu.turn:
if pu.is_quit[p] == False and pu.is_needed_to_action[p] == False and pu.bets[p] < pu.upper_bet:
pu.num_needed_to_action += 1
pu.is_needed_to_action[p] = True
p = (p + 1) % pu.num_normal_players
def action_raise(self, action):
pu = self.public_state
pu.chips[pu.turn] -= action.price
pu.bets[pu.turn] += action.price
pu.max_bet_sofar = pu.bets[pu.turn]
pu.is_needed_to_action[pu.turn] = False
pu.num_needed_to_action -= 1
pu.is_raise[pu.turn] = True
pu.num_raise +=1
p = (pu.turn + 1)%pu.num_normal_players
while p != pu.turn:
if pu.is_quit[p] == False and pu.is_needed_to_action[p] == False and pu.bets[p] < pu.upper_bet:
pu.num_needed_to_action += 1
pu.is_needed_to_action[p] = True
p = (p + 1) % pu.num_normal_players
def action_showhand(self, action):
pu = self.public_state
pu.bets[pu.turn] += action.price
pu.chips[pu.turn] = 0
pu.is_needed_to_action[pu.turn] = False
pu.num_needed_to_action -= 1
if pu.bets[pu.turn] > pu.max_bet_sofar:
p = (pu.turn + 1) % pu.num_normal_players
while p != pu.turn:
if pu.is_quit[p] == False and pu.is_needed_to_action[p] == False and pu.bets[p] < pu.upper_bet:
pu.num_needed_to_action += 1
pu.is_needed_to_action[p] = True
p = (p + 1) % pu.num_normal_players
pu.is_raise[pu.turn] = True
pu.max_bet_sofar = pu.bets[pu.turn]
pu.num_raise += 1
############################################# Utils Function ######################################################
@classmethod
def __valid_initialization_params__(cls, env):
if len(env.__params__["chips"]) != env.__params__["num_normal_players"] :
raise ValueError("len(env.chips)%d != env.num_normal_players%d"%(len(env.__params__["chips"]), env.__params__["num_normal_players"]))
if env.__params__["num_normal_players"] * 5 > 52:
raise ValueError("env.num_normal_players * 5 must be less than 51, now env.num_normal_players = %d"%(env.__params__["num_normal_players"]))
return True
@classmethod
def __is_compute_scores__(cls, public_state):
'''
:param public_state:
:return:
'''
pu = public_state
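# The hand is scored when all but one player has folded, when the 4th betting round has
# finished, or when nobody needs to act and the maximum bet has reached the upper bet
# (the smallest starting stack, i.e. someone is effectively all-in and called).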
if pu.num_quit == pu.num_normal_players - 1:
return True
if pu.round == 4 and pu.num_needed_to_action == 0:
return True
if pu.num_needed_to_action == 0 and pu.max_bet_sofar == pu.upper_bet:
return True
return False
@classmethod
def __compute_scores__(cls, public_state):
'''
:param public_state:
:return:
'''
if public_state.num_quit + 1 == public_state.num_normal_players:
player_id = 0
for i in range(public_state.num_normal_players):
if public_state.is_quit[i] == False:
player_id = i
scores = [0 for k in range(public_state.num_normal_players)]
for p in range(public_state.num_normal_players):
if p == player_id:
scores[p] = sum(public_state.bets) - public_state.bets[p]
else:
scores[p] = -public_state.bets[p]
for p in range(public_state.num_normal_players):
scores[p] /= public_state.floor_bet * 1.0
return scores
raise ValueError("__compute_scores__ error, is_quit = ", public_state.is_quit, "num_quit=", public_state.num_quit)
max_cards = [public_state.first_hand_cards[0],\
public_state.second_hand_cards[0], public_state.third_hand_cards[0],\
public_state.fourth_hand_cards[0], public_state.fifth_hand_cards[0]]
max_id = 0
for i in range(1, public_state.num_normal_players):
tmp = [public_state.first_hand_cards[i],\
public_state.second_hand_cards[i], public_state.third_hand_cards[i],\
public_state.fourth_hand_cards[i], public_state.fifth_hand_cards[i]]
if FiveCardStudEnv.compare_cards(max_cards, tmp) < 0:
max_cards = tmp
max_id = i
scores = [0 for i in range(public_state.num_normal_players)]
for i in range(public_state.num_normal_players):
if i == max_id:
scores[i] = sum(public_state.bets) - public_state.bets[i]
else:
scores[i] = -public_state.bets[i]
for i in range(public_state.num_normal_players):
scores[i] /= public_state.floor_bet * 1.0
return scores
@classmethod
def __choose_player_at_begining_of_round__(cls, public_state):
'''
:param public_state:
:return:
'''
round = public_state.round
if round in [1,2,3]:
public_cards = None
if round == 1: public_cards = public_state.second_hand_cards
elif round == 2: public_cards = public_state.third_hand_cards
elif round == 3: public_cards = public_state.fourth_hand_cards
max_id = 0
for i in range(public_state.num_normal_players):
if public_state.is_quit[i] == False:
max_id = i
break
max_card = public_cards[max_id]
for i in range(1, public_state.num_normal_players):
if FiveCardStudPokerCard.compare(max_card, public_cards[i]) < 0 and public_state.is_quit[i] == False:
max_card = public_cards[i]
max_id = i
return max_id
elif round == 4:
max_cards = [public_state.second_hand_cards[0], public_state.third_hand_cards[0],\
public_state.fourth_hand_cards[0], public_state.fifth_hand_cards[0]]
max_id = 0
for i in range(1, public_state.num_normal_players):
tmp = [public_state.second_hand_cards[i], public_state.third_hand_cards[i], \
public_state.fourth_hand_cards[i], public_state.fifth_hand_cards[i]]
if FiveCardStudEnv.compare_cards(max_cards, tmp) < 0:
max_cards = tmp
max_id = i
return max_id
else:
raise ValueError("pulic_state.round(%d) not in [1,2,3,4]"%(public_state.turn))
@classmethod
def __next_player__(self, pu):
i = pu.turn
if pu.num_needed_to_action == 0:
return -1
p = (i+1)%pu.num_normal_players
while pu.is_needed_to_action[p] == False:
p = (p+1)%pu.num_normal_players
return p
@classmethod
def is_action_valid(cls, action, public_state, person_state):
'''
:param action:
:param public_state:
:param person_state:
:return:
'''
if action.key not in person_state.available_actions:
return False
return True
@classmethod
def available_actions(cls, public_state, person_state):
'''
:param public_state: the public state of the game
:param person_state: the person state corresponding to the current player
:return:
'''
pu = public_state
round = pu.round
turn = pu.turn
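# Showhand_count: chips still needed to reach the upper bet (go all-in);
# Call_count: chips needed to match the current maximum bet.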
Showhand_count = pu.upper_bet - pu.bets[turn]
Call_count = pu.max_bet_sofar - pu.bets[turn]
actions = dict()
if round == 1 or round == 2 or round == 3:
if pu.previous_round is None or pu.previous_round == round -1:
## bet
for i in range(int(Call_count+1), int(pu.upper_bet-pu.bets[turn])):
actions["Bet_%d"%i] = FiveCardStudAction.lookup("Bet_%d"%i)
## fold
actions["Fold_0"] = FiveCardStudAction.lookup("Fold_0")
## showhand
if Showhand_count > 0:
actions["Showhand_%d"%(Showhand_count)] = FiveCardStudAction.lookup("Showhand_%d"%Showhand_count)
## Check_
actions["Check_0"] = FiveCardStudAction.lookup("Check_0")
else:
## fold
actions["Fold_0"] = FiveCardStudAction.lookup("Fold_0")
## showhand
if Showhand_count > 0:
actions["Showhand_%d"%Showhand_count] = FiveCardStudAction.lookup("Showhand_%d"%(Showhand_count))
## Call
if Call_count < Showhand_count:
if Call_count == 0:
actions["Check_0"] = FiveCardStudAction.lookup("Check_0")
else:
actions["Call_%d"%(Call_count )] = FiveCardStudAction.lookup("Call_%d"%(Call_count))
## "raise"
if pu.is_raise[turn] == False:
for i in range(int(Call_count + 1),int(Showhand_count)):
actions["Raise_%d"%(i)] = FiveCardStudAction.lookup("Raise_%d"%i)
elif round == 4:
if pu.previous_round == round - 1:
## showhand
if Showhand_count > 0:
actions["Showhand_%d"%(Showhand_count)] = FiveCardStudAction.lookup("Showhand_%d"%(Showhand_count))
## bet
for i in range( Call_count + 1, int(pu.upper_bet) - int(pu.bets[turn])):
actions["Bet_%d"%i] = FiveCardStudAction.lookup("Bet_%d"%i)
## fold
actions["Fold_0"] = FiveCardStudAction.lookup("Fold_0")
else:
## fold
actions["Fold_0"] = FiveCardStudAction("Fold_0")
## Call
if Call_count == Showhand_count and Showhand_count > 0:
actions["Showhand_%d"%(Call_count)] = FiveCardStudAction.lookup("Showhand_%d"%(Call_count))
elif Call_count == 0:
actions["Check_0"] = FiveCardStudAction.lookup("Check_0")
else:
actions["Call_%d"%(Call_count )] = FiveCardStudAction.lookup("Call_%d"%(Call_count))
else:
raise ValueError("pulic_state.round(%d) not in [1,2,3,4]" % (public_state.turn))
return actions
@classmethod
def is_nextround(self, public_state):
'''
:return: A boolean variable indicates whether is it time to enter the next stage
'''
return public_state.num_needed_to_action == 0
@classmethod
def compare_cards(cls, cards1, cards2):
"""
Args:
cards1:
cards2:
Returns:
"""
if len(cards1) == len(cards2) and len(cards1) == 4:
pattern1 = cls.fourcards2pattern(cards1)
pattern2 = cls.fourcards2pattern(cards2)
if pattern1[5] != pattern2[5]:
return pattern1[5] - pattern2[5]
else:
cards1.sort(key = cmp_to_key(FiveCardStudPokerCard.compare))
cards2.sort(key = cmp_to_key(FiveCardStudPokerCard.compare))
return FiveCardStudPokerCard.compare(cards1[-1], cards2[-1])
elif len(cards1) == len(cards2) and len(cards1) == 5:
pattern1 = cls.cards2pattern(cards1)
pattern2 = cls.cards2pattern(cards2)
if pattern1[5] != pattern2[5]:
return pattern1[5] - pattern2[5]
else:
cards1.sort(key = cmp_to_key(FiveCardStudPokerCard.compare))
cards2.sort(key = cmp_to_key(FiveCardStudPokerCard.compare))
return FiveCardStudPokerCard.compare(cards1[-1], cards2[-1])
else:
raise ValueError("len(cards1)%d, and len(cards2)%d are same and are 4 or 5 "%(len(cards1),len(cards2)))
@classmethod
def cards2pattern(cls, cards):
"""
Args:
cards:
Returns:
"""
pointrank2cards = dict()
for c in cards:
if c.point_rank in pointrank2cards:
pointrank2cards[c.point_rank].append(c)
else:
pointrank2cards[c.point_rank] = [c]
for p in pointrank2cards:
pointrank2cards[p].sort(key = cmp_to_key(FiveCardStudPokerCard.compare))
suitrank2cards = dict()
for c in cards:
if c.suit_rank in suitrank2cards:
suitrank2cards[c.suit_rank].append(c)
else:
suitrank2cards[c.suit_rank] = [c]
for s in suitrank2cards:
suitrank2cards[s].sort(key = cmp_to_key(FiveCardStudPokerCard.compare))
num2pointrank = [[], [], [], [], []]
for p in pointrank2cards:
num = len(pointrank2cards[p])
num2pointrank[num].append(p)
for i in range(5):
num2pointrank[i].sort()
sorted_pointrank = []
for p in pointrank2cards:
sorted_pointrank.append(p)
sorted_pointrank.sort()
##straight_samesuit
for s in suitrank2cards:
if len(suitrank2cards[s]) >= 5:
numStraight = 1
for i in range(len(suitrank2cards[s]) - 2, -1, -1):
if suitrank2cards[s][i].point_rank == suitrank2cards[s][i + 1].point_rank - 1:
numStraight += 1
else:
numStraight = 1
if numStraight == 5:
pattern = roomai.fivecardstud.FiveCardStudAllCardsPattern["Straight_SameSuit"]
return pattern
##4_1
if len(num2pointrank[4]) ==1:
pattern = roomai.fivecardstud.FiveCardStudAllCardsPattern["4_1"]
return pattern
##3_2
if len(num2pointrank[3]) == 1 and len(num2pointrank[2]) == 1:
pattern = roomai.fivecardstud.FiveCardStudAllCardsPattern["3_2"]
return pattern
##SameSuit
for s in suitrank2cards:
if len(suitrank2cards[s]) >= 5:
pattern = roomai.fivecardstud.FiveCardStudAllCardsPattern["SameSuit"]
return pattern
##Straight_DiffSuit
numStraight = 1
for idx in range(len(sorted_pointrank) - 2, -1, -1):
if sorted_pointrank[idx] + 1 == sorted_pointrank[idx + 1]:
numStraight += 1
else:
numStraight = 1
if numStraight == 5:
pattern = roomai.fivecardstud.FiveCardStudAllCardsPattern["Straight_DiffSuit"]
for p in range(idx, idx + 5):
point = sorted_pointrank[p]
return pattern
##3_1_1
if len(num2pointrank[3]) == 1:
pattern = roomai.fivecardstud.FiveCardStudAllCardsPattern["3_1_1"]
return pattern
##2_2_1
if len(num2pointrank[2]) >= 2:
pattern = roomai.fivecardstud.FiveCardStudAllCardsPattern["2_2_1"]
return pattern
##2_1_1_1
if len(num2pointrank[2]) == 1:
pattern = roomai.fivecardstud.FiveCardStudAllCardsPattern["2_1_1_1"]
return pattern
##1_1_1_1_1
return roomai.fivecardstud.FiveCardStudAllCardsPattern["1_1_1_1_1"]
@classmethod
def fourcards2pattern(cls, cards):
"""
Args:
cards:
Returns:
"""
pointrank2cards = dict()
for c in cards:
if c.point_rank in pointrank2cards:
pointrank2cards[c.point_rank].append(c)
else:
pointrank2cards[c.point_rank] = [c]
for p in pointrank2cards:
pointrank2cards[p].sort(key = cmp_to_key(FiveCardStudPokerCard.compare))
suitrank2cards = dict()
for c in cards:
if c.suit_rank in suitrank2cards:
suitrank2cards[c.suit_rank].append(c)
else:
suitrank2cards[c.suit_rank] = [c]
for s in suitrank2cards:
suitrank2cards[s].sort(key = cmp_to_key(FiveCardStudPokerCard.compare))
num2pointrank = [[], [], [], [], []]
for p in pointrank2cards:
num = len(pointrank2cards[p])
num2pointrank[num].append(p)
for i in range(5):
num2pointrank[i].sort()
sorted_pointrank = []
for p in pointrank2cards:
sorted_pointrank.append(p)
sorted_pointrank.sort()
##candidate straight_samesuit
for s in suitrank2cards:
if len(suitrank2cards[s]) >= 4:
numStraight = 1
for i in range(len(suitrank2cards[s]) - 2, -1, -1):
if suitrank2cards[s][i].point_rank == suitrank2cards[s][i + 1].point_rank - 1:
numStraight += 1
else:
numStraight = 1
if numStraight == 4:
pattern = roomai.fivecardstud.FiveCardStudAllCardsPattern["Straight_SameSuit"]
return pattern
##4_1
if len(num2pointrank[4]) == 1:
pattern = roomai.fivecardstud.FiveCardStudAllCardsPattern["4_1"]
return pattern
##3_2 impossible
if len(num2pointrank[3]) == 1 and len(num2pointrank[2]) == 1:
pattern = roomai.fivecardstud.FiveCardStudAllCardsPattern["3_2"]
return pattern
##SameSuit
for s in suitrank2cards:
if len(suitrank2cards[s]) >= 4:
pattern = roomai.fivecardstud.FiveCardStudAllCardsPattern["SameSuit"]
return pattern
##Straight_DiffSuit
numStraight = 1
for idx in range(len(sorted_pointrank) - 2, -1, -1):
if sorted_pointrank[idx] + 1 == sorted_pointrank[idx + 1]:
numStraight += 1
else:
numStraight = 1
if numStraight == 4:
pattern = roomai.fivecardstud.FiveCardStudAllCardsPattern["Straight_DiffSuit"]
return pattern
##3_1_1
if len(num2pointrank[3]) == 1:
pattern = roomai.fivecardstud.FiveCardStudAllCardsPattern["3_1_1"]
return pattern
##2_2_1
if len(num2pointrank[2]) >= 2:
pattern = roomai.fivecardstud.FiveCardStudAllCardsPattern["2_2_1"]
return pattern
##2_1_1_1
if len(num2pointrank[2]) == 1:
pattern = roomai.fivecardstud.FiveCardStudAllCardsPattern["2_1_1_1"]
return pattern
##1_1_1_1_1
return roomai.fivecardstud.FiveCardStudAllCardsPattern["1_1_1_1_1"]
def __deepcopy__(self, memodict={}, newinstance = None):
if newinstance is None:
newinstance = FiveCardStudEnv()
newinstance = super(FiveCardStudEnv, self).__deepcopy__(newinstance=newinstance)
return newinstance
```
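A minimal driver for the environment above, mirroring the `compete` loop; the `RandomPlayer` class is a hypothetical stand-in and not part of the RoomAI package:
```python
import random
import roomai.fivecardstud

class RandomPlayer(object):
    # Hypothetical player: picks a uniformly random action among the available ones.
    def receive_info(self, info):
        self.info = info
    def take_action(self):
        return random.choice(list(self.info.person_state.available_actions.values()))
    def reset(self):
        pass

env = roomai.fivecardstud.FiveCardStudEnv()
infos, public_state, person_states, private_state = env.init(
    {"num_normal_players": 3, "chips": [500, 500, 500], "floor_bet": 10})
players = [RandomPlayer() for i in range(3)]
for i in range(len(players)):
    players[i].receive_info(infos[i])
while public_state.is_terminal == False:
    turn = public_state.turn
    action = players[turn].take_action()
    infos, public_state, person_states, private_state = env.forward(action)
    for i in range(len(players)):
        players[i].receive_info(infos[i])
print(public_state.scores)
```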
#### File: roomai/fivecardstud/FiveCardStudPublicState.py
```python
import roomai.common
import copy
class FiveCardStudPublicState(roomai.common.AbstractPublicState):
"""
"""
first_hand_cards = None
second_hand_cards = None
third_hand_cards = None
fourth_hand_cards = None
fifth_hand_cards = None
is_quit = None
num_quit = None
is_raise = None
num_raise = None
is_needed_to_action = None
num_needed_to_action = None
# chips is array which contains the chips of all players
chips = None
# bets is array which contains the bets from all players
bets = None
upper_bet = None
floor_bet = None
max_bet_sofar = None
round = None
num_normal_players = None
previous_round = None
def __deepcopy__(self,memodict={},newinstance = None):
if newinstance is None:
newinstance = FiveCardStudPublicState()
copyinstance = super(FiveCardStudPublicState,self).__deepcopy__(newinstance=newinstance)
if self.first_hand_cards is None:
copyinstance.first_hand_cards = None
else:
copyinstance.first_hand_cards = [self.first_hand_cards[i].__deepcopy__() for i in range(len(self.first_hand_cards))]
if self.second_hand_cards is None:
copyinstance.second_hand_cards = None
else:
copyinstance.second_hand_cards = [self.second_hand_cards[i].__deepcopy__() for i in range(len(self.second_hand_cards))]
if self.third_hand_cards is None:
copyinstance.third_hand_cards = None
else:
copyinstance.third_hand_cards = [self.third_hand_cards[i].__deepcopy__() for i in range(len(self.third_hand_cards))]
if self.fourth_hand_cards is None:
copyinstance.fourth_hand_cards = None
else:
copyinstance.fourth_hand_cards = [self.fourth_hand_cards[i].__deepcopy__() for i in range(len(self.fourth_hand_cards))]
if self.fifth_hand_cards is None:
copyinstance.fifth_hand_cards = None
else:
copyinstance.fifth_hand_cards = [self.fifth_hand_cards[i].__deepcopy__() for i in range(len(self.fifth_hand_cards))]
copyinstance.num_quit = self.num_quit
if self.is_quit is None:
copyinstance.is_quit = None
else:
copyinstance.is_quit = [self.is_quit[i] for i in range(len(self.is_quit))]
copyinstance.num_raise = self.num_raise
if self.is_raise is None:
copyinstance.is_raise = None
else:
copyinstance.is_raise = [self.is_raise[i] for i in range(len(self.is_raise))]
copyinstance.num_needed_to_action = self.num_needed_to_action
if self.is_needed_to_action is None:
copyinstance.is_needed_to_action = None
else:
copyinstance.is_needed_to_action = [self.is_needed_to_action[i] for i in range(len(self.is_needed_to_action))]
# chips is array which contains the chips of all players
if self.chips is None:
copyinstance.chips = None
else:
copyinstance.chips = [self.chips[i] for i in range(len(self.chips))]
# bets is array which contains the bets from all players
if self.bets is None:
copyinstance.bets = None
else:
copyinstance.bets = [self.bets[i] for i in range(len(self.bets))]
copyinstance.upper_bet = self.upper_bet
copyinstance.floor_bet = self.floor_bet
copyinstance.max_bet_sofar = self.max_bet_sofar
copyinstance.round = self.round
copyinstance.num_normal_players = self.num_normal_players
copyinstance.previous_round = self.previous_round
return copyinstance
```
#### File: roomai/sevenking/play.py
```python
import roomai.sevenking
import random
from functools import cmp_to_key
random.seed(4)
class HumanInputPlayer(object):
def receive_info(self, info):
available_actions = info
def take_action(self):
action = input("choosed_acton:")
#action = ""
return roomai.sevenking.SevenKingAction.lookup(action)
def reset(self):
pass
class HumanInputPlayer1(object):
def receive_info(self, info):
available_actions = info
def take_action(self):
action = input("choosed_acton:")
#action = ""
return roomai.sevenking.SevenKingAction.lookup(action)
def reset(self):
pass
def show(info):
person_state = info.person_state
hand_cards = sorted(person_state.hand_cards, key = cmp_to_key(roomai.sevenking.SevenKingPokerCard.compare))
sorted_hand_cards_str = [c.key for c in hand_cards]
print ("%s"%(person_state.id) + "'s hand_cards:\t" + ",".join(sorted_hand_cards_str))
print ("%s"%(person_state.id) + "'s available_actions:\t" + " ".join(person_state.available_actions.keys()))
if __name__ == "__main__":
players = [HumanInputPlayer(), HumanInputPlayer1()]
env = roomai.sevenking.SevenKingEnv()
allcards = list(roomai.sevenking.AllSevenKingPokerCards.values())[0:17]
allcards.sort(key = cmp_to_key(roomai.sevenking.SevenKingPokerCard.compare))
tmp = allcards[-6]
allcards[-6] = allcards[-1]
allcards[-1] = tmp
tmp = allcards[-7]
allcards[-7] = allcards[-2]
allcards[-2] = tmp
num_normal_players = len(players)
infos, public_state, person_states, private_state = env.init({"num_normal_players": num_normal_players,"allcards":allcards})
for i in range(env.num_normal_players):
players[i].receive_info(infos[i])
show(infos[i])
print (public_state.is_fold)
print("\n")
while public_state.is_terminal == False:
turn = public_state.turn
print ("turn = %d, stage = %d"%(public_state.turn,public_state.stage))
action = players[turn].take_action()
print ("%d player take an action (%s)"%(turn,action.key))
infos, public_state, person_states, private_state = env.forward(action)
for i in range(env.num_normal_players):
players[i].receive_info(infos[i])
show(infos[i])
print (public_state.is_fold)
print ("\n")
print (public_state.scores)
```
#### File: roomai/sevenking/SevenKingAction.py
```python
import roomai.common
import roomai.sevenking
from roomai.sevenking import AllSevenKingPatterns
from functools import cmp_to_key
class SevenKingAction(roomai.common.AbstractAction):
'''
The SevenKing action. The SevenKing action contains some cards. Examples of usages:\n
>> import roomai.sevenking\n
>> action = roomai.sevenking.SevenKingAction.lookup("A_Spade,A_Heart") \n
>> ## We strongly recommend you to get an action with the lookup function.\n
>> action.key \n
"A_Heart, A_Spade"\n
>> action.cards[0].point\n
"A"\n
>> action.cards[0].suit\n
"Heart"\n
>> action.pattern\n
p_2 # There are 2 cards in this action\n
'''
def __init__(self, key):
if not isinstance(key,str):
raise TypeError("The key for SevenKingAction is an str, not %s"%(type(str)))
super(SevenKingAction,self).__init__(key)
self.__cards__ = []
if len(key) > 0:
for c in self.key.split(","):
self.__cards__.append(roomai.sevenking.SevenKingPokerCard.lookup(c))
self.__cards__.sort(key = cmp_to_key(roomai.sevenking.SevenKingPokerCard.compare))
self.__key__ = ",".join([c.key for c in self.__cards__])
else:
self.__key__ = ""
self.__pattern__ = self.__action2pattern__(self)
@classmethod
def __action2pattern__(cls, action):
num_cards = len(action.cards)
return AllSevenKingPatterns["p_%d"%(num_cards)]
def __get_key__(self):
return self.__key__
key = property(__get_key__, doc="The key of this action. For example, key = \"3_Heart,3_Spade\". The check action's key = \"\"")
def __get_cards__(self):
return tuple(self.__cards__)
cards = property(__get_cards__, doc="The cards in this action. For example, cards=[roomai.sevenking.SevenKingPokerCards.lookup(\"A_Spade\")]")
def __get_pattern__(self):
return self.__pattern__
pattern = property(__get_pattern__, doc="The pattern of the action")
@classmethod
def lookup(cls, key):
'''
lookup a SevenKing action with the specified key
:param key: The specified key
:return: The action
'''
if key in AllSevenKingActions:
return AllSevenKingActions[key]
else:
AllSevenKingActions[key] = SevenKingAction(key)
return AllSevenKingActions[key]
def __deepcopy__(self, memodict={}, newinstance = None):
if self.__key__ in AllSevenKingActions:
return AllSevenKingActions[self.__key__]
if newinstance is None:
newinstance = SevenKingAction(self.key)
newinstance = super(SevenKingAction,self).__deepcopy__(newinstance = newinstance)
newinstance.__key__ = self.__key__
newinstance.__cards__ = [card.__deepcopy__() for card in self.__cards__]
newinstance.__pattern__ = self.__pattern__
AllSevenKingActions[self.__key__] = newinstance
return newinstance
AllSevenKingActions = dict()
```
#### File: roomai/sevenking/SevenKingEnv.py
```python
import roomai.common
from roomai.sevenking import SevenKingPublicState
from roomai.sevenking import SevenKingPrivateState
from roomai.sevenking import SevenKingPersonState
from roomai.sevenking import SevenKingAction
from roomai.sevenking import SevenKingPokerCard
from roomai.sevenking import AllSevenKingPatterns
from roomai.sevenking import AllSevenKingPokerCards
import random
import roomai.sevenking
logger = roomai.get_logger()
class SevenKingEnv(roomai.common.AbstractEnv):
'''
The SevenKing game environment
'''
def init(self, params = dict()):
'''
Initialize the SevenKing game environment with the initialization params.\n
The initialization is a dict with some options\n
1) backward_enable: whether to record all history states. if you need call the backward function, please set it to True. default False\n
2) num_normal_players: how many players are in the game \n
An example of the initialization param is {"num_normal_players":2,"backward_enable":True}\n
:param params: the initialization params
:return: infos, public_state, person_states, private_state
'''
if "num_normal_players" in params:
self.__params__["num_normal_players"] = params["num_normal_players"]
else:
self.__params__["num_normal_players"] = 3
if "backward_enable" in params:
self.__params__["backward_enable"] = params["backward_enable"]
else:
self.__params__["backward_enable"] = False
self.public_state = SevenKingPublicState()
self.private_state = SevenKingPrivateState()
self.person_states = [SevenKingPersonState() for i in range(self.__params__["num_normal_players"] + 1)]
self.public_state_history = []
self.private_state_history = []
self.person_states_history = []
## private_state
allcards = [c.__deepcopy__() for c in AllSevenKingPokerCards.values()]
random.shuffle(allcards)
self.private_state.__keep_cards__ = allcards
for i in range(self.__params__["num_normal_players"]):
tmp = []
for j in range(5):
c = self.private_state.__keep_cards__.pop()
tmp.append(c)
self.person_states[i].__add_cards__(tmp)
## public_state
self.public_state.__turn__,_ = self.__choose_player_with_lowest_card__()
self.public_state.__is_terminal__ = False
self.public_state.__scores__ = []
self.public_state.__license_action__ = SevenKingAction.lookup("")
self.public_state.__stage__ = 0
self.public_state.__num_normal_players__ = self.__params__["num_normal_players"]
self.public_state.__num_keep_cards__ = len(self.private_state.keep_cards)
self.public_state.__num_hand_cards__ = [len(person_state.hand_cards) for person_state in self.person_states]
self.public_state.__is_fold__ = [False for i in range(self.public_state.num_normal_players)]
self.public_state.__num_fold__ = 0
## person_state
for i in range(self.__params__["num_normal_players"]+1):
self.person_states[i].__id__ = i
if i == self.public_state.turn:
self.person_states[i].__available_actions__ = SevenKingEnv.available_actions(self.public_state, self.person_states[i])
self.__gen_state_history_list__()
infos = self.__gen_infos__()
return infos, self.public_state, self.person_states, self.private_state
def forward(self, action):
'''
The SevenKing game environment steps with the action taken by the current player
:param action:
:return:
'''
pu = self.public_state
pr = self.private_state
pes = self.person_states
turn = pu.turn
if self.is_action_valid(action,pu, pes[turn]) == False:
raise ValueError("The (%s) is an invalid action " % (action.key))
pes[pu.turn].__available_actions__ = dict()
pu.__action_history__.append((pu.turn,action))
## the action plays its role
if action.pattern[0] == "p_0":
pu.__is_fold__[turn] = True
pu.__num_fold__ += 1
pes[turn].__available_actions__ = dict()
else:
pes[turn].__del_cards__(action.cards)
if pu.stage == 0:
tmp = []
for i in range(5 - len(pes[turn].hand_cards)):
c = pr.__keep_cards__.pop()
tmp.append(c)
pes[turn].__add_cards__(tmp)
elif pu.stage == 1:
pu.__num_hand_cards__[turn] = len(pes[turn].hand_cards)
if action.pattern[0] != "p_0":
pu.__license_action__ = action
#print (turn, "len_of_hand_card=",len(self.private_state.hand_cards[turn]), " len_of_keep_card=", len(self.private_state.keep_cards), " action = (%s)" %action.key,\
# " handcard1=%s"%(",".join([a.key for a in self.private_state.hand_cards[0]]))," handcard2=%s"%(",".join([a.key for a in self.private_state.hand_cards[1]])),\
# " num_fold =%d"%(self.public_state.num_fold),"fold=%s"%(",".join([str(s) for s in pu.is_fold])))
## termminal
if self.public_state.stage == 1 and len(self.person_states[turn].hand_cards) == 0:
pu.__is_terminal__ = True
pu.__scores__ = self.__compute_scores__()
new_turn = None
pu.__turn__ = new_turn
pu.__license_action__ = SevenKingAction.lookup("")
## stage 0 to 1
elif len(self.private_state.keep_cards) < 5 and pu.stage == 0:
new_turn, min_card = self.__choose_player_with_lowest_card__()
pu.__turn__ = new_turn
pu.__num_fold__ = 0
pu.__is_fold__ = [False for i in range(pu.num_normal_players)]
pu.__license_action__ = SevenKingAction.lookup("")
pes[new_turn].__available_actions__ = SevenKingEnv.available_actions(pu, pes[new_turn])
keys = list(pes[new_turn].available_actions.keys())
for key in keys:
if min_card.key not in key:
del pes[new_turn].__available_actions__[key]
pu.__stage__ = 1
## round next
elif self.public_state.num_fold + 1 == pu.num_normal_players:
new_turn = self.__choose_player_with_nofold__()
pu.__turn__ = new_turn
pu.__num_fold__ = 0
pu.__is_fold__ = [False for i in range(pu.num_normal_players)]
pu.__license_action__ = SevenKingAction.lookup("")
pes[new_turn].__available_actions__ = SevenKingEnv.available_actions(pu, pes[new_turn])
else:
new_turn = (turn + 1) % pu.num_normal_players
pu.__turn__ = new_turn
pes[new_turn].__available_actions__ = SevenKingEnv.available_actions(pu, pes[new_turn])
self.__gen_state_history_list__()
infos = self.__gen_infos__()
return infos, self.public_state, self.person_states, self.private_state
def __compute_scores__(self):
scores = [-1 for i in range(self.__params__["num_normal_players"])]
scores[self.public_state.turn] = self.__params__["num_normal_players"] -1
return scores
def __choose_player_with_nofold__(self):
for player_id in range(self.public_state.num_normal_players):
if self.public_state.is_fold[player_id]== False:
return player_id
def __choose_player_with_lowest_card__(self):
min_card = self.person_states[0].hand_cards[0]
min_playerid = 0
for playerid in range(self.__params__["num_normal_players"]):
for c in self.person_states[playerid].hand_cards:
if SevenKingPokerCard.compare(min_card, c) > 0:
min_card = c
min_playerid = playerid
return min_playerid, min_card
######################## Utils function ###################
@classmethod
def compete(cls, env, players):
'''
Use the game environment to hold a compete for the players
:param env: The game environment
:param players: The players
:return: scores for the players
'''
num_normal_players = len(players)
infos, public_state, person_states, private_state = env.init({"num_normal_players":num_normal_players})
for i in range(env.__params__["num_normal_players"]):
players[i].receive_info(infos[i])
while public_state.is_terminal == False:
turn = public_state.turn
action = players[turn].take_action()
infos, public_state, person_states, private_state = env.forward(action)
for i in range(env.__params__["num_normal_players"]):
players[i].receive_info(infos[i])
return public_state.scores
@classmethod
def is_action_valid(self, action, public_state, person_state):
return action.key in person_state.available_actions
########################### about gen_available_actions ########################
@classmethod
def available_actions(cls, public_state, person_state):
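# Legal actions are grouped by pattern p_k, where k is the number of cards played
# (p_0 is a pass/fold). With no license action on the table any non-empty pattern may
# start the trick; otherwise the player must either pass or follow the license pattern
# with a combination whose highest card beats the license action's highest card.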
available_actions = dict()
license_action = public_state.license_action
if license_action is None:
license_action = SevenKingAction("")
hand_cards = person_state.hand_cards
patterns = set()
if license_action.pattern[0] == "p_0":
for p in AllSevenKingPatterns.values():
if p[0] != "p_0":
patterns.add(p)
else:
patterns.add(license_action.pattern)
patterns.add(AllSevenKingPatterns["p_0"])
for pattern in patterns:
if pattern[1] >= 2:
point2cards = person_state.__gen_pointrank2cards__()
if len(person_state.hand_cards) < pattern[1]:
continue
elif pattern[0] == "p_0":
available_actions[""] = SevenKingAction.lookup("")
elif pattern[0] == "p_1":
license_pattern = license_action.pattern
license_card = None
if license_pattern[0] != "p_0":
license_card = license_action.cards[-1]
for c in person_state.hand_cards:
if license_pattern[0] == "p_0" or SevenKingPokerCard.compare(c,license_card) >0:
available_actions[c.key] = SevenKingAction.lookup(c.key)
elif pattern[0] == "p_2":
for p in point2cards:
license_pattern = license_action.pattern
license_card = None
if license_pattern[0] != "p_0":
#print license_action.key, license_action.pattern, license_pattern[0] != "p_0"
license_card = license_action.cards[-1]
len1 = len(point2cards[p])
if len1 == 2:
if license_pattern[0] == "p_0" or SevenKingPokerCard.compare(point2cards[p][1],
license_card) > 0:
str = "%s,%s" % (point2cards[p][0].key, point2cards[p][1].key)
available_actions[str] = SevenKingAction.lookup(str)
if len1 == 3:
if license_pattern[0] == "p_0" or SevenKingPokerCard.compare(point2cards[p][1],
license_card) > 0:
str = "%s,%s" % (point2cards[p][0].key, point2cards[p][1].key)
available_actions[str] = (SevenKingAction.lookup(str))
if license_pattern[0] == "p_0" or SevenKingPokerCard.compare(point2cards[p][2],
license_card) > 0:
str = "%s,%s" % (point2cards[p][0].key, point2cards[p][2].key)
available_actions[str] = (SevenKingAction.lookup(str))
str = "%s,%s" % (point2cards[p][1].key, point2cards[p][2].key)
available_actions[str] = (SevenKingAction.lookup(str))
if len1 == 4:
if license_pattern[0] == "p_0" or SevenKingPokerCard.compare(point2cards[p][1],
license_card) > 0:
str = "%s,%s" % (point2cards[p][0].key, point2cards[p][1].key)
available_actions[str] = (SevenKingAction.lookup(str))
if license_pattern[0] == "p_0" or SevenKingPokerCard.compare(point2cards[p][2],
license_card) > 0:
str = "%s,%s" % (point2cards[p][0].key, point2cards[p][2].key)
available_actions[str] = (SevenKingAction.lookup(str))
str = "%s,%s" % (point2cards[p][1].key, point2cards[p][2].key)
available_actions[str] = (SevenKingAction.lookup(str))
if license_pattern[0] == "p_0" or SevenKingPokerCard.compare(point2cards[p][3],
license_card) > 0:
str = "%s,%s" % (point2cards[p][0].key, point2cards[p][3].key)
available_actions[str] = (SevenKingAction.lookup(str))
str = "%s,%s" % (point2cards[p][1].key, point2cards[p][3].key)
available_actions[str] = (SevenKingAction.lookup(str))
str = "%s,%s" % (point2cards[p][2].key, point2cards[p][3].key)
available_actions[str] = (SevenKingAction.lookup(str))
elif pattern[0] == "p_3":
for p in point2cards:
license_pattern = license_action.pattern
license_card = None
if license_pattern[0] != "p_0" :
license_card = license_action.cards[-1]
len1 = len(point2cards[p])
if len1 == 3:
if license_pattern[0] == "p_0" or SevenKingPokerCard.compare(point2cards[p][2],
license_card) > 0:
str = "%s,%s,%s" % (point2cards[p][0].key, point2cards[p][1].key, point2cards[p][2].key)
available_actions[str] = (SevenKingAction.lookup(str))
if len1 == 4:
if license_pattern[0] == "p_0" or SevenKingPokerCard.compare(point2cards[p][2],
license_card) > 0:
str = "%s,%s,%s" % (point2cards[p][0].key, point2cards[p][1].key, point2cards[p][2].key)
available_actions[str] = (SevenKingAction.lookup(str))
if license_pattern[0] == "p_0" or SevenKingPokerCard.compare(point2cards[p][3],
license_card) > 0:
str = "%s,%s,%s" % (point2cards[p][0].key, point2cards[p][1].key, point2cards[p][3].key)
available_actions[str]=(SevenKingAction.lookup(str))
str = "%s,%s,%s" % (point2cards[p][0].key, point2cards[p][2].key, point2cards[p][3].key)
available_actions[str]=(SevenKingAction.lookup(str))
str = "%s,%s,%s" % (point2cards[p][1].key, point2cards[p][2].key, point2cards[p][3].key)
available_actions[str]=(SevenKingAction.lookup(str))
elif pattern[0] == "p_4":
for p in point2cards:
license_pattern = license_action.pattern
license_card = None
if license_pattern[0] != "p_0" :
license_card = license_action.cards[-1]
len1 = len(point2cards[p])
if len1 >= 4:
if license_pattern[0] == "p_0" or SevenKingPokerCard.compare(point2cards[p][3],
license_card) > 0:
str = "%s,%s,%s,%s" % (
point2cards[p][0].key,
point2cards[p][1].key,
point2cards[p][2].key,
point2cards[p][3].key
)
available_actions[str]=(SevenKingAction.lookup(str))
if pattern[0] != "p_0" and pattern[0] != "p_1" and\
pattern[0] != "p_2" and pattern[0] != "p_3" and pattern[0] != "p_4":
raise ValueError("The %s pattern is invalid" % (pattern[0]))
#for a in available_actions.values():
# if SevenKingEnv.__is_action_valid__(a,public_state,person_state) == False:
# del available_actions[a.key]
return available_actions
def __deepcopy__(self, memodict={}, newinstance = None):
if newinstance is None:
newinstance = SevenKingEnv()
newinstance = super(SevenKingEnv, self).__deepcopy__(newinstance=newinstance)
return newinstance
```
#### File: RoomAI/tests/AbstractEnvPlayerTest.py
```python
import unittest
from roomai.common import *
class AbstractEnvTester(unittest.TestCase):
"""
"""
def test_functions(self):
"""
"""
aEnv = AbstractEnv();
class AbstractPlayerTester(unittest.TestCase):
"""
"""
def test_functions(self):
"""
"""
aPlayer = AbstractPlayer();
with self.assertRaises(NotImplementedError):
aPlayer.receive_info([]);
with self.assertRaises(NotImplementedError):
aPlayer.take_action();
```
#### File: RoomAI/tests/testBridge.py
```python
import unittest
import roomai.bridge
import roomai
import roomai.common
from functools import cmp_to_key
class BridgeTester(unittest.TestCase):
def testInit(self):
env = roomai.bridge.BridgeEnv()
env.init()
def testForward(self):
env = roomai.bridge.BridgeEnv()
infos, public_state, person_states, private_state = env.init()
xxx = 0
self.assertEqual(len(infos),5)
for i in range(4):
self.assertEqual(len(person_states[i].hand_cards_dict.keys()), 52 / 4)
self.assertNotEqual(len(person_states[public_state.turn].available_actions),0)
self.assertEqual(len(person_states[public_state.turn].available_actions), 36)
def testAction(self):
action = roomai.bridge.BridgeAction.lookup("bidding_bid_A_Heart")
self.assertEqual(action.stage, "bidding")
self.assertEqual(action.bidding_option,"bid")
self.assertEqual(action.bidding_card.point, "A")
self.assertEqual(action.bidding_card.suit,"Heart")
self.assertEqual(action.playing_card,None)
xxx = 0
print (xxx)
def testAGame(self):
env = roomai.bridge.BridgeEnv()
allcards = list(roomai.bridge.AllBridgePlayingPokerCards.values())
allcards.sort(key = cmp_to_key(roomai.common.PokerCard.compare))
infos, public_state, person_states, private_state = env.init({"start_turn":0})
for i in range(4):
print (i,person_states[i].hand_cards_dict, len(person_states[i].hand_cards_dict))
self.assertEqual(len(person_states[i].hand_cards_dict),13)
self.assertEqual(public_state.turn, 0)
#### bidding stage
action = roomai.bridge.BridgeAction.lookup("bidding_bid_A_Heart")
infos, public_state, person_states, private_state = env.forward(action)
action = roomai.bridge.BridgeAction.lookup("bidding_pass")
infos, public_state, person_states, private_state = env.forward(action)
infos, public_state, person_states, private_state = env.forward(action)
infos, public_state, person_states, private_state = env.forward(action)
self.assertEqual(public_state.stage, "playing")
self.assertEqual(public_state.turn,1)
#### playing_stage
count = 0
while env.public_state.is_terminal == False:
action = list(env.person_states[env.public_state.turn].available_actions.values())[0]
count += 1
env.forward(action)
self.assertEqual(count,13 * 4)
print (env.public_state.scores)
#self.assertTrue(env.public_state.scores[0] == 50)
#self.assertTrue(env.public_state.scores[1] == 0)
print (env.public_state.scores)
# self.assertEqual(env.public_state.scores[1],350)
def testAGame1(self):
env = roomai.bridge.BridgeEnv()
allcards = list(roomai.bridge.AllBridgePlayingPokerCards.values())
allcards.sort(key = cmp_to_key(roomai.common.PokerCard.compare))
infos, public_state, person_states, private_state = env.init({"start_turn":0, "backward_enable":True})
for i in range(4):
print (i,person_states[i].hand_cards_dict, len(person_states[i].hand_cards_dict))
self.assertEqual(len(person_states[i].hand_cards_dict),13)
self.assertEqual(public_state.turn, 0)
#### bidding stage
action = roomai.bridge.BridgeAction.lookup("bidding_bid_A_Heart")
infos, public_state, person_states, private_state = env.forward(action)
action = roomai.bridge.BridgeAction.lookup("bidding_double")
infos, public_state, person_states, private_state = env.forward(action)
action = roomai.bridge.BridgeAction.lookup("bidding_pass")
infos, public_state, person_states, private_state = env.forward(action)
infos, public_state, person_states, private_state = env.forward(action)
infos, public_state, person_states, private_state = env.forward(action)
self.assertEqual(public_state.stage, "playing")
self.assertEqual(public_state.turn,1)
self.assertEqual(public_state.playing_magnification,2)
#### playing_stage
count = 0
while env.public_state.is_terminal == False:
action = list(env.person_states[env.public_state.turn].available_actions.values())[0]
count += 1
if count == 13 * 4:
xx = 0
env.forward(action)
self.assertEqual(count,13 * 4)
#self.assertTrue(env.public_state.scores[0] == 0)
#self.assertTrue(env.public_state.scores[1] > 0)
print (env.public_state.scores)
#self.assertEqual(env.public_state.scores[1],100 + 200 * 2 + (7-3) * 300)
if __name__ == "__main__":
import time
start = time.time()
for iter in range(1000):
env = roomai.bridge.BridgeEnv()
allcards = list(roomai.bridge.AllBridgePlayingPokerCards.values())
allcards.sort(key = cmp_to_key(roomai.common.PokerCard.compare))
infos, public_state, person_states, private_state = env.init({"allcards":allcards, "start_turn":0})
#### bidding stage
action = roomai.bridge.BridgeAction.lookup("bidding_bid_A_Heart")
infos, public_state, person_states, private_state = env.forward(action)
action = roomai.bridge.BridgeAction.lookup("bidding_double")
infos, public_state, person_states, private_state = env.forward(action)
action = roomai.bridge.BridgeAction.lookup("bidding_pass")
infos, public_state, person_states, private_state = env.forward(action)
infos, public_state, person_states, private_state = env.forward(action)
infos, public_state, person_states, private_state = env.forward(action)
#### playing_stage
count = 0
while env.public_state.is_terminal == False:
action = list(env.person_states[env.public_state.turn].available_actions.values())[0]
env.forward(action)
end = time.time()
print (end-start)
``` |
{
"source": "16bc/gibdd",
"score": 3
} |
#### File: 16bc/gibdd/application.py
```python
from flask import Flask
from flask import render_template
from collector import readfile, convert, collect
DATAFILE = "data.json"
app = Flask(__name__)
@app.route("/")
def show_stats():
data = readfile(DATAFILE)
return render_template('index.html', date=data[0], count=data[1], dead=data[2], child_dead=data[3],
wounded=data[4], child_wounded=data[5])
@app.route("/dictdata")
def get_dict_data():
text = convert(DATAFILE)
return text
@app.route("/listdata")
def get_list_data():
with open(DATAFILE, 'r') as file:
text = file.read()
return text
@app.route("/update")
def do_collect():
print("Data updating...")
return collect()
if __name__ == "__main__":
app.run(host='0.0.0.0')
``` |
{
"source": "16bc/webshot",
"score": 3
} |
#### File: 16bc/webshot/webshot.py
```python
from multiprocessing import Process, Queue, cpu_count
from selenium import webdriver
import logging
import os
import json
from time import sleep
#########################################################################################
# VARIABLES IN THE HEADER
#########################################################################################
screens_directory = './scrs/'
hosts_file = 'ips.json' # Nmap or Masscan output JSON file name
log_filename = "log.txt"
count_of_workers = cpu_count() # Count of workers.
page_load_timeout = 5
wait_on_page = 1
take_screenshot_anyway = True
''' True - take a screenshot even if the page does not finish loading within <page_load_timeout>;
False - do not take screenshots of such pages. Useful if many blank "white pages" are being generated
'''
def driver(): return webdriver.Firefox(executable_path='./driver/geckodriver')
# Change to webdriver.Chrome() if needed.
#########################################################################################
def say(i, text):
"""
Colorized & personalized messages from workers
:param i: Worker id
:param text: Message
"""
colors = [30, 32, 34, 35, 36, 93, 30, 32, 34, 35, 36, 93]
return log.info(f"\033[{colors[i]}m Worker-{i} said: {text}")
def parse_hosts(hosts_file):
hosts = []
with open(hosts_file, 'r') as file:
lines = file.readlines()[:-1]
for line in lines:
hosts.append(json.loads(line[:-2])['ip'])
log.info(f'Addresses loaded. Addresses count is:{len(hosts)}')
return hosts
def selenium_task(id, worker, host):
url = f'http://{host}/'
filename = f"{screens_directory}{host}.png"
say(id, f"☐ Begining to load host {host}...")
try:
worker.get(url)
sleep(wait_on_page)
worker.get_screenshot_as_file(filename)
say(id, f"(✅ 📷) I'm successfully load and screen {host}")
except:
if take_screenshot_anyway:
worker.get_screenshot_as_file(filename)
say(id, f"(❌ 📷) Page load not complite from {host}, but screenshot was taken.")
else:
say(id, f"(❌) Page load not complite from {host}. No screen.")
def selenium_queue_listener(hosts_q, workers_q):
log.info("Selenium func worker started")
while True:
current_host = hosts_q.get()
if current_host == 'STOP':
log.warning("STOP encountered, killing worker process")
hosts_q.put(current_host)
break
else:
# Get the ID of any currently free workers from the worker queue
worker_id = workers_q.get()
selenium_task(worker_id, selenium_workers[worker_id], current_host)
workers_q.put(worker_id)
return
if __name__ == '__main__':
FORMAT = '%(asctime)-15s %(message)s'
logging.basicConfig(filename=log_filename, level=logging.INFO, format=FORMAT)
log = logging.getLogger("logger")
log.addHandler(logging.StreamHandler())
if not os.path.exists(screens_directory):
os.makedirs(screens_directory)
hosts = parse_hosts(hosts_file)
hosts.append('STOP')
hosts_queue = Queue()
workers_queue = Queue()
log.info("Adding hosts to hosts queue")
for h in hosts:
hosts_queue.put(h)
log.info(f"Try to create {count_of_workers} workers")
worker_ids = list(range(count_of_workers))
for worker_id in worker_ids:
workers_queue.put(worker_id)
selenium_workers = {}
try:
for i in worker_ids:
selenium_workers[i] = driver()
selenium_workers[i].set_page_load_timeout(page_load_timeout)
say(i, f"I born! I'm happy and ready to work!")
except:
log.error('Failed to create workers! Make sure Selenium WebDriver is installed \
and the geckodriver path in the header is valid.')
selenium_processes = [Process(target=selenium_queue_listener,
args=(hosts_queue, workers_queue)) for _ in worker_ids]
for p in selenium_processes:
p.daemon = True
p.start()
for p in selenium_processes:
p.join()
# Quit all the web workers elegantly in the background
log.info("\033[31m ---Dismissing web workers---")
for w in selenium_workers.values():
w.quit()
``` |
{
"source": "16beer/load_m3u8",
"score": 3
} |
#### File: load_m3u8/load/__init__.py
```python
import binascii
import logging
import m3u8.parser
import requests
from Crypto.Cipher import AES
user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36'
headers = {'User-Agent': user_agent}
def load_ts_done(feature):
if feature.exception() is not None:
logging.debug('load ts exception: %s', feature.exception())
else:
logging.debug('load ts result: %s', feature.result())
def load_ts(data):
url, encrypt_key, ts_name = data
try:
ts_data = []
if m3u8.parser.is_url(url):
res = requests.get(url, headers=headers)
if res is None or res.content is None:
return 'exception end'
ts_data = res.content
else:
with open(url, 'rb') as read:
if read.readable():
ts_data = read.read()
with open(ts_name, 'wb') as fp:
if encrypt_key is None:
fp.write(ts_data)
else:
if m3u8.parser.is_url(encrypt_key.uri):
aes_key = requests.get(encrypt_key.uri, headers=headers).content
else:
with open(encrypt_key.uri, 'rb') as read:
if read.readable():
aes_key = read.read()
fp.write(decrypt(ts_data, aes_key, encrypt_key.iv))
except Exception as e:
logging.exception('load failed')
return f'{ts_name} exception: {str(e)}'
return f'{ts_name} succeed'
def decrypt(content, key, iv):
"""
M3U8 has the same AES-IV and key
:param content: Encrypted content
:param key: AES key
:param iv: IV vector (ignored; the key is reused as the IV)
:return: Decrypted content
"""
try:
key = a2b_hex(key)
iv = a2b_hex(key)
cryptos = AES.new(key, AES.MODE_CBC, iv)
return cryptos.decrypt(content)
except Exception as e:
logging.exception('decrypt failed')
return content
def a2b_hex(data):
if data is None:
return None
if isinstance(data, bytes):
data = data.decode()
if data[0:2] == '0x':
data = binascii.a2b_hex(data[2:])
if isinstance(data, str):
data = data.encode()
return data
```
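A quick sketch of how the `a2b_hex` helper above normalises key material; the values are made up, and only the `0x` prefix triggers hex decoding:
```python
# Hypothetical values illustrating a2b_hex() as defined above.
assert a2b_hex(None) is None
assert a2b_hex('0x61626364') == b'abcd'   # '0x'-prefixed hex strings are hex-decoded
assert a2b_hex('abcd') == b'abcd'         # plain strings are simply utf-8 encoded
assert a2b_hex(b'0x61626364') == b'abcd'  # bytes are decoded to str first, then handled the same way
```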
#### File: load_m3u8/load/resolve.py
```python
import logging
import os
from concurrent.futures.process import ProcessPoolExecutor
from concurrent.futures.thread import ThreadPoolExecutor
from glob import iglob
from urllib.parse import urljoin
import m3u8
from natsort import natsorted
from load_m3u8.load import load_ts, load_ts_done
windows_invalid = ['*', '|', ':', '?', '/', '<', '>', '"', '\\']
'''Unresolvable characters in the Windows System'''
class LoadM3U8(object):
"""
Use M3U8 file to download ts format video.
Support video decryption by AES.
Dependent libraries: pip install m3u8 requests natsort
"""
m3u8_url: str
video_path: str
video_folder: str
ts_folder: str
def __init__(self, m3u8_url, video_path='/tmp/test_m3u8.ts', process_workers=None, thread_workers=None):
use_process = thread_workers is None
self.pool = ProcessPoolExecutor(max_workers=process_workers) if use_process else ThreadPoolExecutor(
max_workers=thread_workers)
self.m3u8_url = m3u8_url
video_name = tmp = os.path.basename(video_path)
for i in windows_invalid:
if i in video_name:
tmp = tmp.replace(i, '')
video_name = tmp
self.video_folder = os.path.dirname(video_path)
self.video_path = os.path.join(self.video_folder, video_name)
self.ts_folder = os.path.join(self.video_folder, video_name.split('.')[0])
if not os.path.exists(self.video_folder):
os.mkdir(self.video_folder)
if not os.path.exists(self.ts_folder):
os.mkdir(self.ts_folder)
def __load_m3u8(self):
urls = self.__resolve_url()
for index, url in enumerate(urls):
feature = self.pool.submit(load_ts, [url[0], url[1], f'{self.ts_folder}/{index}.ts'])
feature.add_done_callback(load_ts_done)
self.pool.shutdown()
def __resolve_url(self):
m3u8_obj = m3u8.load(self.m3u8_url)
base_uri = m3u8_obj.base_uri
if m3u8_obj.is_variant:
'''Get HD video address'''
bandwidth = 0
for seq in m3u8_obj.playlists:
if seq.stream_info.bandwidth > bandwidth:
bandwidth = seq.stream_info.bandwidth
self.m3u8_url = seq.absolute_uri
logging.warning('redirect video address: %s', self.m3u8_url)
m3u8_obj = m3u8.load(self.m3u8_url)
base_uri = m3u8_obj.base_uri
segments = m3u8_obj.segments
encryptKey = m3u8_obj.keys[0] if len(m3u8_obj.keys) > 0 else None
for seg in segments:
yield [urljoin(base_uri, seg.uri), encryptKey]
def run(self):
self.__load_m3u8()
ts_path = self.ts_folder + '/*.ts'
with open(self.video_path, 'wb') as fp:
for ts in natsorted(iglob(ts_path)):
with open(ts, 'rb') as ft:
fp.write(ft.read())
for ts in iglob(ts_path):
os.remove(ts)
os.rmdir(self.ts_folder)
``` |
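A minimal usage sketch for the `LoadM3U8` class above; the playlist URL and output path are placeholders, and the dependencies listed in the class docstring are assumed to be installed:
```python
# Hypothetical invocation (URL and output path are placeholders).
loader = LoadM3U8('https://example.com/playlist.m3u8', video_path='/tmp/example.ts', thread_workers=4)
loader.run()  # downloads the .ts segments concurrently, then joins them into a single file
```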
{
"source": "16bitmood/prologic",
"score": 3
} |
#### File: prologic/prologic/evaluator.py
```python
from prologic.tokens import Var
op = {
'implication': lambda x, y: y if x else True, # a -> b
'equivalence': lambda x, y: x == y, # a = b
'conjunction': lambda x, y: x and y, # a and b
'disjunction': lambda x, y: x or y, # a or b
'negation': lambda x: not x, # not a
'xor': lambda x, y: x ^ y # a xor b
}
# Helper Functions
def set_var(expr, var, var_value):
if expr == var:
return var_value
elif isinstance(expr, Var):
return expr
elif isinstance(expr, bool): # Already Set
return expr
return [expr[0]] + [set_var(x, var, var_value) for x in expr[1:]]
def set_vars(expr, args):
for var, value in args.items():
expr = set_var(expr, var, value)
return expr
def eval_expr(expr):
if not isinstance(expr, list):
return expr
return op[expr[0]](*[eval_expr(x) for x in expr[1:]])
# Main Function
def evaluate(expr, args):
'''
Evaluates a logic statement with given args
Args:
expr: Parsed Expression
args: a dict with var_name, value pairs
'''
expr = set_vars(expr, args)
return eval_expr(expr)
``` |
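A minimal sketch (not part of the original repo) exercising `eval_expr` directly on an already-ground expression, i.e. nested lists of operator names and booleans with no `Var` placeholders left:
```python
# eval_expr() on fully substituted expressions (hand-built, hypothetical examples).
expr = ['implication', True, ['negation', False]]    # True -> not False
print(eval_expr(expr))    # True
expr2 = ['xor', True, ['conjunction', True, False]]  # True xor (True and False)
print(eval_expr(expr2))   # True
```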
{
"source": "16bitmood/ShellOverDiscord",
"score": 3
} |
#### File: ShellOverDiscord/discord_bot/main.py
```python
import os,sys,time
import csv,re
from dotenv import load_dotenv
import asyncio
import socketio
import discord
bot_users = []
def load_users():
with open("../users.csv", newline='') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
bot_users.append((row['USER_ID'],row['DISC_NAME']))
# print(row['USER_ID'],row['DISC_NAME'])
def authorized(x):
for user_id,disc_name in bot_users:
if user_id == str(x):
return True
return False
# Docker Client Code
sio = socketio.AsyncClient()
running_shells = {}
@sio.event
async def connect():
print("Connected to Docker Server!")
@sio.on('info')
async def info(data):
print("[DOCKER]",data)
@sio.on('shell_output')
async def shell_output(data):
given_id = int(data[0])
if given_id in running_shells.keys():
# TODO: remove ansi escape sequences
to_send = "```\n" + data[1] + "\n```"
await running_shells[given_id].send(to_send)
print("[INFO] Sending shell output of ",given_id)
@sio.on('dispatch_discord_file')
async def dispatch_discord_file(data):
print("[INFO] Sending file to discord")
temp_file_name = "./temp/to_send"
with open(temp_file_name,"wb") as f:
f.write(data["dat"])
file = discord.File(temp_file_name, filename=data["file_name"])
await running_shells[int(data["user"])].send(file=file)
os.remove(temp_file_name)
# Discord Bot Code
disc_bot = discord.Client()
@disc_bot.event
async def on_ready():
print("Connected to discord!")
print(disc_bot.guilds)
@disc_bot.event
async def on_message(msg):
if msg.content[0] == ">":
if authorized(msg.author.id):
await sio.emit("shell",{'input':msg.content[1:],'id':msg.author.id})
running_shells[msg.author.id] = msg.channel
else:
await msg.channel.send('no')
elif msg.content[0] == "!":
if authorized(msg.author.id):
await sio.emit("shell_reset",{'id':msg.author.id})
running_shells[msg.author.id] = None
else:
await msg.channel.send('no')
return
async def main():
# Load Discord bot config
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
load_users()
print(bot_users)
await asyncio.wait([\
disc_bot.start(TOKEN), \
sio.connect('http://127.0.0.1:5000')])
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
loop.close()
``` |
{
"source": "16BitNarwhal/EyeHelp",
"score": 3
} |
#### File: src/eyehelp/app.py
```python
import toga
from toga.style.pack import Pack, COLUMN, ROW
import threading
import time
import os
import sys
import random
import eyehelp.face_detect as fd
import eyehelp.calibrate as calibrate
import eyehelp.grapher as grapher
# Label for multiple lines
def MultilineLabel(text : str, box_style : Pack = None, label_style : Pack = None, char_line=30) -> toga.Box :
children = []
label_text = ''
for word in text.split():
if len(label_text) <= char_line:
label_text += word + ' '
else:
label = toga.Label(label_text, style=label_style)
children.append(label)
label_text = word + ' '
if label_text != '':
label = toga.Label(label_text, style=label_style)
children.append(label)
box = toga.Box(id = None, style = box_style,
children = children)
return box
class EyeHelp(toga.App):
# Called when application starts
def startup(self):
# Paths and files
ospath = os.path.dirname(sys.argv[0])
self.CONFIG_FILE = ospath + '\\config'
# Constants
self.CHECKUP_TIME = 20 * 60
self.BREAK_TIME = 20
# Activity box
activity_box = toga.Box(style=Pack(direction=COLUMN, background_color='aqua'))
self.timer_running = False
title_label = toga.Label(
'Eye Help',
style=Pack(padding_left=50, padding_right=50, padding_top=20,
font_family='monospace', font_size=30, font_weight='bold')
)
instruction = 'Start and stop timer to track your screen use and eye blinks'
instructions_box = MultilineLabel(
instruction,
box_style=Pack(direction=COLUMN, padding_left=50, padding_top=10),
label_style=Pack(padding_bottom=10, font_family='monospace', font_size=12),
char_line=35
)
self.start_button = toga.Button(
'Start Timer!',
on_press=self.begin_timer,
style=Pack(padding_left=50, padding_right=50, padding_top=20,
background_color='violet', height=50, font_family='monospace', font_size=20)
)
self.stop_button = toga.Button(
'Stop Timer!',
on_press=self.stop_timer,
style=Pack(padding_left=50, padding_right=50, padding_top=20, padding_bottom=10,
background_color='violet', height=50, font_family='monospace', font_size=20),
enabled=False
)
# https://www.healthline.com/health/how-many-times-do-you-blink-a-day
blinks_label = MultilineLabel('It is recommended to blink 15 - 20 times in a minute',
box_style=Pack(direction=COLUMN, padding_bottom=10, background_color='aqua'),
label_style=Pack(padding_left=50,
background_color='aqua', font_family='monospace', font_size=12), char_line=35
)
activity_box.add(title_label)
activity_box.add(self.start_button)
activity_box.add(self.stop_button)
activity_box.add(instructions_box)
activity_box.add(blinks_label)
# Eye tips box
# https://www.mayoclinic.org/diseases-conditions/eyestrain/diagnosis-treatment/drc-20372403
# https://www.health.ny.gov/prevention/tobacco_control/smoking_can_lead_to_vision_loss_or_blindness
# https://www.health.harvard.edu/blog/will-blue-light-from-electronic-devices-increase-my-risk-of-macular-degeneration-and-blindness-2019040816365
self.eye_tips = [
'Try to not to use very bright lighting as the glare can strain your eyes and make the screen harder to look at',
'Try to put light sources in places that do not directly shine on your eyes',
'Using non-prescripted eye drops can help relieve dry eyes. Try to get ones recommended by your doctor',
'Make sure your screens are at least an arm\'s length away from your eyes',
'Improve the air quality by getting a humidifier or adjusting the thermostat',
'If you smoke, try to stop as it can lead to diseases related to vision loss',
'Low levels of blue light (emitted from screens and most lights) do not affect your eyes, but high levels can be hazardous to your eyes',
'Blue light affects your biological clock (sleep cycle) so try to avoid screens and bright lights before or while you sleep',
'20-20-20: Every 20 minutes, focus on an object 20 feet away for at least 20 seconds', # done by timer
'...'
]
eye_tips_box = toga.Box(style=Pack(direction=COLUMN, padding_bottom=10, background_color='aqua'))
for tip in self.eye_tips:
tip_box = MultilineLabel(tip, box_style=Pack(direction=COLUMN, padding_bottom=10, background_color='wheat'),
label_style=Pack(background_color='aqua', font_family='monospace', font_size=15), char_line=28)
eye_tips_box.add(tip_box)
eye_tips_scroll = toga.ScrollContainer(style=Pack(direction=COLUMN, padding=(5,5), background_color='red'))
eye_tips_scroll.content = eye_tips_box
# Calibrate box
calibrate_box = toga.Box(style=Pack(direction=COLUMN, background_color='aqua'))
calibrate_info = toga.Label('You will be given instructions',
style=Pack(padding_left=10, padding_top=10, font_family='monospace', font_size=15))
calibrate_button = toga.Button(
'Calibrate',
style=Pack(padding_left=50, padding_right=50, padding_top=20, padding_bottom=20,
background_color='violet', font_family='monospace', font_size=20),
on_press=self.start_calibrate
)
calibrate_when = MultilineLabel('Use this if you feel that blinks are not being counted correctly',
box_style=Pack(direction=COLUMN, padding_bottom=10, background_color='aqua'),
label_style=Pack(padding_left=10, font_family='monospace', font_size=15))
graph_button = toga.Button(
'Graph Eye Aspect Ratio',
style=Pack(padding_left=50, padding_right=50, padding_top=10, padding_bottom=10,
background_color='violet', font_family='monospace', font_size=15),
on_press=self.start_graph
)
EAR_definition = MultilineLabel('*Eye aspect ratio is lower the more your eyes close',
box_style=Pack(direction=COLUMN, padding_bottom=10, background_color='aqua'),
label_style=Pack(padding_left=10, font_family='monospace', font_size=13))
# manual calibration is a work in progress
manual_label = toga.Label('Manually calibrate (pick EAR) here',
style=Pack(padding_left=10, padding_top=10, font_family='monospace', font_size=15))
manual_label2 = toga.Label('Pick a value that seems like a blink',
style=Pack(padding_left=10, padding_top=10, font_family='monospace', font_size=15))
manual_input = toga.NumberInput(min_value=1, max_value=99,
style=Pack(padding=(10, 50), width=50))
calibrate_box.add(calibrate_when)
calibrate_box.add(calibrate_button)
calibrate_box.add(calibrate_info)
calibrate_box.add(graph_button)
calibrate_box.add(EAR_definition)
# Config box
config_box = toga.Box(style=Pack(direction=COLUMN, background_color='aqua'))
self.video_switch = toga.Switch(
'Show Video',
style=Pack(padding_left=50, padding_right=50, padding_top=20, padding_bottom=20,
font_family='monospace', font_size=20),
is_on=self.tobool(self.read_config()[1])
)
save_button = toga.Button(
'Save Configuration',
style=Pack(padding_left=50, padding_right=50, padding_top=20, padding_bottom=20,
background_color='violet', font_family='monospace', font_size=20),
on_press=self.save_config
)
reset_button = toga.Button(
'Reset Configuration',
style=Pack(padding_left=50, padding_right=50, padding_top=20, padding_bottom=20,
background_color='red', font_family='monospace', font_size=20),
on_press=self.reset_config
)
config_box.add(self.video_switch)
config_box.add(save_button)
config_box.add(reset_button)
# options - toolbar
options = toga.OptionContainer(style=Pack(direction=ROW, background_color='snow',
font_family='monospace', font_size=15))
options.add('Activity', activity_box)
options.add('Eye tips', eye_tips_scroll)
options.add('Calibrate', calibrate_box)
options.add('Configure', config_box)
main_box = toga.Box(style=Pack(padding=(10,10), direction=COLUMN, background_color='snow'))
main_box.add(options)
# Create and show main window
self.main_window = toga.MainWindow(title=self.formal_name, size=(640,480), resizeable=False)
self.main_window.content = main_box
self.main_window.show()
# Starts a thread so application can work while timer runs
def begin_timer(self, widget):
if self.timer_running:
print('[App] Timer already running')
return
print('[App] Timer started')
# toggle buttons usage
self.start_button.enabled = False
self.stop_button.enabled = True
self.timer_running = True
fd.detect_exit = False
# start threads
self.timer_thread = threading.Thread(target=self.timer_loop)
self.detect_thread = threading.Thread(target=self.start_detect)
self.timer_thread.start()
self.detect_thread.start()
# will be used to break out of thread loop
def stop_timer(self, widget):
if not self.timer_running:
print('[App] Timer not started')
return
print('[App] Timer stopped')
# toggle buttons usage
self.start_button.enabled = True
self.stop_button.enabled = False
fd.detect_exit = True
# join threads
self.timer_thread.join()
self.detect_thread.join()
# timer that will notify user after some time (forever)
def timer_loop(self):
start_time = time.time()
while True:
# When user has been on screen for some time (CHECKUP_TIME)
elapsed_time = time.time() - start_time
if elapsed_time >= self.CHECKUP_TIME:
print('[App] Checkup time')
# calls the notify function
command = toga.Command(self.notify, 'Command')
command.action(command)
# restart timer and blink counter
start_time = time.time()
fd.blink_count = 0
# When user has left screen for more than some time (BREAK_TIME)
if not fd.has_face:
start_time = time.time()
# exit loop / stop timer
if fd.detect_exit:
self.timer_running = False
break
# make sure button usage is changed (if opencv is force quit with the q key)
self.start_button.enabled = True
self.stop_button.enabled = False
# starts face detection in face_detect.py
def start_detect(self):
fd.start_detection(BREAK_TIME=self.BREAK_TIME, DISPLAY_FRAME=self.video_switch.is_on, DISPLAY_TYPE='blink')
# may replace with a python windows balloon tip notifier vvv
# notifies the user with a dialog box
def notify(self, widget):
print('[App] Eye notification')
minutes = self.CHECKUP_TIME/60.0
text = 'It has been {:.1f} minutes, please take a short break\n'.format(minutes)
blink_rate = fd.blink_count / minutes
text += 'Your blink rate is {:.1f} times / minute\n'.format(blink_rate)
if blink_rate < 15:
text += 'Tip: Try to blink more to reduce eye strain\n'
else:
text += 'Tip: ' + random.choice(self.eye_tips) + '\n'
self.main_window.info_dialog('Eye Help', text)
# save all config states to configuration file
def save_config(self, widget):
self.write_config(1, str(self.video_switch.is_on))
# reset all config states in configuration file
def reset_config(self, widget):
config = open(self.CONFIG_FILE, 'w')
config.write('0.25\n') # [0]: EAR_threshold for blink detection
config.write('False\n') # [1]: Bool to turn on camera for general timer
# writing to configuration file
def write_config(self, idx, value):
print('[App] Saving {} to config idx: {}'.format(value, idx))
config_lines = self.read_config()
config_lines[idx] = value + '\n'
config = open(self.CONFIG_FILE, 'w')
config.writelines(config_lines)
config.close()
# reading from configuration file
def read_config(self):
config = open(self.CONFIG_FILE, 'r')
config_lines = config.readlines()
config.close()
return config_lines
# helper function for converting string to bool
def tobool(self, string):
return (string == 'True\n')
# calibrate ear threshold
def start_calibrate(self, widget):
calibrate_thread = threading.Thread(target=calibrate.start_calibrate)
calibrate_thread.start()
# graph EAR data
def start_graph(self, widget):
grapher.start_graphing()
def main():
return EyeHelp()
```
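A quick numeric illustration of the notification maths used in `notify` above; the blink count is made up:
```python
# Hypothetical numbers mirroring the blink-rate computation in notify().
CHECKUP_TIME = 20 * 60                 # seconds between check-ups
minutes = CHECKUP_TIME / 60.0          # 20.0
blink_count = 250                      # made-up counter value from face_detect
blink_rate = blink_count / minutes     # 12.5 blinks/minute
print(blink_rate < 15)                 # True -> below the recommended 15-20, so the "blink more" tip is shown
```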
#### File: src/eyehelp/grapher.py
```python
from matplotlib import pyplot as plt
import os
import sys
def start_graphing():
# initialize data file to read from
ospath = os.path.dirname(sys.argv[0])
EAR_DATA_FILE = ospath + '\\ear_data'
ear_data = open(EAR_DATA_FILE, 'r')
# read data file into an array
array = [int(float(i)*100) for i in ear_data.readlines()]
# plot the array
plt.xlabel('Frames')
plt.ylabel('Eye Aspect Ratio (%)')
plt.title('EAR per frame graph')
plt.plot(array)
plt.show()
if __name__=='__main__':
start_graphing()
``` |
{
"source": "16bytes/rpt2csv.py",
"score": 4
} |
#### File: 16bytes/rpt2csv.py/rpt2csv.py
```python
import sys
import csv
import codecs
def convert(inputFile,outputFile):
"""
Convert a RPT file to a properly escaped CSV file
RPT files are usually sourced from old versions of Microsoft SQL Server Management Studio
RPT files are fixed width with column names on the first line, a second line with dashes and spaces,
and then on one row per record.
The column widths are calculated from the longest field in a column, so the format varies
depending on the results. Thankfully, we can reliably infer column widths by looking at the indexes
of spaces on the second line.
Here we chop each record at the index of the space on the second line and strip the result.
Note, if the source data has significant whitespace, the striping will remove this, but likely significant
whitespace was destroyed by the RPT field padding anyway.
"""
writer = csv.writer(outputFile)
fieldIndexes = []
headers = ""
for idx, val in enumerate(inputFile):
if(idx == 0):
headers = val
elif(idx == 1):
fieldIndexes = list(getFieldIndexes(val," "))
row = list(getFields(headers,fieldIndexes))
writer.writerow(row)
else:
row = list(getFields(val,fieldIndexes))
writer.writerow(row)
def getFieldIndexes(input, sep):
lastIndex = 0
for idx, c in enumerate(input):
if(c == sep):
yield (lastIndex,idx)
lastIndex = idx+1
yield lastIndex, len(input)
def getFields(input, indexes):
for index in indexes:
yield input[index[0]:index[1]].strip()
if __name__ == '__main__':
if(len(sys.argv) == 3):
with open(sys.argv[1],encoding='utf-8-sig') as inputFile:
with open(sys.argv[2],'w',newline='') as outputFile:
convert(inputFile,outputFile)
else:
print("Usage: rpt2csv.py inputFile outputFile")
``` |
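A small worked example of `convert` on an in-memory RPT snippet (the sample data is made up); `io.StringIO` stands in for the real input and output files:
```python
# Column widths are inferred from the dashed second line of the (made-up) RPT sample.
import io
rpt = "id   name \n---- -----\n1    Bob  \n2    Alice\n"
out = io.StringIO()
convert(io.StringIO(rpt), out)
print(out.getvalue())  # "id,name", "1,Bob", "2,Alice" on separate CSV rows
```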
{
"source": "16kozlowskim/Group-20-SE",
"score": 3
} |
#### File: examples/scraper/googScraper.py
```python
import urllib2, csv, sys
from bs4 import BeautifulSoup
def get_company_data(ticker):
url = 'https://finance.google.com/finance?q=lon:'
url += ticker
#pathToCSV = '/Users/Michal/Downloads/dialogflow-java-client-master2/samples/clients/VirtualTradingAssistant/src/main/java/ai/api/examples/fileStore/file.csv'
#pathToCSV = 'C:\\Users\\ojwoo\\Documents\\Warwick\\CS261\\Coursework\\dialogflow-java-client-master\\samples\\clients\\VirtualTradingAssistant\\src\\main\\java\\ai\\api\\examples\\fileStore\\file.csv'
#pathToCSV = '/Users/Michal/Desktop/apache-tomcat-8.5.28/bin/misc/file.csv'
pathToCSV = 'C:\\apache-tomcat-8.5.28\\bin\\misc\\file.csv'
page = urllib2.urlopen(url)
soup = BeautifulSoup(page, 'html.parser')
div = soup.find('div', attrs={'id' : 'price-panel'})
with open(pathToCSV, 'w') as csvfile:
wr = csv.writer(csvfile, delimiter='@', quotechar='#')
wr.writerow([div.find('span', attrs={'class' : 'pr'}).text.strip().encode('utf-8')])
for e in div.find('div', attrs={'class' : 'id-price-change nwp'}).text.strip().split('\n'):
wr.writerow([e])
div = soup.find('div', attrs={'class' : 'snap-panel'})
tables = div.find_all('table')
for table in tables:
rows = table.find_all('tr')
for row in rows:
wr.writerow([row.find('td', attrs={'class' : 'val'}).text.strip().encode('utf-8')])
def main():
get_company_data(sys.argv[1])
if __name__ == '__main__':
main()
```
#### File: examples/scraper/historicalScrape.py
```python
import re, csv, sys, urllib2
from bs4 import BeautifulSoup
# If start date and end date is the same only one value will be returned and
# if not the multiple values which can be used to make calculations
#
# ticker (company symbol)
# interval (d (daily), m (monthly), q (quarterly), y (yearly))
# start_date (YYYYMMDD)
# end_date (YYYYMMDD)
def get_historical_data(ticker, interval, start_date, end_date):
#pathToCSV = '/Users/Michal/Downloads/dialogflow-java-client-master2/samples/clients/VirtualTradingAssistant/src/main/java/ai/api/examples/fileStore/file.csv'
#pathToCSV = 'C:\\Users\\ojwoo\\Documents\\Warwick\\CS261\\Coursework\\dialogflow-java-client-master\\samples\\clients\\VirtualTradingAssistant\\src\\main\\java\\ai\\api\\examples\\fileStore\\file.csv'
#pathToCSV = '/Users/Michal/Desktop/apache-tomcat-8.5.28/bin/misc/file.csv'
pathToCSV = 'C:\\apache-tomcat-8.5.28\\bin\\misc\\file.csv'
url_builder = []
url_builder.append('https://stooq.com/q/d/?s=')
url_builder.append(ticker)
url_builder.append('&c=0&d1=')
url_builder.append(start_date)
url_builder.append('&d2=')
url_builder.append(end_date)
url_builder.append('&i=')
url_builder.append(interval)
url = ''.join(url_builder)
page = urllib2.urlopen(url)
soup = BeautifulSoup(page, 'html.parser')
link = soup.findAll('a', href=re.compile('^q/d/l/'))
link = re.search('"(.*)"', str(link))
try:
link = link.group(1)
except AttributeError:
with open(pathToCSV, 'w') as csvfile:
wr = csv.writer(csvfile, delimiter='@', quotechar='#')
wr.writerow('')
exit()
link = link.replace('amp;', '')
arr = []
arr.append('https://stooq.com/')
arr.append(link)
link = ''.join(arr)
response = urllib2.urlopen(link)
cr = csv.reader(response)
with open(pathToCSV, 'w') as csvfile:
wr = csv.writer(csvfile, delimiter='@', quotechar='#')
wr.writerows(cr)
def main():
args = sys.argv
get_historical_data(args[1], args[2], args[3], args[4])
if __name__ == '__main__':
main()
```
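A hypothetical call to `get_historical_data` using the parameter formats described in the comment block above; the ticker is a placeholder, and the hard-coded `pathToCSV` location and network access to stooq.com are assumed:
```python
# Daily quotes for a placeholder ticker over January 2018, written to pathToCSV.
get_historical_data('tsco.uk', 'd', '20180101', '20180131')
```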
#### File: examples/scraper/sectorsScraper.py
```python
import urllib2, cookielib, csv, sys
from bs4 import BeautifulSoup
# gets company data by sector in the following format
# [ticker, name]
# sector_num:
# Aerospace & Defense - 2710
# Alternative Energy - 0580
# Automobiles & Parts - 3350
# Banks - 8350
# Beverages - 3530
# Chemicals - 1350
# Construction & Materials - 2350
# Electricity - 7530
# Electronic & Electrical - 2730
# Equity Investment Instruments - 8980
# Financial Services - 8770
# Fixed Line Telecom - 6530
# Food & Drug Retailers - 5330
# Food Producers - 3570
# Forestry & Paper - 1730
# Gas, Water & Multiutilities - 7570
# General Industrials - 2720
# General Retailers - 5370
# Health Care Equipment & Services - 4530
# Household Goods & Home Construction - 3720
# Industrial Engineering - 2750
# Industrial Metals & Mining - 1750
# Industrial Transportation - 2770
# Leisure Goods - 3740
# Life Insurance - 8570
# Media - 5550
# Mining - 1770
# Mobile Telecommunications - 6570
# Nonequity Investment Instruments - 8990
# Nonlife Insurance - 8530
# Oil & Gas Producers - 0530
# Oil Equipment & Services - 0570
# Personal Goods - 3760
# Pharmaceuticals & Biotechnology - 4570
# Real Estate Investment & Services - 8630
# Real Estate Investment Trusts - 8670
# Software & Computer Services - 9530
# Support Services - 2790
# Technology Hardware & Equipment - 9570
# Tobacco - 3780
# Travel & Leisure - 5750
def get_sector_data(sector_num):
#pathToCSV = 'C:\\Users\\ojwoo\\Documents\\Warwick\\CS261\\Coursework\\dialogflow-java-client-master\\samples\\clients\\VirtualTradingAssistant\\src\\main\\java\\ai\\api\\examples\\fileStore\\file.csv'
#pathToCSV = '/Users/Michal/Downloads/dialogflow-java-client-master2/samples/clients/VirtualTradingAssistant/src/main/java/ai/api/examples/fileStore/file.csv'
#pathToCSV = '/Users/Michal/Desktop/apache-tomcat-8.5.28/bin/misc/file.csv'
pathToCSV = 'C:\\apache-tomcat-8.5.28\\bin\\misc\\file.csv'
url_builder = []
url_builder.append('http://www.londonstockexchange.com/exchange/prices-and-markets/stocks/indices/constituents-indices.html?index=UKX&industrySector=')
url_builder.append(sector_num)
url_builder.append('&page=1')
url = ''.join(url_builder)
hdr = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'none',
'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive'}
req = urllib2.Request(url, headers=hdr)
try:
page = urllib2.urlopen(req)
except urllib2.HTTPError, e:
print e.fp.read()
content = page.read()
soup = BeautifulSoup(content, 'html.parser')
table = soup.find('table', attrs={'class' : 'table_dati'})
try:
table_body = table.find('tbody')
except AttributeError:
with open(pathToCSV, 'w') as csvfile:
wr = csv.writer(csvfile, delimiter='@', quotechar='#')
wr.writerow('')
exit()
rows = table_body.find_all('tr')
data = []
for row in rows:
cols = row.find_all('td')
cols = [ele.text.strip() for index, ele in enumerate(cols) if index < 10]
data.append([ele for ele in cols if ele])
with open(pathToCSV, 'w') as csvfile:
wr = csv.writer(csvfile, delimiter='@', quotechar='#')
wr.writerows(data)
def main():
args = sys.argv
get_sector_data(args[1])
if __name__ == '__main__':
main()
``` |
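A hypothetical call to `get_sector_data` using one of the sector codes listed above (9530 = Software & Computer Services); the scraped rows are written to the hard-coded `pathToCSV`:
```python
# Scrape FTSE constituents for the Software & Computer Services sector.
get_sector_data('9530')
```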
{
"source": "16Lab-Inc/pyOCD",
"score": 2
} |
#### File: pyOCD/test/concurrency_test.py
```python
from __future__ import print_function
import argparse
import os
import sys
from time import (sleep, time)
from random import randrange
import math
import struct
import traceback
import argparse
import logging
from itertools import (chain, repeat)
from pyocd.core.helpers import ConnectHelper
from pyocd.flash.file_programmer import FileProgrammer
from pyocd.probe.pydapaccess import DAPAccess
from pyocd.utility.conversion import float32_to_u32
from pyocd.utility.mask import same
from pyocd.utility.compatibility import to_str_safe
from pyocd.core.memory_map import MemoryType
from test_util import (
Test,
TestResult,
get_session_options,
get_target_test_params,
run_in_parallel,
)
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Test configuration values.
TEST_MAX_LENGTH = 1 * 1024 * 1024
TEST_THREAD_COUNT = 8
TEST_SUBCHUNK_COUNT = 2 # Number of reads/writes per thread.
def ncycles(iterable, n):
return chain.from_iterable(repeat(tuple(iterable), n))
class ConcurrencyTestResult(TestResult):
def __init__(self):
super(ConcurrencyTestResult, self).__init__(None, None, None)
self.name = "concurrency"
class ConcurrencyTest(Test):
def __init__(self):
super(ConcurrencyTest, self).__init__("Concurrency Test", concurrency_test)
def run(self, board):
try:
result = self.test_function(board.unique_id)
except Exception as e:
result = ConcurrencyTestResult()
result.passed = False
print("Exception %s when testing board %s" % (e, board.unique_id))
traceback.print_exc(file=sys.stdout)
result.board = board
result.test = self
return result
def concurrency_test(board_id):
with ConnectHelper.session_with_chosen_probe(unique_id=board_id, **get_session_options()) as session:
board = session.board
target = session.target
test_params = get_target_test_params(session)
session.probe.set_clock(test_params['test_clock'])
memory_map = target.get_memory_map()
boot_region = memory_map.get_boot_memory()
ram_region = memory_map.get_default_region_of_type(MemoryType.RAM)
binary_file = os.path.join(parentdir, 'binaries', board.test_binary)
test_pass_count = 0
test_count = 0
result = ConcurrencyTestResult()
target.reset_and_halt()
# Prepare TEST_THREAD_COUNT regions of RAM with patterns
data_len = min(TEST_MAX_LENGTH, ram_region.length)
chunk_len = data_len // TEST_THREAD_COUNT
subchunk_len = chunk_len // TEST_SUBCHUNK_COUNT
chunk_data = []
for i in range(TEST_THREAD_COUNT):
chunk_data.append([(i + j) % 256 for j in range(chunk_len)])
def write_chunk_data(core, i):
start = ram_region.start + chunk_len * i
for j in range(TEST_SUBCHUNK_COUNT):
offset = subchunk_len * j
addr = start + offset
end = addr + subchunk_len - 1
print("Writing region %i:%i from %#010x to %#010x via %s" % (i, j, addr, end, core.ap))
core.write_memory_block8(addr, chunk_data[i][offset:offset + subchunk_len])
print("Finished writing region %i:%i" % (i, j))
def read_chunk_data(core, i):
start = ram_region.start + chunk_len * i
for j in range(TEST_SUBCHUNK_COUNT):
offset = subchunk_len * j
addr = start + offset
end = addr + subchunk_len - 1
print("Reading region %i:%i from %#010x to %#010x via %s" % (i, j, addr, end, core.ap))
data = core.read_memory_block8(addr, subchunk_len)
chunk_read_data[i].extend(data)
print("Finished reading region %i:%i" % (i, j))
# Test with a single core/AP.
print("\n------ Test 1: Concurrent memory accesses, single core ------")
core = target.cores[0]
# Write chunk patterns concurrently.
print("Writing %i regions to RAM" % TEST_THREAD_COUNT)
run_in_parallel(write_chunk_data, [[core, i] for i in range(TEST_THREAD_COUNT)])
print("Reading %i regions to RAM" % TEST_THREAD_COUNT)
chunk_read_data = [list() for i in range(TEST_THREAD_COUNT)]
run_in_parallel(read_chunk_data, [[core, i] for i in range(TEST_THREAD_COUNT)])
print("Comparing data")
for i in range(TEST_THREAD_COUNT):
test_count += 1
if same(chunk_read_data[i], chunk_data[i]):
test_pass_count += 1
print("Region %i PASSED" % i)
else:
print("Region %i FAILED" % i)
# Test with a multiple cores/APs.
# Disabled until cores each have their own memory map, the regions accessible to each
# core can be identified.
if False: # len(target.cores) > 1:
print("\n------ Test 2: Concurrent memory accesses, multiple cores ------")
cycle_count = ((len(target.cores) + TEST_THREAD_COUNT - 1) // TEST_THREAD_COUNT * TEST_THREAD_COUNT)
repeat_cores = ncycles(iter(target.cores), cycle_count)
thread_args = []
for i in range(TEST_THREAD_COUNT):
thread_args.append((target.cores[next(repeat_cores)], i))
# Write chunk patterns concurrently.
print("Writing %i regions to RAM" % TEST_THREAD_COUNT)
run_in_parallel(write_chunk_data, thread_args)
print("Reading %i regions to RAM" % TEST_THREAD_COUNT)
chunk_read_data = [list() for i in range(TEST_THREAD_COUNT)]
run_in_parallel(read_chunk_data, thread_args)
print("Comparing data")
for i in range(TEST_THREAD_COUNT):
test_count += 1
if same(chunk_read_data[i], chunk_data[i]):
test_pass_count += 1
print("Region %i PASSED" % i)
else:
print("Region %i FAILED" % i)
# --- end ---
print("\nTest Summary:")
print("Pass count %i of %i tests" % (test_pass_count, test_count))
if test_pass_count == test_count:
print("CONCURRENCY TEST PASSED")
else:
print("CONCURRENCY TEST FAILED")
target.reset()
result.passed = test_count == test_pass_count
return result
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='pyOCD concurrency test')
parser.add_argument('-d', '--debug', action="store_true", help='Enable debug logging')
parser.add_argument("-da", "--daparg", dest="daparg", nargs='+', help="Send setting to DAPAccess layer.")
args = parser.parse_args()
level = logging.DEBUG if args.debug else logging.INFO
logging.basicConfig(level=level)
DAPAccess.set_args(args.daparg)
# Set to debug to print some of the decisions made while flashing
session = ConnectHelper.session_with_chosen_probe(**get_session_options())
test = ConcurrencyTest()
result = [test.run(session.board)]
``` |
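For reference, a small sketch of how the test above carves the RAM region into per-thread chunks and sub-chunks; the RAM size is a made-up stand-in for `ram_region.length`:
```python
# Chunk arithmetic from concurrency_test(), with a hypothetical 256 KiB RAM region.
TEST_MAX_LENGTH = 1 * 1024 * 1024
TEST_THREAD_COUNT = 8
TEST_SUBCHUNK_COUNT = 2
ram_length = 256 * 1024
data_len = min(TEST_MAX_LENGTH, ram_length)       # 262144 bytes actually exercised
chunk_len = data_len // TEST_THREAD_COUNT         # 32768 bytes written/read per thread
subchunk_len = chunk_len // TEST_SUBCHUNK_COUNT   # 16384 bytes per individual transfer
print(data_len, chunk_len, subchunk_len)
```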
{
"source": "16lemoing/automated-essay-scoring",
"score": 3
} |
#### File: automated-essay-scoring/bin/prepare_glove.py
```python
import sys
sys.path.append("../src")
import argparse
from pathlib import Path
from glove import download_glove, preprocess_glove
def main(args):
# Build glove directory
glove_dir = Path("..") / "data" / "glove"
if not glove_dir.exists():
glove_dir.mkdir()
# Preprocess glove embedding
for dim in args.dims:
preprocess_glove(glove_dir, args.glove_type, dim)
print("done")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--glove_type', default = '6B',
help = "any of ['42B.300d', '840B.300d', '6B', 'twitter.27B'] "\
"(see updated list of available types at https://github.com/stanfordnlp/GloVe)")
parser.add_argument('--dims', type = int, nargs = '+', default = [50, 100, 200, 300],
help = "dimensions of embedding vectors "\
"(should be compatible with .txt files for glove type)")
args = parser.parse_args()
main(args)
```
#### File: automated-essay-scoring/src/glove.py
```python
import requests
import zipfile
import io
import bcolz
import numpy as np
import pickle
from tqdm import tqdm
def preprocess_glove(glove_dir, glove_type, dim):
"""
Parse glove embedding vectors from text file and save them for fast access
Parameters
----------
glove_dir : string (path to the folder containing the embedding text file; output files will be saved to the same folder)
glove_type : string (identifier for selected glove file)
dim : int (dimension of embedding vectors)
"""
words = []
idx = 0
word2idx = {}
vectors = []
# Prepare embedding data file
vectors = bcolz.carray(np.zeros(1), rootdir = glove_dir / f'{glove_type}.{dim}.dat', mode = 'w')
# Read embeddings
glove_txt_file = glove_dir / f'glove.{glove_type}.{dim}d.txt'
assert glove_txt_file.exists(), f"{glove_txt_file} does not exists"
num_lines = sum(1 for line in open(glove_txt_file, 'rb'))
with open(glove_txt_file, 'rb') as f:
for l in tqdm(f, total=num_lines, desc=f'reading embeddings {glove_type} of dim {dim}'):
line = l.decode().split()
word = line[0]
words.append(word)
word2idx[word] = idx
idx += 1
vect = np.array(line[1:]).astype(np.float)
vectors.append(vect)
# Save preprocessed embeddings
vectors = bcolz.carray(vectors[1:].reshape((idx, dim)), rootdir = glove_dir / f'{glove_type}.{dim}.dat', mode = 'w')
vectors.flush()
pickle.dump(words, open(glove_dir / f'{glove_type}.{dim}_words.pkl', 'wb'))
pickle.dump(word2idx, open(glove_dir / f'{glove_type}.{dim}_idx.pkl', 'wb'))
def get_glove(glove_dir, glove_type, dim):
"""
Load a dictionary of glove embedding vectors
Parameters
----------
glove_dir : string (path to the folder containing the preprocessed embedding files)
glove_type : string (identifier for selected glove file)
dim : int (dimension of embedding vectors)
Returns
-------
glove : dictionary (key: word, value: embedding vector)
"""
vectors = bcolz.open(glove_dir / f'{glove_type}.{dim}.dat')[:]
words = pickle.load(open(glove_dir / f'{glove_type}.{dim}_words.pkl', 'rb'))
word2idx = pickle.load(open(glove_dir / f'{glove_type}.{dim}_idx.pkl', 'rb'))
glove = {w: vectors[word2idx[w]] for w in words}
return glove
```
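A hypothetical usage of `get_glove`; the directory, glove type and dimension are placeholders, and `preprocess_glove` is assumed to have already produced the corresponding `.dat`/`.pkl` files:
```python
# Load the preprocessed 6B/50d vectors and look one word up.
from pathlib import Path
glove = get_glove(Path('..') / 'data' / 'glove', '6B', 50)
print(glove['the'].shape)  # (50,)
```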
#### File: automated-essay-scoring/src/train.py
```python
import torch
import torch.nn as nn
from tools import get_kappa
def train_model(model, device, lr, epochs, train_dataloader, valid_dataloader = None, logger = None):
optimizer = torch.optim.Adam(model.parameters(), lr = lr)
criterion = nn.MSELoss()
best_loss = 1e15
for ep in range(epochs):
# train
model.train()
total = 0
sum_loss = 0
for x, lengths, scores, feat in train_dataloader:
x = x.to(device)
lengths = lengths.to(device)
scores = scores.to(device)
feat = feat.to(device)
optimizer.zero_grad()
pred_scores = model(x, lengths, feat)
loss = criterion(scores, pred_scores)
loss.backward()
optimizer.step()
total += scores.shape[0]
sum_loss += loss.item() * scores.shape[0]
# log train loss
if logger is not None:
logger.log(train_loss = sum_loss / total)
# validate
if valid_dataloader is not None:
valid_loss, valid_w_kappa, valid_g_kappa, valid_i_kappa = evaluate_model(model, device, valid_dataloader, criterion)
# log valid metrics
if valid_dataloader is not None and logger is not None:
logger.log(valid_loss = valid_loss, valid_weighted_kappa = valid_w_kappa, valid_global_kappa = valid_g_kappa, valid_individual_kappa = valid_i_kappa)
# save best weights
if valid_loss < best_loss:
best_loss = valid_loss
logger.checkpoint_weights(model)
# display
if (ep + 1) % 5 == 0:
valid_string = f", (valid) loss {valid_loss: .3f}, weighted kappa {valid_w_kappa: .3f}, global kappa {valid_g_kappa: .3f}, individual kappa {list(valid_i_kappa.values())}" if valid_dataloader is not None else ""
print(f"Ep[{ep + 1}/{epochs}] (train) loss {sum_loss / total: .3f}{valid_string}")
def evaluate_model(model, device, dataloader, criterion = nn.MSELoss()):
model.eval()
total = 0
sum_loss = 0
all_pred_scores = torch.zeros(len(dataloader.dataset))
with torch.no_grad():
for x, lengths, scores, feat in dataloader:
x = x.to(device)
lengths = lengths.to(device)
scores = scores.to(device)
feat = feat.to(device)
pred_scores = model(x, lengths, feat)
all_pred_scores[total: total + scores.shape[0]] = pred_scores.cpu()
loss = criterion(scores, pred_scores)
total += scores.shape[0]
sum_loss += loss.item() * scores.shape[0]
scores = dataloader.dataset.recover(dataloader.dataset.get_scores())
pred_scores = dataloader.dataset.recover(all_pred_scores.numpy(), round_to_known = True)
valid_w_kappa, valid_g_kappa, valid_i_kappa = get_kappa(scores, pred_scores, dataloader.dataset.get_sets())
return sum_loss / total, valid_w_kappa, valid_g_kappa, valid_i_kappa
def predict(model,device,dataloader,round_to_known=False):
model.eval()
total = 0
all_pred_scores = torch.zeros(len(dataloader.dataset))
with torch.no_grad():
for x, lengths, scores, feat in dataloader:
x = x.to(device)
lengths = lengths.to(device)
scores = scores.to(device)
feat = feat.to(device)
pred_scores = model(x, lengths, feat)
all_pred_scores[total: total + scores.shape[0]] = pred_scores.cpu()
total += scores.shape[0]
pred_scores = dataloader.dataset.recover(all_pred_scores.numpy(), round_to_known)
return pred_scores
```
#### File: automated-essay-scoring/src/word2vec.py
```python
from data import tokenize_content
from gensim.models import Word2Vec
def get_word2vec(essay_contents, remove_stopwords, dim):
sentences = []
for content in essay_contents:
sentences += tokenize_content(content, remove_stopwords, level = "sentence")
print("training word2vec model")
word2vec_model = Word2Vec(sentences, workers = 10, size = dim)
return word2vec_model.wv
``` |
{
"source": "16lemoing/ccvs",
"score": 3
} |
#### File: ccvs/data/augmentations.py
```python
import torch
import torch.nn.functional as F
import numpy as np
import random
import torchvision.transforms as transforms
import math
from scipy.ndimage.filters import gaussian_filter
# Adapted from https://www.kaggle.com/bguberfain/elastic-transform-for-data-augmentation
def get_backwarp_grid(height, width):
horizontal = torch.linspace(-1.0 + (1.0 / width), 1.0 - (1.0 / width), width).view(1, 1, 1, -1).expand(-1, -1, height, -1)
vertical = torch.linspace(-1.0 + (1.0 / height), 1.0 - (1.0 / height), height).view(1, 1, -1, 1).expand(-1, -1, -1, width)
return torch.cat([horizontal, vertical], dim=1).float()
def backwarp(input, flow, backwarp_grid, padding_value=0, mode='bilinear'):
flow = torch.cat([flow[:, [0], :, :] / ((input.shape[3] - 1.0) / 2.0), flow[:, [1], :, :] / ((input.shape[2] - 1.0) / 2.0)], dim=1)
return torch.nn.functional.grid_sample(input=input - padding_value, grid=(backwarp_grid + flow).permute(0, 2, 3, 1), mode=mode, padding_mode='zeros', align_corners=False) + padding_value
def get_zoom_flow(zoom, height, width, adapt_to_scale=True):
if zoom >= 1 and adapt_to_scale:
tgt_height = height / zoom
tgt_width = width / zoom
else:
tgt_height = zoom * height
tgt_width = zoom * width
delta_height = height - tgt_height
delta_width = width - tgt_width
zoom_dx = delta_width / 2 - torch.arange(width) * delta_width / (width - 1)
zoom_dy = delta_height / 2 - torch.arange(height) * delta_height / (height - 1)
return zoom_dx, zoom_dy
def get_augmentation(img, backwarp_grid, dim, opt, layout=None):
alpha = opt.elastic_alpha
sigma = opt.elastic_sigma
min_zoom = opt.elastic_min_zoom
max_zoom = opt.elastic_max_zoom
corruption = opt.elastic_corruption
mean_corruption = opt.elastic_mean_corruption
blur = opt.blur_first
invert = opt.distort_first
random_state = np.random.RandomState(None)
shape = img.shape[-2:]
alpha = alpha * shape[0]
sigma = sigma * shape[0]
# elastic transformation
dx = torch.tensor(gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha)
dy = torch.tensor(gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha)
i_dx = None
i_dy = None
if invert:
elastic_flow = torch.stack([dx, dy]).float()
inv_elastic_flow = approx_flow_inversion(elastic_flow)
i_dx = inv_elastic_flow[0] # approximated inverse
i_dy = inv_elastic_flow[1] # approximated inverse
# zooming transformation
o_dx = None
o_dy = None
height, width = shape
zoom = min_zoom + np.random.rand() * (max_zoom - min_zoom)
zoom_dx, zoom_dy = get_zoom_flow(zoom, height, width)
if invert:
if zoom < 1:
i_dx += zoom_dx.view(1, -1) # exact inverse
i_dy += zoom_dy.view(-1, 1) # exact inverse
o_dx = zoom_dx.view(1, -1).repeat(height, 1)
o_dy = zoom_dy.view(-1, 1).repeat(1, width)
else:
dx += zoom_dx.view(1, -1)
dy += zoom_dy.view(-1, 1)
i_zoom_dx, i_zoom_dy = get_zoom_flow(1/zoom, height, width, adapt_to_scale=False)
i_dx -= i_zoom_dx.view(1, -1) # exact inverse
i_dy -= i_zoom_dy.view(-1, 1) # exact inverse
else:
if zoom < 1:
dx += zoom_dx.view(1, -1)
dy += zoom_dy.view(-1, 1)
else:
o_dx = zoom_dx.view(1, -1).repeat(height, 1)
o_dy = zoom_dy.view(-1, 1).repeat(1, width)
# create context and distorted image
if invert:
context_flow = torch.stack([dx, dy]).unsqueeze(0).float()
context_img = backwarp(img.unsqueeze(0), context_flow, backwarp_grid)
if o_dx is not None:
other_flow = torch.stack([o_dx, o_dy]).unsqueeze(0).float()
distorted_img = backwarp(img.unsqueeze(0), other_flow, backwarp_grid)
else:
distorted_img = img.unsqueeze(0).clone()
flow = torch.stack([i_dx, i_dy]).unsqueeze(0).float()
else:
distorted_flow = torch.stack([dx, dy]).unsqueeze(0).float()
distorted_img = backwarp(img.unsqueeze(0), distorted_flow, backwarp_grid)
if o_dx is not None:
other_flow = torch.stack([o_dx, o_dy]).unsqueeze(0).float()
context_img = backwarp(img.unsqueeze(0), other_flow, backwarp_grid)
flow = torch.stack([dx - o_dx, dy - o_dy]).unsqueeze(0).float()
else:
context_img = img.unsqueeze(0)
flow = torch.stack([dx, dy]).unsqueeze(0).float()
# create context and distorted layout
if layout is not None:
layout = layout.unsqueeze(0).float()
if invert:
context_flow = torch.stack([dx, dy]).unsqueeze(0).float()
context_layout = backwarp(layout.unsqueeze(0), context_flow, backwarp_grid, mode='nearest')
if o_dx is not None:
other_flow = torch.stack([o_dx, o_dy]).unsqueeze(0).float()
distorted_layout = backwarp(layout.unsqueeze(0), other_flow, backwarp_grid, mode='nearest')
else:
distorted_layout = layout.unsqueeze(0).clone()
flow = torch.stack([i_dx, i_dy]).unsqueeze(0).float()
else:
distorted_flow = torch.stack([dx, dy]).unsqueeze(0).float()
distorted_layout = backwarp(layout.unsqueeze(0), distorted_flow, backwarp_grid, mode='nearest')
if o_dx is not None:
other_flow = torch.stack([o_dx, o_dy]).unsqueeze(0).float()
context_layout = backwarp(layout.unsqueeze(0), other_flow, backwarp_grid, mode='nearest')
flow = torch.stack([dx - o_dx, dy - o_dy]).unsqueeze(0).float()
else:
context_layout = layout.unsqueeze(0)
flow = torch.stack([dx, dy]).unsqueeze(0).float()
# rescale image
f = None
if dim != shape[0]:
f = dim / shape[0]
tgt_shape = [dim, int(shape[1] * dim / shape[0])]
distorted_img = F.interpolate(distorted_img, size=tgt_shape, mode='bilinear')
context_img = F.interpolate(context_img, size=tgt_shape, mode='bilinear')
else:
tgt_shape = shape
# rescale layout
if layout is not None:
if dim != shape[0]:
tgt_shape = [dim, int(shape[1] * dim / shape[0])]
distorted_layout = F.interpolate(distorted_layout.float(), size=tgt_shape, mode='nearest')
context_layout = F.interpolate(context_layout.float(), size=tgt_shape, mode='nearest')
else:
tgt_shape = shape
# reshape layout
if layout is not None:
distorted_layout = distorted_layout.squeeze(1).long()
context_layout = context_layout.squeeze(1).long()
else:
distorted_layout, context_layout = torch.tensor([]), torch.tensor([])
# apply blur
if blur is not None:
s1, s2 = blur
s = s1 + (s2 - s1) * random.random()
k = int(3 * s) + 1 if int(3 * s) % 2 == 0 else int(3 * s)
t = transforms.GaussianBlur(kernel_size=max(3, min(k, 13)), sigma=s)
context_img = t(context_img)
# apply corruption
if corruption:
corr_level = 1 - 2 * mean_corruption
corr_mask = torch.tensor(gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha) > corr_level
mask = backwarp(corr_mask.view(1, 1, *shape).float(), flow, backwarp_grid, padding_value=1)
corr_mask = F.interpolate(corr_mask.view(1, 1, *shape).float(), size=tgt_shape, mode='bilinear')
context_img = context_img * (1 - corr_mask).unsqueeze(0)
mask = F.interpolate(mask, size=tgt_shape, mode='bilinear') > 0.5
else:
mask = torch.tensor([])
# rescale flow
if f is not None:
flow = F.interpolate(flow * f, size=tgt_shape, mode='bilinear')
return context_img.squeeze(0), context_layout.squeeze(0), distorted_img.squeeze(0), distorted_layout.squeeze(0), flow.squeeze(0), mask
def approx_flow_inversion(input, k=3):
height, width = input.shape[1:]
x_grid = torch.arange(width).view(1, -1).repeat(height, 1).view(-1).float()
y_grid = torch.arange(height).view(-1, 1).repeat(1, width).view(-1).float()
dx = input[0].view(-1)
dy = input[1].view(-1)
y_grid += dy
x_grid += dx
y_grid[y_grid < 0] = 0
x_grid[x_grid < 0] = 0
y_grid[y_grid > height - 1] = 0
x_grid[x_grid > width - 1] = 0
y_grid = y_grid.long()
x_grid = x_grid.long()
field = y_grid * width + x_grid
inv_dx = torch.zeros_like(dx).scatter_(0, field, -dx).view(height, width)
inv_dy = torch.zeros_like(dy).scatter_(0, field, -dy).view(height, width)
mask = torch.zeros_like(dx).scatter_(0, field, 1).view(height, width).bool()
padding = k // 2
kernel = get_gaussian_kernel(k).view(1, 1, k, k)
# fill missing value
while not mask.all():
# propagate mask
new_mask = torch.zeros_like(mask)
new_mask[1:] = (~mask[1:] & mask[:-1])
new_mask[:-1] = (~mask[:-1] & mask[1:]) | new_mask[:-1]
new_mask[:, 1:] = (~mask[:, 1:] & mask[:, :-1]) | new_mask[:, 1:]
new_mask[:, :-1] = (~mask[:, :-1] & mask[:, 1:]) | new_mask[:, :-1]
# compute missing values using kxk mean
new_inv_dx = F.conv2d(inv_dx.view(1, 1, height, width), kernel, padding=padding).view(height, width)
new_inv_dy = F.conv2d(inv_dy.view(1, 1, height, width), kernel, padding=padding).view(height, width)
new_sum = F.conv2d(mask.float().view(1, 1, height, width), kernel, padding=padding).view(height, width)
inv_dx[new_mask] = new_inv_dx[new_mask] / new_sum[new_mask]
inv_dy[new_mask] = new_inv_dy[new_mask] / new_sum[new_mask]
# update mask
mask = mask | new_mask
return torch.stack([inv_dx, inv_dy])
def get_gaussian_kernel(k):
x_cord = torch.arange(k)
x_grid = x_cord.repeat(k).view(k, k)
y_grid = x_grid.t()
xy_grid = torch.stack([x_grid, y_grid], dim=-1)
mean = (k - 1) / 2.
sigma = k / 6
variance = sigma ** 2.
# Calculate the 2-dimensional gaussian kernel which is
# the product of two gaussian distributions for two different
# variables (in this case called x and y)
gaussian_kernel = (1. / (2. * math.pi * variance)) * \
torch.exp(
-torch.sum((xy_grid - mean) ** 2., dim=-1) / \
(2 * variance)
)
# Make sure sum of values in gaussian kernel equals 1.
gaussian_kernel = gaussian_kernel / torch.sum(gaussian_kernel)
return gaussian_kernel
```
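A minimal sketch of the two standalone helpers above; the import path is hypothetical (this file's module name is not shown here), and a uniform translation is used so the inverted flow is easy to verify by eye.
```python
# Sketch only: "data.utils" is a hypothetical module name for the file above.
import torch
from data.utils import approx_flow_inversion, get_gaussian_kernel  # hypothetical import path
# get_gaussian_kernel returns a k x k kernel normalized to sum to 1.
kernel = get_gaussian_kernel(5)
print(kernel.shape, kernel.sum())  # torch.Size([5, 5]) tensor(1.)
# A uniform shift of +2 pixels along x over an 8x8 grid.
flow = torch.zeros(2, 8, 8)
flow[0] = 2.0
inv_flow = approx_flow_inversion(flow, k=3)
# Away from the border, the inverse of a uniform translation is its negation.
print(inv_flow[0, :, 2:-2].mean())  # close to -2.0
```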
#### File: data/scripts/preprocess_bairhd.py
```python
import argparse
import os
from glob import glob
import cv2
from joblib import Parallel
from joblib import delayed
def main(args):
data_dir = os.path.join(args.data_root, "softmotion_0511")
print("Preparing train")
train_output_dir = os.path.join(args.data_root, f"original_frames_{args.dim}/train")
extract_data(data_dir, train_output_dir, args.dim, init_k=0, end_k=43264)
print("Preparing test")
test_output_dir = os.path.join(args.data_root, f"original_frames_{args.dim}/test")
extract_data(data_dir, test_output_dir, args.dim, init_k=44120, end_k=44376)
def get_frame_path(frames_dir, i):
paths = glob(os.path.join(frames_dir, f"aux1_full_cropped_im{i}_*.jpg"))
assert len(paths) == 1
return paths[0]
def get_hd_frames(data_dir, k, dim):
group = k // 1000
frames_dir = os.path.join(data_dir, f"aux1/traj_group{group}/traj{k}/images")
frame_paths = [get_frame_path(frames_dir, i) for i in range(30)]
frames = []
for path in frame_paths:
im = cv2.imread(path)
im = im[:,157:967]
im = cv2.resize(im, dsize=(dim, dim))
im = cv2.flip(im, 0)
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
frames.append(im)
return frames
def process_frames(k, output_dir, data_dir_hd, dim):
frames_out_dir = os.path.join(output_dir, '{0:05}'.format(k))
os.makedirs(frames_out_dir)
aux1_frames = get_hd_frames(data_dir_hd, k, dim)
for i, frame in enumerate(aux1_frames):
filepath = os.path.join(frames_out_dir, f'{i:02}.png')
cv2.imwrite(filepath, cv2.cvtColor(frame.astype('uint8'), cv2.COLOR_RGB2BGR))
def extract_data(data_dir, output_dir, dim, init_k, end_k):
if os.path.exists(output_dir):
if os.listdir(output_dir):
raise RuntimeError('Directory not empty: {0}'.format(output_dir))
else:
os.makedirs(output_dir)
Parallel(n_jobs=args.num_workers)(delayed(process_frames)(k, output_dir, data_dir, dim) for k in range(init_k, end_k))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--data_root', type=str, required=True)
parser.add_argument('--dim', type=int, default=64)
parser.add_argument('--num_workers', type=int, default=8)
args = parser.parse_args()
main(args)
```
#### File: skip_vid_generator/models/transformer_model.py
```python
import torch
import torch.nn.functional as F
from ..models.mingpt import GPT, CGPT, NoiseInjection
from tools.utils import to_cuda
from models import load_network, save_network, print_network
from tqdm import tqdm
from ..modules.vmf import nll_vMF
class Transformer(torch.nn.Module):
def __init__(self, opt, is_train=True, is_main=True, logger=None):
super().__init__()
self.opt = opt
self.is_main = is_main
self.net_t = self.initialize_networks(is_train)
if is_train:
self.opt_t = self.create_optimizers(self.opt)
self.logger = logger if self.is_main else None
height, width = self.opt.z_shape
self.size = height * width
self.state_size = self.opt.state_size
self.tot_size = self.size + self.state_size
def forward(self, data, prefix='', mode='', total_len=None, log=False, global_iter=None, show_progress=False):
code, state_code, cond_code, delta_length_cond, vid_lbl = self.preprocess_input(data)
if mode == 'transformer':
t_loss = self.compute_transformer_loss(code, state_code, cond_code, delta_length_cond, vid_lbl, prefix, log, global_iter)
return t_loss
if mode == 'eval_transformer':
with torch.no_grad():
                t_loss = self.compute_transformer_loss(code, state_code, cond_code, delta_length_cond, vid_lbl, prefix, log, global_iter, is_eval=True)
return t_loss
if mode == 'inference':
return self.generate_fake(code, state_code, cond_code, delta_length_cond, vid_lbl, total_len, show_progress)
else:
raise ValueError(f"mode '{mode}' is invalid")
def preprocess_input(self, data):
data["code"] = to_cuda(data, "code", flatten_empty=False)
data["state_code"] = to_cuda(data, "state_code", flatten_empty=False)
data["cond_code"] = to_cuda(data, "cond_code")
data["vid_lbl"] = to_cuda(data, "vid_lbl")
data["delta_length_cond"] = to_cuda(data, "delta_length_cond")
return data["code"], data["state_code"], data["cond_code"], data["delta_length_cond"], data["vid_lbl"]
def initialize_networks(self, is_train):
if self.opt.is_continuous:
net_t = CGPT(n_proposals=self.opt.n_proposals, block_size=self.opt.z_len, n_layer=self.opt.n_layer,
n_head=self.opt.n_head, n_embd=self.opt.n_embd, n_in=self.opt.n_in,
resid_noise=self.opt.resid_noise).cuda()
else:
num_lbl = len(self.opt.categories) if self.opt.categories is not None else None
net_t = GPT(vocab_size=self.opt.z_num, block_size=self.opt.z_len, n_layer=self.opt.n_layer,
n_head=self.opt.n_head, n_embd=self.opt.n_embd, emb_mode=self.opt.emb_mode,
shape=self.opt.z_shape, state_vocab_size=self.opt.state_num, num_blocks=self.opt.num_blocks,
state_size=self.opt.state_size, use_start_token=self.opt.use_start_token, use_lbl=self.opt.cat,
num_lbl=num_lbl, state_front=self.opt.state_front).cuda()
if self.is_main:
net_t = load_network(net_t, "transformer_t", self.opt, head_to_n=self.opt.head_to_n)
return net_t
def save_model(self, global_iter, latest=False, best=False):
save_network(self.net_t, "transformer_t", global_iter, self.opt, latest, best)
# Following minGPT:
# This long function is unfortunately doing something very simple and is being very defensive:
# We are separating out all parameters of the model into two buckets: those that will experience
# weight decay for regularization and those that won't (biases, and layernorm/embedding weights).
# We are then returning the PyTorch optimizer object.
def create_optimizers(self, opt):
param_dict = {pn: p for pn, p in self.net_t.named_parameters()}
if opt.finetune_head and opt.finetune_f is None:
optim_groups = [{"params": [param_dict["head.weight"]], "weight_decay": 0.01, "lr": opt.lr}]
else:
# separate out all parameters to those that will and won't experience regularizing weight decay
decay = set()
no_decay = set()
whitelist_weight_modules = (torch.nn.Linear,)
blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding, NoiseInjection)
for mn, m in self.net_t.named_modules():
for pn, p in m.named_parameters():
fpn = '%s.%s' % (mn, pn) if mn else pn # full param name
if pn.endswith('bias'):
# all biases will not be decayed
no_decay.add(fpn)
elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules):
# weights of whitelist modules will be weight decayed
decay.add(fpn)
elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules):
# weights of blacklist modules will NOT be weight decayed
no_decay.add(fpn)
# special case the position embedding parameter in the root GPT module as not decayed
no_decay.add('start_tok_emb') if 'start_tok_emb' in param_dict.keys() else None
no_decay.add('pos_emb') if 'pos_emb' in param_dict.keys() else None
no_decay.add('h_emb') if 'h_emb' in param_dict.keys() else None
no_decay.add('w_emb') if 'w_emb' in param_dict.keys() else None
no_decay.add('s_emb') if 's_emb' in param_dict.keys() else None
no_decay.add('t_emb') if 't_emb' in param_dict.keys() else None
no_decay.add('state_pos_emb') if 'state_pos_emb' in param_dict.keys() else None
no_decay.add('state_s_emb') if 'state_s_emb' in param_dict.keys() else None
# validate that we considered every parameter
inter_params = decay & no_decay
union_params = decay | no_decay
assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params),)
assert len(param_dict.keys() - union_params) == 0, "parameters %s were not separated into either decay/no_decay set!" % (str(param_dict.keys() - union_params),)
# create the pytorch optimizer object
if opt.finetune_head:
optim_groups = [{"params": [param_dict[pn] for pn in sorted(list(decay)) if pn != "head.weight"], "weight_decay": 0.01, "lr": opt.lr * opt.finetune_f},
{"params": [param_dict["head.weight"]], "weight_decay": 0.01, "lr": opt.lr},
{"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0, "lr": opt.lr * opt.finetune_f}]
else:
optim_groups = [{"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": 0.01, "lr": opt.lr},
{"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0, "lr": opt.lr}]
if opt.optimizer == "adamw":
opt_t = torch.optim.AdamW(optim_groups, betas=(opt.beta1, opt.beta2))
else:
raise NotImplementedError
return opt_t
def compute_transformer_loss(self, code, state_code, cond_code, delta_length_cond, vid_lbl, prefix, log, global_iter, is_eval=False):
code = code[:, :self.opt.z_len] # limit input to transformer capacity
state_nll_loss = None
if self.opt.is_continuous:
if self.opt.p2p:
pred = self.net_t(code[:, :-1], cond_code, delta_length_cond, lbl_idx=vid_lbl)
else:
pred = self.net_t(code[:, :-1], lbl_idx=vid_lbl)
tgt = code[:, 1:]
vmf_loss = None
other_vmf_loss = None
cosine_loss = None
other_cosine_loss = None
# nll_loss = None
nll_loss = F.mse_loss(pred, tgt)
t_loss = nll_loss
# if self.opt.n_proposals > 1:
# t_loss = torch.tensor(0., requires_grad=True).cuda()
# logits, proposals = pred
# nm_proposals = proposals / torch.norm(proposals, p=2, dim=3, keepdim=True) if self.opt.normalize_pred else proposals
# nm_tgt = tgt / torch.norm(tgt, p=2, dim=2, keepdim=True) if self.opt.normalize_tgt else tgt
# cosine_dist = - (nm_proposals * nm_tgt.unsqueeze(2)).sum(dim=3)
# closest_proposals = cosine_dist.argmin(dim=2, keepdim=True)
# nll_loss = F.cross_entropy(logits.view(-1, logits.size(-1)), closest_proposals.view(-1))
# t_loss += nll_loss
# if self.opt.knn is not None:
# k_closest = max(1, int(self.opt.knn * (1 - global_iter / self.opt.knn_decay_iter)))
# closest_proposals = (-cosine_dist).topk(dim=2, k=k_closest)[1]
# else:
# k_closest = 1
# closest_onehot = torch.zeros(*closest_proposals.shape[:2], self.opt.n_proposals).cuda().scatter_(2, closest_proposals, 1)
# if self.opt.continuous_loss == "cosine":
# pred = nm_proposals[closest_onehot.bool()].view(*nm_proposals.shape[:2], k_closest, -1)
# cosine_loss = - (pred * tgt.unsqueeze(2)).sum(dim=3).mean()
# if self.opt.knn is not None:
# t_loss += cosine_loss
# else:
# other_preds = nm_proposals[~closest_onehot.bool()].view(*nm_proposals.shape[:2], self.opt.n_proposals - k_closest, -1)
# other_cosine_loss = - (other_preds * tgt.unsqueeze(2)).sum(dim=3).mean()
# t_loss += (1 - self.opt.epsilon_other) * cosine_loss + self.opt.epsilon_other * other_cosine_loss
# elif self.opt.continuous_loss == "vmf":
# pred = proposals[closest_onehot.bool()].view(*nm_proposals.shape[:2], k_closest, -1)
# vmf_loss = nll_vMF(pred, tgt.unsqueeze(2))
# if self.opt.knn is not None:
# t_loss += vmf_loss
# else:
# other_preds = proposals[~closest_onehot.bool()].view(*nm_proposals.shape[:2], self.opt.n_proposals - k_closest, -1)
# other_vmf_loss = nll_vMF(other_preds, tgt.unsqueeze(2))
# t_loss += (1 - self.opt.epsilon_other) * vmf_loss + self.opt.epsilon_other * other_vmf_loss
#
# else:
# if self.opt.continuous_loss == "cosine":
# if self.opt.normalize_pred:
# pred = pred / torch.norm(pred, p=2, dim=2, keepdim=True)
# if self.opt.normalize_tgt:
# tgt = tgt / torch.norm(tgt, p=2, dim=2, keepdim=True)
# cosine_loss = - (pred * tgt).sum(dim=2).mean()
# t_loss = cosine_loss
# elif self.opt.continuous_loss == "vmf":
# vmf_loss = nll_vMF(pred, tgt)
# t_loss = vmf_loss
nrec_loss = None
nrec_momentum_loss = None
else:
logits = self.net_t(code[:, :-1], cond_idx=cond_code, state_idx=state_code, delta_length_cond=delta_length_cond, lbl_idx=vid_lbl)
if 0 not in state_code.size():
if self.opt.state_front:
state_i = [i for i in range(logits.size(1)) if (i + 1) < self.state_size * self.opt.num_blocks]
frame_i = [i for i in range(logits.size(1)) if (i + 1) >= self.state_size * self.opt.num_blocks]
else:
state_i = [i for i in range(logits.size(1)) if (i + 1) % self.tot_size < self.state_size]
frame_i = [i for i in range(logits.size(1)) if (i + 1) % self.tot_size >= self.state_size]
state_logits = logits[:, state_i, :self.opt.state_num]
logits = logits[:, frame_i]
target = code
else:
if self.opt.use_start_token or self.opt.cat:
target = code
else:
target = code[:, 1:]
nll_loss = F.cross_entropy(logits.reshape(-1, logits.size(-1)), target.reshape(-1))
nrec_loss = None
other_vmf_loss = None
cosine_loss = None
other_cosine_loss = None
nrec_momentum_loss = None
vmf_loss = None
t_loss = nll_loss
if 0 not in state_code.size():
state_nll_loss = F.cross_entropy(state_logits.reshape(-1, state_logits.size(-1)), state_code[:, 1:].reshape(-1))
t_loss += state_nll_loss
if self.logger and not is_eval:
# log scalars every step
self.logger.log_scalar(f"transformer/{prefix}nll", nll_loss, global_iter)
self.logger.log_scalar(f"transformer/{prefix}state_nll", state_nll_loss, global_iter)
self.logger.log_scalar(f"transformer/{prefix}cosine", cosine_loss, global_iter)
self.logger.log_scalar(f"transformer/{prefix}other_cosine", other_cosine_loss, global_iter)
self.logger.log_scalar(f"transformer/{prefix}vmf", vmf_loss, global_iter)
self.logger.log_scalar(f"transformer/{prefix}other_vmf", other_vmf_loss, global_iter)
self.logger.log_scalar(f"transformer/{prefix}nrec", nrec_loss, global_iter)
self.logger.log_scalar(f"transformer/{prefix}nrec_momentum", nrec_momentum_loss, global_iter)
return t_loss
def top_k_logits(self, logits, k):
v, _ = torch.topk(logits, k)
out = logits.clone()
out[out < v[..., [-1]]] = -float('Inf')
return out
@torch.no_grad()
def generate_fake(self, code, state_code, cond_code, delta_length_cond, vid_lbl, total_len, show_progress):
''' If 'total_len' is 'None' generate tokens with transformer until the capacity 'z_len' of the transformer has
been reached. Otherwise, fill the code until 'total_len' is reached with a 'z_chunk' stride.
'''
if total_len is None:
code, state_code = self.fill_code(code, state_code, cond_code, delta_length_cond, vid_lbl, show_progress=show_progress)
return {"code": code, "state_code": state_code}
if total_len <= self.opt.z_len:
add_len = total_len - code.size(1)
add_len -= cond_code.size(1) if 0 not in cond_code.size() else 0
add_len -= min(state_code.size(1), self.opt.state_size * self.opt.num_blocks) if 0 not in state_code.size() else 0
code, state_code = self.fill_code(code, state_code, cond_code, delta_length_cond, vid_lbl, add_len=add_len, show_progress=show_progress)
return {"code": code, "state_code": state_code}
if show_progress:
pbar = tqdm(total=int(total_len), desc="Processing codes")
# 1. fill until transformer capacity 'z_len' is reached
code, state_code = self.fill_code(code, state_code, cond_code, delta_length_cond, vid_lbl, show_progress=show_progress)
# 2. predict 'z_chunk' by 'z_chunk'
curr_len = self.opt.z_len
if show_progress:
pbar.update(curr_len)
i = 1
while curr_len < total_len:
add_len = total_len - curr_len if total_len - curr_len < self.opt.z_chunk else None
if 0 not in cond_code.size():
delta_length_cond -= 1
# free some capacity for one chunk
tmp_state_code = state_code[:, i * self.state_size:] if 0 not in state_code.size() else state_code
tmp_code = code[:, i * self.size:]
# predict one chunk
pred_code, pred_state_code = self.fill_code(tmp_code, tmp_state_code, cond_code, delta_length_cond, vid_lbl, add_len=add_len, show_progress=show_progress)
# update code
delta_code = pred_code.size(1) - tmp_code.size(1)
code = torch.cat([code, pred_code[:, -delta_code:]], dim=1)
if 0 not in state_code.size():
delta_state_code = pred_state_code.size(1) - tmp_state_code.size(1)
if delta_state_code > 0:
state_code = torch.cat([state_code, pred_state_code[:, -delta_state_code:]], dim=1)
# else:
# curr_len += self.state_size
# keep track of progress
curr_len += add_len if add_len is not None else self.opt.z_chunk
if show_progress:
# if add_len is not None:
# print("add_len", add_len)
# else:
# print("z_chunk", self.opt.z_chunk)
pbar.update(add_len if add_len is not None else self.opt.z_chunk)
i += 1
if show_progress:
pbar.close()
return {"code": code, "state_code": state_code}
def fill_code(self, code, state_code, cond_code, delta_length_cond, vid_lbl, add_len=None, show_progress=False):
bs = code.size(0)
log_p = None
# compute add_len
if add_len is None:
add_len = self.opt.z_len - code.size(1)
add_len -= cond_code.size(1) if 0 not in cond_code.size() else 0
add_len -= min(state_code.size(1), self.opt.state_size * self.opt.num_blocks) if 0 not in state_code.size() else 0
# iterate
pbar = tqdm(range(add_len), desc="Filling codes", leave=False) if show_progress else range(add_len)
for _ in pbar:
if self.opt.is_continuous:
pred = self.net_t(code, single=True)
if self.opt.normalize_pred:
pred = pred / torch.norm(pred, p=2, dim=2, keepdim=True)
code = torch.cat((code, pred), dim=1)
else:
logits = self.net_t(code, cond_idx=cond_code, state_idx=state_code, delta_length_cond=delta_length_cond, lbl_idx=vid_lbl)
# determine if prediction needs to be affected to code or state_code
is_state = 0 not in state_code.size() and logits.size(1) % self.tot_size < self.state_size
if is_state:
logits = logits[:, :, :self.opt.state_num]
icode = self.get_icode(logits, self.opt.temperature_state, self.opt.top_k_state, self.opt.sample_state)[0]
state_code = torch.cat((state_code, icode), dim=1)
else:
if self.opt.beam_size is not None:
if code.size(0) == bs:
# expand
code = code.unsqueeze(1).repeat(1, self.opt.beam_size, 1).view(bs * self.opt.beam_size, -1)
icode, ilog_p = self.get_icode(logits, self.opt.temperature, self.opt.top_k, self.opt.sample, n=self.opt.beam_size)
log_p = ilog_p
icode = icode.view(-1, 1)
else:
if not self.opt.no_sample:
icode, ilog_p = self.get_icode(logits, self.opt.temperature, self.opt.top_k, self.opt.sample, n=1)
log_p += ilog_p.view(bs, self.opt.beam_size)
icode = icode.view(-1, 1)
else:
# expand
icode, ilog_p = self.get_icode(logits, self.opt.temperature, self.opt.top_k, self.opt.sample, n=self.opt.beam_size)
log_p = log_p.unsqueeze(1).repeat(1, self.opt.beam_size, 1)
log_p += ilog_p.view(bs, self.opt.beam_size, self.opt.beam_size)
icode = icode.view(bs, self.opt.beam_size * self.opt.beam_size)
log_p = log_p.view(bs, self.opt.beam_size * self.opt.beam_size)
# prune
log_p, keep = torch.topk(log_p, dim=1, k=self.opt.beam_size)
icode = torch.gather(icode, dim=1, index=keep).view(-1, 1)
code = code.unsqueeze(1).repeat(1, self.opt.beam_size, 1).view(bs, self.opt.beam_size * self.opt.beam_size, -1)
keep = keep.unsqueeze(-1).repeat(1, 1, code.size(-1))
code = torch.gather(code, dim=1, index=keep).view(-1, code.size(-1))
else:
icode = self.get_icode(logits, self.opt.temperature, self.opt.top_k, self.opt.sample)[0]
code = torch.cat((code, icode), dim=1)
if self.opt.beam_size is not None:
# keep best hypothesis
_, best = torch.topk(log_p, dim=1, k=1)
code = code.view(bs, self.opt.beam_size, -1)
best = best.unsqueeze(-1).repeat(1, 1, code.size(-1))
code = torch.gather(code, dim=1, index=best).view(bs, code.size(-1))
return code, state_code
def get_icode(self, logits, temperature, top_k, sample, n=1):
# pluck the logits at the final step and scale by temperature
logits = logits[:, -1] / temperature
# optionally crop probabilities to only the top k options
if top_k is not None:
logits = self.top_k_logits(logits, top_k)
# apply softmax to convert to probabilities
probs = F.softmax(logits, dim=-1)
# sample from the distribution or take the most likely
if sample:
icode = torch.multinomial(probs, num_samples=n)
else:
_, icode = torch.topk(probs, k=n, dim=-1)
ilog_p = torch.log(torch.gather(probs, 1, icode))
return icode, ilog_p
```
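The sampling step in `get_icode` combines temperature scaling, optional top-k filtering and multinomial sampling. A self-contained sketch of that filtering (re-implemented here for illustration, mirroring `Transformer.top_k_logits` above):
```python
# Standalone illustration of the top-k filtering and sampling used in get_icode.
import torch
import torch.nn.functional as F
def top_k_logits(logits, k):
    v, _ = torch.topk(logits, k)
    out = logits.clone()
    out[out < v[..., [-1]]] = -float('Inf')  # mask everything outside the top k
    return out
logits = torch.tensor([[2.0, 0.5, 1.0, -1.0]])
filtered = top_k_logits(logits, k=2)        # tensor([[2., -inf, 1., -inf]])
probs = F.softmax(filtered / 0.9, dim=-1)   # temperature scaling as in get_icode
icode = torch.multinomial(probs, num_samples=1)  # sampled index is 0 or 2
print(filtered, icode)
```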
#### File: skip_vid_generator/modules/contrastive.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
class ContrastiveLoss(nn.Module):
def __init__(self, opt):
super().__init__()
self.temperature = opt.cont_temperature
self.normalize = opt.cont_normalize
if opt.cont_proj_size is not None:
self.proj = nn.Sequential(nn.Linear(opt.style_size, opt.style_size, bias=False),
nn.ReLU(),
nn.Linear(opt.style_size, opt.cont_proj_size, bias=False))
else:
self.proj = lambda x: x
def forward(self, x):
"""Compute contrastive loss from https://arxiv.org/pdf/2004.11362.pdf
Bring closer features from the same sequence of frames and push further away the others.
Args:
x: hidden vectors of shape [batch_size, frames, vector_dim]
Returns:
A loss scalar.
"""
x = self.proj(x)
b, t, d = x.shape
x = x.view(-1, d) # b*t d
if self.normalize:
x = F.normalize(x, dim=1)
labels = torch.cat([i * torch.ones(t) for i in range(b)], dim=0).cuda() # b*t
labels = (labels.unsqueeze(0) == labels.unsqueeze(1)).float() # b*t b*t
similarity_matrix = torch.div(torch.matmul(x, x.transpose(0, 1)), self.temperature) # b*t b*t
# for numerical stability
logits_max, _ = torch.max(similarity_matrix, dim=1, keepdim=True) # b*t 1
        logits = similarity_matrix - logits_max.detach()  # b*t b*t
# logits = similarity_matrix
# discard the main diagonal from both: labels and logits
mask = torch.eye(labels.shape[0], dtype=torch.bool).cuda() # b*t b*t
labels = labels[~mask].view(labels.shape[0], -1) # b*t b*t-1
logits = logits[~mask].view(labels.shape[0], -1) # b*t b*t-1
# compute log_prob
exp_logits = torch.exp(logits) # b*t b*t-1
log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True)) # b*t b*t-1
# compute mean of log-likelihood over positive
sum_pos = t - 1
mean_log_prob_pos = log_prob[labels.bool()].view(labels.shape[0], -1).sum(-1) / sum_pos # b*t
# loss
loss = - mean_log_prob_pos.mean()
return loss
```
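A minimal usage sketch for the loss above; the `opt` fields mirror the constructor, the import path follows the file header, and a CUDA device is assumed because `forward` builds its label matrix with `.cuda()`.
```python
# Sketch only: assumes a CUDA device and that the package is importable as laid out above.
from types import SimpleNamespace
import torch
from skip_vid_generator.modules.contrastive import ContrastiveLoss
opt = SimpleNamespace(cont_temperature=0.1, cont_normalize=True, cont_proj_size=None, style_size=128)
criterion = ContrastiveLoss(opt)
# 4 sequences of 8 frames, each frame summarized by a 128-d style vector.
x = torch.randn(4, 8, 128, device="cuda", requires_grad=True)
loss = criterion(x)  # frames from the same sequence are pulled together, others pushed apart
loss.backward()
print(loss.item())
```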
#### File: ccvs/tools/logger.py
```python
import numpy as np
from matplotlib import cm
import torch
from torchvision.utils import make_grid
from tensorboardX import SummaryWriter
from tools.utils import color_transfer
class Logger():
def __init__(self, opt):
self.writer = SummaryWriter(opt.log_path)
self.log_path = opt.log_path
self.fps = opt.log_fps
self.imagenet_norm = opt.imagenet_norm
def is_empty(self, tensors):
for tensor in tensors:
if 0 in tensor.size():
return True
return False
def log_img(self, name, tensor, nrow, global_iter, natural=True, normalize=False, span=None, pad_value=0):
if self.is_empty([tensor]):
return
with torch.no_grad():
tensor = tensor[:, :3]
if natural and normalize and self.imagenet_norm:
# tensor should be in [-1 1]
tensor *= torch.tensor([[[[0.229]], [[0.224]], [[0.225]]]])
tensor += torch.tensor([[[[0.485]], [[0.456]], [[0.406]]]])
tensor = tensor.clamp(0, 1)
normalize = False
grid = make_grid(tensor, nrow=nrow, normalize=normalize, range=span, pad_value=pad_value)
self.writer.add_image(name, grid, global_iter)
def log_seg(self, name, tensor, seg_dim, nrow, global_iter):
if self.is_empty([tensor]):
return
with torch.no_grad():
if tensor.ndim == 4:
seg = tensor.max(1, keepdim=True)[1]
else:
seg = tensor.unsqueeze(1)
colormap = cm.get_cmap('hsv', seg_dim)(np.linspace(0, 1, seg_dim))
seg = color_transfer(seg, colormap)
self.log_img(name, seg, nrow, global_iter, normalize=True, span=(-1, 1))
def log_vid(self, name, tensor, global_iter, natural=True, normalize=False, span=None, cond_frames=None):
if self.is_empty([tensor]):
return
with torch.no_grad():
tensor = tensor[:, :, :3]
if natural and normalize and self.imagenet_norm:
# tensor should be in [-1 1]
tensor *= torch.tensor([[[[0.229]], [[0.224]], [[0.225]]]])
tensor += torch.tensor([[[[0.485]], [[0.456]], [[0.406]]]])
tensor = tensor.clamp(0, 1)
elif normalize:
tensor = tensor.clamp(span[0], span[1])
tensor = (tensor - span[0]) / (span[1] - span[0])
if cond_frames is not None:
# show synthetic frames with red border
low_h, low_w = int(0.025 * tensor.size(3)), int(0.025 * tensor.size(4))
high_h, high_w = tensor.size(3) - low_h, tensor.size(4) - low_w
red_color = torch.tensor([[[[1.]], [[0.]], [[0.]]]])
tensor[:, cond_frames:, :, :low_h] = red_color
tensor[:, cond_frames:, :, high_h:] = red_color
tensor[:, cond_frames:, :, :, :low_w] = red_color
tensor[:, cond_frames:, :, :, high_w:] = red_color
self.writer.add_video(name, tensor, global_iter, self.fps)
def log_flow(self, name, flow, nrow, global_iter):
if self.is_empty([flow]):
return
with torch.no_grad():
if len(flow.shape) == 5:
bs, t, _, h, w = flow.shape
flow = flow.permute(0, 1, 3, 4, 2)
flow_vid = torch.zeros(bs, t, 3, h, w)
for i in range(t):
flow_vid[:, i] = self.get_flow_rgb(flow[:, i])
self.log_vid(name, flow_vid, global_iter)
else:
flow = flow.permute(0, 2, 3, 1)
self.log_img(name, self.get_flow_rgb(flow), nrow, global_iter)
def log_scalar(self, name, scalar, global_iter):
if scalar is not None:
if type(scalar) == list:
for i, x in enumerate(scalar):
self.log_scalar(f"{name}_{i}", x, global_iter)
else:
self.writer.add_scalar(name, scalar, global_iter)
def get_flow_rgb(self, flow, boost=10):
r = (flow ** 2).sum(-1).sqrt() / np.sqrt(2) * boost
r[r > 1] = 1.
theta = (1 + torch.atan2(flow.select(-1, -1), flow.select(-1, 0)) / np.pi) / 2
cmp = cm.get_cmap('hsv', 128)
flow_rgba = cmp(theta.numpy())
flow_rgb = torch.tensor(flow_rgba[:, :, :, :3]).float()
flow_rgb = r.unsqueeze(-1) * flow_rgb
return flow_rgb.permute(0, 3, 1, 2)
``` |
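A short usage sketch for the logger above; `opt` only needs the three fields read in `__init__`, and the import path follows the file header.
```python
# Sketch only: the log directory is a placeholder and the import path assumes
# the repository layout implied by the file header above.
from types import SimpleNamespace
import torch
from tools.logger import Logger
opt = SimpleNamespace(log_path="./runs/demo", log_fps=4, imagenet_norm=False)
logger = Logger(opt)
frames = torch.rand(2, 8, 3, 64, 64)  # 2 clips of 8 RGB frames with values in [0, 1]
logger.log_vid("demo/video", frames, global_iter=0)
logger.log_scalar("demo/loss", 0.123, global_iter=0)
```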
{
"source": "16pierre/azure-sdk-for-python",
"score": 2
} |
#### File: cognitiveservices/models/_models_py3.py
```python
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class CheckDomainAvailabilityParameter(Model):
"""Check Domain availability parameter.
All required parameters must be populated in order to send to Azure.
:param subdomain_name: Required. The subdomain name to use.
:type subdomain_name: str
:param type: Required. The Type of the resource.
:type type: str
"""
_validation = {
'subdomain_name': {'required': True},
'type': {'required': True},
}
_attribute_map = {
'subdomain_name': {'key': 'subdomainName', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(self, *, subdomain_name: str, type: str, **kwargs) -> None:
super(CheckDomainAvailabilityParameter, self).__init__(**kwargs)
self.subdomain_name = subdomain_name
self.type = type
class CheckDomainAvailabilityResult(Model):
"""Check Domain availability result.
:param is_subdomain_available: Indicates the given SKU is available or
not.
:type is_subdomain_available: bool
:param reason: Reason why the SKU is not available.
:type reason: str
:param subdomain_name: The subdomain name to use.
:type subdomain_name: str
:param type: The Type of the resource.
:type type: str
"""
_attribute_map = {
'is_subdomain_available': {'key': 'isSubdomainAvailable', 'type': 'bool'},
'reason': {'key': 'reason', 'type': 'str'},
'subdomain_name': {'key': 'subdomainName', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(self, *, is_subdomain_available: bool=None, reason: str=None, subdomain_name: str=None, type: str=None, **kwargs) -> None:
super(CheckDomainAvailabilityResult, self).__init__(**kwargs)
self.is_subdomain_available = is_subdomain_available
self.reason = reason
self.subdomain_name = subdomain_name
self.type = type
class CheckSkuAvailabilityParameter(Model):
"""Check SKU availability parameter.
All required parameters must be populated in order to send to Azure.
:param skus: Required. The SKU of the resource.
:type skus: list[str]
:param kind: Required. The Kind of the resource.
:type kind: str
:param type: Required. The Type of the resource.
:type type: str
"""
_validation = {
'skus': {'required': True},
'kind': {'required': True},
'type': {'required': True},
}
_attribute_map = {
'skus': {'key': 'skus', 'type': '[str]'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(self, *, skus, kind: str, type: str, **kwargs) -> None:
super(CheckSkuAvailabilityParameter, self).__init__(**kwargs)
self.skus = skus
self.kind = kind
self.type = type
class CheckSkuAvailabilityResult(Model):
"""Check SKU availability result.
:param kind: The Kind of the resource.
:type kind: str
:param type: The Type of the resource.
:type type: str
:param sku_name: The SKU of Cognitive Services account.
:type sku_name: str
:param sku_available: Indicates the given SKU is available or not.
:type sku_available: bool
:param reason: Reason why the SKU is not available.
:type reason: str
:param message: Additional error message.
:type message: str
"""
_attribute_map = {
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'sku_name': {'key': 'skuName', 'type': 'str'},
'sku_available': {'key': 'skuAvailable', 'type': 'bool'},
'reason': {'key': 'reason', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(self, *, kind: str=None, type: str=None, sku_name: str=None, sku_available: bool=None, reason: str=None, message: str=None, **kwargs) -> None:
super(CheckSkuAvailabilityResult, self).__init__(**kwargs)
self.kind = kind
self.type = type
self.sku_name = sku_name
self.sku_available = sku_available
self.reason = reason
self.message = message
class CheckSkuAvailabilityResultList(Model):
"""Check SKU availability result list.
:param value: Check SKU availability result list.
:type value:
list[~azure.mgmt.cognitiveservices.models.CheckSkuAvailabilityResult]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[CheckSkuAvailabilityResult]'},
}
def __init__(self, *, value=None, **kwargs) -> None:
super(CheckSkuAvailabilityResultList, self).__init__(**kwargs)
self.value = value
class CloudError(Model):
"""CloudError.
"""
_attribute_map = {
}
class CognitiveServicesAccount(Model):
"""Cognitive Services Account is an Azure resource representing the
provisioned account, its type, location and SKU.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar etag: Entity Tag
:vartype etag: str
:ivar id: The id of the created account
:vartype id: str
:param kind: The Kind of the resource.
:type kind: str
:param location: The location of the resource
:type location: str
:ivar name: The name of the created account
:vartype name: str
:param properties: Properties of Cognitive Services account.
:type properties:
~azure.mgmt.cognitiveservices.models.CognitiveServicesAccountProperties
:param sku: The SKU of Cognitive Services account.
:type sku: ~azure.mgmt.cognitiveservices.models.Sku
:param tags: Gets or sets a list of key value pairs that describe the
resource. These tags can be used in viewing and grouping this resource
(across resource groups). A maximum of 15 tags can be provided for a
resource. Each tag must have a key no greater than 128 characters and
value no greater than 256 characters.
:type tags: dict[str, str]
:ivar type: Resource type
:vartype type: str
:param identity: The identity of Cognitive Services account.
:type identity: ~azure.mgmt.cognitiveservices.models.Identity
"""
_validation = {
'etag': {'readonly': True},
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'etag': {'key': 'etag', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'CognitiveServicesAccountProperties'},
'sku': {'key': 'sku', 'type': 'Sku'},
'tags': {'key': 'tags', 'type': '{str}'},
'type': {'key': 'type', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'Identity'},
}
def __init__(self, *, kind: str=None, location: str=None, properties=None, sku=None, tags=None, identity=None, **kwargs) -> None:
super(CognitiveServicesAccount, self).__init__(**kwargs)
self.etag = None
self.id = None
self.kind = kind
self.location = location
self.name = None
self.properties = properties
self.sku = sku
self.tags = tags
self.type = None
self.identity = identity
class CognitiveServicesAccountApiProperties(Model):
"""The api properties for special APIs.
:param qna_runtime_endpoint: (QnAMaker Only) The runtime endpoint of
QnAMaker.
:type qna_runtime_endpoint: str
:param statistics_enabled: (Bing Search Only) The flag to enable
statistics of Bing Search.
:type statistics_enabled: bool
    :param event_hub_connection_string: (Personalization Only) The EventHub
     connection string.
:type event_hub_connection_string: str
:param storage_account_connection_string: (Personalization Only) The
storage account connection string.
:type storage_account_connection_string: str
"""
_validation = {
'event_hub_connection_string': {'max_length': 1000, 'pattern': r'^( *)Endpoint=sb://(.*);( *)SharedAccessKeyName=(.*);( *)SharedAccessKey=(.*)$'},
'storage_account_connection_string': {'max_length': 1000, 'pattern': r'^(( *)DefaultEndpointsProtocol=(http|https)( *);( *))?AccountName=(.*)AccountKey=(.*)EndpointSuffix=(.*)$'},
}
_attribute_map = {
'qna_runtime_endpoint': {'key': 'qnaRuntimeEndpoint', 'type': 'str'},
'statistics_enabled': {'key': 'statisticsEnabled', 'type': 'bool'},
'event_hub_connection_string': {'key': 'eventHubConnectionString', 'type': 'str'},
'storage_account_connection_string': {'key': 'storageAccountConnectionString', 'type': 'str'},
}
def __init__(self, *, qna_runtime_endpoint: str=None, statistics_enabled: bool=None, event_hub_connection_string: str=None, storage_account_connection_string: str=None, **kwargs) -> None:
super(CognitiveServicesAccountApiProperties, self).__init__(**kwargs)
self.qna_runtime_endpoint = qna_runtime_endpoint
self.statistics_enabled = statistics_enabled
self.event_hub_connection_string = event_hub_connection_string
self.storage_account_connection_string = storage_account_connection_string
class CognitiveServicesAccountEnumerateSkusResult(Model):
"""The list of cognitive services accounts operation response.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar value: Gets the list of Cognitive Services accounts and their
properties.
:vartype value:
list[~azure.mgmt.cognitiveservices.models.CognitiveServicesResourceAndSku]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[CognitiveServicesResourceAndSku]'},
}
def __init__(self, **kwargs) -> None:
super(CognitiveServicesAccountEnumerateSkusResult, self).__init__(**kwargs)
self.value = None
class CognitiveServicesAccountKeys(Model):
"""The access keys for the cognitive services account.
:param key1: Gets the value of key 1.
:type key1: str
:param key2: Gets the value of key 2.
:type key2: str
"""
_attribute_map = {
'key1': {'key': 'key1', 'type': 'str'},
'key2': {'key': 'key2', 'type': 'str'},
}
def __init__(self, *, key1: str=None, key2: str=None, **kwargs) -> None:
super(CognitiveServicesAccountKeys, self).__init__(**kwargs)
self.key1 = key1
self.key2 = key2
class CognitiveServicesAccountProperties(Model):
"""Properties of Cognitive Services account.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar provisioning_state: Gets the status of the cognitive services
account at the time the operation was called. Possible values include:
'Creating', 'ResolvingDNS', 'Moving', 'Deleting', 'Succeeded', 'Failed'
:vartype provisioning_state: str or
~azure.mgmt.cognitiveservices.models.ProvisioningState
:ivar endpoint: Endpoint of the created account.
:vartype endpoint: str
:ivar internal_id: The internal identifier.
:vartype internal_id: str
:param custom_sub_domain_name: Optional subdomain name used for
token-based authentication.
:type custom_sub_domain_name: str
:param network_acls: A collection of rules governing the accessibility
from specific network locations.
:type network_acls: ~azure.mgmt.cognitiveservices.models.NetworkRuleSet
:param encryption: The encryption properties for this resource.
:type encryption: ~azure.mgmt.cognitiveservices.models.Encryption
:param user_owned_storage: The storage accounts for this resource.
:type user_owned_storage:
list[~azure.mgmt.cognitiveservices.models.UserOwnedStorage]
:param api_properties: The api properties for special APIs.
:type api_properties:
~azure.mgmt.cognitiveservices.models.CognitiveServicesAccountApiProperties
"""
_validation = {
'provisioning_state': {'readonly': True},
'endpoint': {'readonly': True},
'internal_id': {'readonly': True},
}
_attribute_map = {
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'endpoint': {'key': 'endpoint', 'type': 'str'},
'internal_id': {'key': 'internalId', 'type': 'str'},
'custom_sub_domain_name': {'key': 'customSubDomainName', 'type': 'str'},
'network_acls': {'key': 'networkAcls', 'type': 'NetworkRuleSet'},
'encryption': {'key': 'encryption', 'type': 'Encryption'},
'user_owned_storage': {'key': 'userOwnedStorage', 'type': '[UserOwnedStorage]'},
'api_properties': {'key': 'apiProperties', 'type': 'CognitiveServicesAccountApiProperties'},
}
def __init__(self, *, custom_sub_domain_name: str=None, network_acls=None, encryption=None, user_owned_storage=None, api_properties=None, **kwargs) -> None:
super(CognitiveServicesAccountProperties, self).__init__(**kwargs)
self.provisioning_state = None
self.endpoint = None
self.internal_id = None
self.custom_sub_domain_name = custom_sub_domain_name
self.network_acls = network_acls
self.encryption = encryption
self.user_owned_storage = user_owned_storage
self.api_properties = api_properties
class CognitiveServicesResourceAndSku(Model):
"""Cognitive Services resource type and SKU.
:param resource_type: Resource Namespace and Type
:type resource_type: str
:param sku: The SKU of Cognitive Services account.
:type sku: ~azure.mgmt.cognitiveservices.models.Sku
"""
_attribute_map = {
'resource_type': {'key': 'resourceType', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'Sku'},
}
def __init__(self, *, resource_type: str=None, sku=None, **kwargs) -> None:
super(CognitiveServicesResourceAndSku, self).__init__(**kwargs)
self.resource_type = resource_type
self.sku = sku
class Encryption(Model):
"""Properties to configure Encryption.
:param key_vault_properties: Properties of KeyVault
:type key_vault_properties:
~azure.mgmt.cognitiveservices.models.KeyVaultProperties
:param key_source: Enumerates the possible value of keySource for
Encryption. Possible values include: 'Microsoft.CognitiveServices',
'Microsoft.KeyVault'. Default value: "Microsoft.KeyVault" .
:type key_source: str or ~azure.mgmt.cognitiveservices.models.KeySource
"""
_attribute_map = {
'key_vault_properties': {'key': 'keyVaultProperties', 'type': 'KeyVaultProperties'},
'key_source': {'key': 'keySource', 'type': 'str'},
}
def __init__(self, *, key_vault_properties=None, key_source="Microsoft.KeyVault", **kwargs) -> None:
super(Encryption, self).__init__(**kwargs)
self.key_vault_properties = key_vault_properties
self.key_source = key_source
class Error(Model):
"""Cognitive Services error object.
:param error: The error body.
:type error: ~azure.mgmt.cognitiveservices.models.ErrorBody
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'ErrorBody'},
}
def __init__(self, *, error=None, **kwargs) -> None:
super(Error, self).__init__(**kwargs)
self.error = error
class ErrorException(HttpOperationError):
"""Server responsed with exception of type: 'Error'.
:param deserialize: A deserializer
:param response: Server response to be deserialized.
"""
def __init__(self, deserialize, response, *args):
super(ErrorException, self).__init__(deserialize, response, 'Error', *args)
class ErrorBody(Model):
"""Cognitive Services error body.
All required parameters must be populated in order to send to Azure.
:param code: Required. error code
:type code: str
:param message: Required. error message
:type message: str
"""
_validation = {
'code': {'required': True},
'message': {'required': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(self, *, code: str, message: str, **kwargs) -> None:
super(ErrorBody, self).__init__(**kwargs)
self.code = code
self.message = message
class Identity(Model):
"""Managed service identity.
Variables are only populated by the server, and will be ignored when
sending a request.
:param type: Type of managed service identity. Possible values include:
'None', 'SystemAssigned', 'UserAssigned'
:type type: str or ~azure.mgmt.cognitiveservices.models.IdentityType
:ivar tenant_id: Tenant of managed service identity.
:vartype tenant_id: str
:ivar principal_id: Principal Id of managed service identity.
:vartype principal_id: str
:param user_assigned_identities: The list of user assigned identities
associated with the resource. The user identity dictionary key references
will be ARM resource ids in the form:
     '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'
:type user_assigned_identities: dict[str,
~azure.mgmt.cognitiveservices.models.UserAssignedIdentity]
"""
_validation = {
'tenant_id': {'readonly': True},
'principal_id': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'IdentityType'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'principal_id': {'key': 'principalId', 'type': 'str'},
'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{UserAssignedIdentity}'},
}
def __init__(self, *, type=None, user_assigned_identities=None, **kwargs) -> None:
super(Identity, self).__init__(**kwargs)
self.type = type
self.tenant_id = None
self.principal_id = None
self.user_assigned_identities = user_assigned_identities
class IpRule(Model):
"""A rule governing the accessibility from a specific ip address or ip range.
All required parameters must be populated in order to send to Azure.
:param value: Required. An IPv4 address range in CIDR notation, such as
     '192.168.3.11' (simple IP address) or '192.168.3.0/24' (all addresses that
     start with 192.168.3).
:type value: str
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
}
def __init__(self, *, value: str, **kwargs) -> None:
super(IpRule, self).__init__(**kwargs)
self.value = value
class KeyVaultProperties(Model):
"""Properties to configure keyVault Properties.
:param key_name: Name of the Key from KeyVault
:type key_name: str
:param key_version: Version of the Key from KeyVault
:type key_version: str
:param key_vault_uri: Uri of KeyVault
:type key_vault_uri: str
"""
_attribute_map = {
'key_name': {'key': 'keyName', 'type': 'str'},
'key_version': {'key': 'keyVersion', 'type': 'str'},
'key_vault_uri': {'key': 'keyVaultUri', 'type': 'str'},
}
def __init__(self, *, key_name: str=None, key_version: str=None, key_vault_uri: str=None, **kwargs) -> None:
super(KeyVaultProperties, self).__init__(**kwargs)
self.key_name = key_name
self.key_version = key_version
self.key_vault_uri = key_vault_uri
class MetricName(Model):
"""A metric name.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar value: The name of the metric.
:vartype value: str
:ivar localized_value: The friendly name of the metric.
:vartype localized_value: str
"""
_validation = {
'value': {'readonly': True},
'localized_value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
'localized_value': {'key': 'localizedValue', 'type': 'str'},
}
def __init__(self, **kwargs) -> None:
super(MetricName, self).__init__(**kwargs)
self.value = None
self.localized_value = None
class NetworkRuleSet(Model):
"""A set of rules governing the network accessibility.
:param default_action: The default action when no rule from ipRules and
from virtualNetworkRules match. This is only used after the bypass
property has been evaluated. Possible values include: 'Allow', 'Deny'
:type default_action: str or
~azure.mgmt.cognitiveservices.models.NetworkRuleAction
:param ip_rules: The list of IP address rules.
:type ip_rules: list[~azure.mgmt.cognitiveservices.models.IpRule]
:param virtual_network_rules: The list of virtual network rules.
:type virtual_network_rules:
list[~azure.mgmt.cognitiveservices.models.VirtualNetworkRule]
"""
_attribute_map = {
'default_action': {'key': 'defaultAction', 'type': 'str'},
'ip_rules': {'key': 'ipRules', 'type': '[IpRule]'},
'virtual_network_rules': {'key': 'virtualNetworkRules', 'type': '[VirtualNetworkRule]'},
}
def __init__(self, *, default_action=None, ip_rules=None, virtual_network_rules=None, **kwargs) -> None:
super(NetworkRuleSet, self).__init__(**kwargs)
self.default_action = default_action
self.ip_rules = ip_rules
self.virtual_network_rules = virtual_network_rules
class OperationDisplayInfo(Model):
"""The operation supported by Cognitive Services.
:param description: The description of the operation.
:type description: str
:param operation: The action that users can perform, based on their
permission level.
:type operation: str
:param provider: Service provider: Microsoft Cognitive Services.
:type provider: str
:param resource: Resource on which the operation is performed.
:type resource: str
"""
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
}
def __init__(self, *, description: str=None, operation: str=None, provider: str=None, resource: str=None, **kwargs) -> None:
super(OperationDisplayInfo, self).__init__(**kwargs)
self.description = description
self.operation = operation
self.provider = provider
self.resource = resource
class OperationEntity(Model):
"""The operation supported by Cognitive Services.
:param name: Operation name: {provider}/{resource}/{operation}.
:type name: str
:param display: The operation supported by Cognitive Services.
:type display: ~azure.mgmt.cognitiveservices.models.OperationDisplayInfo
:param origin: The origin of the operation.
:type origin: str
:param properties: Additional properties.
:type properties: object
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'OperationDisplayInfo'},
'origin': {'key': 'origin', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'object'},
}
def __init__(self, *, name: str=None, display=None, origin: str=None, properties=None, **kwargs) -> None:
super(OperationEntity, self).__init__(**kwargs)
self.name = name
self.display = display
self.origin = origin
self.properties = properties
class RegenerateKeyParameters(Model):
"""Regenerate key parameters.
All required parameters must be populated in order to send to Azure.
:param key_name: Required. key name to generate (Key1|Key2). Possible
values include: 'Key1', 'Key2'
:type key_name: str or ~azure.mgmt.cognitiveservices.models.KeyName
"""
_validation = {
'key_name': {'required': True},
}
_attribute_map = {
'key_name': {'key': 'keyName', 'type': 'KeyName'},
}
def __init__(self, *, key_name, **kwargs) -> None:
super(RegenerateKeyParameters, self).__init__(**kwargs)
self.key_name = key_name
class ResourceSku(Model):
"""Describes an available Cognitive Services SKU.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar resource_type: The type of resource the SKU applies to.
:vartype resource_type: str
:ivar name: The name of SKU.
:vartype name: str
:ivar tier: Specifies the tier of Cognitive Services account.
:vartype tier: str
:ivar kind: The Kind of resources that are supported in this SKU.
:vartype kind: str
:ivar locations: The set of locations that the SKU is available.
:vartype locations: list[str]
:ivar restrictions: The restrictions because of which SKU cannot be used.
This is empty if there are no restrictions.
:vartype restrictions:
list[~azure.mgmt.cognitiveservices.models.ResourceSkuRestrictions]
"""
_validation = {
'resource_type': {'readonly': True},
'name': {'readonly': True},
'tier': {'readonly': True},
'kind': {'readonly': True},
'locations': {'readonly': True},
'restrictions': {'readonly': True},
}
_attribute_map = {
'resource_type': {'key': 'resourceType', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'locations': {'key': 'locations', 'type': '[str]'},
'restrictions': {'key': 'restrictions', 'type': '[ResourceSkuRestrictions]'},
}
def __init__(self, **kwargs) -> None:
super(ResourceSku, self).__init__(**kwargs)
self.resource_type = None
self.name = None
self.tier = None
self.kind = None
self.locations = None
self.restrictions = None
class ResourceSkuRestrictionInfo(Model):
"""ResourceSkuRestrictionInfo.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar locations: Locations where the SKU is restricted
:vartype locations: list[str]
:ivar zones: List of availability zones where the SKU is restricted.
:vartype zones: list[str]
"""
_validation = {
'locations': {'readonly': True},
'zones': {'readonly': True},
}
_attribute_map = {
'locations': {'key': 'locations', 'type': '[str]'},
'zones': {'key': 'zones', 'type': '[str]'},
}
def __init__(self, **kwargs) -> None:
super(ResourceSkuRestrictionInfo, self).__init__(**kwargs)
self.locations = None
self.zones = None
class ResourceSkuRestrictions(Model):
"""Describes restrictions of a SKU.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar type: The type of restrictions. Possible values include: 'Location',
'Zone'
:vartype type: str or
~azure.mgmt.cognitiveservices.models.ResourceSkuRestrictionsType
:ivar values: The value of restrictions. If the restriction type is set to
location. This would be different locations where the SKU is restricted.
:vartype values: list[str]
:ivar restriction_info: The information about the restriction where the
SKU cannot be used.
:vartype restriction_info:
~azure.mgmt.cognitiveservices.models.ResourceSkuRestrictionInfo
:ivar reason_code: The reason for restriction. Possible values include:
'QuotaId', 'NotAvailableForSubscription'
:vartype reason_code: str or
~azure.mgmt.cognitiveservices.models.ResourceSkuRestrictionsReasonCode
"""
_validation = {
'type': {'readonly': True},
'values': {'readonly': True},
'restriction_info': {'readonly': True},
'reason_code': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'ResourceSkuRestrictionsType'},
'values': {'key': 'values', 'type': '[str]'},
'restriction_info': {'key': 'restrictionInfo', 'type': 'ResourceSkuRestrictionInfo'},
'reason_code': {'key': 'reasonCode', 'type': 'str'},
}
def __init__(self, **kwargs) -> None:
super(ResourceSkuRestrictions, self).__init__(**kwargs)
self.type = None
self.values = None
self.restriction_info = None
self.reason_code = None
class Sku(Model):
"""The SKU of the cognitive services account.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param name: Required. Gets or sets the sku name. Required for account
creation, optional for update.
:type name: str
:ivar tier: Gets the sku tier. This is based on the SKU name. Possible
values include: 'Free', 'Standard', 'Premium'
:vartype tier: str or ~azure.mgmt.cognitiveservices.models.SkuTier
"""
_validation = {
'name': {'required': True},
'tier': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'SkuTier'},
}
def __init__(self, *, name: str, **kwargs) -> None:
super(Sku, self).__init__(**kwargs)
self.name = name
self.tier = None
class Usage(Model):
"""The usage data for a usage request.
Variables are only populated by the server, and will be ignored when
sending a request.
:param unit: The unit of the metric. Possible values include: 'Count',
'Bytes', 'Seconds', 'Percent', 'CountPerSecond', 'BytesPerSecond',
'Milliseconds'
:type unit: str or ~azure.mgmt.cognitiveservices.models.UnitType
:ivar name: The name information for the metric.
:vartype name: ~azure.mgmt.cognitiveservices.models.MetricName
:ivar quota_period: The quota period used to summarize the usage values.
:vartype quota_period: str
:ivar limit: Maximum value for this metric.
:vartype limit: float
:ivar current_value: Current value for this metric.
:vartype current_value: float
:ivar next_reset_time: Next reset time for current quota.
:vartype next_reset_time: str
:param status: Cognitive Services account quota usage status. Possible
values include: 'Included', 'Blocked', 'InOverage', 'Unknown'
:type status: str or ~azure.mgmt.cognitiveservices.models.QuotaUsageStatus
"""
_validation = {
'name': {'readonly': True},
'quota_period': {'readonly': True},
'limit': {'readonly': True},
'current_value': {'readonly': True},
'next_reset_time': {'readonly': True},
}
_attribute_map = {
'unit': {'key': 'unit', 'type': 'str'},
'name': {'key': 'name', 'type': 'MetricName'},
'quota_period': {'key': 'quotaPeriod', 'type': 'str'},
'limit': {'key': 'limit', 'type': 'float'},
'current_value': {'key': 'currentValue', 'type': 'float'},
'next_reset_time': {'key': 'nextResetTime', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
}
def __init__(self, *, unit=None, status=None, **kwargs) -> None:
super(Usage, self).__init__(**kwargs)
self.unit = unit
self.name = None
self.quota_period = None
self.limit = None
self.current_value = None
self.next_reset_time = None
self.status = status
class UsagesResult(Model):
"""The response to a list usage request.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar value: The list of usages for Cognitive Service account.
:vartype value: list[~azure.mgmt.cognitiveservices.models.Usage]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Usage]'},
}
def __init__(self, **kwargs) -> None:
super(UsagesResult, self).__init__(**kwargs)
self.value = None
class UserAssignedIdentity(Model):
"""User-assigned managed identity.
:param principal_id: Azure Active Directory principal ID associated with
this Identity.
:type principal_id: str
:param client_id: Client App Id associated with this identity.
:type client_id: str
"""
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'client_id': {'key': 'clientId', 'type': 'str'},
}
def __init__(self, *, principal_id: str=None, client_id: str=None, **kwargs) -> None:
super(UserAssignedIdentity, self).__init__(**kwargs)
self.principal_id = principal_id
self.client_id = client_id
class UserOwnedStorage(Model):
"""The user owned storage for Cognitive Services account.
:param resource_id: Full resource id of a Microsoft.Storage resource.
:type resource_id: str
"""
_attribute_map = {
'resource_id': {'key': 'resourceId', 'type': 'str'},
}
def __init__(self, *, resource_id: str=None, **kwargs) -> None:
super(UserOwnedStorage, self).__init__(**kwargs)
self.resource_id = resource_id
class VirtualNetworkRule(Model):
"""A rule governing the accessibility from a specific virtual network.
All required parameters must be populated in order to send to Azure.
:param id: Required. Full resource id of a vnet subnet, such as
'/subscriptions/subid/resourceGroups/rg1/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/subnet1'.
:type id: str
:param state: Gets the state of virtual network rule.
:type state: str
:param ignore_missing_vnet_service_endpoint: Ignore missing vnet service
endpoint or not.
:type ignore_missing_vnet_service_endpoint: bool
"""
_validation = {
'id': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'state': {'key': 'state', 'type': 'str'},
'ignore_missing_vnet_service_endpoint': {'key': 'ignoreMissingVnetServiceEndpoint', 'type': 'bool'},
}
def __init__(self, *, id: str, state: str=None, ignore_missing_vnet_service_endpoint: bool=None, **kwargs) -> None:
super(VirtualNetworkRule, self).__init__(**kwargs)
self.id = id
self.state = state
self.ignore_missing_vnet_service_endpoint = ignore_missing_vnet_service_endpoint
```
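These generated classes are plain msrest `Model` subclasses, so they are constructed with keyword arguments matching the signatures above. A short sketch (resource ids and the IP range are placeholders):
```python
# Sketch only: ids and the IP range below are placeholders.
from azure.mgmt.cognitiveservices.models import IpRule, NetworkRuleSet, Sku, VirtualNetworkRule
network_acls = NetworkRuleSet(
    default_action="Deny",  # 'Allow' or 'Deny'
    ip_rules=[IpRule(value="192.168.3.0/24")],
    virtual_network_rules=[VirtualNetworkRule(
        id="/subscriptions/<sub-id>/resourceGroups/rg1/providers/Microsoft.Network/virtualNetworks/vnet1/subnets/subnet1",
        ignore_missing_vnet_service_endpoint=True,
    )],
)
sku = Sku(name="S0")  # 'tier' is read-only and filled in by the service
```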
#### File: azure-ai-formrecognizer/samples/sample_differentiate_output_models_trained_with_and_without_labels.py
```python
import os
def format_bounding_box(bounding_box):
if not bounding_box:
return "N/A"
return ", ".join(["[{}, {}]".format(p.x, p.y) for p in bounding_box])
class DifferentiateOutputModelsTrainedWithAndWithoutLabels(object):
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
model_trained_with_labels_id = os.environ["ID_OF_MODEL_TRAINED_WITH_LABELS"]
model_trained_without_labels_id = os.environ["ID_OF_MODEL_TRAINED_WITHOUT_LABELS"]
def recognize_custom_forms(self):
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer import FormRecognizerClient
form_recognizer_client = FormRecognizerClient(
endpoint=self.endpoint, credential=AzureKeyCredential(self.key)
)
# Make sure your form's type is included in the list of form types the custom model can recognize
with open("sample_forms/forms/Form_1.jpg", "rb") as f:
stream = f.read()
forms_with_labeled_model_poller = form_recognizer_client.begin_recognize_custom_forms(
model_id=self.model_trained_with_labels_id, form=stream
)
forms_with_unlabeled_model_poller = form_recognizer_client.begin_recognize_custom_forms(
model_id=self.model_trained_without_labels_id, form=stream
)
        # Calling result after kicking off each call allows for server-side parallelization
forms_with_labeled_model = forms_with_labeled_model_poller.result()
forms_with_unlabeled_model = forms_with_unlabeled_model_poller.result()
# With a form recognized by a model trained with labels, this 'name' key will be its
# training-time label, otherwise it will be denoted by numeric indices.
# Label data is not returned for model trained with labels.
print("---------Recognizing forms with models trained with labeled data---------")
for labeled_form in forms_with_labeled_model:
for name, field in labeled_form.fields.items():
print("...Field '{}' has value '{}' based on '{}' within bounding box '{}', with a confidence score of {}".format(
name,
field.value,
field.value_data.text,
format_bounding_box(field.value_data.bounding_box),
field.confidence
))
print("-----------------------------------------------------------------------")
print("-------Recognizing forms with models trained with unlabeled data-------")
for unlabeled_form in forms_with_unlabeled_model:
for name, field in unlabeled_form.fields.items():
print("...Field '{}' has label '{}' within bounding box '{}', with a confidence score of {}".format(
name,
field.label_data.text,
format_bounding_box(field.label_data.bounding_box),
field.confidence
))
print("...Field '{}' has value '{}' based on '{}' within bounding box '{}', with a confidence score of {}".format(
name,
field.value,
field.value_data.text,
format_bounding_box(field.value_data.bounding_box),
field.confidence
))
if __name__ == '__main__':
sample = DifferentiateOutputModelsTrainedWithAndWithoutLabels()
sample.recognize_custom_forms()
```
#### File: azure-identity/tests/test_msi_credential.py
```python
from azure.identity._constants import EnvironmentVariables
from azure.identity._credentials.managed_identity import MsiCredential
import pytest
from helpers import mock
def test_no_scopes():
"""The credential should raise ValueError when get_token is called with no scopes"""
with mock.patch("os.environ", {EnvironmentVariables.MSI_ENDPOINT: "https://url"}):
credential = MsiCredential()
with pytest.raises(ValueError):
credential.get_token()
def test_multiple_scopes():
"""The credential should raise ValueError when get_token is called with more than one scope"""
with mock.patch("os.environ", {EnvironmentVariables.MSI_ENDPOINT: "https://url"}):
credential = MsiCredential()
with pytest.raises(ValueError):
credential.get_token("one scope", "and another")
```
#### File: samples/async_samples/sample_indexers_operations_async.py
```python
import asyncio
import os
service_endpoint = os.getenv("AZURE_SEARCH_SERVICE_ENDPOINT")
key = os.getenv("AZURE_SEARCH_API_KEY")
connection_string = os.getenv("AZURE_STORAGE_CONNECTION_STRING")
from azure.core.credentials import AzureKeyCredential
from azure.search.documents import (
DataSource, DataContainer, DataSourceCredentials, Index, Indexer, SimpleField, edm
)
from azure.search.documents.aio import SearchServiceClient
service_client = SearchServiceClient(service_endpoint, AzureKeyCredential(key))
indexers_client = service_client.get_indexers_client()
async def create_indexer():
# create an index
index_name = "hotels"
fields = [
SimpleField(name="hotelId", type=edm.String, key=True),
SimpleField(name="baseRate", type=edm.Double)
]
index = Index(name=index_name, fields=fields)
ind_client = service_client.get_indexes_client()
async with ind_client:
await ind_client.create_index(index)
# [START create_indexer_async]
# create a datasource
ds_client = service_client.get_datasources_client()
credentials = DataSourceCredentials(connection_string=connection_string)
container = DataContainer(name='searchcontainer')
ds = DataSource(name="async-indexer-datasource", type="azureblob", credentials=credentials, container=container)
async with ds_client:
data_source = await ds_client.create_datasource(ds)
# create an indexer
indexer = Indexer(name="async-sample-indexer", data_source_name="async-indexer-datasource", target_index_name="hotels")
async with indexers_client:
result = await indexers_client.create_indexer(indexer)
print("Create new Indexer - async-sample-indexer")
# [END create_indexer_async]
async def list_indexers():
# [START list_indexer_async]
async with indexers_client:
result = await indexers_client.get_indexers()
names = [x.name for x in result]
print("Found {} Indexers in the service: {}".format(len(result), ", ".join(names)))
# [END list_indexer_async]
async def get_indexer():
# [START get_indexer_async]
async with indexers_client:
result = await indexers_client.get_indexer("async-sample-indexer")
    print("Retrieved Indexer 'async-sample-indexer'")
return result
# [END get_indexer_async]
async def get_indexer_status():
# [START get_indexer_status_async]
async with indexers_client:
result = await indexers_client.get_indexer_status("async-sample-indexer")
    print("Retrieved Indexer status for 'async-sample-indexer'")
return result
# [END get_indexer_status_async]
async def run_indexer():
# [START run_indexer_async]
async with indexers_client:
result = await indexers_client.run_indexer("async-sample-indexer")
print("Ran the Indexer 'async-sample-indexer'")
return result
# [END run_indexer_async]
async def reset_indexer():
# [START reset_indexer_async]
async with indexers_client:
result = await indexers_client.reset_indexer("async-sample-indexer")
print("Reset the Indexer 'async-sample-indexer'")
return result
# [END reset_indexer_async]
async def delete_indexer():
# [START delete_indexer_async]
async with indexers_client:
        await indexers_client.delete_indexer("async-sample-indexer")
print("Indexer 'async-sample-indexer' successfully deleted")
# [END delete_indexer_async]
async def main():
    await create_indexer()
    await list_indexers()
    await get_indexer()
    await get_indexer_status()
    await run_indexer()
    await reset_indexer()
    await delete_indexer()
    await service_client.close()
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
loop.close()
```
#### File: azure-search-documents/samples/sample_synonym_map_operations.py
```python
import os
service_endpoint = os.getenv("AZURE_SEARCH_SERVICE_ENDPOINT")
key = os.getenv("AZURE_SEARCH_API_KEY")
from azure.core.credentials import AzureKeyCredential
from azure.search.documents import SearchServiceClient
client = SearchServiceClient(service_endpoint, AzureKeyCredential(key)).get_synonym_maps_client()
def create_synonym_map():
# [START create_synonym_map]
result = client.create_synonym_map("test-syn-map", [
"USA, United States, United States of America",
"Washington, Wash. => WA",
])
    print("Create new Synonym Map 'test-syn-map' succeeded")
# [END create_synonym_map]
def get_synonym_maps():
# [START get_synonym_maps]
result = client.get_synonym_maps()
names = [x["name"] for x in result]
print("Found {} Synonym Maps in the service: {}".format(len(result), ", ".join(names)))
# [END get_synonym_maps]
def get_synonym_map():
# [START get_synonym_map]
result = client.get_synonym_map("test-syn-map")
    print("Retrieved Synonym Map 'test-syn-map' with synonyms")
for syn in result["synonyms"]:
print(" {}".format(syn))
# [END get_synonym_map]
def delete_synonym_map():
# [START delete_synonym_map]
client.delete_synonym_map("test-syn-map")
print("Synonym Map 'test-syn-map' deleted")
# [END delete_synonym_map]
if __name__ == '__main__':
create_synonym_map()
get_synonym_maps()
get_synonym_map()
delete_synonym_map()
```
#### File: azure-search-documents/tests/_test_utils.py
```python
from azure.search.documents import SynonymMap
def build_synonym_map_from_dict(synonym_map):
sm = SynonymMap(name=synonym_map["name"], synonyms=synonym_map["synonyms"])
for k, v in synonym_map.items():
setattr(sm, k, v)
return sm
``` |
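A hedged usage sketch of the helper above: the dict shape mirrors what the synonym-map sample earlier reads back from the service (`name` and `synonyms` keys); the values are illustrative.
```python
# Illustrative values only; the dict keys match what build_synonym_map_from_dict expects.
raw = {
    "name": "test-syn-map",
    "synonyms": [
        "USA, United States, United States of America",
        "Washington, Wash. => WA",
    ],
}
sm = build_synonym_map_from_dict(raw)
print(sm.name, sm.synonyms)
```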
{
"source": "16rahuljain/IndianRailway",
"score": 3
} |
#### File: 16rahuljain/IndianRailway/app.py
```python
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
import urllib.request, urllib.parse, urllib.error
import json
import os
from flask import Flask
from flask import request
from flask import make_response
app = Flask(__name__)
#Define webhook
@app.route('/webhook', methods=['POST'])
def webhook():
#Accept request payload
req = request.get_json(silent=True, force=True)
print("Request:")
print(json.dumps(req, indent=4))
#Call business logic
res = processRequest(req)
#Prepare response message
res = json.dumps(res, indent=4)
r = make_response(res)
r.headers['Content-Type'] = 'application/json'
print(r)
return r
#Execute business logic
def processRequest(req):
    # Check that the correct action was invoked
if req.get("result").get("action") != "TrainRunningStatus":
return {}
# Extract input parameters
result = req.get("result")
parameters = result.get("parameters")
inq_date = parameters.get("inq_date")
cln_inq_date = inq_date.replace('-','')
raw_train_num = parameters.get("train_num")
train_num = raw_train_num.replace(" ","")
key = os.getenv('API_KEY')
# Prepare and call API URL
link = "http://api.railwayapi.com/live/train/" + train_num + "/doj/" + cln_inq_date +"/apikey/" +key + "/"
result = urllib.request.urlopen(link).read()
data = json.loads(result)
# Extract train position
raw_speech = data.get('position')
# Exception handling
if raw_speech == "-":
        cln_speech = "Oops, something went wrong, please try again later"
else:
cln_speech = raw_speech
#Prepare response speech
speech = cln_speech
return {
"speech": speech,
"displayText": "Indian Railway API",
"source": "Indian Railway API"
}
# Execute python app
if __name__ == "__main__":
port = int(os.getenv('PORT', 5000))
app.run(debug=False, port=port, host='0.0.0.0')
``` |
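The handler above only reads `result.action`, `result.parameters.inq_date`, and `result.parameters.train_num`, so it can be smoke-tested locally with a minimal payload. The sketch below assumes the Flask app is running on port 5000 and that `API_KEY` is set; a real Dialogflow (API.AI) request carries many more fields, and the train number and date are illustrative values.
```python
# Local smoke test (assumptions: app running on localhost:5000, API_KEY exported).
import requests

payload = {
    "result": {
        "action": "TrainRunningStatus",
        "parameters": {
            "inq_date": "2019-01-15",  # hyphens are stripped by processRequest
            "train_num": "12 951",     # spaces are stripped by processRequest
        },
    }
}
r = requests.post("http://localhost:5000/webhook", json=payload)
print(r.json().get("speech"))
```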
{
"source": "16ttv/2019-fall-polytech-cs",
"score": 3
} |
#### File: 2019-fall-polytech-cs/vizualizeyshin/vizualizeyshin.pyde
```python
add_library('sound')
def setup():
global img
size(900, 900)
noFill()
strokeWeight(4)
sample = SoundFile(this, "sample.mp3")
sample.loop()
img = loadImage("gip.jpg")
t = 0
n = 90
def draw():
image(img, 0, 0)
global t
translate(width/2, height/2)
for i in range(n):
rotate(radians(360/n))
pushMatrix()
translate(200,0)
rotate(radians(t+2*i*360/n))
stroke(0.1*i, 5, 5)
r(100)
popMatrix()
t+=1
def r(length):
triangle(0, -length, -length*sqrt(3)/2, length/2, length*sqrt(3)/2, length/2)
``` |
{
"source": "16xccheng/keras-unet",
"score": 3
} |
#### File: 16xccheng/keras-unet/model_unet.py
```python
from __future__ import print_function
import os
from skimage.transform import resize
from skimage.io import imsave
import numpy as np
from keras.models import Model
from keras.layers import Input, concatenate, Conv2D, MaxPooling2D, Conv2DTranspose
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from keras import backend as K  # Keras backend: https://keras.io/zh/backend/
from keras.regularizers import l2  # used by the kernel_regularizer in get_unetpp below
K.set_image_data_format('channels_last')  # TF dimension ordering in this code
img_rows = 512  # image height
img_cols = 512  # image width
smooth = 1.
def load_train_data():  # load pre-saved numpy arrays
imgs_train = np.load('imgs_train.npy')
imgs_mask_train = np.load('imgs_mask_train.npy')
return imgs_train, imgs_mask_train
def load_test_data():  # load pre-saved numpy arrays
imgs_test = np.load('imgs_test.npy')
imgs_id = np.load('imgs_id_test.npy')
return imgs_test, imgs_id
# Compute the Dice coefficient, used to judge segmentation quality
def dice_coef(y_true, y_pred):  # y_true: ground-truth mask, y_pred: predicted mask
    y_true_f = K.flatten(y_true)  # flatten to a 1-D tensor
    y_pred_f = K.flatten(y_pred)  # flatten to a 1-D tensor
    # K.sum without an axis argument sums over all elements and returns a scalar
    intersection = K.sum(y_true_f * y_pred_f)  # overlap between ground truth and prediction
    # Dice formula: 2 * |intersection| / (|y_true| + |y_pred|); larger means a better segmentation
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
# Loss function (negative Dice, so minimizing the loss maximizes Dice)
def dice_coef_loss(y_true, y_pred):
    return -dice_coef(y_true, y_pred)
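# Worked example (hypothetical numbers, not part of the original code): for flattened
# binary masks y_true = [1, 1, 0, 0] and y_pred = [1, 0, 1, 0] the intersection is 1,
# sum(y_true) = 2 and sum(y_pred) = 2, so with smooth = 1 the Dice coefficient is
# (2*1 + 1) / (2 + 2 + 1) = 0.6; a perfect match would give (2*2 + 1) / (2 + 2 + 1) = 1.0.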
# ################################################ original U-Net network model #############################################
def get_unet(pretrained_weights=None):
    print('using the original get_unet')
weight=32
nb_filter = [weight, weight*2, weight*4, weight*8, weight*16]
inputs = Input((img_rows, img_cols, 1))
conv1 = Conv2D(nb_filter[0], (3, 3), activation='relu', padding='same')(inputs)
conv1 = Conv2D(nb_filter[0], (3, 3), activation='relu', padding='same')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(nb_filter[1], (3, 3), activation='relu', padding='same')(pool1)
conv2 = Conv2D(nb_filter[1], (3, 3), activation='relu', padding='same')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(nb_filter[2], (3, 3), activation='relu', padding='same')(pool2)
conv3 = Conv2D(nb_filter[2], (3, 3), activation='relu', padding='same')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Conv2D(nb_filter[3], (3, 3), activation='relu', padding='same')(pool3)
conv4 = Conv2D(nb_filter[3], (3, 3), activation='relu', padding='same')(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
conv5 = Conv2D(nb_filter[4], (3, 3), activation='relu', padding='same')(pool4)
conv5 = Conv2D(nb_filter[4], (3, 3), activation='relu', padding='same')(conv5)
up6 = concatenate([Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(conv5), conv4], axis=3)
conv6 = Conv2D(nb_filter[3], (3, 3), activation='relu', padding='same')(up6)
conv6 = Conv2D(nb_filter[3], (3, 3), activation='relu', padding='same')(conv6)
up7 = concatenate([Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(conv6), conv3], axis=3)
conv7 = Conv2D(nb_filter[2], (3, 3), activation='relu', padding='same')(up7)
conv7 = Conv2D(nb_filter[2], (3, 3), activation='relu', padding='same')(conv7)
up8 = concatenate([Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv7), conv2], axis=3)
conv8 = Conv2D(nb_filter[1], (3, 3), activation='relu', padding='same')(up8)
conv8 = Conv2D(nb_filter[1], (3, 3), activation='relu', padding='same')(conv8)
up9 = concatenate([Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(conv8), conv1], axis=3)
conv9 = Conv2D(nb_filter[0], (3, 3), activation='relu', padding='same')(up9)
conv9 = Conv2D(nb_filter[0], (3, 3), activation='relu', padding='same')(conv9)
conv10 = Conv2D(1, (1, 1), activation='sigmoid')(conv9)
model = Model(inputs=[inputs], outputs=[conv10])
    # optimizer: the optimizer, e.g. Adam
    # loss: the training loss, here dice_coef_loss
    # metrics: list of metrics used to evaluate the model during training and testing, here dice_coef
    model.compile(optimizer=Adam(lr=1e-5), loss=dice_coef_loss, metrics=[dice_coef])  # dice_coef_loss as the loss function
if (pretrained_weights):
model.load_weights(pretrained_weights)
return model
# ################################################ original U-Net (wide) network model #############################################
def get_unetw(pretrained_weights=None):
    print('using the original get_unet wide')
weight=38
nb_filter = [weight,weight*2,weight*4,weight*8,weight*16]
inputs = Input((img_rows, img_cols, 1))
conv1 = Conv2D(nb_filter[0], (3, 3), activation='relu', padding='same')(inputs)
conv1 = Conv2D(nb_filter[0], (3, 3), activation='relu', padding='same')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(nb_filter[1], (3, 3), activation='relu', padding='same')(pool1)
conv2 = Conv2D(nb_filter[1], (3, 3), activation='relu', padding='same')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(nb_filter[2], (3, 3), activation='relu', padding='same')(pool2)
conv3 = Conv2D(nb_filter[2], (3, 3), activation='relu', padding='same')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Conv2D(nb_filter[3], (3, 3), activation='relu', padding='same')(pool3)
conv4 = Conv2D(nb_filter[3], (3, 3), activation='relu', padding='same')(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
conv5 = Conv2D(nb_filter[4], (3, 3), activation='relu', padding='same')(pool4)
conv5 = Conv2D(nb_filter[4], (3, 3), activation='relu', padding='same')(conv5)
up6 = concatenate([Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(conv5), conv4], axis=3)
conv6 = Conv2D(nb_filter[3], (3, 3), activation='relu', padding='same')(up6)
conv6 = Conv2D(nb_filter[3], (3, 3), activation='relu', padding='same')(conv6)
up7 = concatenate([Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(conv6), conv3], axis=3)
conv7 = Conv2D(nb_filter[2], (3, 3), activation='relu', padding='same')(up7)
conv7 = Conv2D(nb_filter[2], (3, 3), activation='relu', padding='same')(conv7)
up8 = concatenate([Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv7), conv2], axis=3)
conv8 = Conv2D(nb_filter[1], (3, 3), activation='relu', padding='same')(up8)
conv8 = Conv2D(nb_filter[1], (3, 3), activation='relu', padding='same')(conv8)
up9 = concatenate([Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(conv8), conv1], axis=3)
conv9 = Conv2D(nb_filter[0], (3, 3), activation='relu', padding='same')(up9)
conv9 = Conv2D(nb_filter[0], (3, 3), activation='relu', padding='same')(conv9)
conv10 = Conv2D(1, (1, 1), activation='sigmoid')(conv9)
model = Model(inputs=[inputs], outputs=[conv10])
    # optimizer: the optimizer, e.g. Adam
    # loss: the training loss, here dice_coef_loss
    # metrics: list of metrics used to evaluate the model during training and testing, here dice_coef
    model.compile(optimizer=Adam(lr=1e-5), loss=dice_coef_loss, metrics=[dice_coef])  # dice_coef_loss as the loss function
if (pretrained_weights):
model.load_weights(pretrained_weights)
return model
# ################################################ original U-Net++ network model #############################################
def get_unetpp(num_class=1, deep_supervision=False):
    print('using the original unet++')
nb_filter = [32,64,128,256,512]
img_rows=256
img_cols=256
color_type=1
bn_axis = 3
img_input = Input(shape=(img_rows, img_cols, color_type), name='main_input')
conv1_1 = Conv2D(nb_filter[0], (3, 3), activation='relu', padding='same')(img_input)
conv1_1 = Conv2D(nb_filter[0], (3, 3), activation='relu', padding='same')(conv1_1)
pool1 = MaxPooling2D((2, 2), strides=(2, 2), name='pool1')(conv1_1)
conv2_1 = Conv2D(nb_filter[1], (3, 3), activation='relu', padding='same')(pool1)
conv2_1 = Conv2D(nb_filter[1], (3, 3), activation='relu', padding='same')(conv2_1)
pool2 = MaxPooling2D((2, 2), strides=(2, 2), name='pool2')(conv2_1)
up1_2 = Conv2DTranspose(nb_filter[0], (2, 2), strides=(2, 2), name='up12', padding='same')(conv2_1)
conv1_2 = concatenate([up1_2, conv1_1], name='merge12', axis=bn_axis)
conv1_2 = Conv2D(nb_filter[0], (3, 3), activation='relu', padding='same')(conv1_2)
conv1_2 = Conv2D(nb_filter[0], (3, 3), activation='relu', padding='same')(conv1_2)
conv3_1 = Conv2D(nb_filter[2], (3, 3), activation='relu', padding='same')(pool2)
conv3_1 = Conv2D(nb_filter[2], (3, 3), activation='relu', padding='same')(conv3_1)
pool3 = MaxPooling2D((2, 2), strides=(2, 2), name='pool3')(conv3_1)
up2_2 = Conv2DTranspose(nb_filter[1], (2, 2), strides=(2, 2), name='up22', padding='same')(conv3_1)
conv2_2 = concatenate([up2_2, conv2_1], name='merge22', axis=bn_axis)
conv2_2 = Conv2D(nb_filter[1], (3, 3), activation='relu', padding='same')(conv2_2)
conv2_2 = Conv2D(nb_filter[1], (3, 3), activation='relu', padding='same')(conv2_2)
up1_3 = Conv2DTranspose(nb_filter[0], (2, 2), strides=(2, 2), name='up13', padding='same')(conv2_2)
conv1_3 = concatenate([up1_3, conv1_1, conv1_2], name='merge13', axis=bn_axis)
conv1_3 = Conv2D(nb_filter[0], (3, 3), activation='relu', padding='same')(conv1_3)
conv1_3 = Conv2D(nb_filter[0], (3, 3), activation='relu', padding='same')(conv1_3)
conv4_1 = Conv2D(nb_filter[3], (3, 3), activation='relu', padding='same')(pool3)
conv4_1 = Conv2D(nb_filter[3], (3, 3), activation='relu', padding='same')(conv4_1)
pool4 = MaxPooling2D((2, 2), strides=(2, 2), name='pool4')(conv4_1)
up3_2 = Conv2DTranspose(nb_filter[2], (2, 2), strides=(2, 2), name='up32', padding='same')(conv4_1)
conv3_2 = concatenate([up3_2, conv3_1], name='merge32', axis=bn_axis)
conv3_2 = Conv2D(nb_filter[2], (3, 3), activation='relu', padding='same')(conv3_2)
conv3_2 = Conv2D(nb_filter[2], (3, 3), activation='relu', padding='same')(conv3_2)
up2_3 = Conv2DTranspose(nb_filter[1], (2, 2), strides=(2, 2), name='up23', padding='same')(conv3_2)
conv2_3 = concatenate([up2_3, conv2_1, conv2_2], name='merge23', axis=bn_axis)
conv2_3 = Conv2D(nb_filter[1], (3, 3), activation='relu', padding='same')(conv2_3)
conv2_3 = Conv2D(nb_filter[1], (3, 3), activation='relu', padding='same')(conv2_3)
up1_4 = Conv2DTranspose(nb_filter[0], (2, 2), strides=(2, 2), name='up14', padding='same')(conv2_3)
conv1_4 = concatenate([up1_4, conv1_1, conv1_2, conv1_3], name='merge14', axis=bn_axis)
conv1_4 = Conv2D(nb_filter[0], (3, 3), activation='relu', padding='same')(conv1_4)
conv1_4 = Conv2D(nb_filter[0], (3, 3), activation='relu', padding='same')(conv1_4)
conv5_1 = Conv2D(nb_filter[4], (3, 3), activation='relu', padding='same')(pool4)
conv5_1 = Conv2D(nb_filter[4], (3, 3), activation='relu', padding='same')(conv5_1)
up4_2 = Conv2DTranspose(nb_filter[3], (2, 2), strides=(2, 2), name='up42', padding='same')(conv5_1)
conv4_2 = concatenate([up4_2, conv4_1], name='merge42', axis=bn_axis)
conv4_2 = Conv2D(nb_filter[3], (3, 3), activation='relu', padding='same')(conv4_2)
conv4_2 = Conv2D(nb_filter[3], (3, 3), activation='relu', padding='same')(conv4_2)
up3_3 = Conv2DTranspose(nb_filter[2], (2, 2), strides=(2, 2), name='up33', padding='same')(conv4_2)
conv3_3 = concatenate([up3_3, conv3_1, conv3_2], name='merge33', axis=bn_axis)
conv3_3 = Conv2D(nb_filter[2], (3, 3), activation='relu', padding='same')(conv3_3)
conv3_3 = Conv2D(nb_filter[2], (3, 3), activation='relu', padding='same')(conv3_3)
up2_4 = Conv2DTranspose(nb_filter[1], (2, 2), strides=(2, 2), name='up24', padding='same')(conv3_3)
conv2_4 = concatenate([up2_4, conv2_1, conv2_2, conv2_3], name='merge24', axis=bn_axis)
conv2_4 = Conv2D(nb_filter[1], (3, 3), activation='relu', padding='same')(conv2_4)
conv2_4 = Conv2D(nb_filter[1], (3, 3), activation='relu', padding='same')(conv2_4)
up1_5 = Conv2DTranspose(nb_filter[0], (2, 2), strides=(2, 2), name='up15', padding='same')(conv2_4)
conv1_5 = concatenate([up1_5, conv1_1, conv1_2, conv1_3, conv1_4], name='merge15', axis=bn_axis)
conv1_5 = Conv2D(nb_filter[0], (3, 3), activation='relu', padding='same')(conv1_5)
conv1_5 = Conv2D(nb_filter[0], (3, 3), activation='relu', padding='same')(conv1_5)
nestnet_output_1 = Conv2D(num_class, (1, 1), activation='sigmoid', name='output_1', kernel_initializer = 'he_normal', padding='same', kernel_regularizer=l2(1e-4))(conv1_2)
nestnet_output_2 = Conv2D(num_class, (1, 1), activation='sigmoid', name='output_2', kernel_initializer = 'he_normal', padding='same', kernel_regularizer=l2(1e-4))(conv1_3)
nestnet_output_3 = Conv2D(num_class, (1, 1), activation='sigmoid', name='output_3', kernel_initializer = 'he_normal', padding='same', kernel_regularizer=l2(1e-4))(conv1_4)
nestnet_output_4 = Conv2D(num_class, (1, 1), activation='sigmoid', name='output_4', kernel_initializer = 'he_normal', padding='same', kernel_regularizer=l2(1e-4))(conv1_5)
    model = Model(inputs=[img_input], outputs=[nestnet_output_4])
    # optimizer: the optimizer, e.g. Adam
    # loss: the training loss, here dice_coef_loss
    # metrics: list of metrics used to evaluate the model during training and testing, here dice_coef
    model.compile(optimizer=Adam(lr=1e-5), loss=dice_coef_loss, metrics=[dice_coef])  # dice_coef_loss as the loss function
return model
def preprocess(imgs):  # reshape/resize the image array
    # numpy's shape attribute gives the array dimensions as a tuple; [0] is the number of images
    imgs_p = np.ndarray((imgs.shape[0], img_rows, img_cols), dtype=np.uint8)  # allocate (num_images, img_rows, img_cols)
    for i in range(imgs.shape[0]):
        imgs_p[i] = resize(imgs[i], (img_cols, img_rows), preserve_range=True)  # resize each image to (img_cols, img_rows)
    imgs_p = imgs_p[..., np.newaxis]  # add a channel dimension: 3-D -> 4-D
    return imgs_p
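# Shape sketch (hypothetical input, not part of the original script): an array of
# shape (10, 512, 512) -- ten grayscale slices -- comes back from preprocess() with
# shape (10, img_rows, img_cols, 1), i.e. (10, 512, 512, 1) with the settings above.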
def train_and_predict():
print('Loading and preprocessing train data...')
    imgs_train, imgs_mask_train = load_train_data()  # load arrays of shape (num_images, 512, 512)
    imgs_train = preprocess(imgs_train)  # reshape/resize
    imgs_mask_train = preprocess(imgs_mask_train)
    imgs_train = imgs_train.astype('float32')
    mean = np.mean(imgs_train)  # mean for data centering
    std = np.std(imgs_train)  # global standard deviation for data normalization
    imgs_train -= mean
    imgs_train /= std
    imgs_mask_train = imgs_mask_train.astype('float32')
    imgs_mask_train /= 255.  # scale masks to [0, 1]
    # Build the model and start training
    #model = get_unet('/opt/bin/unet_weights.h5') # to resume training, load this weight file and continue
    #model = get_unet() # original U-Net
    model = get_unetw() # original U-Net (wide)
    #model = get_unetpp() # original U-Net++
    # directory where predicted images are saved
    #pred_dir = 'preds_unet'
    pred_dir = 'preds_unetw_38'
    #pred_dir = 'preds_unetpp'
    #save_res = '/opt/bin/unet_weights.h5'############################ change this path when switching models #########################
    save_res = '/opt/bin/unet_weightsw_38.h5'
    #save_res = '/opt/bin/unet_weightspp.h5'
    #'''
    # After the .h5 file has been generated by training, this block can be commented out to evaluate other test sets
model_checkpoint = ModelCheckpoint(save_res, monitor='val_loss', save_best_only=True)
print('Fitting model...')
model.fit(imgs_train,
imgs_mask_train,
batch_size=16,
              epochs=60,  # number of training epochs
verbose=1,
shuffle=True,
validation_split=0.2,
callbacks=[model_checkpoint])
#'''
    # load the test set arrays
imgs_test, imgs_id_test = load_test_data()
imgs_test = preprocess(imgs_test)
imgs_test = imgs_test.astype('float32')
imgs_test -= mean
imgs_test /= std
model.load_weights(save_res)
    # run prediction on the test set
print('Predicting masks on test data...')
imgs_mask_test = model.predict(imgs_test, verbose=1)
np.save('imgs_mask_test.npy', imgs_mask_test)
if not os.path.exists(pred_dir):
os.mkdir(pred_dir)
for image, image_id in zip(imgs_mask_test, imgs_id_test):
image = (image[:, :, 0] * 255.).astype(np.uint8)
imsave(os.path.join(pred_dir, str(image_id) + '_mask.png'), image)
if __name__ == '__main__':
train_and_predict()
``` |
{
"source": "17011813/fsl_ts",
"score": 2
} |
#### File: 17011813/fsl_ts/fsl_ts_maml.py
```python
import numpy as np
import torch
import os
import argparse
import logging
from collections import OrderedDict
from fsl_ts_dataloader import poolRead, getBatchTask
from maml_higher_learner import MAML_Higher_Learner
from plot_tools import plot_loss
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)s line:%(lineno)d - %(message)s")
def main(args):
lstm_config = {
'input_size': 1,
'hidden_size': 64,
'num_layers': 1,
'output_size': 1,
}
train_flag = args.train
eval_flag = args.eval
test_flag = args.test
pred_flag = args.pred
use_cuda = torch.cuda.is_available() and not args.no_cuda
device = torch.device('cuda' if use_cuda else 'cpu')
# create logs dir
if os.path.exists(args.logs_dir) is False:
os.makedirs(args.logs_dir)
# RNN can't use cudnn to compute second-order gradients
with torch.backends.cudnn.flags(enabled=False):
maml = MAML_Higher_Learner(args, lstm_config, device).to(device)
meta_train, meta_test = poolRead()
if train_flag == True:
# /------meta train------/
logging.info(" ------ meta training start ------ ")
train_losses = []
train_history_loss = []
eval_losses = []
for step in range(args.epoch):
x_spt, y_spt, x_qry, y_qry = getBatchTask(meta_train, batch_num=args.task_num)
x_spt, y_spt, x_qry, y_qry = torch.from_numpy(x_spt).to(device), torch.from_numpy(y_spt).to(device), \
torch.from_numpy(x_qry).to(device), torch.from_numpy(y_qry).to(device)
step_loss = maml.train(x_spt, y_spt, x_qry, y_qry)
train_history_loss.append(step_loss)
if step % args.echo_step == 0:
train_history_mean_loss = np.array(train_history_loss).mean()
train_losses.append(train_history_mean_loss)
logging.info('step: {}, train_loss = {}'.format(step, train_history_mean_loss))
train_history_loss = []
# /------meta evaluate------/
                # a cleaner approach would hold out a separate meta_eval split for validation; since no fine-tuning is done here, meta_test is used directly
if step % args.eval_step == 0:
eval_history_loss = []
logging.info(" ------ meta evaluating start ------ ")
# debug setting: using meta train to make sure meta_train loss is converge
x_ev_spt, y_ev_spt, x_ev_qry, y_ev_qry = getBatchTask(meta_test, batch_num=args.task_num_eval)
x_ev_spt, y_ev_spt, x_ev_qry, y_ev_qry = torch.from_numpy(x_ev_spt).to(device), \
torch.from_numpy(y_ev_spt).to(device), \
torch.from_numpy(x_ev_qry).to(device), \
torch.from_numpy(y_ev_qry).to(device)
task_num_eval = x_ev_spt.shape[0]
for i in range(task_num_eval):
eval_loss = maml.fineTunning(x_ev_spt[i], y_ev_spt[i], x_ev_qry[i], y_ev_qry[i], task_i=i)
eval_history_loss.append(eval_loss)
mean_eval_losses = np.array(eval_history_loss).mean()
eval_losses.append(mean_eval_losses)
logging.info("mean eval loss on {} meta eval tasks = {}".format(task_num_eval, mean_eval_losses))
# plot train loss
train_plot_dir = os.path.join(
args.figure_dir, 'train_loss_epoch-{}_metalr-{}_updatelr-{}'.format(
args.epoch, args.meta_lr, args.update_lr
))
plot_loss(np.array(train_losses), train_plot_dir)
# plot eval loss
eval_plot_dir = os.path.join(
args.figure_dir, 'eval_loss_epoch-{}_metalr-{}_updatelr-{}'.format(
args.epoch, args.meta_lr, args.update_lr
))
plot_loss(np.array(eval_losses), eval_plot_dir)
# save init_params
if os.path.exists(args.model_params_dir) is False:
os.makedirs(args.model_params_dir)
save_path = os.path.join(
args.model_params_dir, 'maml_init_params_epoch-{}_metalr-{}_updatelr-{}.pt'.format(
args.epoch, args.meta_lr, args.update_lr
))
maml.saveParams(save_path)
if test_flag == True:
# /------meta test------/
logging.info(" ------ meta testing start ------ ")
if args.check_point is not None:
maml_params_file = os.path.join(args.model_params_dir, args.check_point)
maml.load_state_dict(torch.load(maml_params_file))
# debug setting: using meta train to make sure meta_train loss is converge
x_test_spt, y_test_spt, x_test_qry, y_test_qry = getBatchTask(meta_test, batch_num=args.task_num_test)
x_test_spt, y_test_spt, x_test_qry, y_test_qry = torch.from_numpy(x_test_spt).to(device), \
torch.from_numpy(y_test_spt).to(device), \
torch.from_numpy(x_test_qry).to(device), \
torch.from_numpy(y_test_qry).to(device)
task_num_test = x_test_spt.shape[0]
test_losses = []
for i in range(task_num_test):
pred_dir = os.path.join(args.figure_dir, 'meta_test_query_predict_epoch-{}_metalr-{}_updatelr-{}'.format(
args.epoch, args.meta_lr, args.update_lr
))
if args.check_point is not None:
pred_dir = os.path.join(pred_dir, '_with_checkpoint')
else:
pred_dir = os.path.join(pred_dir, '_without_checkpoint')
test_loss = maml.fineTunning(x_test_spt[i], y_test_spt[i], x_test_qry[i], y_test_qry[i],
task_i=i + 1, predict=pred_flag, pred_dir=pred_dir)
logging.info("test loss on meta test task({}) = {}".format(i + 1, test_loss))
test_losses.append(test_loss)
mean_test_losses = np.array(test_losses).mean()
logging.info("mean test loss on {} meta test tasks = {}".format(task_num_test, mean_test_losses))
if __name__ == '__main__':
argparser = argparse.ArgumentParser()
argparser.add_argument('--no_cuda', action="store_true", help="set this parameter not to use gpu")
argparser.add_argument('--train', action="store_true", help="set this parameter to train meta learner")
argparser.add_argument('--eval', action="store_true", help="set this parameter to evaluate meta learner")
argparser.add_argument('--test', action="store_true", help="set this parameter to test meta learner")
argparser.add_argument('--pred', action="store_true", help="set this parameter to predict query set from meta test")
argparser.add_argument('--echo_step', type=int, help='steps to echo a train loss', default=10)
argparser.add_argument('--eval_step', type=int, help='steps to echo a eval loss', default=100)
argparser.add_argument('--epoch', type=int, help='epoch number', default=50)
argparser.add_argument('--n_way', type=int, help='n way', default=3)
argparser.add_argument('--k_spt', type=int, help='k shot for support set', default=2)
argparser.add_argument('--k_qry', type=int, help='k shot for query set', default=8)
argparser.add_argument('--task_num', type=int, help='meta_train batch size, namely task_num', default=16)
argparser.add_argument('--task_num_eval', type=int, help='meta_eval batch size, namely task_num_eval', default=16)
argparser.add_argument('--task_num_test', type=int, help='meta_test batch size, namely task_num_test', default=16)
argparser.add_argument('--meta_lr', type=float, help='meta-level outer learning rate', default=1e-3)
argparser.add_argument('--update_lr', type=float, help='task-level inner update learning rate', default=0.4)
argparser.add_argument('--update_step', type=int, help='task-level inner update steps', default=5)
argparser.add_argument('--update_step_test', type=int, help='update steps for finetunning', default=10)
argparser.add_argument('--clip_val', type=float, help='clipping value to avoid gradient explosion', default=1.0)
argparser.add_argument('--model_params_dir', type=str,
help='path to store model parameters', default=os.path.join('fsl_model_params', 'maml'))
argparser.add_argument('--figure_dir', type=str,
help='path to store maml figures', default=os.path.join('fsl_figures', 'maml'))
argparser.add_argument('--check_point', type=str,
help='the check point model params of maml learner in model_params_dir', default=None)
argparser.add_argument('--logs_dir', type=str,
help='path to echo meta training/testing logs', default=os.path.join('fsl_logs', 'maml'))
args = argparser.parse_args()
logging.info(args)
main(args)
```
#### File: 17011813/fsl_ts/maml_higher_learner.py
```python
import torch
import numpy as np
import higher
import os
from torch import nn
from torch import optim
from torch.nn import functional as F
from torch.utils.data import TensorDataset, DataLoader
from torch import optim
from torch.autograd import Variable
from lstm_learner import LSTM
from copy import deepcopy
from plot_tools import plot_predict
class MAML_Higher_Learner(nn.Module):
"""
MAML Meta Learner
"""
def __init__(self, args, base_model_config, device):
"""
args: update_lr, meta_lr, n_way, k_spt, k_qry,
task_num, update_step(default: 1, inner loop), update_step_test
base_model_config(LSTM): input_size, hidden_size, num_layers, output_size
device: 'cuda' or 'cpu'
"""
super(MAML_Higher_Learner, self).__init__()
self.update_lr = args.update_lr
self.meta_lr = args.meta_lr
self.n_way = args.n_way
self.k_spt = args.k_spt
self.k_qry = args.k_qry
self.task_num = args.task_num
self.update_step = args.update_step
self.update_step_test = args.update_step_test
self.clip_val = args.clip_val
self.base_model_config = base_model_config
self.device = device
self.net = LSTM(
input_size=base_model_config['input_size'],
hidden_size=base_model_config['hidden_size'],
num_layers=base_model_config['num_layers'],
output_size=base_model_config['output_size'],
device=self.device
)
self.meta_opt = optim.Adam(self.net.parameters(), lr=self.meta_lr)
def train(self, x_spt, y_spt, x_qry, y_qry):
"""
:param x_spt: [batch, setsz, seq_num, seq_len]
:param y_spt: [batch, setsz, seq_num]
:param x_qry: [batch, querysz, seq_num, seq_len]
:param y_qry: [batch, querysz, seq_num]
:return:
"""
task_num, _, _, _ = x_spt.size()
qry_losses = []
inner_opt = torch.optim.SGD(self.net.parameters(), lr=self.update_lr)
# loss_fn = torch.nn.MSELoss()
loss_fn = torch.nn.L1Loss()
self.meta_opt.zero_grad()
        # task_num here is the number of tasks in one meta-batch
for i in range(task_num):
# higher implementation
with higher.innerloop_ctx(self.net, inner_opt, copy_initial_weights=False) as (fnet, diffopt):
# 1. run the i-th task and compute loss for k = 0 ~ self.update_steps
for _ in range(self.update_step):
y_pred_i = fnet(x_spt[i])
spt_loss = loss_fn(y_pred_i, y_spt[i])
diffopt.step(spt_loss)
# query_set meta backfoward
y_pred_i_q = fnet(x_qry[i])
qry_loss = loss_fn(y_pred_i_q, y_qry[i])
qry_losses.append(qry_loss.detach())
# update model's meta-parameters to optimize the query
qry_loss.backward()
self.meta_opt.step()
qry_losses = sum(qry_losses) / task_num
return qry_losses.item()
def fineTunning(self, x_spt, y_spt, x_qry, y_qry, task_i, predict=False, pred_dir=None):
"""
:param x_spt: [setsz, seq_num, seq_len]
:param y_spt: [setsz, seq_num]
:param x_qry: [querysz, seq_num, seq_len]
:param y_qry: [querysz, seq_num]
:return:
"""
assert len(x_spt.shape) == 3
ft_net = deepcopy(self.net)
loss_fn = torch.nn.L1Loss()
optimizer_ft = torch.optim.SGD(ft_net.parameters(), lr=self.update_lr)
test_loss = 0
# non-higher implementation
for _ in range(self.update_step_test):
y_pred_spt = ft_net(x_spt)
spt_loss = loss_fn(y_pred_spt, y_spt)
optimizer_ft.zero_grad()
spt_loss.backward()
# clipping to avoid gradient explosion
torch.nn.utils.clip_grad_norm_(ft_net.parameters(), self.clip_val)
optimizer_ft.step()
# query loss
y_pred_qry = ft_net(x_qry)
qry_loss = loss_fn(y_pred_qry, y_qry)
test_loss = qry_loss.detach().item()
# prediction if pred is set to be True
if predict == True:
ts_pred, ts_ori = self.predictOneStep(ft_net, x_qry, y_qry)
task_pred_dir = os.path.join(pred_dir, 'meta_test_task_{}'.format(task_i))
if os.path.exists(task_pred_dir) is False:
os.makedirs(task_pred_dir)
for i in range(ts_pred.shape[0]):
fig_name = os.path.join(task_pred_dir, 'query_{}.png'.format(i + 1))
plot_predict(y_pred=ts_pred[i], y_true=ts_ori[i], fig_name=fig_name)
return test_loss
def saveParams(self, save_path):
torch.save(self.state_dict(), save_path)
def predictOneStep(self, fnet, x, y):
"""
:param x: [setsz, seq_num, seq_len]
:param y: [setsz, seq_num]
:return ts_pred: [setsz, ts_len]
:return ts_ori: [setsz, ts_len]
"""
assert len(x.shape) == 3 and len(y.shape) == 2
setsz, _, _ = x.size()
ts_pred = []
ts_ori = []
for i in range(setsz):
ts_pred_i = fnet(x[i].unsqueeze(0))
ts_pred_i_cpu = ts_pred_i.data.cpu().numpy()
ts_pred_i_cpu = np.squeeze(ts_pred_i_cpu)
ts_ori_i_cpu = y[i].data.cpu().numpy()
ts_pred.append(ts_pred_i_cpu)
ts_ori.append(ts_ori_i_cpu)
return np.array(ts_pred), np.array(ts_ori)
def main():
pass
if __name__ == '__main__':
main()
``` |
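Because `MAML_Higher_Learner` only reads a handful of attributes from `args`, it can be driven without the CLI in `fsl_ts_maml.py`. The sketch below builds one random meta-batch shaped as in the `train()` docstring; it assumes the `LSTM` base learner in `lstm_learner.py` accepts `[batch, seq_num, seq_len]` input (as the calls above imply), and the hyperparameters are illustrative rather than taken from the repo.
```python
# Minimal driver sketch (assumptions noted above; shapes follow the train() docstring).
from types import SimpleNamespace
import torch

args = SimpleNamespace(update_lr=0.4, meta_lr=1e-3, n_way=3, k_spt=2, k_qry=8,
                       task_num=4, update_step=5, update_step_test=10, clip_val=1.0)
lstm_config = {'input_size': 1, 'hidden_size': 64, 'num_layers': 1, 'output_size': 1}
device = torch.device('cpu')

maml = MAML_Higher_Learner(args, lstm_config, device).to(device)

# one meta-batch: 4 tasks, support sets of 2 series, query sets of 8 series,
# each series cut into 10 windows of length 5
x_spt, y_spt = torch.randn(4, 2, 10, 5), torch.randn(4, 2, 10)
x_qry, y_qry = torch.randn(4, 8, 10, 5), torch.randn(4, 8, 10)
print("meta-batch query loss:", maml.train(x_spt, y_spt, x_qry, y_qry))
```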
{
"source": "17012/practicum",
"score": 3
} |
#### File: practicum/hardware/set_pin_display.py
```python
import sys
from threading import Thread
import drivers
from time import sleep
import RPi.GPIO as GPIO
import random
# setup hardware
GPIO.setmode(GPIO.BCM)
GPIO.setup(18, GPIO.IN,pull_up_down=GPIO.PUD_UP)
GPIO.setup(24, GPIO.IN,pull_up_down=GPIO.PUD_UP)
display = drivers.Lcd()
pin_list = [random.randint(0,9) for i in range(4)]
focus_pin = 0
submitted = False
def LCD_show_text(text1, text2):
global display
display.lcd_display_string(text1, 1)
display.lcd_display_string(text2, 2)
def LCD_Display():
global pin_list
global focus_pin
global display
# pin_display = ' '.join(str(pin_list[i]) if i != focus_pin else "[%s]" % (str(pin_list[i])) for i in range(len(pin_list)))
pin_display = ""
for i in range(len(pin_list)):
if i == focus_pin:
pin_display += "[%s] " %(str(pin_list[i]))
else:
pin_display += " %s " %(str(pin_list[i]))
display.lcd_display_string("Enter your pin", 1)
display.lcd_display_string(pin_display, 2)
def press_btn():
global pin_list
global focus_pin
global submitted
# while True:
left_btn = GPIO.input(24)
right_btn = GPIO.input(18)
if not left_btn and not right_btn :
# print("submitted!")
print(''.join(str(pin) for pin in pin_list))
submitted = True
# continue
if left_btn == False:
focus_pin += 1
focus_pin %= 4
# print("cursor : ", focus_pin)
if right_btn == False:
pin_list[focus_pin] += 1
pin_list[focus_pin] %= 10
# print("number : ", pin_list[focus_pin])
try:
while not submitted:
LCD_Display()
press_btn()
sleep(0.07)
if submitted:
LCD_show_text("Hardware wallet", " ;)")
sleep(1.5)
sys.exit()
display.lcd_clear()
except KeyboardInterrupt:
display.lcd_clear()
``` |
{
"source": "1705095/HackNsu2_TEAM_RETURN_ZERO",
"score": 2
} |
#### File: HackNsu2_TEAM_RETURN_ZERO/home/views.py
```python
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import reverse
from login_signup import models as ls
from login_signup.models import *
from products.models import *
from .forms import orderForm,deliveredForm
from products import models as p
import datetime
from django.http import HttpResponseRedirect
from django.urls import reverse
# Create your views here.
def check_usertype(request):
if request.user.is_authenticated:
if Customer.objects.filter(user=request.user.id).exists():
return 'customer', Customer.objects.get(user=request.user.id)
elif Vendor.objects.filter(user=request.user.id).exists():
return 'vendor', Vendor.objects.get(user=request.user.id)
elif Employee.objects.filter(user=request.user.id).exists():
return 'employee', Employee.objects.get(user=request.user.id)
elif request.user.username == 'return_zero':
return 'admin', ' '
else:
return ' ', ' '
else:
return ' ', ' '
def index(request):
usertype, _ = check_usertype(request)
dict = {}
dict['raw_materials'] = False
if usertype.lower() == 'vendor' or usertype.lower() == 'admin' or usertype.lower() == 'employee':
dict['raw_materials'] = True
return render(request, 'index/index.html', dict)
def products(request):
dict = {}
dict['raw_materials'] = False
usertype, _ = check_usertype(request)
if usertype.lower() == 'vendor' or usertype.lower() == 'admin' or usertype.lower() == 'employee':
dict['raw_materials'] = True
dict['products'] = list(p.company_product.objects.all())
return render(request, 'products/all_product.html', dict)
def place_order(product_obj, amount, customer_obj):
time = datetime.date.today()
order = p.order.objects.create(order_time=time, order_amount=amount, customer_fk=customer_obj,
product_fk=product_obj)
    order.save()
def order_view(request , pk):
usertype, _ = check_usertype(request)
if not usertype.lower() == 'customer':
return HttpResponseRedirect(reverse('products'))
if request.method == 'POST':
amount = request.POST.get('order_quantity')
print(amount)
prod = p.company_product.objects.get(pk=pk)
prod.stock = prod.stock - int(amount)
prod.save()
customer = list(ls.Customer.objects.filter(user=request.user))[0]
place_order(prod , amount , customer)
ntfi_msg = '"Product: {} " , "Quantity {}"'.format(prod.name, amount)
company_notification.objects.create(noti_msg=ntfi_msg, type="New Order", customer_fk=customer, issue_date=datetime.datetime.now())
return HttpResponseRedirect(reverse('order_history'))
dict = {}
pob = p.company_product.objects.get(pk=pk)
dict['product_name'] = pob.name
dict['price'] = pob.price
dict['stock'] = pob.stock
dict['form'] = orderForm
dict['raw_materials'] = False
usertype, _ = check_usertype(request)
if usertype.lower() == 'vendor' or usertype.lower() == 'admin' or usertype.lower() == 'employee':
dict['raw_materials'] = True
return render(request, 'order/order.html', dict)
def customer_order_history_view(request):
cust_obj = list(ls.Customer.objects.filter(user=request.user))[0]
# print(cust_obj)
orders = list(p.order.objects.filter(customer_fk=cust_obj))
dict = {}
dict['order_list'] = orders
dict['customer_name'] = cust_obj
return render(request, 'order/customer_order_history.html', dict)
def customers(request):
dict = {}
dict['raw_materials'] = False
usertype, _ = check_usertype(request)
if usertype.lower() == 'vendor' or usertype.lower() == 'admin' or usertype.lower() == 'employee':
dict['raw_materials'] = True
dict['customers'] = list(Customer.objects.all())
return render(request, 'review/customer_review.html', dict)
### admin views for customer here
def customer_order_history_admin(request , pk):
cust_obj = ls.Customer.objects.get(pk=pk)
orders = list(p.order.objects.filter(customer_fk=cust_obj))
dict = {}
dict['order_list'] = orders
dict['customer_name'] = cust_obj
dict['form'] = deliveredForm
usertype, user = check_usertype(request)
if usertype.lower() == 'admin':
form = deliveredForm(request.POST or None)
if request.method == 'POST' and form.is_valid():
kotono = int(request.POST.get('kotono'))
print(type(kotono))
is_delivered = form.cleaned_data['delivered']
if is_delivered:
c=1
for i in orders:
if c == kotono:
i.status = 'Delivered'
i.save()
print(i.status)
break
c=c+1
return HttpResponseRedirect(reverse('customer_order_history_admin',args=[cust_obj.pk]))
#return render(request, 'order/customer_order_history_admin.html', dict)
return render(request, 'order/customer_order_history_admin.html' , dict)
elif usertype.lower() == 'customer':
cust_obj = list(ls.Customer.objects.filter(user=request.user))[0]
# print(cust_obj)
orders = list(p.order.objects.filter(customer_fk=cust_obj))
dict = {}
dict['order_list'] = orders
dict['customer_name'] = cust_obj
return render(request, 'order/customer_order_history.html', dict)
def customer_profile_admin(request , pk):
usertype, user = check_usertype(request)
if usertype.lower() == 'admin':
cust_ob = ls.Customer.objects.get(pk = pk)
dict={}
dict['email'] = cust_ob.user.email
dict['company_name'] = cust_ob.company_name
dict['type'] = 'Customer'
dict['cust_ob'] = cust_ob
dict['vendor_personal'] = False
dict['companyA'] = False
dict['raw_materials'] = False
return render(request, 'profile/customer_profile_admin.html', dict)
else:
dict = {}
dict['raw_materials'] = False
usertype, _ = check_usertype(request)
if usertype.lower() == 'vendor' or usertype.lower() == 'admin' or usertype.lower() == 'employee':
dict['raw_materials'] = True
dict['customers'] = list(Customer.objects.all())
return render(request, 'review/customer_review.html', dict)
def vendors(request):
vendors = []
vendors_raw = Vendor.objects.all()
for v in vendors_raw:
vendors.append([v.company_name, v.user.email, v.user.id])
dict = {'vendors': vendors}
dict['raw_materials'] = False
usertype, _ = check_usertype(request)
if usertype.lower() == 'vendor' or usertype.lower() == 'admin' or usertype.lower() == 'employee':
dict['raw_materials'] = True
return render(request, 'profile/vendor_list.html', dict)
def contact(request):
dict = {}
dict['raw_materials'] = False
usertype, _ = check_usertype(request)
if usertype.lower() == 'vendor' or usertype.lower() == 'admin' or usertype.lower() == 'employee':
dict['raw_materials'] = True
return render(request, 'others/contact.html', dict)
def vendor_public_profile(request, vendor_user_id):
user = User.objects.get(id=vendor_user_id)
vendor = Vendor.objects.get(user=user)
dict = {}
dict['email'] = user.email
dict['company_name'] = vendor.company_name
dict['type'] = 'vendor'
usertype, user = check_usertype(request)
dict['vendor_personal'] = False
dict['companyA'] = False
public_products = vendor_product.objects.filter(vendor_fk=vendor, public=True)
dict['public_products'] = public_products
if vendor.user.id == request.user.id:
dict['vendor_personal'] = True
elif usertype.lower() == 'admin' or usertype.lower() == 'employee':
dict['companyA'] = True
dict['raw_materials'] = False
usertype, _ = check_usertype(request)
if usertype.lower() == 'vendor' or usertype.lower() == 'admin' or usertype.lower() == 'employee':
dict['raw_materials'] = True
return render(request, 'profile/vendor_profile.html', dict)
```
#### File: HackNsu2_TEAM_RETURN_ZERO/products/models.py
```python
from django.db import models
from login_signup import models as md
# Create your models here.
class vendor_product_categories(models.Model):
category_name = models.CharField(max_length=50, null=False, unique=True)
rank = models.IntegerField(null=True)
def __str__(self):
return self.category_name
class vendor_product(models.Model):
name = models.CharField(max_length=30)
amount = models.CharField(max_length=50, null=True)
price = models.CharField(max_length=30, null=True)
category_fk = models.ForeignKey(vendor_product_categories, on_delete=models.DO_NOTHING, null=True)
vendor_fk = models.ForeignKey(md.Vendor, on_delete=models.CASCADE, null=True)
public = models.BooleanField(default=True)
def __str__(self):
# Built-in attribute of django.contrib.auth.models.User !
return self.name
class company_product(models.Model):
name = models.CharField(max_length=30)
price = models.IntegerField(null=True)
stock = models.IntegerField(null=True)
def __str__(self):
# Built-in attribute of django.contrib.auth.models.User !
return self.name + "( Stock :"+str(self.stock)+" Price :"+str(self.price)+")"
class order(models.Model):
order_time = models.DateField()
order_amount = models.IntegerField(null=True)
status = models.CharField(max_length=30 , default='Pending')
customer_fk = models.ForeignKey(md.Customer , on_delete=models.DO_NOTHING, null=True)
product_fk = models.ForeignKey(company_product, on_delete=models.DO_NOTHING, null=True)
def __str__(self):
# Built-in attribute of django.contrib.auth.models.User !
return self.customer_fk.user.username + " amount: "+ str(self.order_amount)
#return " amount: "+ str(self.order_amount)
class notification(models.Model):
noti_msg = models.CharField(max_length=1000)
type = models.CharField(max_length=200, null=True, choices=[("Future Demand", "Possible Rise in Demand Soon"), ("Bid Success", "Your Bid Has Been Accepted by Company A"), ("Relevant Bid", "Company A Has Posted A Requirement which might Interest you")])
vendor_fk = models.ManyToManyField(md.Vendor , blank=True)
issue_date = models.DateField(null=True)
def __str__(self):
return self.noti_msg
class company_notification(models.Model):
noti_msg = models.CharField(max_length=1000)
type = models.CharField(max_length=200, null=True, choices=[("New Bid", "New Bid"), ("New Order", "New Order")])
vendor_fk = models.ForeignKey(md.Vendor , on_delete=models.DO_NOTHING, null=True, blank=True)
customer_fk = models.ForeignKey(md.Customer , on_delete=models.DO_NOTHING, null=True, blank=True)
issue_date = models.DateField(null=True)
def __str__(self):
return self.noti_msg
class raw_material_requirments(models.Model):
description = models.CharField(max_length=1000)
quantity = models.CharField(max_length=50)
category_fk = models.ForeignKey(vendor_product_categories, on_delete=models.DO_NOTHING, null=True)
issue_date = models.DateField(null=True)
vendor_fk = models.ForeignKey(md.Vendor, on_delete=models.DO_NOTHING, null=True, blank=True)
bids = models.ManyToManyField(md.Vendor, related_name="bids", blank = True)
def __str__(self):
return self.description[:min(120, len(self.description))]
class bid_details(models.Model):
proposal = models.CharField(max_length=1000)
vendor_fk = models.ForeignKey(md.Vendor, on_delete=models.CASCADE)
req_fk = models.ForeignKey(raw_material_requirments, on_delete=models.CASCADE)
def __str__(self):
return self.proposal
``` |
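The foreign keys above tie an `order` to both a `Customer` and a `company_product`, which is how the order-history views earlier filter per customer. A hedged Django-shell sketch follows; the object values are illustrative only, and it assumes migrations for `login_signup` and `products` are applied and at least one customer exists.
```python
# Django shell sketch -- illustrative values only.
import datetime
from login_signup.models import Customer
from products.models import company_product, order

customer = Customer.objects.first()  # any existing customer
widget = company_product.objects.create(name="Widget", price=120, stock=500)
order.objects.create(order_time=datetime.date.today(), order_amount=20,
                     customer_fk=customer, product_fk=widget)

# pending orders for that customer, as listed by customer_order_history_view
print(order.objects.filter(customer_fk=customer, status='Pending').count())
```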
{
"source": "170928/-Review-Multi-Agent-Actor-Critic-for-Mixed-Cooperative-Competitive-Environment",
"score": 2
} |
#### File: 170928/-Review-Multi-Agent-Actor-Critic-for-Mixed-Cooperative-Competitive-Environment/main.py
```python
import numpy as np
import tensorflow as tf
import random
import tensorflow.layers as layer
from collections import deque
import datetime
import time
from multiagent.environment import MultiAgentEnv
from multiagent.policy import InteractivePolicy
import multiagent.scenarios as scenarios
from maddpg import MADDPGAgent
from ReplayBuffer import ReplayBuffer
from noise import OU
########################################
action_size = 5
load_model = False
train_mode = True
batch_size = 256
mem_maxlen = 10000
discount_factor = 0.99
learning_rate = 0.00025
run_episode = 1000000
start_train_episode = 5
target_update_step = 500
print_interval = 100
save_interval = 1000
epsilon_min = 0.1
softlambda = 0.9
date_time = str(datetime.date.today()) + '_' + \
str(datetime.datetime.now().hour) + '_' + \
str(datetime.datetime.now().minute) + '_' + \
str(datetime.datetime.now().second)
load_path = './three_weight/'
# ====================================================
# Noise parameters - <NAME>
DELTA = 0.4 # The rate of change (time)
SIGMA = 0.2 # Volatility of the stochastic processes
OU_A = 3. # The rate of mean reversion
OU_MU = 0. # The long run average interest rate
# ====================================================
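# The constants above parameterize the Ornstein-Uhlenbeck exploration noise created
# later via noise.OU(DELTA, SIGMA, OU_A, OU_MU).  Schematically (for illustration only;
# the actual implementation lives in noise.py, not shown here), an OU process updates as
#   x_{t+1} = x_t + OU_A * (OU_MU - x_t) * DELTA + SIGMA * sqrt(DELTA) * N(0, 1)
# so the noise level is pulled back toward OU_MU at rate OU_A while SIGMA adds jitter.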
###########################################
def get_agents_action(obs_n, obs_shape_n, sess):
agent1_action = agent1_ddpg.action(state=np.reshape(obs_n[0], newshape=[-1,obs_shape_n[0]]), sess=sess)
agent2_action = agent2_ddpg.action(state=np.reshape(obs_n[1], newshape=[-1,obs_shape_n[1]]), sess=sess)
agent3_action = agent3_ddpg.action(state=np.reshape(obs_n[2], newshape=[-1,obs_shape_n[2]]), sess=sess)
return agent1_action, agent2_action, agent3_action
def train_agent(agent, agent_target, agent_memory, sess, other_actors):
total_obs_batch, total_act_batch, rew_batch, total_next_obs_batch, done_mask = agent_memory.sample(batch_size)
act_batch = total_act_batch[:, 0, :]
other_act_batch = np.hstack([total_act_batch[:, 1, :], total_act_batch[:, 2, :]])
obs_batch = total_obs_batch[:, 0, :]
next_obs_batch = total_next_obs_batch[:, 0, :]
next_other_actor1_o = total_next_obs_batch[:, 1, :]
next_other_actor2_o = total_next_obs_batch[:, 2, :]
next_other_action = np.hstack([other_actors[0].action(next_other_actor1_o, sess), other_actors[1].action(next_other_actor2_o, sess)])
target = rew_batch.reshape(-1, 1) + discount_factor * agent_target.Q(state=next_obs_batch, action=agent.action(next_obs_batch, sess), other_action=next_other_action, sess=sess)
agent.train_actor(state=obs_batch, action=act_batch, other_action=other_act_batch, sess=sess)
agent.train_critic(state=obs_batch, action=act_batch, other_action=other_act_batch, target=target, sess=sess)
def train_target(agent_actor_target_update, agent_critic_target_update, sess):
sess.run([agent_actor_target_update, agent_critic_target_update])
def create_init_update(oneline_name, target_name, tau=0.99):
online_var = [i for i in tf.trainable_variables() if oneline_name in i.name]
target_var = [i for i in tf.trainable_variables() if target_name in i.name]
target_init = [tf.assign(target, online) for online, target in zip(online_var, target_var)]
target_update = [tf.assign(target, (1 - tau) * online + tau * target) for online, target in zip(online_var, target_var)]
return target_init, target_update
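# Worked example of the soft ("Polyak") update above, with illustrative numbers: for the
# default tau = 0.99, an online weight of 0.0 and a target weight of 1.0 give
#   (1 - 0.99) * 0.0 + 0.99 * 1.0 = 0.99
# after one update, so each call moves the target network only 1% of the way toward the
# online network, which keeps the critic targets slowly varying during training.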
if __name__=="__main__":
# Particle-environment
# https://github.com/openai/multiagent-particle-envs
print(tf.__version__)
# load scenario from script
# scenario = scenarios.load('simple_adversary.py').Scenario()
scenario = scenarios.load('simple_spread.py').Scenario()
# create world
world = scenario.make_world()
# create multiagent environment
env = MultiAgentEnv(world, scenario.reset_world, scenario.reward, scenario.observation, info_callback=None,
shared_viewer=True)
obs_n = env.reset()
print("# of agent {}".format(env.n))
print("action dim : ", env.action_space)
obs_shape_n = [18,18,18]
print("observation dim : {}".format(obs_shape_n))
# Agent Generation =======================================
agent1_ddpg = MADDPGAgent(env.n, obs_shape_n[0], action_size, '1')
agent1_ddpg_target = MADDPGAgent(env.n, obs_shape_n[0], action_size, 'target1')
agent2_ddpg = MADDPGAgent(env.n, obs_shape_n[1], action_size, '2')
agent2_ddpg_target = MADDPGAgent(env.n, obs_shape_n[1], action_size, 'target2')
agent3_ddpg = MADDPGAgent(env.n, obs_shape_n[2], action_size, '3')
agent3_ddpg_target = MADDPGAgent(env.n, obs_shape_n[2], action_size, 'target3')
# Save & Load ============================================
Saver = tf.train.Saver(max_to_keep=5)
load_path = load_path
# self.Summary,self.Merge = self.make_Summary()
# ========================================================
# Agent initialization ===================================
agent1_actor_target_init, agent1_actor_target_update = create_init_update('Pimodel_1', 'Pimodel_target1')
agent1_critic_target_init, agent1_critic_target_update = create_init_update('Qmodel_1', 'Qmodel_target1')
agent2_actor_target_init, agent2_actor_target_update = create_init_update('Pimodel_2', 'Pimodel_target2')
agent2_critic_target_init, agent2_critic_target_update = create_init_update('Qmodel_2', 'Qmodel_target2')
agent3_actor_target_init, agent3_actor_target_update = create_init_update('Pimodel_3', 'Pimodel_target3')
agent3_critic_target_init, agent3_critic_target_update = create_init_update('Qmodel_3', 'Qmodel_target3')
# ========================================================
# Session Initialize =====================================
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.4
sess = tf.Session(config=config)
if load_model == True:
ckpt = tf.train.get_checkpoint_state(load_path)
Saver.restore(sess, ckpt.model_checkpoint_path)
print("Restore Model")
else:
init = tf.global_variables_initializer()
sess.run(init)
print("Initialize Model")
sess.run([agent1_actor_target_init, agent1_critic_target_init,
agent2_actor_target_init, agent2_critic_target_init,
agent3_actor_target_init, agent3_critic_target_init])
# ========================================================
# Tensorboard ============================================
reward_history = [tf.Variable(0, dtype=tf.float32) for i in range(env.n)]
reward_op = [tf.summary.scalar('agent' + str(i) + '_reward', reward_history[i]) for i in range(env.n)]
summary_writer = tf.summary.FileWriter('./three_summary', graph=tf.get_default_graph())
# ========================================================
# Replay Buffer ======================================
agent1_memory = ReplayBuffer(mem_maxlen)
agent2_memory = ReplayBuffer(mem_maxlen)
agent3_memory = ReplayBuffer(mem_maxlen)
# ========================================================
#e = 1
noise = OU(DELTA, SIGMA, OU_A, OU_MU)
ou_level = 0.
train_mode = True
for roll_out in range(1000000):
print("[{}]".format(roll_out))
obs_n = env.reset()
for episode in range(100):
env.render()
agent1_action, agent2_action, agent3_action = get_agents_action(obs_n, obs_shape_n, sess)
# Discrete action space ================
'''
acs_agent1 = np.zeros((action_size,))
acs_agent2 = np.zeros((action_size,))
acs_agent3 = np.zeros((action_size,))
acs = []
if train_mode == True and e > np.random.rand():
for agent_index in range(env.n):
acs.append(np.random.randint(0,action_size))
else:
acs.append(np.argmax(agent1_action))
acs.append(np.argmax(agent2_action))
acs.append(np.argmax(agent3_action))
print(acs[0])
acs_n = [acs_agent1, acs_agent2, acs_agent3]
'''
if roll_out < 5000:
agent1_action[0] += noise.ornstein_uhlenbeck_level(ou_level)
agent2_action[0] += noise.ornstein_uhlenbeck_level(ou_level)
agent3_action[0] += noise.ornstein_uhlenbeck_level(ou_level)
ou_level = noise.ornstein_uhlenbeck_level(ou_level)
# ======================================
#acs_n = [agent1_action[0], agent2_action[0], agent3_action[0]]
acs_n = [[0, i[0][0], 0, i[0][1], 0] for i in [agent1_action, agent2_action, agent3_action]]
o_n_next, r_n, d_n, i_n = env.step(acs_n)
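            # Each agent keeps its own replay buffer. Observations, actions and next
            # observations are rotated so that the owning agent's entries come first,
            # followed by the other agents' (the usual MADDPG critic input layout).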
agent1_memory.add(np.vstack([obs_n[0], obs_n[1], obs_n[2]]),
np.vstack([agent1_action[0], agent2_action[0], agent3_action[0]]),
r_n[0], np.vstack([o_n_next[0], o_n_next[1], o_n_next[2]]), False)
agent2_memory.add(np.vstack([obs_n[1], obs_n[2], obs_n[0]]),
np.vstack([agent2_action[0], agent3_action[0], agent1_action[0]]),
r_n[1], np.vstack([o_n_next[1], o_n_next[2], o_n_next[0]]), False)
agent3_memory.add(np.vstack([obs_n[2], obs_n[0], obs_n[1]]),
np.vstack([agent3_action[0], agent1_action[0], agent2_action[0]]),
r_n[2], np.vstack([o_n_next[2], o_n_next[0], o_n_next[1]]), False)
obs_n = o_n_next
for agent_index in range(3):
summary_writer.add_summary(
sess.run(reward_op[agent_index], {reward_history[agent_index]: r_n[agent_index]}), roll_out)
if roll_out > start_train_episode:
#e *= 0.9999
train_agent(agent1_ddpg, agent1_ddpg_target, agent1_memory, sess, [agent2_ddpg_target, agent3_ddpg_target])
train_agent(agent2_ddpg, agent2_ddpg_target, agent2_memory, sess, [agent3_ddpg_target, agent1_ddpg_target])
train_agent(agent3_ddpg, agent3_ddpg_target, agent3_memory, sess, [agent1_ddpg_target, agent2_ddpg_target])
if roll_out % 10 == 0:
train_target(agent1_actor_target_update, agent1_critic_target_update, sess)
train_target(agent2_actor_target_update, agent2_critic_target_update, sess)
train_target(agent3_actor_target_update, agent3_critic_target_update, sess)
if roll_out % 1000 == 0:
Saver.save(sess, './three_weight/' + str(roll_out) + '.cptk')
``` |
{
"source": "1713175349/tbshg",
"score": 2
} |
#### File: tbshg/tbshg/optic.py
```python
from .utils.constant import epsilon0
from .hamiltonian import tbHamiltonian,Hamiltonian
from . import tbshg_core
from .utils.wanniers import readwannierfolder_H
from .utils.configs import readconfig
from .utils.kmesh import KPOINTS_mesh
import os
import numpy as np
from mpi4py import MPI
comm = MPI.COMM_WORLD
class optproperty(object):
"""
    Used to compute various optical properties (e.g. SHG and linear susceptibility).
"""
def __init__(self,H:tbHamiltonian,ksi:float=0.02):
self.H0=H
self.solver=tbshg_core.solveopt(self.H0.H,ksi)
self.config=None
@property
def Vcell(self):
return self.H0.Vcell
@classmethod
def fromconfig(cls,fn:str):
config=readconfig(fn)
path0=config["datafilepath"]
dirn=os.path.split(path0)[0]
seedname=os.path.split(path0)[1]
H=tbHamiltonian.from_wannier_dir(directory=dirn,prefix=seedname,
bandgapadd=float(config["bandgapadd"] if config.get("bandgapadd") else 0.0),
cutoff=float(config["cutoff"] if config.get("cutoff") else -1),
fermi=float(config["fermi"])
)
out = cls(H,float(config["ksi"]))
out.config=config
        out.solver.setup_mc()  # initialize the Monte Carlo method
return out
def get_kmesh(self):
nkx,nky,nkz=[int(i) for i in self.config["nkx,nky,nkz"].split(",")]
nkxs,nkys,nkzs=[float(i) for i in self.config["nkxs,nkys,nkzs"].split(",")]
return KPOINTS_mesh(self.H0,nkx,nky,nkz,shift=[nkxs,nkys,nkzs])
def get_hv(self):
hv=np.linspace(*[int(i) for i in self.config["hvrange"].split(",")])
return hv
def get_directindices(self):
directs=[[int(j) for j in i.split()] for i in self.config["directs"].split(",")]
return directs
def solve_shg(self,kmesh:np.ndarray,hv:np.ndarray,directindices:np.ndarray,savek:bool=False,show_progress:bool=False):
"""
        Compute the SHG susceptibility. If savek is True, the per-k-point values are also returned; results are in units of nm/V.
"""
shg_k=None
nkpts=len(kmesh)
if savek:
            shg_k=np.zeros((len(kmesh),len(directindices),len(hv)),dtype=np.complex128)
shg_i=np.zeros((len(directindices),len(hv)),dtype=np.complex128)
for i in range(len(kmesh)):
if show_progress:
print("rank: ",comm.rank,", kpoint: ",i,"/",nkpts,"progress: ",i/nkpts,flush=True)
self.H0.solve_one_kpoint(kmesh[i])
kshg = self.solver.get_shg(kmesh[i],hv,directindices)
shg_i+=kshg
if savek:
shg_k[i]=kshg
if savek:
return shg_i/self.Vcell/epsilon0/nkpts,shg_k/self.Vcell/epsilon0
else:
return shg_i/self.Vcell/epsilon0/nkpts
def solve_shg_mc(self,kmesh:np.ndarray,hv:np.ndarray,directindices:np.ndarray,savek:bool=False,show_progress:bool=False,times:int=10000):
"""
        Compute the SHG susceptibility with Monte Carlo sampling. If savek is True, the per-k-point values are also returned; results are in units of nm/V.
        With roughly 2,000,000 samples the value is essentially converged for a single k point with 13 bands.
"""
shg_k=None
nkpts=len(kmesh)
if savek:
            shg_k=np.zeros((len(kmesh),len(directindices),len(hv)),dtype=np.complex128)
shg_i=np.zeros((len(directindices),len(hv)),dtype=np.complex128)
for i in range(len(kmesh)):
if show_progress:
print("rank: ",comm.rank,", kpoint: ",i,"/",nkpts,"progress: ",i/nkpts,flush=True)
self.H0.solve_one_kpoint(kmesh[i])
kshg = self.solver.get_shg_mc(kmesh[i],hv,directindices,times)
shg_i+=kshg
if savek:
shg_k[i]=kshg
if savek:
return shg_i/self.Vcell/epsilon0/nkpts,shg_k/self.Vcell/epsilon0
else:
return shg_i/self.Vcell/epsilon0/nkpts
def solve_linechi(self,kmesh:np.ndarray,hv:np.ndarray,directindices:np.ndarray,savek:bool=False,show_progress:bool=False):
"""
        Compute the linear susceptibility.
"""
        nkpts=len(kmesh)
        linechi_k=None
        if savek:
            linechi_k=np.zeros((len(kmesh),len(directindices),len(hv)),dtype=np.complex128)
        linechi_i=np.zeros((len(directindices),len(hv)),dtype=np.complex128)
        for i in range(len(kmesh)):
            if show_progress:
                print("rank: ",comm.rank,", kpoint: ",i,"/",nkpts,"progress: ",i/nkpts,flush=True)
            self.H0.solve_one_kpoint(kmesh[i])
            kchi = self.solver.get_linechi(kmesh[i],hv,directindices)
            linechi_i+=kchi
            if savek:
                linechi_k[i]=kchi
        if savek:
            return linechi_i/self.Vcell/epsilon0/nkpts,linechi_k/self.Vcell/epsilon0
        return linechi_i/self.Vcell/epsilon0/nkpts
def solve_shg_from_config(self):
"""
        Compute SHG using the settings from the config file.
"""
kmesh=self.get_kmesh()
hv=self.get_hv()
directindices=self.get_directindices()
return parallel_kmesh(self.solve_shg,kmesh,hv,directindices)
def solve_shg_mc_from_config(self,times:int=10000):
"""
        Compute SHG using the settings from the config file, via the Monte Carlo method.
"""
kmesh=self.get_kmesh()
hv=self.get_hv()
directindices=self.get_directindices()
return parallel_kmesh(self.solve_shg_mc,kmesh,hv,directindices,times=times)
def solve_linechi_from_config(self):
"""
        Compute the linear susceptibility using the settings from the config file.
"""
kmesh=self.get_kmesh()
hv=self.get_hv()
directindices=[[int(j) for j in i.split()] for i in self.config["directslinear"].split(",")]
return parallel_kmesh(self.solve_linechi,kmesh,hv,directindices)
def parallel_kmesh(solverfunc,Kmesh:KPOINTS_mesh,hv:np.ndarray,directindices:np.ndarray,**kwargs):
"""
    Distribute the k-mesh across MPI ranks and combine the per-rank results.
"""
kmesh_splited = np.array_split(Kmesh.kmesh,comm.size)
result = solverfunc(kmesh_splited[comm.rank],hv=hv,directindices=directindices,show_progress=True,**kwargs)
if comm.rank==0:
result_i=np.zeros((comm.size,len(directindices),len(hv)),dtype=np.complex128)
result_i[0]=result
for i in range(1,comm.size):
if len(kmesh_splited[i])>0:
result_i[i]=comm.recv(source=i,tag=i)
else:
if len(kmesh_splited[comm.rank])>0:
comm.send(result,dest=0,tag=comm.rank)
comm.Barrier()
if comm.rank==0:
weights=np.array([i.shape[0] for i in kmesh_splited])
result0=0
for i in range(comm.size):
result0+=result_i[i]*weights[i]
return result0/np.sum(weights)
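if __name__ == "__main__":
    # Minimal usage sketch (an assumption, not part of the original module): drive an
    # MPI-parallel SHG calculation from a config file named "config.in" (hypothetical
    # file name) and save the spectrum on rank 0.
    opt = optproperty.fromconfig("config.in")
    shg = opt.solve_shg_from_config()
    if comm.rank == 0:
        np.save("shg.npy", shg)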
``` |
{
"source": "1713mz/deep_representation_one_class",
"score": 2
} |
#### File: deep_representation_one_class/util/train.py
```python
import json
import os
import random
import shutil
import time
from absl import logging
import numpy as np
from sklearn.mixture import GaussianMixture as GMM
from sklearn.svm import OneClassSVM
import tensorflow as tf
from tqdm import trange
from data.celeba import CelebA
from data.cifar import CIFAROOD
from data.dogvscat import DogVsCatOOD
from data.own import OwndataOOD
from data.fmnist import FashionMNISTOOD
from model import resnet as model
import util.metric as util_metric
from util.scheduler import CustomLearningRateSchedule as CustomSchedule
_SUPPORTED_DATASET = frozenset([
'cifar10ood', 'cifar20ood', 'cifar100ood', 'fashion_mnistood', 'fmnistood',
'dogvscatood','owndataood', 'dvcood', 'celeba'
])
def setup_tf():
logging.set_verbosity(logging.ERROR)
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if not physical_devices:
logging.info('No GPUs are detected')
for dev in physical_devices:
tf.config.experimental.set_memory_growth(dev, True)
return tf.distribute.MirroredStrategy()
class BaseTrain(object):
"""Base model trainer.
Model constructor:
Parameters
Data loader
Model architecture
Optimizer
Model trainer:
Custom train loop
Evaluation loop
"""
def __init__(self, hparams):
self.strategy = setup_tf()
self.hparams = hparams
# data
self.is_validation = hparams.is_validation
self.root = hparams.root
self.dataset = hparams.dataset
self.category = hparams.category
self.aug_list = hparams.aug_list.split(',')
self.aug_list_for_test = hparams.aug_list_for_test.split(
',') if hparams.aug_list_for_test is not None else None
self.input_shape = tuple(
[int(float(s)) for s in hparams.input_shape.split(',')])
try:
self.distaug_type = int(hparams.distaug_type)
except ValueError:
self.distaug_type = hparams.distaug_type
# network architecture
self.net_type = hparams.net_type
self.net_width = hparams.net_width
self.head_dims = tuple([int(d) for d in hparams.head_dims.split(',') if d
]) if hparams.head_dims not in [None, ''] else None
self.latent_dim = hparams.latent_dim
# optimizer
self.seed = hparams.seed
self.force_init = hparams.force_init
self.optim_type = hparams.optim_type
self.sched_type = hparams.sched_type
self.sched_freq = hparams.sched_freq
self.sched_step_size = hparams.sched_step_size
self.sched_gamma = hparams.sched_gamma
self.sched_min_rate = hparams.sched_min_rate
self.sched_level = hparams.sched_level
self.learning_rate = hparams.learning_rate
self.weight_decay = hparams.weight_decay
self.regularize_bn = hparams.regularize_bn
self.weight_decay_constraint = []
if self.regularize_bn:
self.weight_decay_constraint.append('bn')
self.momentum = hparams.momentum
self.nesterov = hparams.nesterov
self.num_epoch = hparams.num_epoch
self.num_batch = hparams.num_batch
self.batch_size = hparams.batch_size
# monitoring and checkpoint
self.ckpt_prefix = os.path.join(hparams.model_dir, hparams.ckpt_prefix)
self.ckpt_epoch = hparams.ckpt_epoch
self.file_path = hparams.file_path
# additional hparams
self.set_hparams(hparams=hparams)
self.set_metrics()
def set_random_seed(self):
seed = self.seed
if seed > 0:
random.seed(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
def set_hparams(self, hparams):
pass
def config(self):
"""Config."""
self.set_random_seed()
# Data loader.
self.get_dataloader()
# Model architecture.
self.model = self.get_model(
arch=self.net_type,
width=self.net_width,
head_dims=self.head_dims,
input_shape=self.input_shape,
num_class=self.latent_dim)
# Scheduler.
self.scheduler, self.sched_name = self.get_scheduler(
sched_type=self.sched_type,
step_per_epoch=1 if self.sched_freq == 'step' else self.num_batch,
max_step=self.num_epoch * self.num_batch,
learning_rate=self.learning_rate,
**{
'step_size': self.sched_step_size,
'gamma': self.sched_gamma,
'min_rate': self.sched_min_rate,
'level': self.sched_level
})
# Optimizer.
self.optimizer, self.optim_name = self.get_optimizer(
scheduler=self.scheduler,
optim_type=self.optim_type,
learning_rate=self.learning_rate,
**{
'momentum': self.momentum,
'nesterov': self.nesterov
})
# Set file path.
self.get_file_path()
def get_dataloader(self):
"""Gets the data loader."""
dl = self.get_dataset(self.root, self.dataset.lower(), self.category,
self.input_shape)
datasets = dl.load_dataset(
is_validation=self.is_validation,
aug_list=self.aug_list,
aug_list_for_test=self.aug_list_for_test,
batch_size=self.batch_size,
num_batch_per_epoch=self.num_batch,
distaug_type=self.distaug_type)
# train_loader: train data for representation learning (augmentation)
# cls_loader: train data for classifier learning (no augmentation)
# test_loader: test data
self.train_loader = datasets[0]
if isinstance(self.train_loader, (list, tuple)):
self.num_batch = self.train_loader[1]
self.train_loader = self.train_loader[0]
self.cls_loader = datasets[1]
self.test_loader = datasets[2]
self.db_name = dl.fname
if self.strategy:
self.train_loader = self.strategy.experimental_distribute_dataset(
self.train_loader)
self.cls_loader[0] = self.strategy.experimental_distribute_dataset(
self.cls_loader[0])
self.test_loader[0] = self.strategy.experimental_distribute_dataset(
self.test_loader[0])
@staticmethod
def get_dataset(root, dataset, category, input_shape):
"""Gets the dataset."""
if dataset not in _SUPPORTED_DATASET:
msg = (f'Unsupported dataset {dataset} is provided. Only '
f'{_SUPPORTED_DATASET} are available.')
raise ValueError(msg)
if dataset in ['cifar10ood', 'cifar20ood', 'cifar100ood']:
dl = CIFAROOD(
root=root,
dataset=dataset,
category=category,
input_shape=input_shape or (32, 32, 3))
elif dataset in ['fashion_mnistood', 'fmnistood']:
dl = FashionMNISTOOD(
root=root,
dataset=dataset,
category=category,
input_shape=input_shape or (32, 32, 3))
elif dataset in ['dogvscatood', 'dvcood']:
dl = DogVsCatOOD(
root=root,
dataset=dataset,
category=category,
input_shape=input_shape or (64, 64, 3))
elif dataset in ['owndataood', 'odood']:
dl = OwndataOOD(
root=root,
dataset=dataset,
category=category,
input_shape=input_shape or (32, 32, 3))
elif dataset == 'celeba':
dl = CelebA(
root=root,
dataset=dataset,
category=category,
input_shape=input_shape or (64, 64, 3))
return dl
@staticmethod
def get_model(arch='ResNet18',
width=1.0,
head_dims=None,
input_shape=(256, 256, 3),
num_class=2):
"""Gets the ResNet model."""
net = model.__dict__[arch](
width=width,
head_dims=head_dims,
input_shape=input_shape,
num_class=num_class)
net.summary()
return net
@staticmethod
def get_optimizer(scheduler, optim_type='sgd', learning_rate=0.03, **kwargs):
"""Gets the optimizer."""
if optim_type == 'sgd':
momentum = kwargs['momentum'] if 'momentum' in kwargs else 0.9
nesterov = kwargs['nesterov'] if 'nesterov' in kwargs else False
optimizer = tf.keras.optimizers.SGD(
learning_rate=scheduler, momentum=momentum, nesterov=nesterov)
name = 'sgd_lr{:g}_mom{:g}'.format(learning_rate, momentum)
if nesterov:
name += '_nesterov'
elif optim_type == 'adam':
optimizer = tf.keras.optimizers.Adam(
learning_rate=scheduler, amsgrad=True)
name = 'adam_lr{:g}'.format(learning_rate)
else:
raise NotImplementedError
return optimizer, name
@staticmethod
def get_scheduler(sched_type='cosine',
step_per_epoch=1,
max_step=256,
learning_rate=0.1,
**kwargs):
"""Gets the scheduler."""
scheduler = CustomSchedule(
step_per_epoch=step_per_epoch,
base_lr=learning_rate,
max_step=max_step,
mode=sched_type,
**kwargs)
return scheduler, scheduler.name
def get_file_path(self):
"""Gets the file path for saving."""
if self.file_path:
self.file_path = os.path.join(self.ckpt_prefix, self.file_path)
else:
self.file_path = os.path.join(
self.ckpt_prefix, '{}_seed{}'.format(self.db_name, self.seed),
self.model.name, '{}_{}_{}_wd{:g}_{}_epoch{}_nb{}_bs{}'.format(
self.__class__.__name__, self.optim_name, self.sched_name,
self.weight_decay, '_'.join(self.weight_decay_constraint),
self.num_epoch, self.num_batch, self.batch_size))
if self.file_suffix:
self.file_path = '{}_{}'.format(self.file_path, self.file_suffix)
self.file_path = self.file_path.replace('__', '_')
self.json_path = os.path.join(self.file_path, 'stats')
def get_current_train_epoch(self):
"""Returns current training epoch."""
return tf.math.floordiv(self.optimizer.iterations, self.num_batch).numpy()
def get_current_train_step(self):
"""Returns current training step."""
return self.optimizer.iterations
def get_checkpoint(self):
"""Restores from the checkpoint and returns start epoch."""
self.checkpoint.restore(self.manager.latest_checkpoint)
self.epoch = start_epoch = self.get_current_train_epoch()
self.step = self.get_current_train_step()
return start_epoch
def train(self):
"""Called for model training."""
start_epoch = self.train_begin()
if self.num_epoch == 0:
self.train_epoch_begin()
else:
for _ in range(start_epoch, self.num_epoch):
self.train_epoch_begin()
self.train_epoch()
self.train_epoch_end(
is_eval=False, is_save=(self.epoch % self.ckpt_epoch == 0))
self.train_epoch_end(is_eval=True, is_save=True)
self.train_end()
def train_begin(self):
"""Initializes metrics, checkpoint, summary at the beginning of training."""
self.metrics = {}
self.metrics.update({
key: tf.keras.metrics.Mean()
for key in self.list_of_metrics
if key.startswith(('loss'))
})
self.metrics.update({
key: tf.keras.metrics.Accuracy()
for key in self.list_of_metrics
if key.startswith('acc')
})
self.monitor = {
'learning_rate': 0,
'step_per_second': 0,
}
self.eval_metrics = {}
self.eval_metrics.update({key: None for key in self.list_of_eval_metrics})
if self.force_init:
shutil.rmtree(self.file_path, ignore_errors=True)
# Generate file paths
if not tf.io.gfile.isdir(self.file_path):
tf.io.gfile.makedirs(self.file_path)
if not tf.io.gfile.isdir(self.json_path):
tf.io.gfile.makedirs(self.json_path)
# Checkpoint
self.checkpoint = tf.train.Checkpoint(
optimizer=self.optimizer, model=self.model)
self.manager = tf.train.CheckpointManager(
checkpoint=self.checkpoint,
directory=os.path.join(self.file_path, 'raw'),
max_to_keep=1)
self.tensorboard_dir = os.path.join(self.file_path, 'tb')
self.summary_writer = tf.summary.create_file_writer(
logdir=self.tensorboard_dir)
# Initiate train iterator once
# Note that creating iterator every epoch slows down
# the training since it clears the data buffer
self.train_iterator = iter(self.train_loader)
self.cls_iterator = (iter(self.cls_loader[0]), self.cls_loader[1])
self.test_iterator = (iter(self.test_loader[0]), self.test_loader[1])
return self.get_checkpoint()
def train_end(self, verbose=False):
"""Saves and prints summary statistics."""
self.manager.save()
self.summary_writer.close()
if verbose:
logdir = self.tensorboard_dir
event_files = [
event for event in tf.io.gfile.glob(os.path.join(logdir, '*'))
]
event_files.sort(key=os.path.getmtime)
event_dict = {
key: []
for key in self.metrics.keys()
if not key.startswith('monitor')
}
event_dict.update({key: [] for key in self.eval_metrics.keys()})
for event_file in event_files:
for event in tf.compat.v1.train.summary_iterator(event_file):
for v in event.summary.value:
if v.tag.replace('/', '.') in event_dict:
event_dict[v.tag.replace('/', '.')].append(
tf.make_ndarray(v.tensor).tolist())
# Print stats of last 20 epochs in json format
num_epoch_to_save = 20
event_dict = {
key: event_dict[key][-num_epoch_to_save:] for key in event_dict
}
if not os.path.isdir(self.json_path):
os.makedirs(self.json_path)
summary_dict = {}
for key in event_dict:
dict_to_write = {
'median (last%02d)' % x: np.median(event_dict[key][-x:])
for x in [1, 5, 10, num_epoch_to_save]
}
dict_to_write.update(
{'last%02d' % (num_epoch_to_save): event_dict[key]})
with open(os.path.join(self.json_path, key + '.json'), 'w') as outfile:
json.dump(dict_to_write, outfile, sort_keys=True, indent=4)
if key in self.metric_of_interest:
summary_dict.update({key: dict_to_write})
with open(os.path.join(self.json_path, 'summary.json'),
'w') as outfile:
json.dump(summary_dict, outfile, sort_keys=True, indent=4)
# Print basic information
logging.info('')
logging.info('----------------------------------------------------------')
logging.info('Train is done. Below are file path and basic test stats\n')
logging.info('File path:\n')
logging.info(self.file_path)
if not isinstance(self.metric_of_interest, (list, tuple)):
self.metric_of_interest = [self.metric_of_interest]
for moi in self.metric_of_interest:
del summary_dict[moi]['last%02d' % (num_epoch_to_save)]
logging.info('Eval stats:\n')
logging.info(json.dumps(summary_dict, sort_keys=True, indent=4))
logging.info('----------------------------------------------------------')
      logging.info('')
else:
with tf.io.gfile.GFile(os.path.join(self.json_path, 'summary.json'),
'w') as outfile:
json.dump(self.eval_metrics, outfile, sort_keys=True, indent=4)
with tf.io.gfile.GFile(os.path.join(self.json_path, 'hparams.json'),
'w') as outfile:
json.dump(self.hparams, outfile, indent=4, sort_keys=True)
def train_epoch(self):
"""Called for model training per epoch."""
time_init = time.time()
for _ in trange(
self.num_batch,
leave=False,
desc='Epoch (train) %d/%d' % (self.epoch + 1, self.num_epoch)):
self.train_step(self.train_iterator)
self.monitor['step_per_second'] = self.num_batch / (time.time() - time_init)
def train_epoch_begin(self):
"""Called at the beginning of epoch.
- Reset metrics
- Adjust learning rate
"""
for _, metric in self.metrics.items():
metric.reset_states()
self.epoch = self.get_current_train_epoch()
self.step = self.get_current_train_step()
self.monitor['learning_rate'] = self.optimizer.learning_rate(
self.optimizer.iterations).numpy()
def train_epoch_end(self, is_eval=False, is_save=False):
"""Evaluates and monitors performance at the end of epoch."""
if is_save:
self.manager.save()
if is_eval:
self.eval_epoch(trainset=self.cls_iterator, testset=self.test_iterator)
self.monitor_progress(verbose=True)
@tf.function
def train_step(self, iterator):
"""Executes each train step."""
def step_fn(data):
replica_context = tf.distribute.get_replica_context()
xo, xc = data[0], data[1]
x = tf.concat((xo, xc), axis=0)
y = tf.concat((tf.zeros(
xo.shape[0], dtype=tf.int32), tf.ones(xc.shape[0], dtype=tf.int32)),
axis=0)
with tf.GradientTape() as tape:
logits = self.model(x, training=True)['logits']
loss_xe = tf.keras.losses.sparse_categorical_crossentropy(
y, logits, from_logits=True)
loss_xe = tf.divide(
tf.reduce_sum(loss_xe),
self.cross_replica_concat(loss_xe,
replica_context=replica_context).shape[0])
loss_l2 = self.loss_l2(self.model.trainable_weights)
loss = loss_xe + self.weight_decay * loss_l2
grad = tape.gradient(loss, self.model.trainable_weights)
self.optimizer.apply_gradients(zip(grad, self.model.trainable_weights))
# monitor
self.metrics['loss.train'].update_state(loss)
self.metrics['loss.xe'].update_state(loss_xe)
self.metrics['loss.L2'].update_state(loss_l2)
self.metrics['acc.train'].update_state(y, tf.argmax(logits, axis=1))
# Call one step
self.strategy.run(step_fn, args=(next(iterator),))
def loss_l2(self, var_list):
for c in self.weight_decay_constraint:
var_list = [v for v in var_list if c not in v.name]
loss_l2 = tf.add_n([tf.nn.l2_loss(v) for v in var_list])
return tf.divide(loss_l2, self.strategy.num_replicas_in_sync)
def squared_difference(self, a, b, do_normalization=True):
"""Computes (a-b) ** 2."""
if do_normalization:
a = tf.nn.l2_normalize(a, axis=1)
b = tf.nn.l2_normalize(b, axis=1)
return -2. * tf.matmul(a, b, transpose_b=True)
return tf.norm(
a, axis=1, keepdims=True)**2 + tf.transpose(
tf.norm(b, axis=1, keepdims=True)**2) - 2. * tf.matmul(
a, b, transpose_b=True)
def eval_epoch(self, trainset, testset):
self.eval_embed(trainset=trainset, testset=testset)
def eval_embed(self, trainset, testset):
"""Evaluate performance on test set."""
_, _, embeds_tr, pools_tr, _ = self.extract(trainset)
probs, dscores, embeds, pools, labels = self.extract(testset)
sim_embed = -0.5 * self.squared_difference(embeds, embeds_tr, True)
sim_pool = -0.5 * self.squared_difference(pools, pools_tr, True)
dist_embed = tf.reduce_mean(1.0 - tf.nn.top_k(sim_embed, k=1)[0], axis=1)
dist_pool = tf.reduce_mean(1.0 - tf.nn.top_k(sim_pool, k=1)[0], axis=1)
for key in self.eval_metrics:
if key.startswith('logit'):
pred = 1.0 - probs[:, 0]
elif key.startswith('dscore'):
pred = 1.0 - dscores
elif key.startswith('embed'):
pred = dist_embed
feats_tr = embeds_tr.numpy()
feats = embeds.numpy()
sim = sim_embed
elif key.startswith('pool'):
pred = dist_pool
feats_tr = pools_tr.numpy()
feats = pools.numpy()
sim = sim_pool
if 'auc' in key:
self.eval_metrics[key] = util_metric.roc(pr=pred, gt=labels)
elif 'locsvm' in key and key.startswith(('embed', 'pool')):
# Linear kernel OC-SVM.
clf = OneClassSVM(kernel='linear').fit(feats_tr)
scores = -clf.score_samples(feats)
self.eval_metrics[key] = util_metric.roc(pr=scores, gt=labels)
elif 'kocsvm' in key and key.startswith(('embed', 'pool')):
# RBF kernel OC-SVM.
feats_tr = tf.nn.l2_normalize(feats_tr, axis=1)
feats = tf.nn.l2_normalize(feats, axis=1)
# 10 times larger value of gamma.
gamma = 10. / (tf.math.reduce_variance(feats_tr) * feats_tr.shape[1])
clf = OneClassSVM(kernel='rbf', gamma=gamma).fit(feats_tr)
scores = -clf.score_samples(feats)
self.eval_metrics[key] = util_metric.roc(pr=scores, gt=labels)
elif 'kde' in key and key.startswith(('embed', 'pool')):
# RBF kernel density estimation.
feats_tr = tf.nn.l2_normalize(feats_tr, axis=1)
gamma = 10. / (tf.math.reduce_variance(feats_tr) * feats_tr.shape[1])
scores = None
batch_size_for_kde = 100
num_iter = int(np.ceil(sim.shape[0] / batch_size_for_kde))
for i in range(num_iter):
sim_batch = sim[i * batch_size_for_kde:(i + 1) * batch_size_for_kde]
scores_batch = -tf.divide(
tf.reduce_logsumexp(2 * gamma * sim_batch, axis=1), gamma)
scores = scores_batch if scores is None else tf.concat(
(scores, scores_batch), axis=0)
self.eval_metrics[key] = util_metric.roc(pr=scores, gt=labels)
elif 'gde' in key and key.startswith(('embed', 'pool')):
# Gaussian density estimation with full covariance.
feats_tr = tf.nn.l2_normalize(feats_tr, axis=1)
feats = tf.nn.l2_normalize(feats, axis=1)
km = GMM(n_components=1, init_params='kmeans', covariance_type='full')
km.fit(feats_tr)
scores = -km.score_samples(feats)
self.eval_metrics[key] = util_metric.roc(pr=scores, gt=labels)
def extract(self, dataset):
"""Extract logits, embeds, pool, and labels."""
outputs = {
'logits': None,
'dscore': None,
'embeds': None,
'pools': None,
'labels': None
}
inference = self.model
iterator, num_batch = dataset[0], dataset[1]
if self.aug_list_for_test is not None:
num_aug = len(self.aug_list_for_test)
else:
num_aug = 1
for _ in trange(
num_batch,
leave=False,
desc='Extract %d/%d' % (self.epoch + 1, self.num_epoch)):
logits, embeds, pools, y = self.extract_step(iterator, inference)
if num_aug > 1:
probs = tf.nn.softmax(logits, axis=1)
probs = tf.split(probs, num_aug)
dscore = tf.math.exp(
tf.reduce_sum(
tf.math.log(
tf.concat([probs[i][:, i:i + 1] for i in range(len(probs))],
axis=1)),
axis=1))
logits = tf.split(logits, num_aug)[0]
embeds = tf.split(embeds, num_aug)[0]
pools = tf.split(pools, num_aug)[0]
else:
dscore = tf.nn.softmax(logits, axis=1)[:, 0]
outputs['logits'] = self.smart_concat(outputs['logits'], logits)
outputs['dscore'] = self.smart_concat(outputs['dscore'], dscore)
outputs['embeds'] = self.smart_concat(outputs['embeds'], embeds)
outputs['pools'] = self.smart_concat(outputs['pools'], pools)
outputs['labels'] = self.smart_concat(outputs['labels'], y)
return (tf.nn.softmax(outputs['logits'], axis=1), outputs['dscore'],
outputs['embeds'], outputs['pools'], tf.squeeze(outputs['labels']))
@tf.function
def extract_step(self, iterator, inference):
"""Feature extract step."""
def step_fn(data):
"""Step."""
x, y = data[0:-2], data[-2]
output = inference(tf.concat(x, axis=0), training=False)
return (output['logits'], output['embeds'], output['pools'], y)
out = self.strategy.run(step_fn, args=(next(iterator),))
return [tf.concat(self.strategy.unwrap(o), axis=0) for o in out]
def monitor_progress(self, verbose=False):
"""Monitor train/eval variables."""
# Tensorboard
with self.summary_writer.as_default():
vis_step = (self.epoch + 1) * self.num_batch
for key, metric in self.metrics.items():
tf.summary.scalar(
key.replace('.', '/', 1), metric.result(), step=vis_step)
tf.summary.scalar(
'monitor/step_per_second',
self.monitor['step_per_second'],
step=vis_step)
tf.summary.scalar(
'monitor/lr', self.monitor['learning_rate'], step=vis_step)
if verbose:
for key, metric in self.eval_metrics.items():
if metric is not None:
tf.summary.scalar(key.replace('.', '/', 1), metric, step=vis_step)
# Command line.
template = ('Epoch {epoch:4d}/{max_epoch:4d}\tstep(sec): '
'{step_per_second:.3f}\tLoss: {loss:.3f}\tAcc: {acc:.3f}')
logging.info(
template.format(
epoch=self.epoch + 1,
max_epoch=self.num_epoch,
step_per_second=self.monitor['step_per_second'],
loss=self.metrics['loss.train'].result(),
acc=self.metrics['acc.train'].result()))
@staticmethod
def smart_concat(var1, var2):
"""Smart concat."""
def _smart_concat(var1, var2):
return var2 if var1 is None else tf.concat((var1, var2), axis=0)
if isinstance(var2, list):
if var1 is not None:
assert isinstance(var1, list)
return [_smart_concat(v1, v2) for v1, v2 in zip(var1, var2)]
else:
return var2
else:
if var1 is not None:
assert not isinstance(var1, list)
return _smart_concat(var1, var2)
@staticmethod
def cross_replica_concat(tensor, replica_context=None):
"""Reduces a concatenation of the `tensor` across TPU cores.
Args:
tensor: tensor to concatenate.
replica_context: A `replica_context`. If not set, CPU execution is
assumed.
Returns:
Tensor of the same rank as `tensor` with first dimension `num_replicas`
times larger.
"""
if replica_context is None or replica_context.num_replicas_in_sync <= 1:
return tensor
num_replicas = replica_context.num_replicas_in_sync
with tf.name_scope('cross_replica_concat'):
# This creates a tensor that is like the input tensor but has an added
# replica dimension as the outermost dimension. On each replica it will
# contain the local values and zeros for all other values that need to be
# fetched from other replicas.
ext_tensor = tf.scatter_nd(
indices=[[replica_context.replica_id_in_sync_group]],
updates=[tensor],
shape=[num_replicas] + tensor.shape.as_list())
# As every value is only present on one replica and 0 in all others,
# adding them all together will result in the full tensor on all replicas.
ext_tensor = replica_context.all_reduce(tf.distribute.ReduceOp.SUM,
ext_tensor)
# Flatten the replica dimension.
# The first dimension size will be: tensor.shape[0] * num_replicas
# Using [-1] trick to support also scalar input.
return tf.reshape(ext_tensor, [-1] + ext_tensor.shape.as_list()[2:])
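# Minimal usage sketch (an assumption; `hparams` must provide every attribute read in
# BaseTrain.__init__):
#   trainer = BaseTrain(hparams)
#   trainer.config()  # build data loaders, model, optimizer, scheduler and output paths
#   trainer.train()   # run the custom train loop with periodic evaluation and checkpointing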
``` |
{
"source": "1715173329/pyncm",
"score": 2
} |
#### File: demos/login/qrlogin.py
```python
import sys
sys.path.insert(0,'\\'.join(sys.path[0].split('\\')[:-2]))
import pyncm.apis
from pyncm import GetCurrentSession,logger,__version__
from pyncm.apis.login import GetCurrentLoginStatus, WriteLoginInfo,LoginQrcodeUnikey,LoginQrcodeCheck
import qrcode,time
print(__version__)
def dot_thingy():
while True:
yield '...';yield '.. ';yield '. '
dot = dot_thingy()
def login():
uuid = LoginQrcodeUnikey()['unikey']
url = f'https://music.163.com/login?codekey={uuid}'
img = qrcode.make(url)
    print('Press Enter to display the QR code')
img.show()
logger.debug(' '.join(('[-] UUID:',uuid)))
while True:
rsp = LoginQrcodeCheck(uuid)
if rsp['code'] == 803 or rsp['code'] == 800:break
message = f"[!] {rsp['code']} -- {rsp['message']}"
print(message,next(dot),end='\r')
time.sleep(1)
WriteLoginInfo(GetCurrentLoginStatus())
logger.debug(' '.join(('[+] Logged in as %s (Last known IP: %s)' % (
GetCurrentSession().login_info['content']['profile']['nickname'],
GetCurrentSession().login_info['content']['profile']['lastLoginIP']
)
)))
# testing search
    print('Testing search: hi')
print(pyncm.apis.cloudsearch.GetSearchResult('hi'))
return True
if __name__ == '__main__':
print('[-] Testing login')
print('[-] Success:',login())
```
#### File: apis/miniprograms/zonefm.py
```python
from .. import EapiCryptoRequest
@EapiCryptoRequest
def GetFmZoneInfo(limit=3,zone="CLASSICAL",e_r=True):
    '''Get FM zone content
    Args:
        limit (int, optional): Number of items to fetch. Defaults to 3.
        zone (str, optional): Zone name. Defaults to "CLASSICAL".
        e_r (bool, optional): [unknown]. Defaults to True.
Returns:
dict
'''
return '/eapi/zone/fm/get',{"limit":str(limit),"zone":zone,"e_r":str(e_r).lower()}
@EapiCryptoRequest
def SetSkipFmTrack(id,zone="CLASSICAL",e_r=True):
    '''Skip a track in the FM zone
    Args:
        id (int): Song ID
        zone (str, optional): Zone name. Defaults to "CLASSICAL".
        e_r (bool, optional): [unknown]. Defaults to True.
Returns:
dict
'''
return '/eapi/zone/fm/skip',{"songId":str(id),"zone":zone,"e_r":str(e_r).lower(),"alg":"CLSalternate","time":"0"}
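# Hypothetical usage sketch: fetch a few tracks from the classical zone, then mark one as
# skipped. The exact layout of the returned dict depends on the NetEase API response, so
# `some_song_id` below is only illustrative.
#   zone_info = GetFmZoneInfo(limit=3, zone="CLASSICAL")
#   SetSkipFmTrack(some_song_id)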
``` |
{
"source": "1715labs/baal",
"score": 2
} |
#### File: active/heuristics/heuristics_gpu.py
```python
from typing import Callable, Optional, Sequence
import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset
from baal import ModelWrapper
available_reductions = {
'max': lambda x: torch.max(x.view([x.shape[0], -1]), -1, ),
'min': lambda x: torch.min(x.view([x.shape[0], -1]), -1, ),
'mean': lambda x: torch.mean(x.view([x.shape[0], -1]), -1, ),
'sum': lambda x: torch.sum(x.view([x.shape[0], -1]), -1, ),
'none': lambda x: x,
}
def _shuffle_subset(data: torch.Tensor, shuffle_prop: float) -> torch.Tensor:
to_shuffle = np.nonzero(np.random.rand(data.shape[0]) < shuffle_prop)[0]
data[to_shuffle, ...] = data[np.random.permutation(to_shuffle), ...]
return data
def requireprobs(fn):
"""Will convert logits to probs if needed"""
def wrapper(self, probabilities):
# Expected shape : [n_sample, n_classes, ..., n_iterations]
bounded = torch.min(probabilities) < 0 or torch.max(probabilities) > 1.0
if bounded or not probabilities.sum(1).allclose(1):
probabilities = F.softmax(probabilities, 1)
return fn(self, probabilities)
return wrapper
class AbstractGPUHeuristic(ModelWrapper):
"""Abstract class that defines a Heuristic.
Args:
shuffle_prop (float): shuffle proportion.
threshold (Optional[float]): threshold the probabilities.
reverse (bool): True if the most uncertain sample has the highest value.
reduction (Union[str, Callable]): Reduction used after computing the score.
"""
def __init__(self, model: ModelWrapper, shuffle_prop=0.0, threshold=None, reverse=False,
reduction='none'):
self.model = model
self.shuffle_prop = shuffle_prop
self.threshold = threshold
self.reversed = reverse
assert reduction in available_reductions or callable(reduction)
self.reduction = reduction if callable(reduction) else available_reductions[reduction]
def compute_score(self, predictions):
"""
Compute the score according to the heuristic.
Args:
predictions (ndarray): Array of predictions
Returns:
Array of scores.
"""
raise NotImplementedError
def get_uncertainties(self, predictions):
"""Get the uncertainties"""
scores = self.compute_score(predictions)
scores = self.reduction(scores)
scores[~torch.isfinite(scores)] = 0.0 if self.reversed else 10000
return scores
def predict_on_batch(self, data, iterations, use_cuda=False):
"""Rank the predictions according to their uncertainties."""
return self.get_uncertainties(self.model.predict_on_batch(data, iterations, cuda=use_cuda))
def predict_on_dataset(self, dataset: Dataset, batch_size: int, iterations: int,
use_cuda: bool, workers: int = 4,
collate_fn: Optional[Callable] = None,
half=False):
"""
Use the model to predict on a dataset `iterations` time.
Args:
dataset (Dataset): Dataset to predict on.
batch_size (int): Batch size to use during prediction.
iterations (int): Number of iterations per sample.
use_cuda (bool): Use CUDA or not.
workers (int): Number of workers to use.
collate_fn (Optional[Callable]): The collate function to use.
half (bool): if True use half precision
Notes:
The "batch" is made of `batch_size` * `iterations` samples.
Returns:
Array [n_samples, n_outputs, ..., n_iterations].
"""
preds = list(self.predict_on_dataset_generator(dataset=dataset, batch_size=batch_size,
iterations=iterations, use_cuda=use_cuda,
workers=workers, collate_fn=collate_fn,
half=half))
if len(preds) > 0 and not isinstance(preds[0], Sequence):
# Is an Array or a Tensor
return np.concatenate(preds)
return [np.concatenate(pr) for pr in zip(*preds)]
class BALDGPUWrapper(AbstractGPUHeuristic):
"""Sort by the highest acquisition function value.
References:
https://arxiv.org/abs/1703.02910
"""
def __init__(self, model: ModelWrapper, shuffle_prop=0.0, threshold=None, reduction='none'):
super().__init__(model,
shuffle_prop=shuffle_prop, threshold=threshold, reverse=True,
reduction=reduction
)
@requireprobs
def compute_score(self, predictions):
assert predictions.ndimension() >= 3
# [n_sample, n_class, ..., n_iterations]
expected_entropy = - torch.mean(torch.sum(predictions * torch.log(predictions + 1e-5), 1),
dim=-1) # [batch size, ...]
expected_p = torch.mean(predictions, dim=-1) # [batch_size, n_classes, ...]
entropy_expected_p = - torch.sum(expected_p * torch.log(expected_p + 1e-5),
dim=1) # [batch size, ...]
bald_acq = entropy_expected_p - expected_entropy
return bald_acq
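# Minimal usage sketch (an assumption; `wrapped_model` is a baal ModelWrapper and `pool`
# an unlabelled Dataset):
#   heuristic = BALDGPUWrapper(wrapped_model, reduction='mean')
#   scores = heuristic.predict_on_dataset(pool, batch_size=16, iterations=20, use_cuda=True)
#   ranked = scores.argsort()[::-1]  # highest BALD score (most informative samples) first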
```
#### File: tests/active/heuristics_gpu_test.py
```python
import os
import numpy as np
import pytest
import torch
from torch import nn
from torch.utils.data import Dataset
from torchvision import datasets
from torchvision.transforms import transforms
from baal import ModelWrapper
from baal.active.heuristics import BALD
from baal.active.heuristics.heuristics_gpu import BALDGPUWrapper
from baal.bayesian import Dropout
from baal.bayesian.dropout import Dropout2d
class Flatten(nn.Module):
def forward(self, x):
return x.view([x.shape[0], -1])
class SimpleDataset(Dataset):
def __init__(self):
self.data = torch.randn(100, 3, 32, 32)
def __len__(self):
return 100
def __getitem__(self, item):
return self.data[item], item % 10
@pytest.fixture
def classification_task(tmpdir):
model = nn.Sequential(nn.Conv2d(3, 32, 3),
nn.ReLU(),
nn.Conv2d(32, 64, 3),
nn.MaxPool2d(2),
nn.AdaptiveAvgPool2d((7, 7)),
Flatten(),
nn.Linear(7 * 7 * 64, 128),
Dropout(),
nn.Linear(128, 10)
)
model = ModelWrapper(model, nn.CrossEntropyLoss())
test = SimpleDataset()
return model, test
def test_bald_gpu(classification_task):
torch.manual_seed(1337)
model, test_set = classification_task
wrap = BALDGPUWrapper(model)
out = wrap.predict_on_dataset(test_set, 4, 10, False, 4)
assert out.shape[0] == len(test_set)
bald = BALD()
torch.manual_seed(1337)
out_bald = bald.get_uncertainties(model.predict_on_dataset(test_set, 4, 10, False, 4))
assert np.allclose(out, out_bald, rtol=1e-5, atol=1e-5)
@pytest.fixture
def segmentation_task(tmpdir):
model = nn.Sequential(nn.Conv2d(3, 32, 3),
nn.ReLU(),
nn.Conv2d(32, 64, 3),
nn.MaxPool2d(2),
nn.Conv2d(64, 64, 3),
Dropout2d(),
nn.ConvTranspose2d(64, 10, 3, 1)
)
model = ModelWrapper(model, nn.CrossEntropyLoss())
test = datasets.CIFAR10(tmpdir, train=False, download=True, transform=transforms.ToTensor())
return model, test
@pytest.mark.skipif('CIRCLECI' in os.environ, reason="Doesn't fit on CIRCLECI")
def test_bald_gpu_seg(segmentation_task):
torch.manual_seed(1337)
model, test_set = segmentation_task
wrap = BALDGPUWrapper(model, reduction='sum')
out = wrap.predict_on_dataset(test_set, 4, 10, False, 4)
assert out.shape[0] == len(test_set)
bald = BALD(reduction='sum')
torch.manual_seed(1337)
out_bald = bald.get_uncertainties_generator(
model.predict_on_dataset_generator(test_set, 4, 10, False, 4))
assert np.allclose(out, out_bald, rtol=1e-5, atol=1e-5)
``` |
{
"source": "171860596/tf-text-classification",
"score": 3
} |
#### File: 171860596/tf-text-classification/train_cnn.py
```python
import os
import time
import json
import warnings
import numpy as np
import tensorflow as tf
from sklearn.externals import joblib
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
import preprocess.data_helpers as data_helpers
from text_classifier.text_cnn import TextCNN
warnings.filterwarnings("ignore")
# Parameters
# ==================================================
# Data loading parameters
tf.flags.DEFINE_string('data_file', './data/train_data', "Data source for the text data")
tf.flags.DEFINE_integer('num_classes', None, "Number classes of labels in data")
tf.flags.DEFINE_float('test_size', 0.05, "Percentage of data to use for validation and test (default: 0.05)")
tf.flags.DEFINE_integer('vocab_size', 9000, "Select words to build vocabulary, according to term frequency (default: 9000)")
tf.flags.DEFINE_integer('sequence_length', 500, "Padding sentences to same length, cut off when necessary (default: 500)")
# Model hyperparameters
tf.flags.DEFINE_integer('embedding_size', 128, "Dimension of word embedding (default: 128)")
tf.flags.DEFINE_string('filter_sizes', '3,4,5', "Filter sizes to use in convolution layer, comma-separated (default: '3,4,5')")
tf.flags.DEFINE_integer('num_filters', 128, "Number of filters per filter size (default: 128)")
tf.flags.DEFINE_float('dropout_keep_prob', 0.5, "Dropout keep probability (default: 0.5)")
tf.flags.DEFINE_float('l2_reg_lambda', 0.0, "L2 regularization lambda (default: 0.0)")
tf.flags.DEFINE_float('learning_rate', 0.001, "Learning rate for model training (default: 0.001)")
tf.flags.DEFINE_float('grad_clip', 3.0, "Gradients clipping threshold (default: 3.0)")
# Training parameters
tf.flags.DEFINE_integer('batch_size', 128, "Batch size (default: 128)")
tf.flags.DEFINE_integer("num_epochs", 20, "Number of training epochs (default: 20)")
tf.flags.DEFINE_string('init_embedding_path', None, "Using pre-trained word embedding, npy file format")
tf.flags.DEFINE_string('init_model_path', None, "Continue training from saved model at this path")
tf.flags.DEFINE_integer('evaluate_every', 50, "Evaluate model on val set after this many steps (default: 50)")
# Tensorflow parameters
tf.flags.DEFINE_boolean('allow_soft_placement', True, "Allow device soft device placement (default: True)")
tf.flags.DEFINE_boolean('log_device_placement', False, "Log placement of ops on devices (default: False)")
tf.flags.DEFINE_boolean('gpu_allow_growth', True, "GPU memory allocation mode (default: True)")
FLAGS = tf.flags.FLAGS
print("\nParameters:")
for param, value in sorted(FLAGS.flag_values_dict().items()):
print("{} = {}".format(param.upper(), value))
print("")
def train_cnn():
# Data Preparation
# ==================================================
if FLAGS.init_embedding_path is not None:
embedding = np.load(FLAGS.init_embedding_path)
print("Using pre-trained word embedding which shape is {}\n".format(embedding.shape))
FLAGS.vocab_size = embedding.shape[0]
FLAGS.embedding_size = embedding.shape[1]
if FLAGS.init_model_path is not None:
assert os.path.isdir(FLAGS.init_model_path), "init_model_path must be a directory\n"
ckpt = tf.train.get_checkpoint_state(FLAGS.init_model_path)
assert ckpt, "No checkpoint found in {}\n".format(FLAGS.init_model_path)
assert ckpt.model_checkpoint_path, "No model_checkpoint_path found in checkpoint\n"
# Create output directory for models and summaries
timestamp = str(int(time.time()))
output_dir = os.path.abspath(os.path.join(os.path.curdir, 'runs', 'textcnn', 'trained_result_' + timestamp))
os.makedirs(output_dir)
# Load data
print("Prepareing data...\n")
data = os.path.abspath(FLAGS.data_file)
x, y = data_helpers.load_data(data, FLAGS.sequence_length, FLAGS.vocab_size, mode='train', output_dir=output_dir)
FLAGS.num_classes = len(y[0])
# Split dataset
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=FLAGS.test_size, stratify=y, random_state=0)
x_val, x_test, y_val, y_test = train_test_split(x_test, y_test, test_size=0.5, random_state=0)
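    # The data is split into train / val / test: val and test each hold test_size / 2
    # of the samples (the first split carves off test_size, the second halves it).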
# Training
# ==================================================
with tf.Graph().as_default():
tf_config = tf.ConfigProto(
allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement)
tf_config.gpu_options.allow_growth = FLAGS.gpu_allow_growth
with tf.Session(config=tf_config).as_default() as sess:
cnn = TextCNN(
vocab_size=FLAGS.vocab_size,
embedding_size=FLAGS.embedding_size,
sequence_length=FLAGS.sequence_length,
filter_sizes=list(map(int, FLAGS.filter_sizes.split(","))),
num_filters=FLAGS.num_filters,
num_classes=FLAGS.num_classes,
learning_rate=FLAGS.learning_rate,
grad_clip=FLAGS.grad_clip,
l2_reg_lambda=FLAGS.l2_reg_lambda)
# Summaries for loss and accuracy
tf.summary.scalar("loss", cnn.loss)
tf.summary.scalar("accuracy", cnn.accuracy)
merged_summary = tf.summary.merge_all()
# Summaries dictionary
print("Writing to {}...\n".format(output_dir))
train_summary_dir = os.path.join(output_dir, 'summaries', 'train')
val_summary_dir = os.path.join(output_dir, 'summaries', 'val')
train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)
val_summary_writer = tf.summary.FileWriter(val_summary_dir, sess.graph)
# Checkpoint directory, will not create itself
checkpoint_dir = os.path.join(output_dir, 'checkpoints')
checkpoint_prefix = os.path.join(checkpoint_dir, 'model.ckpt')
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
saver = tf.train.Saver(tf.global_variables(), max_to_keep=1)
# Initialize all variables
sess.run(tf.global_variables_initializer())
# Using pre-trained word embedding
if FLAGS.init_embedding_path is not None:
sess.run(cnn.embedding.assign(embedding))
del embedding
# Continue training from saved model
if FLAGS.init_model_path is not None:
saver.restore(sess, ckpt.model_checkpoint_path)
# Training start
print("Start training...\n")
best_at_step = 0
best_val_accuracy = 0
for epoch in range(FLAGS.num_epochs):
# Generate train batches
train_batches = data_helpers.batch_iter(list(zip(x_train, y_train)), FLAGS.batch_size)
start = time.time()
for batch in train_batches:
# Training model on x_batch and y_batch
x_batch, y_batch = zip(*batch)
feed_dict = {cnn.input_x: x_batch, cnn.input_y: y_batch, cnn.keep_prob: FLAGS.dropout_keep_prob, cnn.is_training: True}
_, global_step, train_summaries, train_loss, train_accuracy = sess.run([cnn.train_op, cnn.global_step,
merged_summary, cnn.loss, cnn.accuracy], feed_dict=feed_dict)
# Evaluates model on val set
if global_step % FLAGS.evaluate_every == 0:
end = time.time()
train_summary_writer.add_summary(train_summaries, global_step)
feed_dict = {cnn.input_x: x_val, cnn.input_y: y_val, cnn.keep_prob: FLAGS.dropout_keep_prob, cnn.is_training: False}
val_summaries, val_loss, val_accuracy = sess.run([merged_summary, cnn.loss, cnn.accuracy], feed_dict=feed_dict)
val_summary_writer.add_summary(val_summaries, global_step)
print("Epoch: {}, global step: {}, training speed: {:.3f}sec/batch".format(epoch,
global_step, (end - start) / FLAGS.evaluate_every))
print("train loss: {:.3f}, train accuracy: {:.3f}, val loss: {:.3f}, val accuracy: {:.3f}\n".format(train_loss,
train_accuracy, val_loss, val_accuracy))
# If improved, save the model
if val_accuracy > best_val_accuracy:
print("Get a best val accuracy at step {}, model saving...\n".format(global_step))
saver.save(sess, checkpoint_prefix, global_step=global_step)
best_val_accuracy = val_accuracy
best_at_step = global_step
start = time.time()
# Rename the checkpoint
best_model_prefix = checkpoint_prefix + '-' + str(best_at_step)
os.rename(best_model_prefix + '.index', os.path.join(checkpoint_dir, 'best_model.index'))
os.rename(best_model_prefix + '.meta', os.path.join(checkpoint_dir, 'best_model.meta'))
os.rename(best_model_prefix + '.data-00000-of-00001', os.path.join(checkpoint_dir, 'best_model.data-00000-of-00001'))
# Testing on test set
print("\nTraining complete, testing the best model on test set...\n")
saver.restore(sess, os.path.join(checkpoint_dir, 'best_model'))
feed_dict = {cnn.input_x: x_test, cnn.input_y: y_test, cnn.keep_prob: FLAGS.dropout_keep_prob, cnn.is_training: False}
y_logits, test_accuracy = sess.run([cnn.logits, cnn.accuracy], feed_dict=feed_dict)
print("Testing Accuracy: {:.3f}\n".format(test_accuracy))
label_transformer = joblib.load(os.path.join(output_dir, 'label_transformer.pkl'))
y_test_original = label_transformer.inverse_transform(y_test)
y_logits_original = label_transformer.inverse_transform(y_logits)
print("Precision, Recall and F1-Score:\n\n", classification_report(y_test_original, y_logits_original))
# Save parameters
print("Parameters saving...\n")
params = {}
for param, value in FLAGS.flag_values_dict().items():
params[param] = value
with open(os.path.join(output_dir, 'parameters.json'), 'w') as outfile:
json.dump(params, outfile, indent=4, sort_keys=True, ensure_ascii=False)
# Save word embedding
print("Word embedding saving...\n")
np.save(os.path.join(output_dir, 'embedding.npy'), sess.run(cnn.embedding))
if __name__ == '__main__':
train_cnn()
``` |
{
"source": "1721819634/WC",
"score": 3
} |
#### File: 1721819634/WC/test_wc.py
```python
import unittest
from wc import WC
class TestWC(unittest.TestCase):
def test_wc(self):
wc = WC('D:/PyC/Projects/WC/example/test.py', '-c')
self.assertEqual(wc.file_dict, 'D:/PyC/Projects/WC/example/test.py')
self.assertEqual(wc.order, '-c')
def test_chars_count(self):
wc = WC('D:/PyC/Projects/WC/example/test.py', '')
chars = wc.chars_count()
self.assertEqual(chars, 407)
def test_lines_count(self):
wc = WC('D:/PyC/Projects/WC/example/test.py', '')
lines = wc.lines_count()
self.assertEqual(lines[0], 11)
self.assertEqual(lines[1], 10)
self.assertEqual(lines[2], 1)
self.assertEqual(lines[3], 22)
def test_words_count(self):
wc = WC('D:/PyC/Projects/WC/example/test.py', '')
words = wc.words_count()
self.assertEqual(words, 75)
def test_recur_files(self):
test = ['1', 'test.py', 'test.txt', '1.txt', 'fish.py']
wc = WC('D:/PyC/Projects/WC/example', '')
result = wc.recur_files()
for i in range(len(test)):
self.assertEqual(result[i], test[i])
def test_main_function(self):
for order in ['-c', '-w', '-l', '-a', '-s', '-x', '-a', '-g', ' ']:
wc = WC('D:/PyC/Projects/WC/example/test.py', order)
wc.main_function()
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "17292/SomeStuff",
"score": 3
} |
#### File: 17292/SomeStuff/blog.py
```python
from flask import Flask,g, render_template, request, redirect, session, url_for, g, abort, flash
import sqlite3
import os.path
from flask.helpers import flash
from werkzeug.security import check_password_hash, generate_password_hash
# Global variable
class User:
def __init__(self,id,username,password):
self.id = id
self.username = username
self.password = password
def __repr__(self):
return f"<User: {self.username}>"
app = Flask(__name__)
app.secret_key = "secret"
# Checks sessions and places it in g object
@app.before_request
def before_request():
g.user = None
if "user_id" in session:
# Finds user's id
sql = "SELECT id, name, password FROM user WHERE id = ?"
cursor = get_db().cursor()
cursor.execute(sql, (session["user_id"], ))
user = cursor.fetchone()
if user:
g.user = User(*user)
# Makes a connection with database
DATABASE = "blog.db"
# Login system
@app.route("/login", methods=["GET", "POST"])
def login():
if request.method == "POST":
session.pop("user_id", None)
username = request.form["username"]
password = request.form["password"]
connection = get_db()
cursor = connection.cursor()
redirect_to = url_for("home")
# Allows the user to register
if request.form.get("register"):
sql = "SELECT name FROM user WHERE name = ?"
db = get_db()
cursor = db.cursor()
cursor.execute(sql, (username, ))
users = cursor.fetchone()
if users is not None:
flash("Username is taken")
return redirect(url_for("login"))
sql = "INSERT INTO user(name, password) VALUES (?, ?)"
cursor.execute(sql, (
username,
generate_password_hash(password)
)
)
connection.commit()
redirect_to = url_for("profile")
sql = "SELECT id, name, password FROM user WHERE name = ?"
cursor.execute(sql, (username, ))
user = cursor.fetchone()
if user and check_password_hash(user[2], password):
session["user_id"] = user[0]
print("helo")
return redirect(redirect_to)
flash("Failed to login")
return redirect(url_for("login"))
return render_template("login.html")
# Users that has logged in have a button that'll log them out
@app.route ("/logout")
def logout():
if "user_id" in session:
flash("logged out successfully")
session.pop ("user_id", None)
g.user = None
return redirect(url_for("login"))
# A page of the user's profile
@app.route("/profile")
def profile():
# Users can't access their profile unless they login first
if not g.user:
return redirect(url_for("login"))
cursor = get_db().cursor()
return render_template("profile.html",)
# Connects to the database
def get_db():
    db = getattr(g, "_database", None)
    if db is None:
        db = g._database = sqlite3.connect(DATABASE)
return db
@app.teardown_appcontext
def close_connection(exception):
    db = getattr(g, "_database", None)
if db is not None:
db.close()
# Route to home.html
@app.route("/")
def home():
return render_template("home.html")
# Gets articles from the database
@app.route("/article")
def article():
cursor = get_db().cursor()
sql = "SELECT * FROM article"
cursor.execute(sql)
results = cursor.fetchall()
return render_template("article.html", results=results)
# A route to the contant page
@app.route("/contact")
def contact():
return render_template("contact.html")
# Adds articles to article.html
@app.route("/add", methods= ["GET","POST"])
def add():
# User has to login to add articles
if not g.user:
return redirect(url_for("login"))
# Once logged in user can upload their post
# They can upload as many headings and articles as they want
if request.method == "POST":
cursor = get_db().cursor()
new_heading = request.form["article_heading"]
if len(new_heading) > 20:
return redirect("/add")
new_body = request.form["article_body"]
if len(new_body) > 500:
return redirect("/add")
sql = "INSERT INTO article(heading,body, user_id) VALUES (?,?,?)"
cursor.execute(sql, (new_heading, new_body, g.user.id))
get_db().commit()
return redirect('/')
# Deletes articles that have been posted
@app.route('/delete', methods= ["GET","POST"])
def delete():
# User has to login before able to delete
if not g.user:
return redirect(url_for("login"))
# Once logged in user can delete posts
# They can delete as many headings and articles as they want
if request.method == "POST":
cursor = get_db().cursor()
id = int(request.form["article_heading"])
sql = "DELETE FROM article WHERE id=?"
cursor.execute(sql,(id,))
get_db().commit()
return redirect('/')
if __name__ == "__main__":
app.run(debug=True)
``` |
{
"source": "1729-Dev/Advent2020",
"score": 3
} |
#### File: 1729-Dev/Advent2020/day_02.py
```python
import os
import itertools
import time
def filter_fctn(_):
return sum(list(_)) == 2020
dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, "day_02.data")
def min_max_password_check(x):
lower = int(x[0][0])
upper = int(x[0][1])
test_char = x[1]
password = x[2]
char_count = password.count(test_char)
return lower <= char_count and char_count <= upper
def positional_password_check(x):
left = int(x[0][0]) - 1
right = int(x[0][1]) - 1
test_char = x[1]
password = x[2]
a = password[left] == test_char
b = password[right] == test_char
return a ^ b
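# Example with the parsed tuple format used below, e.g. "1-3 a: abcde" -> (['1', '3'], 'a', 'abcde'):
#   min_max_password_check((['1', '3'], 'a', 'abcde'))     # True: 'a' occurs once, within 1..3
#   positional_password_check((['1', '3'], 'a', 'abcde'))  # True: position 1 is 'a', position 3 is not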
with open(filename) as f:
lines = map(lambda x: (x.strip().split()), f.readlines())
lines = map(lambda x: (x[0].split('-'), x[1][0], x[2]), lines)
good_passwords = filter(min_max_password_check, lines)
print(f"min max good: {len(list(good_passwords))}")
with open(filename) as f:
lines = map(lambda x: (x.strip().split()), f.readlines())
lines = map(lambda x: (x[0].split('-'), x[1][0], x[2]), lines)
good_passwords = filter(positional_password_check, lines)
print(f"positional good: {sum(1 for _ in good_passwords)}")
```
#### File: 1729-Dev/Advent2020/day_07_part_one.py
```python
import os
dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, "day_07.data")
with open(filename) as f:
print('reading file...')
lines = map(lambda x: (x.strip()), f.readlines())
bag_rules = list(map(lambda x: (tuple(x.split('contain'))), lines))
def contains(bag):
containing_bags = []
for rule in bag_rules:
if bag in rule[1]:
containing_bag = rule[0].replace("'","").strip()
containing_bags.append(containing_bag[:-5])
return containing_bags
containing_bags = list(dict.fromkeys(contains('shiny gold')))
while True:
all_new_bags = []
for bag in containing_bags:
new_bags = contains(bag)
for new_bag in new_bags:
if new_bag not in containing_bags:
all_new_bags.append(new_bag)
all_new_bags = list(dict.fromkeys(all_new_bags))
if len(all_new_bags) == 0:
break
for new_bag in all_new_bags:
containing_bags.append(new_bag)
print(len(containing_bags))
```
#### File: 1729-Dev/Advent2020/day_07_part_two.py
```python
import os
dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, "day_07.data")
with open(filename) as f:
lines = map(lambda x: (x.strip()), f.readlines())
bag_rules = list(map(lambda x: (tuple(x.split('contain'))), lines))
def holds_what_bags(bag):
# shiny gold ==> 2 dark red
held_bags = []
for bag_rule in bag_rules:
if bag in bag_rule[0]:
if 'no other bags.' in bag_rule[1]:
return []
rule_list = bag_rule[1].split(',')
for rule in rule_list:
held_bags.append(rule.replace("'","").replace(".","").replace("bags","").replace("bag","").strip())
return held_bags
def holds_bags(bag):
for bag_rule in bag_rules:
if bag in bag_rule[0] and 'no other bags' in bag_rule[1]:
return False
return True
def replace_bags(description):
# 2 dark red bags ==> [dark orange, dark orange]
replacement = []
containing_quantity = int(description[:1])
bags_this_holds = holds_what_bags(description[2:])
for holds in bags_this_holds:
for _ in range(containing_quantity):
bag_quantity = int(holds.strip()[:1])
for _ in range(bag_quantity):
replacement.append(holds.strip()[2:])
return replacement
top_bags = holds_what_bags('shiny gold')
total_bag_count = 0
while len(top_bags) > 0:
for bag in top_bags:
bag_colour = bag[2:]
bag_quantity = int(bag[:1])
if holds_bags(bag_colour):
new_bags = replace_bags(bag)
top_bags.remove(bag)
total_bag_count += bag_quantity
for new_bag in new_bags:
top_bags.append(f"1 {new_bag}")
continue
if not holds_bags(bag_colour):
top_bags.remove(bag)
bag_quantity = int(bag[:1])
total_bag_count += bag_quantity
continue
raise Exception('what???')
print(total_bag_count)
```
#### File: 1729-Dev/Advent2020/day_08_part_two.py
```python
import copy
import os
import unittest
dirname = os.path.dirname(__file__)
class Day08(unittest.TestCase):
def get_instructions(self, file):
filename = os.path.join(dirname, file)
with open(filename) as f:
lines = map(lambda x: (x.strip()), f.readlines())
instructions = list(map(lambda x: (tuple(x.split(' '))), lines))
return instructions
def parse_amount(self, acc):
if type(acc) is not str:
raise ValueError
if len(acc) < 2:
raise ValueError
if not acc[0] in '+-':
raise ValueError
amount = int(acc[1:])
if acc[0] == '-':
amount = amount * -1
return amount
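# Quick sanity examples for parse_amount (illustrative, not part of the original tests):
# parse_amount('+3') -> 3, parse_amount('-99') -> -99; inputs that are not strings,
# are shorter than two characters, or do not start with '+'/'-' raise ValueError.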
def follow_instructions(self, instructions):
index = 0
accumulator = 0
while True:
if index == len(instructions):
return accumulator
if index > len(instructions):
raise ValueError('did not expect this')
if instructions[index] is None:
return None
inst = instructions[index][0]
amount = self.parse_amount(instructions[index][1])
instructions[index] = None
if inst == 'jmp':
index += amount
continue
if inst == 'acc':
accumulator += amount
index += 1
continue
if inst == 'nop':
index += 1
continue
raise ValueError('what???')
def deep_copy_instructions(self, instructions):
new_instructions = []
for instruction in instructions:
new_instructions.append(instruction)
return new_instructions
def generate_permutations(self, instructions):
nop_spots = [i for i, n in enumerate(instructions) if n[0] == 'nop']
nop_permutations = []
for _ in range(len(nop_spots)):
nop_permutations.append(self.deep_copy_instructions(instructions))
for index in range(len(nop_spots)):
nop_permutations[index][nop_spots[index]] = ('jmp', nop_permutations[index][nop_spots[index]][1])
jmp_spots = [i for i, n in enumerate(instructions) if n[0] == 'jmp']
jmp_permutations = []
for _ in range(len(jmp_spots)):
jmp_permutations.append(self.deep_copy_instructions(instructions))
for index in range(len(jmp_spots)):
jmp_permutations[index][jmp_spots[index]] = ('nop', jmp_permutations[index][jmp_spots[index]][1])
return nop_permutations + jmp_permutations
def test_generate_permutations(self):
instructions = self.get_instructions('day_08_sample.data')
permutations = self.generate_permutations(instructions)
self.assertEqual(4, len(permutations))
for i in range(0, 4):
expected = self.get_instructions(f"day_08_sample_expected_{i}.data")
self.assertEqual(expected, permutations[i])
def test_sample_data(self):
instructions = self.get_instructions('day_08_sample.data')
self.assertEqual(9, len(instructions))
permutations = self.generate_permutations(instructions)
self.assertEqual(4, len(permutations))
for index in range(0, 3):
self.assertEqual(None, self.follow_instructions(permutations[index]))
self.assertEqual(8, self.follow_instructions(permutations[3]))
def test_parse_amount(self):
with self.assertRaises(ValueError):
self.parse_amount('')
with self.assertRaises(ValueError):
self.parse_amount('-')
with self.assertRaises(ValueError):
self.parse_amount('1')
with self.assertRaises(ValueError):
self.parse_amount('+foo')
self.assertEqual(1, self.parse_amount('+1'))
self.assertEqual(-420, self.parse_amount('-420'))
def test_actual(self):
instructions = self.get_instructions('day_08.data')
permutations = self.generate_permutations(instructions)
print(f"actual perms: {len(permutations)}")
for perm in permutations:
answer = self.follow_instructions(perm)
if answer: print(f"Answer: {answer}")
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "1729org/Hadron",
"score": 3
} |
#### File: Backend/backend_tests/views_tests.py
```python
import unittest
from pymongo import MongoClient
MONGO_SETTINGS = {
"local": {"host": "localhost", "port": 27017}
}
class MongoRouter(object):
def __init__(self,
connect_to="local"):
# ToDo: At some point in the future, make a dedicated mongo machine and make router differentiate
# between prod and local machines
self.connect_to = connect_to
self.client = MongoClient(MONGO_SETTINGS.get(connect_to, ("localhost", 27017)))
if connect_to == "local":
self.client = MongoClient()
else:
raise NotImplementedError("Not yet implemented other environments.")
def route(self, desired_collection):
if desired_collection == "users":
return self.client["users_db"]["users_db"]
elif desired_collection == "test":
return self.client["test_db"]["test_db"]
router = MongoRouter()
def get_user_board(email):
last_visited = router.route("test").find_one({"email": email}, {"lastVisitedId": 1})["lastVisitedId"],
last_visited = last_visited[0]
results = router.route("test").find_one(
{"email": email, "boards": {"$elemMatch": {"name": last_visited}}},
{"boards": 1}
)["boards"]
for result in results:
if result["name"] == last_visited:
return result
class TestViews(unittest.TestCase):
def setUp(self):
test_router = MongoRouter()
self.test_collection = test_router.route("test")
self.test_collection.drop()
self.test_collection.insert_one({
"email": "<EMAIL>",
"lastVisitedId": "correct_name",
"boards": [
{"test_value": 0, "name": "bad_name_1"},
{"test_value": 1, "name": "correct_name"},
{"test_value": 2, "name": "bad_name_2"},
]
})
# Another
self.test_collection.insert_one({
"email": "test_@@email.<EMAIL>",
"lastVisitedId": "", # This did not visit anything
"boards": [
{"test_value": 3, "name": "2_bad_name_1"},
{"test_value": 4, "name": "2_bad_name_2"},
{"test_value": 5, "name": "2_bad_name_3"},
]
})
def tearDown(self):
self.test_collection.drop()
def test_get_user_board(self):
self.assertEquals(1, get_user_board("<EMAIL>")["test_value"])
```
#### File: Hadron/SimpleCIServer/simple_ci_server.py
```python
import web
import json
import hashlib
import os
import subprocess
ENV = os.environ
if not ENV.get("SECRET_TOKEN", None):
import copy
ENV = copy.deepcopy(ENV)
ENV["SECRET_TOKEN"] = ""
urls = (
'/', 'HandleGitWebHook'
)
def check_signature(request_headers):
raw_header = request_headers.get("environ", {})
hashed_token = raw_header.get("HTTP_X_HUB_SIGNATURE", False)
if not hashed_token:
print "No X-Hub-Signature header in %s" % request_headers
return False
print "Received Hashed token: %s" % hashed_token
print "Our hashed token: %s" % (
"sha1=" + hashlib.sha1(ENV["SECRET_TOKEN"]).hexdigest()
)
if hashed_token == "<PASSWORD>=" + hashlib.sha1(ENV["SECRET_TOKEN"]).hexdigest():
return True
print "Returning True because GitHub is retarded."
return True
class HandleGitWebHook(object):
def POST(self):
request_headers = web.ctx
request_json = json.loads(web.data())
if not check_signature(request_headers):
print "Bad token."
return json.dumps({'correct_token': False})
print "Good token."
subprocess.call("git pull origin master", shell=True)
subprocess.call(
"cp -R /home/ubuntu/Hadron/HadronClient/dist/* /home/ubuntu/Hadron/Backend/backend/backend/frontend/built",
shell=True
)
subprocess.call(
"/home/ubuntu/Hadron/Backend/backend_env/bin/python " +
"/home/ubuntu/Hadron/Backend/backend/manage.py " +
"collectstatic --noinput",
shell=True
)
return json.dumps({'correct_token': True})
if __name__ == "__main__":
app = web.application(urls, globals())
app.run()
``` |
{
"source": "17320692835RGF/buptoj",
"score": 2
} |
#### File: Backend/user/models.py
```python
from django.db import models
class User(models.Model):
username = models.CharField(max_length=50, null=False, primary_key=True)
password = models.CharField(max_length=50, null=False)
name = models.CharField(max_length=50, null=False)  # display name
regtime = models.DateTimeField(auto_now=True)
logintime = models.DateTimeField(auto_now=True)
school = models.CharField(max_length=50, null=False, default="")
course = models.CharField(max_length=50, null=False, default="")
classes = models.CharField(max_length=50, null=False, default="") #行政班
number = models.CharField(max_length=50, null=False, default="")
realname = models.CharField(max_length=50, null=False)
qq = models.CharField(max_length=50, null=True, default="")
email = models.CharField(max_length=50, null=True, default="")
type = models.IntegerField(null=False, default=1)  # 1 regular user, 2 admin, 3 superadmin
objects = models.Manager()
def __str__(self):
return self.username
class UserData(models.Model):
username = models.CharField(max_length=50, null=False, primary_key=True)
ac = models.IntegerField(null=False, default=0)
submit = models.IntegerField(null=False, default=0)
score = models.IntegerField(default=0)
des = models.CharField(max_length=50, null=True)
rating = models.IntegerField(default=1500)
acpro = models.TextField(null=True, default="")
objects = models.Manager()
def __str__(self):
return self.username
class UserLoginData(models.Model):
username = models.CharField(max_length=50, null=False)
ip = models.CharField(max_length=50, null=True,default="unknow")
logintime = models.DateTimeField(auto_now=True)
msg = models.TextField(null=True)
objects = models.Manager()
def __str__(self):
return self.username
```
#### File: Backend/user/permission.py
```python
from rest_framework import permissions
from .models import User
from board.models import SettingBoard
def getVisitorPermission(request):
setting = SettingBoard.objects.filter(id=1)
if len(setting) != 0:
if setting[0].openvisitor is False:
userid = request.session.get('user_id', None)
if userid:
return True
else:
return False
else:
return True
else:
return True
class ManagerOnly(permissions.BasePermission):
def has_permission(self, request, view):
if getVisitorPermission(request) == False:
return False
if request.method in permissions.SAFE_METHODS or request.method=="POST":
return True
type = request.session.get('type', 1)
if type == 2 or type == 3:
return True
else:
return False
class UserSafePostOnly(permissions.BasePermission):
def has_permission(self, request, view):
if getVisitorPermission(request) == False:
return False
if request.method in permissions.SAFE_METHODS:
return True
if request.session.get('type', 1) == 3:
return True
if request.method == "POST":
rating = request.data.get('rating', -1)
acc = request.data.get('ac', -1)
sub = request.data.get('submit', -1)
sco = request.data.get('score', -1)
type = request.data.get('type', -1)
if type != -1:
return False
if rating != "" or acc != "" or sub != "" or sco != "":
if rating == -1:
return True
return False
else:
return True
data = request.data
username = data.get('username')
rating = data.get('rating', -1)
score = data.get('score', -1)
ac = data.get('ac', -1)
submit = data.get('submit', -1)
if rating != -1 or score != -1 or ac != -1 or submit != -1:
return False
userid = request.session.get('user_id', None)
if userid == username or request.session.get('type', 1) == 3:
return True
else:
return False
class UserPUTOnly(permissions.BasePermission):
def has_permission(self, request, view):
if getVisitorPermission(request) == False:
return False
if request.method != "PUT":
return False
data = request.data
username = data.get('username')
userid = request.session.get('user_id', None)
if userid == username or request.session.get('type', 1) == 3:
return True
else:
return False
def has_object_permission(self, request, view, blog):
if getVisitorPermission(request) == False:
return False
if request.method != "PUT":
return False
data = request.data
username = data.get('username')
userid = request.session.get('user_id', None)
if userid == username or request.session.get('type', 1) == 3:
return True
else:
return False
class AuthPUTOnly(permissions.BasePermission):
def has_permission(self, request, view):
if getVisitorPermission(request) == False:
return False
if request.method != "PATCH" and request.method != "PUT":
return False
if request.session.get('type', 1) == 3:
return True
else:
return False
def has_object_permission(self, request, view, blog):
if getVisitorPermission(request) == False:
return False
if request.method != "PATCH" and request.method != "PUT":
return False
if request.session.get('type', 1) == 3:
return True
else:
return False
```
#### File: buptoj/CrawlingServer/Codeforces.py
```python
import urllib.request
import json
def get_CF_data(name):
api_url = "http://codeforces.com/api/user.status?handle="+name
try:
response = urllib.request.urlopen(api_url,timeout=2000)
response_data=response.read()
response_data = json.loads(response_data)
# print(response_data)
acpro = set()
attpro = set()
for data in response_data["result"]:
if data["verdict"]=="OK":
acpro.add(str(data["problem"]["contestId"])+str(data["problem"]["index"]))
attpro.add(str(data["problem"]["contestId"])+str(data["problem"]["index"]))
return [len(acpro),len(attpro)]
except:
return [-1,-1]
if __name__ == "__main__":
while(True):
name = input("请输入要爬的ID:")
print(get_CF_data(name))
```
#### File: 4/adult/NB.py
```python
from numpy import median
import numpy as np
import sys
import importlib
importlib.reload(sys)
characters = ["age", "type_employer", "fnlwgt", "education", "education_num", "marital", "occupation", "relationship",
"race", "sex", "capital_gain", "capital_loss", "hr_peer_week", "country", "income"]
simplified_map = {"Never-worked": "not-working", "Without-pay": "not-working",
# field 2: employer type
"Local-gov": "other-govt", "State-gov": "other-govt",
# field 2: employer type
"Self-emp-inc": "self-employed", "Self-emp-not-inc": "self-employed",
# field 7: occupation
"Craft-repair": "blue-collar", "Farming-fishing": "blue-collar", "Handlers-cleaners": "blue-collar",
"Machine-op-inspct": "blue-collar", "Transport-moving": "blue-collar",
# field 7: occupation
"Other-service": "service", "Priv-house-serv": "service",
# field 14: native country
"Cambodia": "SE-Asia", "Laos": "SE-Asia", "Philippines": "SE-Asia", "Thailand": "SE-Asia",
"Vietnam": "SE-Asia",
# field 14: native country
"Canada": "British-Commonwealth", "England": "British-Commonwealth", "India": "British-Commonwealth",
"Ireland": "British-Commonwealth", "Scotland": "British-Commonwealth",
# field 14: native country
"China": "China", "Hong": "China", "Taiwan": "China",
# field 14: native country
"Columbia": "South-America", "Ecuador": "South-America", "El-Salvador": "South-America",
"Peru": "South-America",
# field 14: native country
"Cuba": "other", "Iran": "other", "Japan": "other",
# field 14: native country
"Dominican-Republic": "Latin-America", "Guatemala": "Latin-America", "Haiti": "Latin-America",
"Honduras": "Latin-America", "Jamaica": "Latin-America", "Mexico": "Latin-America",
"Nicaragua": "Latin-America", "Outlying-US(Guam-USVI-etc)": "Latin-America",
"Puerto-Rico": "Latin-America", "Trinadad&Tobago": "Latin-America",
# field 14: native country
"France": "Euro_1", "Germany": "Euro_1", "Holand-Netherlands": "Euro_1", "Italy": "Euro_1",
# field 14: native country
"Greece": "Euro_2", "Hungary": "Euro_2", "Poland": "Euro_2", "Portugal": "Euro_2",
"Yugoslavia": "Euro_2",
# field 4: education
"10th": "dropout", "11th": "dropout", "12th": "dropout", "1st-4th": "dropout", "5th-6th": "dropout",
"7th-8th": "dropout", "9th": "dropout", "Preschool": "dropout",
# field 4: education
"Assoc-acdm": "Assoc", "Assoc-voc": "Assoc",
# field 6: marital status
"Married-AF-spouse": "Married", "Married-civ-spouse": "Married",
# field 6: marital status
"Married-spouse-absent": "not-married", "Separated": "not-married", "Divorced": "not-married"}
# field 13: hours worked per week
hour_map = {"1": "10s", "2": "10s", "3": "10s", "4": "10s", "5": "10s", "6": "10s", "7": "10s", "8": "10s", "9": "10s",
"10": "10s", # [1-10]映射成10小时
# [11-20] mapped to 20 hours
"11": "20s", "12": "20s", "13": "20s", "14": "20s", "15": "20s", "16": "20s", "17": "20s", "18": "20s",
"19": "20s", "20": "20s",
# [21-30] mapped to 30 hours
"21": "30s", "22": "30s", "23": "30s", "24": "30s", "25": "30s", "26": "30s", "27": "30s", "28": "30s",
"29": "30s", "30": "30s",
# [31-40] mapped to 40 hours
"31": "40s", "32": "40s", "33": "40s", "34": "40s", "35": "40s", "36": "40s", "37": "40s", "38": "40s",
"39": "40s", "40": "40s",
# [41-50] mapped to 50 hours
"41": "50s", "42": "50s", "43": "50s", "44": "50s", "45": "50s", "46": "50s", "47": "50s", "48": "50s",
"49": "50s", "50": "50s",
# [51-60] mapped to 60 hours
"51": "60s", "52": "60s", "53": "60s", "54": "60s", "55": "60s", "56": "60s", "57": "60s", "58": "60s",
"59": "60s", "60": "60s",
# [61-70] mapped to 70 hours
"61": "70s", "62": "70s", "63": "70s", "64": "70s", "65": "70s", "66": "70s", "67": "70s", "68": "70s",
"69": "70s", "70": "70s",
# [71-80] mapped to 80 hours
"71": "80s", "72": "80s", "73": "80s", "74": "80s", "75": "80s", "76": "80s", "77": "80s", "78": "80s",
"79": "80s", "80": "80s",
# [81-90] mapped to 90 hours
"81": "90s", "82": "90s", "83": "90s", "84": "90s", "85": "90s", "86": "90s", "87": "90s", "88": "90s",
"89": "90s", "90": "90s",
# [91-100] mapped to 100 hours
"91": "100s", "92": "100s", "93": "100s", "94": "100s", "95": "100s", "96": "100s", "97": "100s",
"98": "100s", "99": "100s", "100": "100s"}
# field 1: age
age_map = {"1": "5s", "2": "5s", "3": "5s", "4": "5s", "5": "5s",
"6": "10s", "7": "10s", "8": "10s", "9": "10s", "10": "10s",
"11": "15s", "12": "15s", "13": "15s", "14": "15s", "15": "15s",
"16": "20s", "17": "20s", "18": "20s", "19": "20s", "20": "20s",
"21": "25s", "22": "25s", "23": "25s", "24": "25s", "25": "25s",
"26": "30s", "27": "30s", "28": "30s", "29": "30s", "30": "30s",
"31": "35s", "32": "35s", "33": "35s", "34": "35s", "35": "35s",
"36": "40s", "37": "40s", "38": "40s", "39": "40s", "40": "40s",
"41": "45s", "42": "45s", "43": "45s", "44": "45s", "45": "45s",
"46": "50s", "47": "50s", "48": "50s", "49": "50s", "50": "50s",
"51": "55s", "52": "55s", "53": "55s", "54": "55s", "55": "55s",
"56": "60s", "57": "60s", "58": "60s", "59": "60s", "60": "60s",
"61": "65s", "62": "65s", "63": "65s", "64": "65s", "65": "65s",
"66": "70s", "67": "70s", "68": "70s", "69": "70s", "70": "70s",
"71": "75s", "72": "75s", "73": "75s", "74": "75s", "75": "75s",
"76": "80s", "77": "80s", "78": "80s", "79": "80s", "80": "80s",
"81": "85s", "82": "85s", "83": "85s", "84": "85s", "85": "85s",
"86": "90s", "87": "90s", "88": "90s", "89": "90s", "90": "90s",
"91": "95s", "92": "95s", "93": "95s", "94": "95s", "95": "95s",
"96": "100s", "97": "100s", "98": "100s", "99": "100s", "100": "100s"}
class DataSet(object):
def __init__(self):
self.data = []
self.loss_mid = 0 # median capital loss
self.gain_mid = 0 # median capital gain
self.len_data = 0
# read the training data and split it into two classes by whether income exceeds 50K
def classfy_traindata():
dataset_low = DataSet() # income <= 50K
dataset_high = DataSet() # income > 50K
# split the records into the two income classes and compute the medians of the continuous values
gain = []
loss = []
with open("adult.data", "r") as f:
line = f.readline()
while line:
line = line.replace("\n", "")
if line:
line = line.split(", ")
if line[len(line) - 1] == ">50K":
dataset_high.data.append(line)
if int(line[10]) != 0:
gain.append(int(line[10]))
if int(line[11]) != 0:
loss.append(int(line[11]))
else:
dataset_low.data.append(line)
if int(line[10]) != 0:
gain.append(int(line[10]))
if int(line[11]) != 0:
loss.append(int(line[11]))
line = f.readline()
# medians of the non-zero capital gain/loss values
dataset_high.gain_mid = median(np.array(gain))
dataset_high.loss_mid = median(np.array(loss))
dataset_low.gain_mid = median(np.array(gain))
dataset_low.loss_mid = median(np.array(loss))
# number of samples above and below 50K
dataset_high.len_data = len(dataset_high.data)
dataset_low.len_data = len(dataset_low.data)
return dataset_low, dataset_high
# count the number of samples for every value of every feature
def statistics(data):
classfiled_data = {} # per-feature, per-value sample counts
for character in characters:
classfiled_data[character] = {} # initialise each feature with an empty counter
for line in data.data:
if len(line) < 10:
continue
for character in characters:
if line[characters.index(character)] in classfiled_data[character]:
classfiled_data[character][line[characters.index(character)]] += 1
else:
classfiled_data[character][line[characters.index(character)]] = 1
return classfiled_data
# merge several similar feature values into a single one
def tiny(a_list, character, new_name, data):
if new_name not in data[character]:
data[character][new_name] = 0
for key in list(data[character]):
if key in a_list and key != new_name:
data[character][new_name] += data[character][key]
del data[character][key]
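# Example of tiny() (illustrative counts): if data['type_employer'] is
# {'Never-worked': 2, 'Without-pay': 3, 'Private': 10}, then
# tiny(['Never-worked', 'Without-pay'], 'type_employer', 'not-working', data)
# leaves {'Private': 10, 'not-working': 5} -- the similar values are summed under
# the new name and the originals are removed.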
# discretise capital gain and loss into three buckets: none, low and high
def income_classfy(category, mid_value, data):
data[category]["low"] = 0
data[category]["high"] = 0
data[category]["none"] = 0
for key in list(data[category]):
if key in ["none", "high", "low"]:
continue
if int(key) <= 0:
data[category]["none"] += data[category][key]
elif int(key) < mid_value:
data[category]["low"] += data[category][key]
else:
data[category]["high"] += data[category][key]
del data[category][key]
def tiny_hour(data):
# discretise hours worked per week
for x in range(10):
a_set = []
for y in range(10 * (x + 1)):
a_set.append(str(y + 1))
tiny(a_set, "hr_peer_week", str(10 * (x + 1)) + "s", data)
def tiny_age(data):
# discretise age
for x in range(20):
a_set = []
for y in range(5 * (x + 1)):
a_set.append(str(y + 1))
tiny(a_set, "age", str(5 * (x + 1)) + "s", data)
def test(line, dataset_low, dataset_high, classfiled_data_low, classfiled_data_high):
p_low = dataset_low.len_data / (dataset_low.len_data + dataset_high.len_data)
p_high = dataset_high.len_data / (dataset_high.len_data + dataset_low.len_data)
gain = int(line[-5])
loss = int(line[-4])
for character in characters[: -1]:
i = characters.index(character)
if character in ["fnlwgt", "education_num"]:
continue
if line[i] in simplified_map:
line[i] = simplified_map[line[i]]
# age
if i == 0:
line[i] = age_map[line[i]]
# hours per week
if i == 12:
line[i] = hour_map[line[i]]
# capital gain
if character == "capital_gain":
line[i] = get_level(gain, dataset_low.gain_mid)
# capital loss
if character == "capital_loss":
line[i] = get_level(loss, dataset_low.loss_mid)
p_low *= classfiled_data_low[character][line[i]] / dataset_low.len_data # likelihood term for the <=50K class
if character == 'capital_gain':
line[i] = get_level(gain, dataset_high.gain_mid)
if character == 'capital_loss':
line[i] = get_level(loss, dataset_high.loss_mid)
p_high *= classfiled_data_high[character][line[i]] / dataset_high.len_data # likelihood term for the >50K class
if p_low > p_high:
return "<=50k"
else:
return ">50k"
def get_level(value, mid_value):
if value <= 0:
return "none"
elif value < mid_value:
return "low"
else:
return "high"
if __name__ == '__main__':
dataset_low, dataset_high = classfy_traindata()
print("训练数据的总数:\n >50k\t%d\n<=50k\t%d" % (len(dataset_high.data), len(dataset_low.data)))
# process the <=50K class
classfiled_data_low = statistics(dataset_low)
# merge employer types
tiny(['Never-worked', 'Without-pay'], 'type_employer', 'not-working', classfiled_data_low)
tiny(['Local-gov', 'State-gov'], 'type_employer', 'other-govt', classfiled_data_low)
tiny(['Self-emp-inc', 'Self-emp-not-inc'], 'type_employer', 'self-employed', classfiled_data_low)
# merge occupations
tiny(["Craft-repair", "Farming-fishing", "Handlers-cleaners", "Machine-op-inspct", "Transport-moving"],
"occupation", 'blue-collar', classfiled_data_low)
tiny(['Other-service', 'Priv-house-serv'], 'occupation', 'service', classfiled_data_low)
# merge countries
tiny(["Cambodia", "Laos", "Philippines", "Thailand", "Vietnam"], 'country', 'SE-Asia', classfiled_data_low)
tiny(["Canada", "England", "India", "Ireland", "Scotland", ], 'country', 'British-Commonwealth',
classfiled_data_low)
tiny(['China', 'Hong', 'Taiwan'], 'country', 'China', classfiled_data_low)
tiny(["Columbia", "Ecuador", "El-Salvador", "Peru"], 'country', 'South-America', classfiled_data_low)
tiny(["Cuba", "Iran", "Japan"], 'country', 'other', classfiled_data_low)
tiny(["Dominican-Republic", "Guatemala", "Haiti", "Honduras", "Jamaica", "Mexico", "Nicaragua",
"Outlying-US(Guam-USVI-etc)", "Puerto-Rico", "Trinadad&Tobago", ], 'country', 'Latin-America',
classfiled_data_low)
tiny(["France", "Germany", "Holand-Netherlands", "Italy", ], 'country', 'Euro_1', classfiled_data_low)
tiny(["Greece", "Hungary", "Poland", "Portugal", "Yugoslavia", ], 'country', 'Euro_2', classfiled_data_low)
# merge education levels
tiny(["10th", "11th", "12th", "1st-4th", "5th-6th", "7th-8th", "9th", "Preschool", ], 'education', 'dropout',
classfiled_data_low)
tiny(['Assoc-acdm', 'Assoc-voc'], 'education', 'Assoc', classfiled_data_low)
# merge marital statuses
tiny(["Married-AF-spouse", "Married-civ-spouse"], 'marital', "Married", classfiled_data_low)
tiny(["Married-spouse-absent", "Separated", "Divorced"], 'marital', 'not-married', classfiled_data_low)
del classfiled_data_low["education_num"] # 删除多余属性
del classfiled_data_low["fnlwgt"]
income_classfy('capital_gain', dataset_low.gain_mid, classfiled_data_low)
income_classfy('capital_loss', dataset_low.loss_mid, classfiled_data_low)
tiny_hour(classfiled_data_low)
tiny_age(classfiled_data_low)
for key in classfiled_data_low:
print(key)
print(classfiled_data_low[key])
# process the >50K class
classfiled_data_high = statistics(dataset_high)
# merge employer types
tiny(['Never-worked', 'Without-pay'], 'type_employer', 'not-working', classfiled_data_high)
tiny(['Local-gov', 'State-gov'], 'type_employer', 'other-govt', classfiled_data_high)
tiny(['Self-emp-inc', 'Self-emp-not-inc'], 'type_employer', 'self-employed', classfiled_data_high)
# merge occupations
tiny(["Craft-repair", "Farming-fishing", "Handlers-cleaners", "Machine-op-inspct", "Transport-moving"],
"occupation", 'blue-collar', classfiled_data_high)
tiny(['Other-service', 'Priv-house-serv'], 'occupation', 'service', classfiled_data_high)
# merge countries
tiny(["Cambodia", "Laos", "Philippines", "Thailand", "Vietnam"], 'country', 'SE-Asia', classfiled_data_high)
tiny(["Canada", "England", "India", "Ireland", "Scotland", ], 'country', 'British-Commonwealth',
classfiled_data_high)
tiny(['China', 'Hong', 'Taiwan'], 'country', 'China', classfiled_data_high)
tiny(["Columbia", "Ecuador", "El-Salvador", "Peru"], 'country', 'South-America', classfiled_data_high)
tiny(["Cuba", "Iran", "Japan"], 'country', 'other', classfiled_data_high)
tiny(["Dominican-Republic", "Guatemala", "Haiti", "Honduras", "Jamaica", "Mexico", "Nicaragua",
"Outlying-US(Guam-USVI-etc)", "Puerto-Rico", "Trinadad&Tobago", ], 'country', 'Latin-America',
classfiled_data_high)
tiny(["France", "Germany", "Holand-Netherlands", "Italy", ], 'country', 'Euro_1', classfiled_data_high)
tiny(["Greece", "Hungary", "Poland", "Portugal", "Yugoslavia", ], 'country', 'Euro_2', classfiled_data_high)
# merge education levels
tiny(["10th", "11th", "12th", "1st-4th", "5th-6th", "7th-8th", "9th", "Preschool", ], 'education', 'dropout',
classfiled_data_high)
tiny(['Assoc-acdm', 'Assoc-voc'], 'education', 'Assoc', classfiled_data_high)
# merge marital statuses
tiny(["Married-AF-spouse", "Married-civ-spouse"], 'marital', "Married", classfiled_data_high)
tiny(["Married-spouse-absent", "Separated", "Divorced"], 'marital', 'not-married', classfiled_data_high)
del classfiled_data_high["education_num"] # 删除多余属性
del classfiled_data_high["fnlwgt"]
income_classfy('capital_gain', dataset_high.gain_mid, classfiled_data_high)
income_classfy('capital_loss', dataset_high.loss_mid, classfiled_data_high)
tiny_hour(classfiled_data_high)
tiny_age(classfiled_data_high)
for key in classfiled_data_high:
print(key)
print(classfiled_data_high[key])
with open('adult.test', 'r') as f:
line = f.readline()
right = 0
wrong = 0
while line:
if len(line) < 25:
line = f.readline()
continue
line = line.replace("\n", "")
# line = line[: -1] # strip the trailing '.' from each test record
line = line.split(", ")
ans = test(line, dataset_low, dataset_high, classfiled_data_low, classfiled_data_high).upper()
# print(ans)
# print(line[-1])
if line[-1] == ans:
right += 1
else:
wrong += 1
line = f.readline()
print("模型的判断正确的次数:\t%d\n错误的次数\t%d\n正确率:\t%f" % (right, wrong, (right / (right + wrong))))
``` |
{
"source": "173TECH/sayn",
"score": 2
} |
#### File: sayn/sayn/cli.py
```python
from pathlib import Path
from datetime import date, timedelta
import sys
import click
from .utils.python_loader import PythonLoader
from .utils.task_query import get_query
from .utils.graphviz import plot_dag
from .logging import ConsoleLogger, FancyLogger, FileLogger
from .scaffolding.init_project import sayn_init
from .core.app import App
from .core.config import (
cleanup_compilation,
read_project,
read_groups,
read_settings,
get_tasks_dict,
)
from .core.errors import Err, Result
from .tasks import TaskStatus
yesterday = date.today() - timedelta(days=1)
class CliApp(App):
def __init__(
self,
command,
debug=False,
include=list(),
exclude=list(),
profile=None,
full_load=False,
start_dt=yesterday,
end_dt=yesterday,
):
# STARTING APP: register loggers and set cli arguments in the App object
self.run_arguments["command"] = command
loggers = [
ConsoleLogger(True) if debug else FancyLogger(),
FileLogger(
self.run_arguments["folders"]["logs"],
format=f"{self.run_id}|" + "%(asctime)s|%(levelname)s|%(message)s",
),
]
self.start_app(
loggers,
debug=debug,
full_load=full_load,
start_dt=start_dt,
end_dt=end_dt,
profile=profile,
)
cleanup_compilation(self.run_arguments["folders"]["compile"])
# SETUP THE APP: read project config and settings, interpret cli arguments and setup the dag
self.tracker.start_stage("setup")
# Read the project configuration
project = self.check_abort(read_project())
groups = self.check_abort(read_groups(project.groups))
self.set_project(project)
settings = self.check_abort(read_settings())
self.check_abort(self.set_settings(settings))
# Set python environment
self.python_loader = PythonLoader()
if Path(self.run_arguments["folders"]["python"]).is_dir():
self.check_abort(
self.python_loader.register_module(
"python_tasks", self.run_arguments["folders"]["python"]
)
)
# Set tasks and dag from it
tasks_dict = self.check_abort(get_tasks_dict(project.presets, groups))
task_query = self.check_abort(
get_query(tasks_dict, include=include, exclude=exclude)
)
self.check_abort(self.set_tasks(tasks_dict, task_query))
self.tracker.finish_current_stage(
tasks={k: v.status for k, v in self.tasks.items()}
)
def check_abort(self, result):
"""Interpret the result of setup opreations returning the value if `result.is_ok`.
Setup errors from the cli result in execution abort.
Args:
result (sayn.errors.Result): The result of a setup operation
"""
if result is None or not isinstance(result, Result):
self.finish_app(error=Err("app_setup", "unhandled_error", result=result))
sys.exit(-1)
elif result.is_err:
self.finish_app(result)
sys.exit(-1)
else:
return result.value
class ChainOption(click.Option):
def __init__(self, *args, **kwargs):
self.save_other_options = kwargs.pop("save_other_options", True)
nargs = kwargs.pop("nargs", -1)
assert nargs == -1, "nargs, if set, must be -1 not {}".format(nargs)
super(ChainOption, self).__init__(*args, **kwargs)
self._previous_parser_process = None
self._eat_all_parser = None
def add_to_parser(self, parser, ctx):
def parser_process(value, state):
# method to hook to the parser.process
done = False
if self.save_other_options:
# grab everything up to the next option
while state.rargs and not done:
for prefix in self._eat_all_parser.prefixes:
if state.rargs[0].startswith(prefix):
done = True
if not done:
value += f" {state.rargs.pop(0)}"
else:
# grab everything remaining
value += state.rargs
state.rargs[:] = []
# call the actual process
self._previous_parser_process(value, state)
retval = super(ChainOption, self).add_to_parser(parser, ctx)
for name in self.opts:
our_parser = parser._long_opt.get(name) or parser._short_opt.get(name)
if our_parser:
self._eat_all_parser = our_parser
self._previous_parser_process = our_parser.process
our_parser.process = parser_process
break
return retval
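# Illustrative behaviour (assumed from the parser hook above): ChainOption lets a
# single -t/-x flag swallow the space-separated values that follow it, so
# `sayn run -t task1 group:core -x task2` yields tasks=("task1 group:core",) and
# exclude=("task2",); the run/compile commands then split each string on spaces.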
# Click arguments
click_debug = click.option(
"--debug", "-d", is_flag=True, default=False, help="Include debug messages"
)
def click_filter(func):
func = click.option(
"--tasks",
"-t",
multiple=True,
cls=ChainOption,
help="Task query to INCLUDE in the execution: [+]task_name[+], group:group_name, tag:tag_name",
default=list(),
)(func)
func = click.option(
"--exclude",
"-x",
multiple=True,
cls=ChainOption,
help="Task query to EXCLUDE in the execution: [+]task_name[+], group:group_name, tag:tag_name",
default=list(),
)(func)
return func
def click_incremental(func):
func = click.option(
"--full-load", "-f", is_flag=True, default=False, help="Do a full load"
)(func)
func = click.option(
"--start-dt",
"-s",
type=click.DateTime(formats=["%Y-%m-%d"]),
default=str(yesterday),
help="For incremental loads, the start date",
)(func)
func = click.option(
"--end-dt",
"-e",
type=click.DateTime(formats=["%Y-%m-%d"]),
default=str(yesterday),
help="For incremental loads, the end date",
)(func)
return func
def click_run_options(func):
func = click_debug(func)
func = click.option("--profile", "-p", help="Profile from settings to use")(func)
func = click_incremental(func)
func = click_filter(func)
return func
@click.group(help="SAYN management tool.")
def cli():
pass
# Click commands
@cli.command(help="Initialise a SAYN project in working directory.")
@click.argument("sayn_project_name")
def init(sayn_project_name):
sayn_init(sayn_project_name)
@cli.command(help="Compile sql tasks.")
@click_run_options
def compile(debug, tasks, exclude, profile, full_load, start_dt, end_dt):
tasks = [i for t in tasks for i in t.strip().split(" ")]
exclude = [i for t in exclude for i in t.strip().split(" ")]
app = CliApp("compile", debug, tasks, exclude, profile, full_load, start_dt, end_dt)
app.compile()
if any([t.status == TaskStatus.FAILED for _, t in app.tasks.items()]):
sys.exit(-1)
else:
sys.exit()
@cli.command(help="Run SAYN tasks.")
@click_run_options
def run(debug, tasks, exclude, profile, full_load, start_dt, end_dt):
tasks = [i for t in tasks for i in t.strip().split(" ")]
exclude = [i for t in exclude for i in t.strip().split(" ")]
app = CliApp("run", debug, tasks, exclude, profile, full_load, start_dt, end_dt)
app.run()
if any([t.status == TaskStatus.FAILED for _, t in app.tasks.items()]):
sys.exit(-1)
else:
sys.exit()
@cli.command(help="Generate DAG image.")
@click_debug
@click_filter
def dag_image(debug, tasks, exclude):
def handle_error():
print("Errors detected in project. Run `sayn compile` to see the errors")
sys.exit()
result = read_project()
if result.is_err:
handle_error()
else:
project = result.value
result = read_groups(project.groups)
if result.is_err:
handle_error()
else:
groups = result.value
result = get_tasks_dict(project.presets, groups)
if result.is_err:
handle_error()
else:
tasks_dict = result.value
dag = {
task["name"]: [p for p in task.get("parents", list())]
for task in tasks_dict.values()
}
plot_dag(dag, "images", "dag")
print("Dag image created in `images/dag.png`")
```
#### File: sayn/database/bigquery.py
```python
from copy import deepcopy
import csv
import datetime
import decimal
import io
import json
from typing import List, Optional
from pydantic import validator
from sqlalchemy import create_engine
from sqlalchemy.sql import sqltypes
from . import Database, DDL
db_parameters = ["project", "credentials_path", "location", "dataset"]
class BigqueryDDL(DDL):
partition: Optional[str]
cluster: Optional[List[str]]
@validator("cluster")
def validate_cluster(cls, v, values):
if len(values.get("columns")) > 0:
missing_columns = set(v) - set([c.name for c in values.get("columns")])
if len(missing_columns) > 0:
raise ValueError(
f'Cluster contains columns not specified in the ddl: "{missing_columns}"'
)
return v
def get_ddl(self):
return {
"columns": [c.dict() for c in self.columns],
"indexes": {},
"permissions": self.permissions,
"partition": self.partition,
"cluster": self.cluster,
"primary_key": list(),
}
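# Illustrative example (hypothetical task ddl, not taken from the sayn docs): given
# columns=[{"name": "dt"}, {"name": "id"}], partition="DATE(dt)", cluster=["id"],
# validate_cluster passes because "id" is a declared column, and get_ddl() returns
# those columns plus the partition/cluster values with empty indexes and primary_key.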
class Bigquery(Database):
ddl_validation_class = BigqueryDDL
project = None
dataset = None
def feature(self, feature):
return feature in (
"CAN REPLACE TABLE",
"CAN REPLACE VIEW",
"CANNOT CHANGE SCHEMA",
)
def create_engine(self, settings):
settings = deepcopy(settings)
self.project = settings.pop("project")
url = f"bigquery://{self.project}"
if "dataset" in settings:
self.dataset = settings.pop("dataset")
url += "/" + self.dataset
return create_engine(url, **settings)
def _py2sqa(self, from_type):
python_types = {
int: sqltypes.Integer,
str: sqltypes.String,
float: sqltypes.Float,
decimal.Decimal: sqltypes.Numeric,
datetime.datetime: sqltypes.TIMESTAMP,
bytes: sqltypes.LargeBinary,
bool: sqltypes.Boolean,
datetime.date: sqltypes.Date,
datetime.time: sqltypes.Time,
datetime.timedelta: sqltypes.Interval,
list: sqltypes.ARRAY,
dict: sqltypes.JSON,
}
if from_type not in python_types:
raise ValueError(f'Type not supported "{from_type}"')
else:
return python_types[from_type]().compile(dialect=self.engine.dialect)
def _load_data_batch(self, table, data, schema):
full_table_name = (
f"{self.project}.{self.dataset if schema is None else schema}.{table}"
)
buffer = io.StringIO()
writer = csv.DictWriter(buffer, fieldnames=data[0].keys())
writer.writeheader()
writer.writerows(data)
buffer = io.BytesIO(buffer.getvalue().encode("utf-8"))
from google.cloud import bigquery
# job_config = bigquery.LoadJobConfig(
# source_format=bigquery.SourceFormat.CSV,
# skip_leading_rows=1,
# # autodect=True,
# )
# client = self.engine.raw_connection()._client
# job = client.load_table_from_file(
# buffer, full_table_name, job_config=job_config
# )
# job.result()
job_config = bigquery.LoadJobConfig(
source_format=bigquery.SourceFormat.NEWLINE_DELIMITED_JSON,
)
client = self.engine.raw_connection()._client
def default(o):
if isinstance(o, (datetime.date, datetime.datetime)):
return o.isoformat()
elif isinstance(o, decimal.Decimal):
return f"{o}"
else:
raise ValueError("Unsuported type")
data_str = "\n".join([json.dumps(d, default=default) for d in data])
job = client.load_table_from_file(
io.StringIO(data_str), full_table_name, job_config=job_config
)
job.result()
def move_table(self, src_table, dst_table, src_schema=None, dst_schema=None, **ddl):
full_src_table = (
f"{src_schema + '.' if src_schema is not None else ''}{src_table}"
)
select = f"SELECT * FROM {full_src_table}"
create_or_replace = self.create_table(
dst_table, dst_schema, select=select, replace=True, **ddl
)
return "\n\n".join((create_or_replace, f"DROP TABLE {full_src_table}"))
```
#### File: sayn/tasks/sql.py
```python
from pathlib import Path
from pydantic import BaseModel, FilePath, validator, Extra
from typing import Optional
from ..core.errors import Ok, Err, Exc
from ..database import Database
from . import Task
class Config(BaseModel):
sql_folder: Path
file_name: FilePath
db: Optional[str]
class Config:
extra = Extra.forbid
@validator("file_name", pre=True)
def file_name_plus_folder(cls, v, values):
return Path(values["sql_folder"], v)
class SqlTask(Task):
@property
def target_db(self):
return self.connections[self._target_db]
def use_db_object(
self, name, schema=None, tmp_schema=None, db=None, request_tmp=True
):
if db is None:
target_db = self.target_db
elif isinstance(db, str):
target_db = self.connections[db]
elif isinstance(db, Database):
target_db = db
else:
return Err("use_db_object", "wrong_connection_type")
target_db._request_object(
name,
schema=schema,
tmp_schema=tmp_schema,
task_name=self.name,
request_tmp=request_tmp,
)
def setup(self, **config):
conn_names_list = [
n for n, c in self.connections.items() if isinstance(c, Database)
]
# set the target db for execution
if config.get("db") is not None:
if config["db"] not in conn_names_list:
return Err("task_definition", "db_not_in_settings", db=config["db"])
self._target_db = config["db"]
else:
self._target_db = self._default_db
try:
self.config = Config(
sql_folder=self.run_arguments["folders"]["sql"], **config
)
except Exception as e:
return Exc(e)
result = self.compile_obj(self.config.file_name)
if result.is_err:
return result
else:
self.sql_query = result.value
if self.run_arguments["command"] == "run":
self.set_run_steps(["Write Query", "Execute Query"])
else:
self.set_run_steps(["Write Query"])
return Ok()
def compile(self):
with self.step("Write Query"):
result = self.write_compilation_output(self.sql_query)
if result.is_err:
return result
return Ok()
def run(self):
with self.step("Write Query"):
result = self.write_compilation_output(self.sql_query)
if result.is_err:
return result
with self.step("Execute Query"):
try:
self.target_db.execute(self.sql_query)
except Exception as e:
return Exc(e)
return Ok()
```
#### File: sayn/tests/test_ddl.py
```python
from sayn.database.creator import create as create_db
def validate_ddl(ddl):
db = create_db("test", "test", {"type": "sqlite", "database": ":memory:"})
return db._validate_ddl(ddl)
def test_ddl_empty():
result = validate_ddl({})
assert result.is_ok and result.value == {
"columns": [],
"indexes": {},
"primary_key": [],
"permissions": {},
}
def test_ddl_cols01():
result = validate_ddl({"columns": ["col1"]})
assert result.is_ok and result.value == {
"columns": [
{
"name": "col1",
"type": None,
"primary": False,
"not_null": False,
"unique": False,
"dst_name": None,
},
],
"indexes": {},
"primary_key": [],
"permissions": {},
}
def test_ddl_cols02():
result = validate_ddl({"columns": ["col1", {"name": "col2"}]})
assert result.is_ok and result.value == {
"columns": [
{
"name": "col1",
"type": None,
"primary": False,
"not_null": False,
"unique": False,
"dst_name": None,
},
{
"name": "col2",
"type": None,
"primary": False,
"not_null": False,
"unique": False,
"dst_name": None,
},
],
"indexes": {},
"primary_key": [],
"permissions": {},
}
def test_ddl_cols03():
result = validate_ddl({"columns": ["col1", {"name": "col2", "type": "BIGINT"}]})
assert result.is_ok and result.value == {
"columns": [
{
"name": "col1",
"type": None,
"primary": False,
"not_null": False,
"unique": False,
"dst_name": None,
},
{
"name": "col2",
"type": "BIGINT",
"primary": False,
"not_null": False,
"unique": False,
"dst_name": None,
},
],
"indexes": {},
"primary_key": [],
"permissions": {},
}
def test_ddl_cols04():
result = validate_ddl(
{"columns": ["col1", {"name": "col2", "type": "BIGINT", "primary": True}]}
)
assert result.is_ok and result.value == {
"columns": [
{
"name": "col1",
"type": None,
"primary": False,
"not_null": False,
"unique": False,
"dst_name": None,
},
{
"name": "col2",
"type": "BIGINT",
"primary": True,
"not_null": False,
"unique": False,
"dst_name": None,
},
],
"indexes": {},
"primary_key": ["col2"],
"permissions": {},
}
def test_ddl_cols05():
result = validate_ddl(
{
"columns": [
{"name": "col1", "not_null": True},
{"name": "col2", "type": "BIGINT", "primary": True},
]
}
)
assert result.is_ok and result.value == {
"columns": [
{
"name": "col1",
"type": None,
"primary": False,
"not_null": True,
"unique": False,
"dst_name": None,
},
{
"name": "col2",
"type": "BIGINT",
"primary": True,
"not_null": False,
"unique": False,
"dst_name": None,
},
],
"indexes": {},
"primary_key": ["col2"],
"permissions": {},
}
def test_ddl_cols06():
result = validate_ddl(
{
"columns": [
{"name": "col1", "not_null": True, "primary": True},
{"name": "col2", "type": "BIGINT", "primary": True},
]
}
)
assert result.is_ok and result.value == {
"columns": [
{
"name": "col1",
"type": None,
"primary": True,
"not_null": True,
"unique": False,
"dst_name": None,
},
{
"name": "col2",
"type": "BIGINT",
"primary": True,
"not_null": False,
"unique": False,
"dst_name": None,
},
],
"indexes": {},
"primary_key": ["col1", "col2"],
"permissions": {},
}
def test_ddl_cols07():
result = validate_ddl({"columns": ["dupe_col", "dupe_col"]})
assert result.is_err
def test_ddl_cols08():
result = validate_ddl({"columns": ["dupe_col", {"name": "dupe_col"}]})
assert result.is_err
def test_ddl_idx01():
result = validate_ddl({"indexes": {"idx1": {"columns": ["col1", "col2"]}}})
assert result.is_ok and result.value == {
"columns": [],
"indexes": {"idx1": {"columns": ["col1", "col2"]}},
"primary_key": [],
"permissions": {},
}
def test_ddl_idx02():
result = validate_ddl(
{
"indexes": {
"idx1": {"columns": ["col1", "col2"]},
"idx2": {"columns": ["col1", "col2"]},
}
}
)
assert result.is_ok and result.value == {
"columns": [],
"indexes": {
"idx1": {"columns": ["col1", "col2"]},
"idx2": {"columns": ["col1", "col2"]},
},
"primary_key": [],
"permissions": {},
}
def test_ddl_pk01():
result = validate_ddl(
{
"columns": ["col1", "col2"],
"indexes": {"primary_key": {"columns": ["col1", "col2"]}},
}
)
assert result.is_ok and result.value == {
"columns": [
{
"name": "col1",
"type": None,
"primary": False,
"not_null": False,
"unique": False,
"dst_name": None,
},
{
"name": "col2",
"type": None,
"primary": False,
"not_null": False,
"unique": False,
"dst_name": None,
},
],
"indexes": {},
"primary_key": ["col1", "col2"],
"permissions": {},
}
def test_ddl_pk02():
result = validate_ddl(
{
"columns": [
{"name": "col1", "primary": True},
{"name": "col2", "primary": True},
],
}
)
assert result.is_ok and result.value == {
"columns": [
{
"name": "col1",
"type": None,
"primary": True,
"not_null": False,
"unique": False,
"dst_name": None,
},
{
"name": "col2",
"type": None,
"primary": True,
"not_null": False,
"unique": False,
"dst_name": None,
},
],
"indexes": {},
"primary_key": ["col1", "col2"],
"permissions": {},
}
def test_ddl_pk03():
result = validate_ddl({"indexes": {"primary_key": {"columns": ["col1", "col2"]}}})
assert result.is_ok and result.value == {
"columns": [],
"indexes": {},
"primary_key": ["col1", "col2"],
"permissions": {},
}
def test_ddl_pk04():
result = validate_ddl({"indexes": {"primary_key": {"columns": []}}})
assert result.is_err
def test_ddl_pk05():
result = validate_ddl(
{"columns": ["col1"], "indexes": {"primary_key": {"columns": ["col1", "col2"]}}}
)
assert result.is_err
``` |
{
"source": "1740415303/tturtle-",
"score": 3
} |
#### File: 1740415303/tturtle-/ali_sms.py
```python
from aliyunsdkcore.client import AcsClient
from aliyunsdkcore.request import CommonRequest
client = AcsClient('', '', 'cn-hangzhou')
def send_sms(phonenumber,code):
request = CommonRequest()
request.set_accept_format('json')
request.set_domain('dysmsapi.aliyuncs.com')
request.set_method('POST')
request.set_protocol_type('https') # https | http
request.set_version('2017-05-25')
request.set_action_name('SendSms')
request.add_query_param('RegionId', "cn-hangzhou")
request.add_query_param('PhoneNumbers', phonenumber)
request.add_query_param('SignName', "常诚大麦网")
request.add_query_param('TemplateCode', "SMS_206450329")
request.add_query_param('TemplateParam', "{\"code\":\"%s\"}" % code)
response = client.do_action(request)
# python2: print(response)
print(str(response, encoding='utf-8'))
``` |
{
"source": "17451k/eventb-to-txt",
"score": 2
} |
#### File: eventb-to-txt/eventb_to_txt/__main__.py
```python
import argparse
import os
import shutil
import sys
import tempfile
import zipfile
from eventb_to_txt.model import Model
def main(args=sys.argv[1:]):
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--out', help='PATH to the output directory',
dest='out_path', metavar='PATH', default=os.getcwd())
parser.add_argument("-m", "--merge", help="merge all generated txt files into a single txt file",
action="store_true")
parser.add_argument(help="path to the Event-B model directory or zipfile",
dest="in_path", nargs=argparse.OPTIONAL, default=os.getcwd())
args = parser.parse_args(args)
if not os.path.exists(args.in_path):
sys.exit('{!r} path does not exist'.format(args.in_path))
try:
os.makedirs(args.out_path, exist_ok=True)
except (OSError, PermissionError, TypeError, AttributeError) as e:
sys.exit("{}: Can't create output directory {!r}".format(type(e).__name__, args.out_path))
is_zipfile = False
if zipfile.is_zipfile(args.in_path):
is_zipfile = True
tmp_in = tempfile.mkdtemp()
with zipfile.ZipFile(args.in_path) as zip_f:
zip_f.extractall(tmp_in)
args.in_path = tmp_in
try:
for model_path in Model.find_model_paths(args.in_path):
m = Model(model_path)
m.print(args.out_path, args.merge)
except RuntimeError as e:
raise SystemExit(e)
except (OSError, PermissionError) as e:
raise SystemExit("{}: {}".format(type(e).__name__, e))
if is_zipfile:
shutil.rmtree(args.in_path)
print('Txt files were successfully generated')
if __name__ == '__main__':
main(sys.argv[1:])
``` |
{
"source": "1748276037/Django",
"score": 2
} |
#### File: bookmanager02/book/views.py
```python
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
return HttpResponse('ok')
from book.models import BookInfo,PeopleInfo
# Create data
book = BookInfo(
name= 'itcast',
pub_date='2010-4-5'
)
book.save()
# Alternative way
book = BookInfo.objects.create(
name = 'cast',
)
# Update
person = PeopleInfo.objects.get(name = '黄药师')
person.name = 'itcast'
person.save()
# Alternative way
PeopleInfo.objects.filter(name='itcast').update(name='船只博客')
# Delete
# Option 1: hard delete
book = BookInfo.objects.get(id=6)
book.delete()
# Alternative way
BookInfo.objects.filter(id=5).delete()
# Queries
# filter: returns all matching results
# exclude: returns everything that does not match the condition
# get: returns a single result
# all: returns every record as a queryset -- effectively a list
# count: returns the number of results
BookInfo.objects.get(id=1).name
BookInfo.objects.all()
BookInfo.objects.all().count()
# Exact-match lookup
BookInfo.objects.filter(id=1)
# Fuzzy lookup: contains
BookInfo.objects.filter(name__contains='传')
# startswith / endswith: value begins or ends with the given string.
BookInfo.objects.filter(name__endswith='部')
BookInfo.objects.filter(name__startswith='天')
# in: whether the value falls within the given set.
BookInfo.objects.filter(id__in=[1,3,5])
# Comparison lookups
# gt: greater than
# gte: greater than or equal
# lt: less than
# lte: less than or equal
BookInfo.objects.filter(id__gt=1)
# year, month, day, week_day, hour, minute, second: operate on datetime attributes.
BookInfo.objects.filter(pub_date__year=1980)
from django.db.models import F
from django.db.models import Q
# F is used to compare one field against another
BookInfo.objects.filter(readcount__gt=F('commentcount'))
# Q expresses boolean logic, like the AND keyword in a SQL WHERE clause.
# Books with readcount greater than 20 AND id less than 3
BookInfo.objects.filter(readcount__gt=20,id__lt=3)
# Books with readcount greater than 20 OR id less than 3; OR can only be expressed with Q objects
BookInfo.objects.filter(Q(readcount__gt=20)|Q(id__lt=3))
# The ~ operator in front of a Q object negates the condition (NOT)
BookInfo.objects.filter(~Q(id=3))
# One-to-many access: <one-side instance>.<lowercase many-side model name>_set, e.g.:
book = BookInfo.objects.get(id=1)
book.peopleinfo_set.all()
# Many-to-one access syntax:
# <many-side instance>.<relation attribute on the many-side model>, e.g.:
person=PeopleInfo.objects.get(id=1)
person.book
# Books that have a character named "郭靖" (Guo Jing)
book = BookInfo.objects.filter(peopleinfo__name='郭靖')
# Book information for the character with id 1
person=PeopleInfo.objects.get(id=1)
person.book.name
# or
book = BookInfo.objects.filter(peopleinfo__id=1)
``` |
{
"source": "1749740778/AdaCS",
"score": 2
} |
#### File: 1749740778/AdaCS/main.py
```python
from __future__ import absolute_import
import os
import argparse
import configparser
import logging
import re
import torch
from learning.codesearcher import CodeSearcher
from preprocess.lex.token import Tokenizer
from preprocess.lex.word_sim import WordSim
from preprocess.prepare import prepare
from preprocess.dataset import CodeSearchDataset, MatchingMatrix
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, format="%(message)s")
def parse_args():
parser = argparse.ArgumentParser("train and test code search model")
parser.add_argument("-p", "--prepare", action="store_true", default=False, help="Prepare dataset first.")
parser.add_argument("--mode", choices=["train", "eval", "debug", "statistics","predict"],
default="predict",
help="The mode to run. Use `train` mode to train a model;"
"Use `eval` mode to evaluate the model;"
"Use `predict` mode to make predictions")
parser.add_argument("-v", "--verbose", default=True, help="Print verbose info.")
option = parser.parse_args()
return option
def get_config():
basedir = os.path.dirname(__file__)
config = configparser.ConfigParser()
config.read(os.path.join(basedir, './conf/config.ini'))
config.set('data', 'wkdir', basedir)
return config
def main():
conf = get_config()
option = parse_args()
if option.prepare:
logger.info("preparing dataset...")
#prepare(conf, conf['data']['train_code_path'], conf['data']['train_nl_path'], conf['data']['train_db_path'], train_mode=True)
#prepare(conf, conf['data']['valid_code_path'], conf['data']['valid_nl_path'], conf['data']['valid_db_path'], train_mode=False, train_db_path=conf['data']['train_db_path'])
prepare(conf, conf['data']['test_code_path'], conf['data']['test_nl_path'], conf['data']['test_db_path'], train_mode=False, train_db_path=conf['data']['train_db_path'])
elif option.mode == 'train':
logger.info("start training model...")
searcher = CodeSearcher(conf)
searcher.train()
elif option.mode == 'eval':
num = input('Please input the epoch of the model to be loaded: ')
searcher = CodeSearcher(conf)
searcher.load_model(int(num))
print('load model successfully.')
searcher.eval2()
elif option.mode == 'predict':
num = input('Please input the epoch of the model to be loaded: ')
path = input('Please input the save path of model outputs: ')
searcher = CodeSearcher(conf)
searcher.load_model(int(num))
print('load model successfully.')
searcher.predict(path)
elif option.mode == 'statistics':
s = input('Please input the relative data path (e.g. "domain/test"):')
paths = s.strip().split(';')
data = []
for x in paths:
base_path = os.path.join(conf['data']['wkdir'], './data/'+x)
data += Tokenizer().parse(base_path + '.nl', base_path + '.code')
data = [item for item in data if len(item[0]) and len(item[0])<=int(conf['data']['query_max_len']) and len(item[1])<=int(conf['data']['code_max_len'])]
print('|utterances| = ' + str(len(data)))
c = 0
for item in data:
c += len(item[0])
print('|natural language tokens| = ' + str(c))
c = 0
for item in data:
c += len(item[1])
print('|code tokens| = ' + str(c))
c = set()
for item in data:
for w in item[0]:
c.add(w)
print('|unique natural language tokens| = ' + str(len(c)))
for item in data:
for w in item[1]:
c.add(w)
print('|unique code tokens| = ' + str(len(c)))
nlMap = [0 for _ in range(int(conf['data']['query_max_len'])+1)]
codeMap = [0 for _ in range(int(int(conf['data']['code_max_len'])/10)+1)]
for item in data:
nlMap[len(item[0])] += 1
codeMap[int(len(item[1])/10)] += 1
print(nlMap)
print(codeMap)
elif option.mode == 'debug':
line = input('Please input two item ids, seperated by space: ')
eles = line.strip().split()
data = Tokenizer().parse(os.path.join(conf['data']['wkdir'], conf['data']['test_nl_path']),
os.path.join(conf['data']['wkdir'], conf['data']['test_code_path']))
fasttext_corpus_path = os.path.join(conf['data']['wkdir'], re.sub(r'\.db$', '.txt', conf['data']['test_db_path']))
core_term_path = os.path.join(conf['data']['wkdir'], 'conf/core_terms.txt')
word_sim = WordSim(core_term_path, pretrain=(conf['model']['pretrained_wordvec'] == str(True)), fasttext_corpus_path=fasttext_corpus_path)
for a in range(len(data)):
if data[a][2] == eles[0]:
for b in range(len(data)):
if data[b][2] == eles[1]:
matrix = MatchingMatrix(data[a][0], data[b][1], data[a][2], word_sim, conf['data']['query_max_len'])
for i in range(len(matrix)):
for j in range(len(matrix[0])):
print('%5.2f' % matrix[i][j], end=', ')
print()
break
break
if __name__ == '__main__':
main()
``` |
{
"source": "1751200/Xlab-k8s-gpu",
"score": 3
} |
#### File: asset/matrix/number.py
```python
import numpy as np
import pandas as pd
import csv
import time
global label_list #label_list is a global variable
#kdd99 data preprocessing function
def preHandel_data():
source_file='duplicated kddcup.newtestdata_10_percent_unlabeled.csv'
handled_file='kddcup.newtestdata_10_percent_corrected.csv'
data_file=open(handled_file,'w',newline='') #newline='' keeps the output file free of extra blank lines on Python 3.x
with open(source_file,'r') as data_source:
csv_reader=csv.reader(data_source)
csv_writer=csv.writer(data_file)
count=0 #row counter, initialised to 0
for row in csv_reader:
temp_line=np.array(row) #store the current row in an array
temp_line[1]=handleProtocol(row) #map the 3 protocol types to numeric ids
temp_line[2]=handleService(row) #map the 70 network service types to numeric ids
temp_line[3]=handleFlag(row) #map the 11 connection flags to numeric ids
#temp_line[41]=handleLabel(row) #map the 23 attack labels to numeric ids
csv_writer.writerow(temp_line)
count+=1
#print the converted fields for each row
print(count,'status:',temp_line[1],temp_line[2],temp_line[3])
data_file.close()
#convert the symbolic (non-numeric) fields into numeric identifiers
def find_index(x,y):
return [i for i in range(len(y)) if y[i]==x]
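# e.g. find_index('udp', ['tcp', 'udp', 'icmp']) returns [1]; the handle* helpers
# below take the first (and only) match as the numeric id for that field.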
#map the 3 protocol types to numeric identifiers
def handleProtocol(input):
protocol_list=['tcp','udp','icmp']
if input[1] in protocol_list:
return find_index(input[1],protocol_list)[0]
#map the 70 network service types to numeric identifiers
def handleService(input):
service_list=['aol','auth','bgp','courier','csnet_ns','ctf','daytime','discard','domain','domain_u',
'echo','eco_i','ecr_i','efs','exec','finger','ftp','ftp_data','gopher','harvest','hostnames',
'http','http_2784','http_443','http_8001','imap4','IRC','iso_tsap','klogin','kshell','ldap',
'link','login','mtp','name','netbios_dgm','netbios_ns','netbios_ssn','netstat','nnsp','nntp',
'ntp_u','other','pm_dump','pop_2','pop_3','printer','private','red_i','remote_job','rje','shell',
'smtp','sql_net','ssh','sunrpc','supdup','systat','telnet','tftp_u','tim_i','time','urh_i','urp_i',
'uucp','uucp_path','vmnet','whois','X11','Z39_50']
if input[2] in service_list:
return find_index(input[2],service_list)[0]
#map the 11 connection flag types to numeric identifiers
def handleFlag(input):
flag_list=['OTH','REJ','RSTO','RSTOS0','RSTR','S0','S1','S2','S3','SF','SH']
if input[3] in flag_list:
return find_index(input[3],flag_list)[0]
#map the attack labels to numeric identifiers (22 attack types appear in the training set; the remaining 17 only appear in the test set)
def handleLabel(input):
#label_list=['normal.', 'buffer_overflow.', 'loadmodule.', 'perl.', 'neptune.', 'smurf.',
# 'guess_passwd.', 'pod.', 'teardrop.', 'portsweep.', 'ipsweep.', 'land.', 'ftp_write.',
# 'back.', 'imap.', 'satan.', 'phf.', 'nmap.', 'multihop.', 'warezmaster.', 'warezclient.',
# 'spy.', 'rootkit.']
global label_list #use and modify the global variable inside the function
if input[41] in label_list:
return find_index(input[41],label_list)[0]
else:
label_list.append(input[41])
return find_index(input[41],label_list)[0]
if __name__=='__main__':
start_time=time.perf_counter() #time.clock() was removed in Python 3.8
global label_list #declare the global list and initialise it as empty
label_list=[]
preHandel_data()
end_time=time.perf_counter()
print("Running time:",(end_time-start_time)) #print the elapsed time
``` |
{
"source": "1753024/KunCTF",
"score": 4
} |
#### File: KunCTF/otp/crypt.py
```python
import string
cipher = 'OPOWVWROGGIOMJ'
key = 'THISISAKEY'
def ConvertToList(text):
listchar=[]
for char in text.lower():
listchar.append(ord(char)-96)
return listchar
def BalanceTheLenght(cipher,key):
if(len(cipher)>len(key)):
while(len(cipher)>len(key)):
for char in key:
if(len(cipher)<=len(key)):break
key+=char
else:
while(len(cipher)<len(key)):
for char in cipher:
if(len(cipher)>=len(key)):break
cipher+=char
return cipher,key
def ConvertToString(numberList):
text=""
for num in numberList:
text+=letter[int(num)-1]
return text.upper()
cipher,key=BalanceTheLenght(cipher,key)
cipherList = ConvertToList(cipher)
keyList= ConvertToList(key)
answer=[]
letter = string.ascii_lowercase
for i in range(len(cipherList)):
numChar=cipherList[i]-keyList[i]+1
while(numChar>26):
numChar-=26
answer.append(numChar)
print(ConvertToString(answer))
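# Worked example (assuming the intended scheme is Vigenere-style subtraction of the
# repeated key): BalanceTheLenght extends 'THISISAKEY' to 'THISISAKEYTHIS' to match
# the 14-letter cipher; for the first letter, 'O' (15) - 'T' (20) + 1 = -4, the
# while-loop only reduces values above 26, so letter[-4 - 1] picks 'v' via Python's
# negative indexing -- equivalent to (15 - 20) mod 26 landing on the 22nd letter.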
``` |
{
"source": "1753024/secret-site",
"score": 3
} |
#### File: 1753024/secret-site/app.py
```python
from flask import Flask, render_template, redirect, url_for, request, session
from functools import wraps
app = Flask(__name__)
app.secret_key = "S3cretAsFUCK"
@app.errorhandler(404)
def invalid_route(e):
return render_template("404.html")
def login_required(f):
@wraps(f)
def wrap(*args,**kwargs):
if('log_in' in session):
return f(*args, **kwargs)
else:
return redirect(url_for('home'))
return wrap
@app.route('/', methods=['GET','POST'])
def home():
error = None
if (request.method == 'POST'):
if(request.form['username'] != 'admin' or request.form['password']!= '<PASSWORD>'):
error='Invalid credential.'
else:
session['log_in'] = True
return redirect(url_for('noice'))
return render_template("index.html",error=error)
@app.route('/noice')
@login_required
def noice():
return render_template('noice.html')
@app.route('/logout')
def logout():
session.pop('log_in',None)
return redirect(url_for('home'))
if __name__=='__main__':
app.run(debug=False)
``` |